import torch
from torchaudio_unittest import common_utils
from .kaldi_compatibility_impl import Kaldi
class TestKaldiFloat32(Kaldi, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat64(Kaldi, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .librosa_compatibility_test_impl import TransformsTestBase
@skipIfNoCuda
class TestTransforms(TransformsTestBase, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .librosa_compatibility_test_impl import TransformsTestBase
class TestTransforms(TransformsTestBase, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
"""Generate opus file for testing load functions"""
import argparse
import subprocess
import scipy.io.wavfile
import torch
def _parse_args():
parser = argparse.ArgumentParser(
description='Generate opus files for test'
)
parser.add_argument('--num-channels', required=True, type=int)
parser.add_argument('--compression-level', required=True, type=int, choices=list(range(11)))
parser.add_argument('--bitrate', default='96k')
return parser.parse_args()
def convert_to_opus(
src_path, dst_path,
*, bitrate, compression_level):
"""Convert audio file with `ffmpeg` command."""
command = ['ffmpeg', '-y', '-i', src_path, '-c:a', 'libopus', '-b:a', bitrate]
if compression_level is not None:
command += ['-compression_level', str(compression_level)]
command += [dst_path]
print(' '.join(command))
subprocess.run(command, check=True)
def _generate(num_channels, compression_level, bitrate):
org_path = 'original.wav'
ops_path = f'{bitrate}_{compression_level}_{num_channels}ch.opus'
# Note: ffmpeg forces a 48 kHz sample rate for opus https://stackoverflow.com/a/39186779
# 1. generate original wav
data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([num_channels, 1]).t()
scipy.io.wavfile.write(org_path, 48000, data.numpy())
# 2. convert to opus
convert_to_opus(org_path, ops_path, bitrate=bitrate, compression_level=compression_level)
def _main():
args = _parse_args()
_generate(args.num_channels, args.compression_level, args.bitrate)
if __name__ == '__main__':
_main()
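# Example invocation (hypothetical script name; ffmpeg must be built with libopus):
#   python generate_opus_test_file.py --num-channels 2 --compression-level 10
# With the default --bitrate of 96k this writes 'original.wav' and
# '96k_10_2ch.opus' into the current directory.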
|
#!/usr/bin/env python3
"""Generate the conf JSON from fairseq pretrained weight file, that is consumed by unit tests
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/wav2vec
2. Download the dict from https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt
and put it in the same directory as parameter files.
3. Run this script and save the resulting JSON configuration in the assets directory.
Example:
```
# Pretrained
python generate_wav2vec2_model_config.py \
--model-file wav2vec_small.pt \
> wav2vec_small.json
python generate_wav2vec2_model_config.py \
--model-file libri960_big.pt \
> libri960_big.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec_vox_new.pt \
> wav2vec_vox_new.json
# Fine-tuned
python generate_wav2vec2_model_config.py \
--model-file wav2vec_small_960h.pt \
> wav2vec_small_960h.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec_big_960h.pt \
> wav2vec_large_960h.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec2_vox_960h_new.pt \
> wav2vec_large_lv60_960h.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec_vox_960h_pl.pt \
> wav2vec_large_lv60_self_960h.json
```
"""
import os
import json
import argparse
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--model-file',
required=True,
help=(
'A pt file from '
'https://github.com/pytorch/fairseq/tree/main/examples/wav2vec'
)
)
parser.add_argument(
'--dict-dir',
help=(
'Directory where the `dict.ltr.txt` file is found. '
'Default: the directory of the given model.'
)
)
args = parser.parse_args()
if args.dict_dir is None:
args.dict_dir = os.path.dirname(args.model_file)
return args
def _to_json(conf):
import yaml
from omegaconf import OmegaConf
return yaml.safe_load(OmegaConf.to_yaml(conf))
def _load(model_file, dict_dir):
import fairseq
overrides = {'data': dict_dir}
_, args, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[model_file], arg_overrides=overrides
)
return _to_json(args['model'])
def _main():
args = _parse_args()
conf = _load(args.model_file, args.dict_dir)
if conf['_name'] == 'wav2vec_ctc':
del conf['data']
del conf['w2v_args']['task']['data']
conf['w2v_args'] = {
key: conf['w2v_args'][key] for key in ['model', 'task']
}
print(json.dumps(conf, indent=4, sort_keys=True))
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
"""Generate the conf JSONs from fairseq pretrained weight file, consumed by unit tests
Note:
The current configuration files were generated on fairseq e47a4c84
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/hubert
2. Run this script and save the resulting JSON configuration in the assets directory.
Example:
```
python generate_hubert_model_config.py \
--model-file hubert_base_ls960.pt \
> hubert_base_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k.pt \
> hubert_large_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k_finetune_ls960.pt \
> hubert_large_ll60k_finetune_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k.pt \
> hubert_xlarge_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k_finetune_ls960.pt \
> hubert_xlarge_ll60k_finetune_ls960.json
```
"""
import json
import argparse
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--model-file',
required=True,
help=(
'A pt file from '
'https://github.com/pytorch/fairseq/tree/main/examples/hubert'
)
)
return parser.parse_args()
def _load(model_file):
import fairseq
from omegaconf import OmegaConf
models, cfg, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
model = models[0]
cfg = OmegaConf.to_container(cfg)
return model, cfg
def _main():
args = _parse_args()
model, cfg = _load(args.model_file)
if model.__class__.__name__ == 'HubertModel':
cfg['task']['data'] = '/foo/bar'
cfg['task']['label_dir'] = None
conf = {
'_name': 'hubert',
'model': cfg['model'],
'task': cfg['task'],
'num_classes': model.num_classes,
}
elif model.__class__.__name__ == 'HubertCtc':
conf = cfg['model']
del conf['w2v_path']
keep = ['_name', 'task', 'model']
for key in list(k for k in conf['w2v_args'] if k not in keep):
del conf['w2v_args'][key]
conf['data'] = '/foo/bar/'
conf['w2v_args']['task']['data'] = '/foo/bar'
conf['w2v_args']['task']['labels'] = []
conf['w2v_args']['task']['label_dir'] = '/foo/bar'
print(json.dumps(conf, indent=4, sort_keys=True))
if __name__ == '__main__':
_main()
|
import os
import json
from transformers import Wav2Vec2Model
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
def _main():
keys = [
# pretrained
"facebook/wav2vec2-base",
"facebook/wav2vec2-large",
"facebook/wav2vec2-large-lv60",
"facebook/wav2vec2-base-10k-voxpopuli",
"facebook/wav2vec2-large-xlsr-53",
# finetuned
"facebook/wav2vec2-base-960h",
"facebook/wav2vec2-large-960h",
"facebook/wav2vec2-large-960h-lv60",
"facebook/wav2vec2-large-960h-lv60-self",
"facebook/wav2vec2-large-xlsr-53-german",
]
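# Note: from_pretrained fetches each checkpoint from the Hugging Face hub on first
# use (network access required); only the JSON config, not the weights, is written
# below. Since the keys contain a slash, the output paths fall under a matching
# subdirectory (e.g. 'facebook/') of _THIS_DIR.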
for key in keys:
path = os.path.join(_THIS_DIR, f'{key}.json')
print('Generating ', path)
cfg = Wav2Vec2Model.from_pretrained(key).config
cfg = json.loads(cfg.to_json_string())
del cfg['_name_or_path']
with open(path, 'w') as file_:
file_.write(json.dumps(cfg, indent=4, sort_keys=True))
file_.write('\n')
if __name__ == '__main__':
_main()
|
from typing import Callable, Tuple
from functools import partial
import torch
from parameterized import parameterized
from torch import Tensor
import torchaudio.functional as F
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_whitenoise,
rnnt_utils,
)
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: Callable[..., Tensor],
inputs: Tuple[torch.Tensor],
*,
enable_all_grad: bool = True,
):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=self.dtype, device=self.device)
if enable_all_grad:
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_)
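# For reference, a minimal standalone gradcheck sketch (double precision, because
# gradcheck compares analytical gradients against finite differences):
#   x = torch.randn(3, dtype=torch.float64, requires_grad=True)
#   assert gradcheck(torch.sin, (x,))
#   assert gradgradcheck(torch.sin, (x,))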
def test_lfilter_x(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
x.requires_grad = True
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_a(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
a.requires_grad = True
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_b(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
b.requires_grad = True
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_all_inputs(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
self.assert_grad(F.lfilter, (x, a, b))
def test_lfilter_filterbanks(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=3)
a = torch.tensor([[0.7, 0.2, 0.6],
[0.8, 0.2, 0.9]])
b = torch.tensor([[0.4, 0.2, 0.9],
[0.7, 0.2, 0.6]])
self.assert_grad(partial(F.lfilter, batching=False), (x, a, b))
def test_lfilter_batching(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([[0.7, 0.2, 0.6],
[0.8, 0.2, 0.9]])
b = torch.tensor([[0.4, 0.2, 0.9],
[0.7, 0.2, 0.6]])
self.assert_grad(F.lfilter, (x, a, b))
def test_filtfilt_a(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
a.requires_grad = True
self.assert_grad(F.filtfilt, (x, a, b), enable_all_grad=False)
def test_filtfilt_b(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
b.requires_grad = True
self.assert_grad(F.filtfilt, (x, a, b), enable_all_grad=False)
def test_filtfilt_all_inputs(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
self.assert_grad(F.filtfilt, (x, a, b))
def test_filtfilt_batching(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([[0.7, 0.2, 0.6],
[0.8, 0.2, 0.9]])
b = torch.tensor([[0.4, 0.2, 0.9],
[0.7, 0.2, 0.6]])
self.assert_grad(F.filtfilt, (x, a, b))
def test_biquad(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
self.assert_grad(F.biquad, (x, b[0], b[1], b[2], a[0], a[1], a[2]))
@parameterized.expand([
(800, 0.7, True),
(800, 0.7, False),
])
def test_band_biquad(self, central_freq, Q, noise):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.band_biquad, (x, sr, central_freq, Q, noise))
@parameterized.expand([
(800, 0.7, 10),
(800, 0.7, -10),
])
def test_bass_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
gain = torch.tensor(gain)
self.assert_grad(F.bass_biquad, (x, sr, gain, central_freq, Q))
@parameterized.expand([
(3000, 0.7, 10),
(3000, 0.7, -10),
])
def test_treble_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
gain = torch.tensor(gain)
self.assert_grad(F.treble_biquad, (x, sr, gain, central_freq, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_allpass_biquad(self, central_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.allpass_biquad, (x, sr, central_freq, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_lowpass_biquad(self, cutoff_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
cutoff_freq = torch.tensor(cutoff_freq)
Q = torch.tensor(Q)
self.assert_grad(F.lowpass_biquad, (x, sr, cutoff_freq, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_highpass_biquad(self, cutoff_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
cutoff_freq = torch.tensor(cutoff_freq)
Q = torch.tensor(Q)
self.assert_grad(F.highpass_biquad, (x, sr, cutoff_freq, Q))
@parameterized.expand([
(800, 0.7, True),
(800, 0.7, False),
])
def test_bandpass_biquad(self, central_freq, Q, const_skirt_gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.bandpass_biquad, (x, sr, central_freq, Q, const_skirt_gain))
@parameterized.expand([
(800, 0.7, 10),
(800, 0.7, -10),
])
def test_equalizer_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
gain = torch.tensor(gain)
self.assert_grad(F.equalizer_biquad, (x, sr, central_freq, gain, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_bandreject_biquad(self, central_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.bandreject_biquad, (x, sr, central_freq, Q))
class AutogradFloat32(TestBaseMixin):
def assert_grad(
self,
transform: Callable[..., Tensor],
inputs: Tuple[torch.Tensor],
enable_all_grad: bool = True,
):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=self.dtype, device=self.device)
if enable_all_grad:
i.requires_grad = True
inputs_.append(i)
# gradcheck with float32 requires higher atol and epsilon
assert gradcheck(transform, inputs_, eps=1e-3, atol=1e-3, nondet_tol=0.)
@parameterized.expand([
(rnnt_utils.get_B1_T10_U3_D4_data, ),
(rnnt_utils.get_B2_T4_U3_D3_data, ),
(rnnt_utils.get_B1_T2_U3_D5_data, ),
])
def test_rnnt_loss(self, data_func):
def get_data(data_func, device):
data = data_func()
if type(data) == tuple:
data = data[0]
return data
data = get_data(data_func, self.device)
inputs = (
data["logits"].to(torch.float32), # logits
data["targets"], # targets
data["logit_lengths"], # logit_lengths
data["target_lengths"], # target_lengths
data["blank"], # blank
-1, # clamp
)
self.assert_grad(F.rnnt_loss, inputs, enable_all_grad=False)
|
import torch
import unittest
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .functional_impl import Functional
@skipIfNoCuda
class TestFunctionalFloat32(Functional, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
@skipIfNoCuda
class TestLFilterFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
import torchaudio.functional as F
from torchaudio_unittest.common_utils import (
skipIfNoSox,
skipIfNoExec,
TempDirMixin,
TorchaudioTestCase,
get_asset_path,
sox_utils,
load_wav,
save_wav,
get_whitenoise,
)
@skipIfNoSox
@skipIfNoExec('sox')
class TestFunctionalFiltering(TempDirMixin, TorchaudioTestCase):
def run_sox_effect(self, input_file, effect):
output_file = self.get_temp_path('expected.wav')
sox_utils.run_sox_effect(input_file, output_file, [str(e) for e in effect])
return load_wav(output_file)
def assert_sox_effect(self, result, input_path, effects, atol=1e-04, rtol=1e-5):
expected, _ = self.run_sox_effect(input_path, effects)
self.assertEqual(result, expected, atol=atol, rtol=rtol)
def get_whitenoise(self, sample_rate=8000):
noise = get_whitenoise(
sample_rate=sample_rate, duration=3, scale_factor=0.9,
)
path = self.get_temp_path("whitenoise.wav")
save_wav(path, noise, sample_rate)
return noise, path
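# assert_sox_effect above compares against the sox CLI; the reference command built by
# sox_utils.run_sox_effect presumably has the usual form
#   sox <input.wav> <expected.wav> <effect> <args...>
# so, for example, ['gain', 3] is checked against `sox in.wav out.wav gain 3`.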
def test_gain(self):
path = get_asset_path('steam-train-whistle-daniel_simon.wav')
data, _ = load_wav(path)
result = F.gain(data, 3)
self.assert_sox_effect(result, path, ['gain', 3])
def test_dither(self):
path = get_asset_path('steam-train-whistle-daniel_simon.wav')
data, _ = load_wav(path)
result = F.dither(data)
self.assert_sox_effect(result, path, ['dither'])
def test_dither_noise(self):
path = get_asset_path('steam-train-whistle-daniel_simon.wav')
data, _ = load_wav(path)
result = F.dither(data, noise_shaping=True)
self.assert_sox_effect(result, path, ['dither', '-s'], atol=1.5e-4)
def test_lowpass(self):
cutoff_freq = 3000
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.lowpass_biquad(data, sample_rate, cutoff_freq)
self.assert_sox_effect(result, path, ['lowpass', cutoff_freq], atol=1.5e-4)
def test_highpass(self):
cutoff_freq = 2000
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.highpass_biquad(data, sample_rate, cutoff_freq)
self.assert_sox_effect(result, path, ['highpass', cutoff_freq], atol=1.5e-4)
def test_allpass(self):
central_freq = 1000
q = 0.707
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.allpass_biquad(data, sample_rate, central_freq, q)
self.assert_sox_effect(result, path, ['allpass', central_freq, f'{q}q'])
def test_bandpass_with_csg(self):
central_freq = 1000
q = 0.707
const_skirt_gain = True
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bandpass_biquad(data, sample_rate, central_freq, q, const_skirt_gain)
self.assert_sox_effect(result, path, ['bandpass', '-c', central_freq, f'{q}q'])
def test_bandpass_without_csg(self):
central_freq = 1000
q = 0.707
const_skirt_gain = False
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bandpass_biquad(data, sample_rate, central_freq, q, const_skirt_gain)
self.assert_sox_effect(result, path, ['bandpass', central_freq, f'{q}q'])
def test_bandreject(self):
central_freq = 1000
q = 0.707
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bandreject_biquad(data, sample_rate, central_freq, q)
self.assert_sox_effect(result, path, ['bandreject', central_freq, f'{q}q'])
def test_band_with_noise(self):
central_freq = 1000
q = 0.707
noise = True
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.band_biquad(data, sample_rate, central_freq, q, noise)
self.assert_sox_effect(result, path, ['band', '-n', central_freq, f'{q}q'])
def test_band_without_noise(self):
central_freq = 1000
q = 0.707
noise = False
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.band_biquad(data, sample_rate, central_freq, q, noise)
self.assert_sox_effect(result, path, ['band', central_freq, f'{q}q'])
def test_treble(self):
central_freq = 1000
q = 0.707
gain = 40
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.treble_biquad(data, sample_rate, gain, central_freq, q)
self.assert_sox_effect(result, path, ['treble', gain, central_freq, f'{q}q'])
def test_bass(self):
central_freq = 1000
q = 0.707
gain = 40
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bass_biquad(data, sample_rate, gain, central_freq, q)
self.assert_sox_effect(result, path, ['bass', gain, central_freq, f'{q}q'], atol=1.5e-4)
def test_deemph(self):
sample_rate = 44100
data, path = self.get_whitenoise(sample_rate)
result = F.deemph_biquad(data, sample_rate)
self.assert_sox_effect(result, path, ['deemph'])
def test_riaa(self):
sample_rate = 44100
data, path = self.get_whitenoise(sample_rate)
result = F.riaa_biquad(data, sample_rate)
self.assert_sox_effect(result, path, ['riaa'])
def test_contrast(self):
enhancement_amount = 80.
data, path = self.get_whitenoise()
result = F.contrast(data, enhancement_amount)
self.assert_sox_effect(result, path, ['contrast', enhancement_amount])
def test_dcshift_with_limiter(self):
shift = 0.5
limiter_gain = 0.05
data, path = self.get_whitenoise()
result = F.dcshift(data, shift, limiter_gain)
self.assert_sox_effect(result, path, ['dcshift', shift, limiter_gain])
def test_dcshift_without_limiter(self):
shift = 0.6
data, path = self.get_whitenoise()
result = F.dcshift(data, shift)
self.assert_sox_effect(result, path, ['dcshift', shift])
def test_overdrive(self):
gain = 30
colour = 40
data, path = self.get_whitenoise()
result = F.overdrive(data, gain, colour)
self.assert_sox_effect(result, path, ['overdrive', gain, colour])
def test_phaser_sine(self):
gain_in = 0.5
gain_out = 0.8
delay_ms = 2.0
decay = 0.4
speed = 0.5
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.phaser(data, sample_rate, gain_in, gain_out, delay_ms, decay, speed, sinusoidal=True)
self.assert_sox_effect(result, path, ['phaser', gain_in, gain_out, delay_ms, decay, speed, '-s'])
def test_phaser_triangle(self):
gain_in = 0.5
gain_out = 0.8
delay_ms = 2.0
decay = 0.4
speed = 0.5
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.phaser(data, sample_rate, gain_in, gain_out, delay_ms, decay, speed, sinusoidal=False)
self.assert_sox_effect(result, path, ['phaser', gain_in, gain_out, delay_ms, decay, speed, '-t'])
def test_flanger_triangle_linear(self):
delay = 0.6
depth = 0.87
regen = 3.0
width = 0.9
speed = 0.5
phase = 30
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='triangular', interpolation='linear')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'triangle', phase, 'linear'])
def test_flanger_triangle_quad(self):
delay = 0.8
depth = 0.88
regen = 3.0
width = 0.4
speed = 0.5
phase = 40
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='triangular', interpolation='quadratic')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'triangle', phase, 'quadratic'])
def test_flanger_sine_linear(self):
delay = 0.8
depth = 0.88
regen = 3.0
width = 0.23
speed = 1.3
phase = 60
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='sinusoidal', interpolation='linear')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'sine', phase, 'linear'])
def test_flanger_sine_quad(self):
delay = 0.9
depth = 0.9
regen = 4.0
width = 0.23
speed = 1.3
phase = 25
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='sinusoidal', interpolation='quadratic')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'sine', phase, 'quadratic'])
def test_equalizer(self):
center_freq = 300
q = 0.707
gain = 1
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.equalizer_biquad(data, sample_rate, center_freq, gain, q)
self.assert_sox_effect(result, path, ['equalizer', center_freq, q, gain])
def test_perf_biquad_filtering(self):
b0 = 0.4
b1 = 0.2
b2 = 0.9
a0 = 0.7
a1 = 0.2
a2 = 0.6
data, path = self.get_whitenoise()
result = F.lfilter(data, torch.tensor([a0, a1, a2]), torch.tensor([b0, b1, b2]))
self.assert_sox_effect(result, path, ['biquad', b0, b1, b2, a0, a1, a2])
|
import torch
import torchaudio.functional as F
import unittest
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, TorchaudioTestCase, skipIfNoSox
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
Smoke test: apply_codec should run end-to-end and return audio with the same dtype and
channel count as the input (and, for lossless formats, the same number of frames).
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform,
sample_rate,
format,
True,
compression
)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import torch
from .autograd_impl import Autograd, AutogradFloat32
from torchaudio_unittest import common_utils
@common_utils.skipIfNoCuda
class TestAutogradLfilterCUDA(Autograd, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
@common_utils.skipIfNoCuda
class TestAutogradRNNTCUDA(AutogradFloat32, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
|
"""Test suites for jit-ability and its numerical compatibility"""
import unittest
import torch
import torchaudio.functional as F
from torchaudio_unittest import common_utils
from torchaudio_unittest.common_utils import (
TempDirMixin,
TestBaseMixin,
skipIfRocm,
torch_script,
)
class Functional(TempDirMixin, TestBaseMixin):
"""Implements test for `functional` module that are performed for different devices"""
def _assert_consistency(self, func, tensor, shape_only=False):
tensor = tensor.to(device=self.device, dtype=self.dtype)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(tensor)
torch.random.manual_seed(40)
ts_output = ts_func(tensor)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
def _assert_consistency_complex(self, func, tensor):
assert tensor.is_complex()
tensor = tensor.to(device=self.device, dtype=self.complex_dtype)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(tensor)
torch.random.manual_seed(40)
ts_output = ts_func(tensor)
self.assertEqual(ts_output, output)
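# torch_script (from common_utils) presumably wraps torch.jit.script; a minimal
# standalone sketch of the same eager-vs-scripted comparison:
#   def double(x: torch.Tensor) -> torch.Tensor:
#       return x * 2.0
#   scripted = torch.jit.script(double)
#   x = torch.rand(3)
#   assert torch.equal(scripted(x), double(x))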
def test_spectrogram(self):
def func(tensor):
n_fft = 400
ws = 400
hop = 200
pad = 0
window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
power = None
normalize = False
return F.spectrogram(tensor, pad, window, n_fft, hop, ws, power, normalize)
tensor = common_utils.get_whitenoise()
self._assert_consistency(func, tensor)
def test_inverse_spectrogram(self):
def func(tensor):
length = 400
n_fft = 400
hop = 200
ws = 400
pad = 0
window = torch.hann_window(ws, device=tensor.device, dtype=torch.float64)
normalize = False
return F.inverse_spectrogram(tensor, length, pad, window, n_fft, hop, ws, normalize)
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=0.05)
tensor = common_utils.get_spectrogram(waveform, n_fft=400, hop_length=200)
self._assert_consistency_complex(func, tensor)
@skipIfRocm
def test_griffinlim(self):
def func(tensor):
n_fft = 400
ws = 400
hop = 200
window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
power = 2.
momentum = 0.99
n_iter = 32
length = 1000
rand_int = False
return F.griffinlim(tensor, window, n_fft, hop, ws, power, n_iter, momentum, length, rand_int)
tensor = torch.rand((1, 201, 6))
self._assert_consistency(func, tensor)
def test_compute_deltas(self):
def func(tensor):
win_length = 2 * 7 + 1
return F.compute_deltas(tensor, win_length=win_length)
channel = 13
n_mfcc = channel * 3
time = 1021
tensor = torch.randn(channel, n_mfcc, time)
self._assert_consistency(func, tensor)
def test_detect_pitch_frequency(self):
waveform = common_utils.get_sinusoid(sample_rate=44100)
def func(tensor):
sample_rate = 44100
return F.detect_pitch_frequency(tensor, sample_rate)
self._assert_consistency(func, waveform)
def test_melscale_fbanks(self):
if self.device != torch.device('cpu'):
raise unittest.SkipTest('No need to perform test on device other than CPU')
def func(_):
n_stft = 100
f_min = 0.0
f_max = 20.0
n_mels = 10
sample_rate = 16000
norm = "slaney"
return F.melscale_fbanks(n_stft, f_min, f_max, n_mels, sample_rate, norm)
dummy = torch.zeros(1, 1)
self._assert_consistency(func, dummy)
def test_linear_fbanks(self):
if self.device != torch.device('cpu'):
raise unittest.SkipTest('No need to perform test on device other than CPU')
def func(_):
n_stft = 100
f_min = 0.0
f_max = 20.0
n_filter = 10
sample_rate = 16000
return F.linear_fbanks(n_stft, f_min, f_max, n_filter, sample_rate)
dummy = torch.zeros(1, 1)
self._assert_consistency(func, dummy)
def test_amplitude_to_DB(self):
def func(tensor):
multiplier = 10.0
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
return F.amplitude_to_DB(tensor, multiplier, amin, db_multiplier, top_db)
tensor = torch.rand((6, 201))
self._assert_consistency(func, tensor)
def test_DB_to_amplitude(self):
def func(tensor):
ref = 1.
power = 1.
return F.DB_to_amplitude(tensor, ref, power)
tensor = torch.rand((1, 100))
self._assert_consistency(func, tensor)
def test_create_dct(self):
if self.device != torch.device('cpu'):
raise unittest.SkipTest('No need to perform test on device other than CPU')
def func(_):
n_mfcc = 40
n_mels = 128
norm = "ortho"
return F.create_dct(n_mfcc, n_mels, norm)
dummy = torch.zeros(1, 1)
self._assert_consistency(func, dummy)
def test_mu_law_encoding(self):
def func(tensor):
qc = 256
return F.mu_law_encoding(tensor, qc)
waveform = common_utils.get_whitenoise()
self._assert_consistency(func, waveform)
def test_mu_law_decoding(self):
def func(tensor):
qc = 256
return F.mu_law_decoding(tensor, qc)
tensor = torch.rand((1, 10))
self._assert_consistency(func, tensor)
def test_mask_along_axis(self):
def func(tensor):
mask_param = 100
mask_value = 30.
axis = 2
return F.mask_along_axis(tensor, mask_param, mask_value, axis)
tensor = torch.randn(2, 1025, 400)
self._assert_consistency(func, tensor)
def test_mask_along_axis_iid(self):
def func(tensor):
mask_param = 100
mask_value = 30.
axis = 2
return F.mask_along_axis_iid(tensor, mask_param, mask_value, axis)
tensor = torch.randn(4, 2, 1025, 400)
self._assert_consistency(func, tensor)
def test_gain(self):
def func(tensor):
gainDB = 2.0
return F.gain(tensor, gainDB)
tensor = torch.rand((1, 1000))
self._assert_consistency(func, tensor)
def test_dither_TPDF(self):
def func(tensor):
return F.dither(tensor, 'TPDF')
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor, shape_only=True)
def test_dither_RPDF(self):
def func(tensor):
return F.dither(tensor, 'RPDF')
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor, shape_only=True)
def test_dither_GPDF(self):
def func(tensor):
return F.dither(tensor, 'GPDF')
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor, shape_only=True)
def test_dither_noise_shaping(self):
def func(tensor):
return F.dither(tensor, noise_shaping=True)
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor)
def test_lfilter(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise()
def func(tensor):
# Design an IIR lowpass filter using scipy.signal filter design
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirdesign.html#scipy.signal.iirdesign
#
# Example
# >>> from scipy.signal import iirdesign
# >>> b, a = iirdesign(0.2, 0.3, 1, 60)
b_coeffs = torch.tensor(
[
0.00299893,
-0.0051152,
0.00841964,
-0.00747802,
0.00841964,
-0.0051152,
0.00299893,
],
device=tensor.device,
dtype=tensor.dtype,
)
a_coeffs = torch.tensor(
[
1.0,
-4.8155751,
10.2217618,
-12.14481273,
8.49018171,
-3.3066882,
0.56088705,
],
device=tensor.device,
dtype=tensor.dtype,
)
return F.lfilter(tensor, a_coeffs, b_coeffs)
self._assert_consistency(func, waveform)
def test_filtfilt(self):
def func(tensor):
torch.manual_seed(296)
b_coeffs = torch.rand(4, device=tensor.device, dtype=tensor.dtype)
a_coeffs = torch.rand(4, device=tensor.device, dtype=tensor.dtype)
return F.filtfilt(tensor, a_coeffs, b_coeffs)
waveform = common_utils.get_whitenoise(sample_rate=8000)
self._assert_consistency(func, waveform)
def test_lowpass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
cutoff_freq = 3000.
return F.lowpass_biquad(tensor, sample_rate, cutoff_freq)
self._assert_consistency(func, waveform)
def test_highpass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
cutoff_freq = 2000.
return F.highpass_biquad(tensor, sample_rate, cutoff_freq)
self._assert_consistency(func, waveform)
def test_allpass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
return F.allpass_biquad(tensor, sample_rate, central_freq, q)
self._assert_consistency(func, waveform)
def test_bandpass_with_csg(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
const_skirt_gain = True
return F.bandpass_biquad(tensor, sample_rate, central_freq, q, const_skirt_gain)
self._assert_consistency(func, waveform)
def test_bandpass_without_csg(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
const_skirt_gain = False
return F.bandpass_biquad(tensor, sample_rate, central_freq, q, const_skirt_gain)
self._assert_consistency(func, waveform)
def test_bandreject(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
return F.bandreject_biquad(tensor, sample_rate, central_freq, q)
self._assert_consistency(func, waveform)
def test_band_with_noise(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
noise = True
return F.band_biquad(tensor, sample_rate, central_freq, q, noise)
self._assert_consistency(func, waveform)
def test_band_without_noise(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
noise = False
return F.band_biquad(tensor, sample_rate, central_freq, q, noise)
self._assert_consistency(func, waveform)
def test_treble(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
gain = 40.
central_freq = 1000.
q = 0.707
return F.treble_biquad(tensor, sample_rate, gain, central_freq, q)
self._assert_consistency(func, waveform)
def test_bass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
gain = 40.
central_freq = 1000.
q = 0.707
return F.bass_biquad(tensor, sample_rate, gain, central_freq, q)
self._assert_consistency(func, waveform)
def test_deemph(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
return F.deemph_biquad(tensor, sample_rate)
self._assert_consistency(func, waveform)
def test_riaa(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
return F.riaa_biquad(tensor, sample_rate)
self._assert_consistency(func, waveform)
def test_equalizer(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
center_freq = 300.
gain = 1.
q = 0.707
return F.equalizer_biquad(tensor, sample_rate, center_freq, gain, q)
self._assert_consistency(func, waveform)
def test_perf_biquad_filtering(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise()
def func(tensor):
a = torch.tensor([0.7, 0.2, 0.6], device=tensor.device, dtype=tensor.dtype)
b = torch.tensor([0.4, 0.2, 0.9], device=tensor.device, dtype=tensor.dtype)
return F.lfilter(tensor, a, b)
self._assert_consistency(func, waveform)
def test_sliding_window_cmn(self):
def func(tensor):
cmn_window = 600
min_cmn_window = 100
center = False
norm_vars = False
a = torch.tensor(
[
[
-1.915875792503357,
1.147700309753418
],
[
1.8242558240890503,
1.3869990110397339
]
],
device=tensor.device,
dtype=tensor.dtype
)
return F.sliding_window_cmn(a, cmn_window, min_cmn_window, center, norm_vars)
b = torch.tensor(
[
[
-1.8701,
-0.1196
],
[
1.8701,
0.1196
]
]
)
self._assert_consistency(func, b)
def test_contrast(self):
waveform = common_utils.get_whitenoise()
def func(tensor):
enhancement_amount = 80.
return F.contrast(tensor, enhancement_amount)
self._assert_consistency(func, waveform)
def test_dcshift(self):
waveform = common_utils.get_whitenoise()
def func(tensor):
shift = 0.5
limiter_gain = 0.05
return F.dcshift(tensor, shift, limiter_gain)
self._assert_consistency(func, waveform)
def test_overdrive(self):
waveform = common_utils.get_whitenoise()
def func(tensor):
gain = 30.
colour = 50.
return F.overdrive(tensor, gain, colour)
self._assert_consistency(func, waveform)
def test_phaser(self):
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
gain_in = 0.5
gain_out = 0.8
delay_ms = 2.0
decay = 0.4
speed = 0.5
sample_rate = 44100
return F.phaser(tensor, sample_rate, gain_in, gain_out, delay_ms, decay, speed, sinusoidal=True)
self._assert_consistency(func, waveform)
def test_flanger(self):
torch.random.manual_seed(40)
waveform = torch.rand(2, 100) - 0.5
def func(tensor):
delay = 0.8
depth = 0.88
regen = 3.0
width = 0.23
speed = 1.3
phase = 60.
sample_rate = 44100
return F.flanger(tensor, sample_rate, delay, depth, regen, width, speed,
phase, modulation='sinusoidal', interpolation='linear')
self._assert_consistency(func, waveform)
def test_spectral_centroid(self):
def func(tensor):
sample_rate = 44100
n_fft = 400
ws = 400
hop = 200
pad = 0
window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
return F.spectral_centroid(tensor, sample_rate, pad, window, n_fft, hop, ws)
tensor = common_utils.get_whitenoise(sample_rate=44100)
self._assert_consistency(func, tensor)
@common_utils.skipIfNoKaldi
def test_compute_kaldi_pitch(self):
if self.dtype != torch.float32 or self.device != torch.device('cpu'):
raise unittest.SkipTest("Only float32, cpu is supported.")
def func(tensor):
sample_rate: float = 44100.
return F.compute_kaldi_pitch(tensor, sample_rate)
tensor = common_utils.get_whitenoise(sample_rate=44100)
self._assert_consistency(func, tensor)
def test_resample_sinc(self):
def func(tensor):
sr1, sr2 = 16000, 8000
return F.resample(tensor, sr1, sr2, resampling_method="sinc_interpolation")
tensor = common_utils.get_whitenoise(sample_rate=16000)
self._assert_consistency(func, tensor)
def test_resample_kaiser(self):
def func(tensor):
sr1, sr2 = 16000, 8000
return F.resample(tensor, sr1, sr2, resampling_method="kaiser_window")
def func_beta(tensor):
sr1, sr2 = 16000, 8000
beta = 6.
return F.resample(tensor, sr1, sr2, resampling_method="kaiser_window", beta=beta)
tensor = common_utils.get_whitenoise(sample_rate=16000)
self._assert_consistency(func, tensor)
self._assert_consistency(func_beta, tensor)
def test_phase_vocoder(self):
def func(tensor):
n_freq = tensor.size(-2)
rate = 0.5
hop_length = 256
phase_advance = torch.linspace(
0,
3.14 * hop_length,
n_freq,
dtype=torch.real(tensor).dtype,
device=tensor.device,
)[..., None]
return F.phase_vocoder(tensor, rate, phase_advance)
tensor = torch.view_as_complex(torch.randn(2, 1025, 400, 2))
self._assert_consistency_complex(func, tensor)
class FunctionalFloat32Only(TestBaseMixin):
def test_rnnt_loss(self):
def func(tensor):
targets = torch.tensor([[1, 2]], device=tensor.device, dtype=torch.int32)
logit_lengths = torch.tensor([2], device=tensor.device, dtype=torch.int32)
target_lengths = torch.tensor([2], device=tensor.device, dtype=torch.int32)
return F.rnnt_loss(tensor, targets, logit_lengths, target_lengths)
logits = torch.tensor([[[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.1, 0.1],
[0.7, 0.1, 0.2, 0.1, 0.1]]]])
tensor = logits.to(device=self.device, dtype=torch.float32)
self._assert_consistency(func, tensor)
|
from parameterized import parameterized
import torch
import torchaudio.functional as F
from torchaudio_unittest.common_utils import (
get_sinusoid,
load_params,
save_wav,
skipIfNoExec,
TempDirMixin,
TestBaseMixin,
)
from torchaudio_unittest.common_utils.kaldi_utils import (
convert_args,
run_kaldi,
)
class Kaldi(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@skipIfNoExec('apply-cmvn-sliding')
def test_sliding_window_cmn(self):
"""sliding_window_cmn should be numerically compatible with apply-cmvn-sliding"""
kwargs = {
'cmn_window': 600,
'min_cmn_window': 100,
'center': False,
'norm_vars': False,
}
tensor = torch.randn(40, 10, dtype=self.dtype, device=self.device)
result = F.sliding_window_cmn(tensor, **kwargs)
command = ['apply-cmvn-sliding'] + convert_args(**kwargs) + ['ark:-', 'ark:-']
kaldi_result = run_kaldi(command, 'ark', tensor)
self.assert_equal(result, expected=kaldi_result)
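# Note on the plumbing (helpers live in common_utils.kaldi_utils): convert_args
# presumably renders the kwargs as Kaldi-style flags such as --cmn-window=600, and
# run_kaldi pipes the tensor to the command as a Kaldi archive on stdin ('ark:-')
# and parses the archive the command writes to stdout.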
class KaldiCPUOnly(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@parameterized.expand(load_params('kaldi_test_pitch_args.jsonl'))
@skipIfNoExec('compute-kaldi-pitch-feats')
def test_pitch_feats(self, kwargs):
"""compute_kaldi_pitch produces numerically compatible result with compute-kaldi-pitch-feats"""
sample_rate = kwargs['sample_rate']
waveform = get_sinusoid(dtype='float32', sample_rate=sample_rate)
result = F.compute_kaldi_pitch(waveform[0], **kwargs)
waveform = get_sinusoid(dtype='int16', sample_rate=sample_rate)
wave_file = self.get_temp_path('test.wav')
save_wav(wave_file, waveform, sample_rate)
command = ['compute-kaldi-pitch-feats'] + convert_args(**kwargs) + ['scp:-', 'ark:-']
kaldi_result = run_kaldi(command, 'scp', wave_file)
self.assert_equal(result, expected=kaldi_result)
|
import torch
from .autograd_impl import Autograd, AutogradFloat32
from torchaudio_unittest import common_utils
class TestAutogradLfilterCPU(Autograd, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
class TestAutogradRNNTCPU(AutogradFloat32, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
|
"""Test numerical consistency among single input and batched input."""
import itertools
import math
from parameterized import parameterized, parameterized_class
import torch
import torchaudio.functional as F
from torchaudio_unittest import common_utils
def _name_from_args(func, _, params):
"""Return a parameterized test name, based on parameter values."""
return "{}_{}".format(
func.__name__,
"_".join(str(arg) for arg in params.args))
@parameterized_class([
# Single-item batch isolates problems that come purely from adding a
# dimension (rather than processing multiple items)
{"batch_size": 1},
{"batch_size": 3},
])
class TestFunctional(common_utils.TorchaudioTestCase):
"""Test functions defined in `functional` module"""
backend = 'default'
def assert_batch_consistency(
self, functional, batch, *args, atol=1e-8, rtol=1e-5, seed=42,
**kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([
functional(items_input[i], *args, **kwargs) for i in range(n)
])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = functional(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
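# The seed is reset before both runs so that functionals which draw random numbers
# internally see the same RNG state in the itemwise and the batched path; the first
# assertEqual also guards against in-place modification of the inputs.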
def test_griffinlim(self):
n_fft = 400
ws = 400
hop = 200
window = torch.hann_window(ws)
power = 2
momentum = 0.99
n_iter = 32
length = 1000
torch.random.manual_seed(0)
batch = torch.rand(self.batch_size, 1, 201, 6)
self.assert_batch_consistency(
F.griffinlim, batch, window, n_fft, hop, ws, power,
n_iter, momentum, length, 0, atol=5e-5)
@parameterized.expand(list(itertools.product(
[8000, 16000, 44100],
[1, 2],
)), name_func=_name_from_args)
def test_detect_pitch_frequency(self, sample_rate, n_channels):
# Use different frequencies to ensure each item in the batch returns a
# different answer.
torch.manual_seed(0)
frequencies = torch.randint(100, 1000, [self.batch_size])
waveforms = torch.stack([
common_utils.get_sinusoid(
frequency=frequency, sample_rate=sample_rate,
n_channels=n_channels, duration=5)
for frequency in frequencies
])
self.assert_batch_consistency(
F.detect_pitch_frequency, waveforms, sample_rate)
def test_amplitude_to_DB(self):
torch.manual_seed(0)
spec = torch.rand(self.batch_size, 2, 100, 100) * 200
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
# Test with & without a `top_db` clamp
self.assert_batch_consistency(
F.amplitude_to_DB, spec, amplitude_mult,
amin, db_mult, top_db=None)
self.assert_batch_consistency(
F.amplitude_to_DB, spec, amplitude_mult,
amin, db_mult, top_db=40.)
def test_amplitude_to_DB_itemwise_clamps(self):
"""Ensure that the clamps are separate for each spectrogram in a batch.
The clamp was determined per-batch in a prior implementation, which
meant it was determined by the loudest item, thus items weren't
independent. See:
https://github.com/pytorch/audio/issues/994
"""
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
top_db = 20.
# Make a batch of noise
torch.manual_seed(0)
spec = torch.rand([2, 2, 100, 100]) * 200
# Make one item blow out the other
spec[0] += 50
batchwise_dbs = F.amplitude_to_DB(spec, amplitude_mult, amin,
db_mult, top_db=top_db)
itemwise_dbs = torch.stack([
F.amplitude_to_DB(item, amplitude_mult, amin,
db_mult, top_db=top_db)
for item in spec
])
self.assertEqual(batchwise_dbs, itemwise_dbs)
def test_amplitude_to_DB_not_channelwise_clamps(self):
"""Check that clamps are applied per-item, not per channel."""
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
top_db = 40.
torch.manual_seed(0)
spec = torch.rand([1, 2, 100, 100]) * 200
# Make one channel blow out the other
spec[:, 0] += 50
specwise_dbs = F.amplitude_to_DB(spec, amplitude_mult, amin,
db_mult, top_db=top_db)
channelwise_dbs = torch.stack([
F.amplitude_to_DB(spec[:, i], amplitude_mult, amin,
db_mult, top_db=top_db)
for i in range(spec.size(-3))
])
# Just check channelwise gives a different answer.
difference = (specwise_dbs - channelwise_dbs).abs()
assert (difference >= 1e-5).any()
def test_contrast(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
self.assert_batch_consistency(
F.contrast, waveforms, enhancement_amount=80.)
def test_dcshift(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
self.assert_batch_consistency(
F.dcshift, waveforms, shift=0.5, limiter_gain=0.05)
def test_overdrive(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
self.assert_batch_consistency(
F.overdrive, waveforms, gain=45, colour=30)
def test_phaser(self):
sample_rate = 44100
n_channels = 2
waveform = common_utils.get_whitenoise(
sample_rate=sample_rate, n_channels=self.batch_size * n_channels,
duration=1)
batch = waveform.view(self.batch_size, n_channels, waveform.size(-1))
self.assert_batch_consistency(F.phaser, batch, sample_rate)
def test_flanger(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
sample_rate = 44100
self.assert_batch_consistency(F.flanger, waveforms, sample_rate)
@parameterized.expand(list(itertools.product(
[True, False], # center
[True, False], # norm_vars
)), name_func=_name_from_args)
def test_sliding_window_cmn(self, center, norm_vars):
torch.manual_seed(0)
spectrogram = torch.rand(self.batch_size, 2, 1024, 1024) * 200
self.assert_batch_consistency(
F.sliding_window_cmn, spectrogram, center=center,
norm_vars=norm_vars)
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform(self, resampling_method):
num_channels = 3
sr = 16000
new_sr = sr // 2
multi_sound = common_utils.get_whitenoise(sample_rate=sr, n_channels=num_channels, duration=0.5,)
self.assert_batch_consistency(
F.resample, multi_sound, orig_freq=sr, new_freq=new_sr,
resampling_method=resampling_method, rtol=1e-4, atol=1e-7)
@common_utils.skipIfNoKaldi
def test_compute_kaldi_pitch(self):
sample_rate = 44100
n_channels = 2
waveform = common_utils.get_whitenoise(
sample_rate=sample_rate, n_channels=self.batch_size * n_channels)
batch = waveform.view(self.batch_size, n_channels, waveform.size(-1))
self.assert_batch_consistency(
F.compute_kaldi_pitch, batch, sample_rate=sample_rate)
def test_lfilter(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
batchwise_output = F.lfilter(x, a, b, batching=True)
itemwise_output = torch.stack([
F.lfilter(x[i], a[i], b[i])
for i in range(self.batch_size)
])
self.assertEqual(batchwise_output, itemwise_output)
def test_filtfilt(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
batchwise_output = F.filtfilt(x, a, b)
itemwise_output = torch.stack([
F.filtfilt(x[i], a[i], b[i])
for i in range(self.batch_size)
])
self.assertEqual(batchwise_output, itemwise_output)
|
import unittest
from distutils.version import StrictVersion
import torch
from parameterized import param
import torchaudio.functional as F
from torchaudio._internal.module_utils import is_module_available
LIBROSA_AVAILABLE = is_module_available('librosa')
if LIBROSA_AVAILABLE:
import numpy as np
import librosa
from torchaudio_unittest.common_utils import (
TestBaseMixin,
nested_params,
get_whitenoise,
get_spectrogram,
)
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class Functional(TestBaseMixin):
"""Test suite for functions in `functional` module."""
dtype = torch.float64
@nested_params([0, 0.99])
def test_griffinlim(self, momentum):
# FFT params
n_fft = 400
win_length = n_fft
hop_length = n_fft // 4
window = torch.hann_window(win_length, device=self.device)
power = 1
# GriffinLim params
n_iter = 8
waveform = get_whitenoise(device=self.device, dtype=self.dtype)
specgram = get_spectrogram(
waveform, n_fft=n_fft, hop_length=hop_length, power=power,
win_length=win_length, window=window)
result = F.griffinlim(
specgram,
window=window,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
power=power,
n_iter=n_iter,
momentum=momentum,
length=waveform.size(1),
rand_init=False)
expected = librosa.griffinlim(
specgram[0].cpu().numpy(),
n_iter=n_iter,
hop_length=hop_length,
momentum=momentum,
init=None,
length=waveform.size(1))[None, ...]
self.assertEqual(result, torch.from_numpy(expected), atol=5e-5, rtol=1e-07)
@nested_params(
[
param(),
param(n_mels=128, sample_rate=44100),
param(n_mels=128, fmin=2000.0, fmax=5000.0),
param(n_mels=56, fmin=100.0, fmax=9000.0),
param(n_mels=56, fmin=800.0, fmax=900.0),
param(n_mels=56, fmin=1900.0, fmax=900.0),
param(n_mels=10, fmin=1900.0, fmax=900.0),
],
[param(norm=n) for n in [None, 'slaney']],
[param(mel_scale=s) for s in ['htk', 'slaney']],
)
def test_create_mel_fb(self, n_mels=40, sample_rate=22050, n_fft=2048,
fmin=0.0, fmax=8000.0, norm=None, mel_scale="htk"):
if (norm == "slaney" and StrictVersion(librosa.__version__) < StrictVersion("0.7.2")):
self.skipTest('Test is known to fail with older versions of librosa.')
if self.device != 'cpu':
self.skipTest('No need to run this test on CUDA')
expected = librosa.filters.mel(
sr=sample_rate,
n_fft=n_fft,
n_mels=n_mels,
fmax=fmax,
fmin=fmin,
htk=mel_scale == "htk",
norm=norm).T
result = F.melscale_fbanks(
sample_rate=sample_rate,
n_mels=n_mels,
f_max=fmax,
f_min=fmin,
n_freqs=(n_fft // 2 + 1),
norm=norm,
mel_scale=mel_scale)
self.assertEqual(result, torch.from_numpy(expected), atol=7e-5, rtol=1.3e-6)
def test_amplitude_to_DB_power(self):
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
multiplier = 10.0
spec = get_spectrogram(get_whitenoise(device=self.device, dtype=self.dtype), power=2)
result = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
expected = librosa.core.power_to_db(spec[0].cpu().numpy())[None, ...]
self.assertEqual(result, torch.from_numpy(expected))
def test_amplitude_to_DB(self):
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
multiplier = 20.0
spec = get_spectrogram(get_whitenoise(device=self.device, dtype=self.dtype), power=1)
result = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
expected = librosa.core.amplitude_to_db(spec[0].cpu().numpy())[None, ...]
self.assertEqual(result, torch.from_numpy(expected))
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class FunctionalComplex(TestBaseMixin):
@nested_params([0.5, 1.01, 1.3])
def test_phase_vocoder(self, rate):
hop_length = 256
num_freq = 1025
num_frames = 400
torch.random.manual_seed(42)
# Due to the cumulative sum, numerical error with torch.float32 would cause the
# bottom-right values of the stretched spectrogram to diverge from librosa's,
# so complex128 (float64) is used here.
spec = torch.randn(num_freq, num_frames, device=self.device, dtype=torch.complex128)
phase_advance = torch.linspace(
0,
np.pi * hop_length,
num_freq,
device=self.device,
dtype=torch.float64)[..., None]
stretched = F.phase_vocoder(spec, rate=rate, phase_advance=phase_advance)
expected_stretched = librosa.phase_vocoder(
spec.cpu().numpy(),
rate=rate,
hop_length=hop_length)
self.assertEqual(stretched, torch.from_numpy(expected_stretched))
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from .torchscript_consistency_impl import Functional, FunctionalFloat32Only
@skipIfNoCuda
class TestFunctionalFloat32(Functional, FunctionalFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@skipIfNoCuda
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .torchscript_consistency_impl import Functional, FunctionalFloat32Only
class TestFunctionalFloat32(Functional, FunctionalFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .kaldi_compatibility_test_impl import Kaldi
@skipIfNoCuda
class TestKaldiFloat32(Kaldi, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@skipIfNoCuda
class TestKaldiFloat64(Kaldi, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .kaldi_compatibility_test_impl import Kaldi, KaldiCPUOnly
class TestKaldiCPUOnly(KaldiCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat32(Kaldi, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat64(Kaldi, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .librosa_compatibility_test_impl import Functional, FunctionalComplex
@skipIfNoCuda
class TestFunctionalCUDA(Functional, PytorchTestCase):
device = 'cuda'
@skipIfNoCuda
class TestFunctionalComplexCUDA(FunctionalComplex, PytorchTestCase):
device = 'cuda'
|
from torchaudio_unittest.common_utils import PytorchTestCase
from .librosa_compatibility_test_impl import Functional, FunctionalComplex
class TestFunctionalCPU(Functional, PytorchTestCase):
device = 'cpu'
class TestFunctionalComplexCPU(FunctionalComplex, PytorchTestCase):
device = 'cpu'
|
"""Test definition common to CPU and CUDA"""
import math
import itertools
import warnings
import numpy as np
import torch
import torchaudio.functional as F
from parameterized import parameterized
from scipy import signal
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_sinusoid,
nested_params,
get_whitenoise,
rnnt_utils,
)
class Functional(TestBaseMixin):
def _test_resample_waveform_accuracy(self, up_scale_factor=None, down_scale_factor=None,
resampling_method="sinc_interpolation", atol=1e-1, rtol=1e-4):
# resample the signal and compare it to the ground truth
n_to_trim = 20
sample_rate = 1000
new_sample_rate = sample_rate
if up_scale_factor is not None:
new_sample_rate = int(new_sample_rate * up_scale_factor)
if down_scale_factor is not None:
new_sample_rate = int(new_sample_rate / down_scale_factor)
duration = 5 # seconds
original_timestamps = torch.arange(0, duration, 1.0 / sample_rate)
sound = 123 * torch.cos(2 * math.pi * 3 * original_timestamps).unsqueeze(0)
estimate = F.resample(sound, sample_rate, new_sample_rate,
resampling_method=resampling_method).squeeze()
new_timestamps = torch.arange(0, duration, 1.0 / new_sample_rate)[:estimate.size(0)]
ground_truth = 123 * torch.cos(2 * math.pi * 3 * new_timestamps)
# trim the first/last n samples as these points have boundary effects
ground_truth = ground_truth[..., n_to_trim:-n_to_trim]
estimate = estimate[..., n_to_trim:-n_to_trim]
self.assertEqual(estimate, ground_truth, atol=atol, rtol=rtol)
def _test_costs_and_gradients(
self, data, ref_costs, ref_gradients, atol=1e-6, rtol=1e-2
):
logits_shape = data["logits"].shape
costs, gradients = rnnt_utils.compute_with_pytorch_transducer(data=data)
self.assertEqual(costs, ref_costs, atol=atol, rtol=rtol)
self.assertEqual(logits_shape, gradients.shape)
self.assertEqual(gradients, ref_gradients, atol=atol, rtol=rtol)
def test_lfilter_simple(self):
"""
Create a very basic signal,
Then make a simple 4th order delay
The output should be same as the input but shifted
"""
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
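        # With b = [0, 0, 0, 1] and a = [1, 0, 0, 0] the difference equation reduces to
        # y[n] = x[n - 3], i.e. a pure 3-sample delay.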
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)
self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)
def test_lfilter_clamp(self):
input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)
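        # y[n] = x[n] + 0.95 * y[n - 1]: for a constant unit input the output grows towards
        # 1 / (1 - 0.95) = 20, so it exceeds 1 unless clamping is enabled.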
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)
assert output_signal.max() <= 1
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)
assert output_signal.max() > 1
@parameterized.expand([
((44100,), (4,), (44100,)),
((3, 44100), (4,), (3, 44100,)),
((2, 3, 44100), (4,), (2, 3, 44100,)),
((1, 2, 3, 44100), (4,), (1, 2, 3, 44100,)),
((44100,), (2, 4), (2, 44100)),
((3, 44100), (1, 4), (3, 1, 44100)),
((1, 2, 44100), (3, 4), (1, 2, 3, 44100))
])
def test_lfilter_shape(self, input_shape, coeff_shape, target_shape):
torch.random.manual_seed(42)
waveform = torch.rand(*input_shape, dtype=self.dtype, device=self.device)
b_coeffs = torch.rand(*coeff_shape, dtype=self.dtype, device=self.device)
a_coeffs = torch.rand(*coeff_shape, dtype=self.dtype, device=self.device)
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs, batching=False)
assert input_shape == waveform.size()
assert target_shape == output_waveform.size()
def test_lfilter_9th_order_filter_stability(self):
"""
Validate the precision of lfilter against reference scipy implementation when using high order filter.
The reference implementation use cascaded second-order filters so is more numerically accurate.
"""
# create an impulse signal
x = torch.zeros(1024, dtype=self.dtype, device=self.device)
x[0] = 1
# get target impulse response
sos = signal.butter(9, 850, 'hp', fs=22050, output='sos')
y = torch.from_numpy(signal.sosfilt(sos, x.cpu().numpy())).to(self.dtype).to(self.device)
# get lfilter coefficients
b, a = signal.butter(9, 850, 'hp', fs=22050, output='ba')
b, a = torch.from_numpy(b).to(self.dtype).to(self.device), torch.from_numpy(
a).to(self.dtype).to(self.device)
# predict impulse response
yhat = F.lfilter(x, a, b, False)
self.assertEqual(yhat, y, atol=1e-4, rtol=1e-5)
def test_filtfilt_simple(self):
"""
Check that, for an arbitrary signal, applying filtfilt with filter coefficients
corresponding to a pure delay filter imparts no time delay.
"""
waveform = get_whitenoise(sample_rate=8000, n_channels=2, dtype=self.dtype).to(
device=self.device
)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
padded_waveform = torch.cat(
(waveform, torch.zeros(2, 3, dtype=self.dtype, device=self.device)), axis=1
)
output_waveform = F.filtfilt(padded_waveform, a_coeffs, b_coeffs)
self.assertEqual(output_waveform, padded_waveform, atol=1e-5, rtol=1e-5)
def test_filtfilt_filter_sinusoid(self):
"""
Check that, for a signal comprising two sinusoids, applying filtfilt
with appropriate filter coefficients correctly removes the higher-frequency
sinusoid while imparting no time delay.
"""
T = 1.0
samples = 1000
waveform_k0 = get_sinusoid(
frequency=5, sample_rate=samples // T, dtype=self.dtype, device=self.device
).squeeze(0)
waveform_k1 = get_sinusoid(
frequency=200,
sample_rate=samples // T,
dtype=self.dtype,
device=self.device,
).squeeze(0)
waveform = waveform_k0 + waveform_k1
# Transfer function numerator and denominator polynomial coefficients
# corresponding to 8th-order Butterworth filter with 100-cycle/T cutoff.
# Generated with
# >>> from scipy import signal
# >>> b_coeffs, a_coeffs = signal.butter(8, 0.2)
b_coeffs = torch.tensor(
[
2.39596441e-05,
1.91677153e-04,
6.70870035e-04,
1.34174007e-03,
1.67717509e-03,
1.34174007e-03,
6.70870035e-04,
1.91677153e-04,
2.39596441e-05,
],
dtype=self.dtype,
device=self.device,
)
a_coeffs = torch.tensor(
[
1.0,
-4.78451489,
10.44504107,
-13.45771989,
11.12933104,
-6.0252604,
2.0792738,
-0.41721716,
0.0372001,
],
dtype=self.dtype,
device=self.device,
)
# Extend waveform in each direction, preserving periodicity.
padded_waveform = torch.cat((waveform[:-1], waveform, waveform[1:]))
output_waveform = F.filtfilt(padded_waveform, a_coeffs, b_coeffs)
# Remove padding from output waveform; confirm that result
# closely matches waveform_k0.
self.assertEqual(
output_waveform[samples - 1: 2 * samples - 1],
waveform_k0,
atol=1e-3,
rtol=1e-3,
)
@parameterized.expand([(0., ), (1., ), (2., ), (3., )])
def test_spectrogram_grad_at_zero(self, power):
"""The gradient of power spectrogram should not be nan but zero near x=0
https://github.com/pytorch/audio/issues/993
"""
x = torch.zeros(1, 22050, requires_grad=True)
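        # With an all-zero input the STFT magnitude is zero; the gradient of the magnitude
        # (a square root) is undefined there and a naive implementation yields NaN, so this
        # checks that it is handled as zero instead.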
spec = F.spectrogram(
x,
pad=0,
window=None,
n_fft=2048,
hop_length=None,
win_length=None,
power=power,
normalized=False,
)
spec.sum().backward()
assert not x.grad.isnan().sum()
def test_compute_deltas_one_channel(self):
specgram = torch.tensor([[[1.0, 2.0, 3.0, 4.0]]], dtype=self.dtype, device=self.device)
expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5]]], dtype=self.dtype, device=self.device)
computed = F.compute_deltas(specgram, win_length=3)
self.assertEqual(computed, expected)
def test_compute_deltas_two_channels(self):
specgram = torch.tensor([[[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]]], dtype=self.dtype, device=self.device)
expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5],
[0.5, 1.0, 1.0, 0.5]]], dtype=self.dtype, device=self.device)
computed = F.compute_deltas(specgram, win_length=3)
self.assertEqual(computed, expected)
@parameterized.expand([(100,), (440,)])
def test_detect_pitch_frequency_pitch(self, frequency):
sample_rate = 44100
test_sine_waveform = get_sinusoid(
frequency=frequency, sample_rate=sample_rate, duration=5
)
freq = F.detect_pitch_frequency(test_sine_waveform, sample_rate)
threshold = 1
s = ((freq - frequency).abs() > threshold).sum()
self.assertFalse(s)
@parameterized.expand([([100, 100],), ([2, 100, 100],), ([2, 2, 100, 100],)])
def test_amplitude_to_DB_reversible(self, shape):
"""Round trip between amplitude and db should return the original for various shape
This implicitly also tests `DB_to_amplitude`.
"""
amplitude_mult = 20.
power_mult = 10.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
torch.manual_seed(0)
spec = torch.rand(*shape, dtype=self.dtype, device=self.device) * 200
# Spectrogram amplitude -> DB -> amplitude
db = F.amplitude_to_DB(spec, amplitude_mult, amin, db_mult, top_db=None)
x2 = F.DB_to_amplitude(db, ref, 0.5)
self.assertEqual(x2, spec, atol=5e-5, rtol=1e-5)
# Spectrogram power -> DB -> power
db = F.amplitude_to_DB(spec, power_mult, amin, db_mult, top_db=None)
x2 = F.DB_to_amplitude(db, ref, 1.)
self.assertEqual(x2, spec)
@parameterized.expand([([100, 100],), ([2, 100, 100],), ([2, 2, 100, 100],)])
def test_amplitude_to_DB_top_db_clamp(self, shape):
"""Ensure values are properly clamped when `top_db` is supplied."""
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
top_db = 40.
torch.manual_seed(0)
# A random tensor is used for increased entropy, but the max and min for
# each spectrogram still need to be predictable. The max determines the
# decibel cutoff, and the distance from the min must be large enough
# that it triggers a clamp.
spec = torch.rand(*shape, dtype=self.dtype, device=self.device)
# Ensure each spectrogram has a min of 0 and a max of 1.
spec -= spec.amin([-2, -1])[..., None, None]
spec /= spec.amax([-2, -1])[..., None, None]
# Expand the range to (0, 200) - wide enough to properly test clamping.
spec *= 200
decibels = F.amplitude_to_DB(spec, amplitude_mult, amin,
db_mult, top_db=top_db)
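        # With a per-spectrogram max of 200 and multiplier 20, the peak is 20 * log10(200) ≈ 46.02 dB,
        # so top_db=40 clamps every value to at least ≈ 6.02 dB (hence the 6.0205 bound below).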
# Ensure the clamp was applied
below_limit = decibels < 6.0205
assert not below_limit.any(), (
"{} decibel values were below the expected cutoff:\n{}".format(
below_limit.sum().item(), decibels
)
)
# Ensure it didn't over-clamp
close_to_limit = decibels < 6.0207
assert close_to_limit.any(), (
f"No values were close to the limit. Did it over-clamp?\n{decibels}"
)
@parameterized.expand(
list(itertools.product([(2, 1025, 400), (1, 201, 100)], [100], [0., 30.], [1, 2]))
)
def test_mask_along_axis(self, shape, mask_param, mask_value, axis):
torch.random.manual_seed(42)
specgram = torch.randn(*shape, dtype=self.dtype, device=self.device)
mask_specgram = F.mask_along_axis(specgram, mask_param, mask_value, axis)
other_axis = 1 if axis == 2 else 2
masked_columns = (mask_specgram == mask_value).sum(other_axis)
num_masked_columns = (masked_columns == mask_specgram.size(other_axis)).sum()
num_masked_columns = torch.div(
num_masked_columns, mask_specgram.size(0), rounding_mode='floor')
assert mask_specgram.size() == specgram.size()
assert num_masked_columns < mask_param
@parameterized.expand(list(itertools.product([100], [0., 30.], [2, 3])))
def test_mask_along_axis_iid(self, mask_param, mask_value, axis):
torch.random.manual_seed(42)
specgrams = torch.randn(4, 2, 1025, 400, dtype=self.dtype, device=self.device)
mask_specgrams = F.mask_along_axis_iid(specgrams, mask_param, mask_value, axis)
other_axis = 2 if axis == 3 else 3
masked_columns = (mask_specgrams == mask_value).sum(other_axis)
num_masked_columns = (masked_columns == mask_specgrams.size(other_axis)).sum(-1)
assert mask_specgrams.size() == specgrams.size()
assert (num_masked_columns < mask_param).sum() == num_masked_columns.numel()
@parameterized.expand(
list(itertools.product([(2, 1025, 400), (1, 201, 100)], [100], [0., 30.], [1, 2]))
)
def test_mask_along_axis_preserve(self, shape, mask_param, mask_value, axis):
"""mask_along_axis should not alter original input Tensor
Test is run 5 times to bound the probability of no masking occurring to 1e-10
See https://github.com/pytorch/audio/issues/1478
"""
torch.random.manual_seed(42)
for _ in range(5):
specgram = torch.randn(*shape, dtype=self.dtype, device=self.device)
specgram_copy = specgram.clone()
F.mask_along_axis(specgram, mask_param, mask_value, axis)
self.assertEqual(specgram, specgram_copy)
@parameterized.expand(list(itertools.product([100], [0., 30.], [2, 3])))
def test_mask_along_axis_iid_preserve(self, mask_param, mask_value, axis):
"""mask_along_axis_iid should not alter original input Tensor
Test is run 5 times to bound the probability of no masking occurring to 1e-10
See https://github.com/pytorch/audio/issues/1478
"""
torch.random.manual_seed(42)
for _ in range(5):
specgrams = torch.randn(4, 2, 1025, 400, dtype=self.dtype, device=self.device)
specgrams_copy = specgrams.clone()
F.mask_along_axis_iid(specgrams, mask_param, mask_value, axis)
self.assertEqual(specgrams, specgrams_copy)
@parameterized.expand(list(itertools.product(
["sinc_interpolation", "kaiser_window"],
[16000, 44100],
)))
def test_resample_identity(self, resampling_method, sample_rate):
waveform = get_whitenoise(sample_rate=sample_rate, duration=1)
resampled = F.resample(waveform, sample_rate, sample_rate)
self.assertEqual(waveform, resampled)
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform_upsample_size(self, resampling_method):
sr = 16000
waveform = get_whitenoise(sample_rate=sr, duration=0.5,)
upsampled = F.resample(waveform, sr, sr * 2, resampling_method=resampling_method)
assert upsampled.size(-1) == waveform.size(-1) * 2
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform_downsample_size(self, resampling_method):
sr = 16000
waveform = get_whitenoise(sample_rate=sr, duration=0.5,)
downsampled = F.resample(waveform, sr, sr // 2, resampling_method=resampling_method)
assert downsampled.size(-1) == waveform.size(-1) // 2
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform_identity_size(self, resampling_method):
sr = 16000
waveform = get_whitenoise(sample_rate=sr, duration=0.5,)
resampled = F.resample(waveform, sr, sr, resampling_method=resampling_method)
assert resampled.size(-1) == waveform.size(-1)
@parameterized.expand(list(itertools.product(
["sinc_interpolation", "kaiser_window"],
list(range(1, 20)),
)))
def test_resample_waveform_downsample_accuracy(self, resampling_method, i):
self._test_resample_waveform_accuracy(down_scale_factor=i * 2, resampling_method=resampling_method)
@parameterized.expand(list(itertools.product(
["sinc_interpolation", "kaiser_window"],
list(range(1, 20)),
)))
def test_resample_waveform_upsample_accuracy(self, resampling_method, i):
self._test_resample_waveform_accuracy(up_scale_factor=1.0 + i / 20.0, resampling_method=resampling_method)
@nested_params([0.5, 1.01, 1.3])
def test_phase_vocoder_shape(self, rate):
"""Verify the output shape of phase vocoder"""
hop_length = 256
num_freq = 1025
num_frames = 400
batch_size = 2
torch.random.manual_seed(42)
spec = torch.randn(
batch_size, num_freq, num_frames, dtype=self.complex_dtype, device=self.device)
phase_advance = torch.linspace(
0,
np.pi * hop_length,
num_freq,
dtype=self.dtype, device=self.device)[..., None]
spec_stretch = F.phase_vocoder(spec, rate=rate, phase_advance=phase_advance)
assert spec.dim() == spec_stretch.dim()
expected_shape = torch.Size([batch_size, num_freq, int(np.ceil(num_frames / rate))])
output_shape = spec_stretch.shape
assert output_shape == expected_shape
@parameterized.expand(
[
# words
["", "", 0], # equal
["abc", "abc", 0],
["ᑌᑎIᑕO", "ᑌᑎIᑕO", 0],
["abc", "", 3], # deletion
["aa", "aaa", 1],
["aaa", "aa", 1],
["ᑌᑎI", "ᑌᑎIᑕO", 2],
["aaa", "aba", 1], # substitution
["aba", "aaa", 1],
["aba", " ", 3],
["abc", "bcd", 2], # mix deletion and substitution
["0ᑌᑎI", "ᑌᑎIᑕO", 3],
# sentences
[["hello", "", "Tᕮ᙭T"], ["hello", "", "Tᕮ᙭T"], 0], # equal
[[], [], 0],
[["hello", "world"], ["hello", "world", "!"], 1], # deletion
[["hello", "world"], ["world"], 1],
[["hello", "world"], [], 2],
[["Tᕮ᙭T", ], ["world"], 1], # substitution
[["Tᕮ᙭T", "XD"], ["world", "hello"], 2],
[["", "XD"], ["world", ""], 2],
["aba", " ", 3],
[["hello", "world"], ["world", "hello", "!"], 2], # mix deletion and substitution
[["Tᕮ᙭T", "world", "LOL", "XD"], ["world", "hello", "ʕ•́ᴥ•̀ʔっ"], 3],
]
)
def test_simple_case_edit_distance(self, seq1, seq2, distance):
assert F.edit_distance(seq1, seq2) == distance
assert F.edit_distance(seq2, seq1) == distance
@nested_params(
[-4, -2, 0, 2, 4],
)
def test_pitch_shift_shape(self, n_steps):
sample_rate = 16000
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
waveform_shift = F.pitch_shift(waveform, sample_rate, n_steps)
assert waveform.size() == waveform_shift.size()
def test_rnnt_loss_basic_backward(self):
logits, targets, logit_lengths, target_lengths = rnnt_utils.get_basic_data(self.device)
loss = F.rnnt_loss(logits, targets, logit_lengths, target_lengths)
loss.backward()
def test_rnnt_loss_basic_forward_no_grad(self):
"""In early stage, calls to `rnnt_loss` resulted in segmentation fault when
`logits` have `requires_grad = False`. This test makes sure that this no longer
occurs and the functional call runs without error.
See https://github.com/pytorch/audio/pull/1707
"""
logits, targets, logit_lengths, target_lengths = rnnt_utils.get_basic_data(self.device)
logits.requires_grad_(False)
F.rnnt_loss(logits, targets, logit_lengths, target_lengths)
@parameterized.expand([
(rnnt_utils.get_B1_T2_U3_D5_data, torch.float32, 1e-6, 1e-2),
(rnnt_utils.get_B2_T4_U3_D3_data, torch.float32, 1e-6, 1e-2),
(rnnt_utils.get_B1_T2_U3_D5_data, torch.float16, 1e-3, 1e-2),
(rnnt_utils.get_B2_T4_U3_D3_data, torch.float16, 1e-3, 1e-2),
])
def test_rnnt_loss_costs_and_gradients(self, data_func, dtype, atol, rtol):
data, ref_costs, ref_gradients = data_func(
dtype=dtype,
device=self.device,
)
self._test_costs_and_gradients(
data=data,
ref_costs=ref_costs,
ref_gradients=ref_gradients,
atol=atol,
rtol=rtol,
)
def test_rnnt_loss_costs_and_gradients_random_data_with_numpy_fp32(self):
seed = 777
for i in range(5):
data = rnnt_utils.get_random_data(dtype=torch.float32, device=self.device, seed=(seed + i))
ref_costs, ref_gradients = rnnt_utils.compute_with_numpy_transducer(data=data)
self._test_costs_and_gradients(
data=data, ref_costs=ref_costs, ref_gradients=ref_gradients
)
class FunctionalCPUOnly(TestBaseMixin):
def test_melscale_fbanks_no_warning_high_n_freq(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.melscale_fbanks(288, 0, 8000, 128, 16000)
assert len(w) == 0
def test_melscale_fbanks_no_warning_low_n_mels(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.melscale_fbanks(201, 0, 8000, 89, 16000)
assert len(w) == 0
def test_melscale_fbanks_warning(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.melscale_fbanks(201, 0, 8000, 128, 16000)
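            # With only 201 frequency bins for 128 mel bands, some mel filters are expected to
            # end up all-zero, which should trigger the warning.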
assert len(w) == 1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import re
import pytorch_sphinx_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'sphinxcontrib.bibtex',
'sphinx_gallery.gen_gallery',
]
# katex options
#
#
katex_options = r'''
delimiters : [
{left: "$$", right: "$$", display: true},
{left: "\\(", right: "\\)", display: false},
{left: "\\[", right: "\\]", display: true}
]
'''
bibtex_bibfiles = ['refs.bib']
def _get_var(var, default=False):
if var not in os.environ:
return default
val = os.environ.get(var, '0')
trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']
falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']
if val in trues:
return True
if val not in falses:
print(
f' --- WARNING: Unexpected environment variable value `{var}={val}`. '
f'Expected one of {trues + falses}')
return False
def _get_pattern():
pattern = os.getenv('GALLERY_PATTERN')
# If BUILD_GALLERY is falsy -> no build
    # If BUILD_GALLERY is truthy -> build
# If BUILD_GALLERY is undefined
# If GALLERY_PATTERN is defined -> build
# If GALLERY_PATTERN is not defined -> not build
if not _get_var('BUILD_GALLERY', default=False if pattern is None else True):
if pattern is not None:
print(
' --- WARNING: "GALLERY_PATTERN" is provided, but "BUILD_GALLERY" value is falsy. '
'Sphinx galleries are not built. To build galleries, set `BUILD_GALLERY=1`.'
)
return {
'ignore_pattern': r'\.py',
}
ret = {'filename_pattern': 'tutorial.py'}
if os.getenv('GALLERY_PATTERN'):
# See https://github.com/pytorch/tutorials/blob/cbf2238df0e78d84c15bd94288966d2f4b2e83ae/conf.py#L75-L83
ret['ignore_pattern'] = r'/(?!' + re.escape(os.getenv('GALLERY_PATTERN')) + r')[^/]+$'
return ret
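# For example (assuming the usual `make html` entry point): `BUILD_GALLERY=1 make html` builds
# every tutorial, while `GALLERY_PATTERN=<tutorial_file.py> make html` builds only the matching one.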
sphinx_gallery_conf = {
'examples_dirs': [
'../../examples/tutorials',
],
'gallery_dirs': [
'tutorials',
],
**_get_pattern(),
'backreferences_dir': 'gen_modules/backreferences',
'first_notebook_cell': None,
'doc_module': ('torchaudio',),
}
autosummary_generate = True
napoleon_use_ivar = True
napoleon_numpy_docstring = False
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Torchaudio'
copyright = '2018, Torchaudio Contributors'
author = 'Torchaudio Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'main '
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'main'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['*/index.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'pytorch_project': 'audio',
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
'navigation_with_keys': True,
'analytics_id': 'UA-117752657-2',
}
html_logo = '_static/img/pytorch-logo-dark.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'
]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TorchAudiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch.tex', 'Torchaudio Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Torchaudio', 'Torchaudio Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Torchaudio', 'Torchaudio Documentation',
author, 'Torchaudio', 'Load audio files into pytorch tensors.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'torch': ('https://pytorch.org/docs/stable/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# type: (list, str, tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
typename = typename.replace('int', 'python:int')
typename = typename.replace('long', 'python:long')
typename = typename.replace('float', 'python:float')
typename = typename.replace('type', 'python:type')
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
|
#!/usr/bin/env python3
"""
Create a data preprocessing pipeline that can be run with libtorchaudio
"""
import os
import argparse
import torch
import torchaudio
class Pipeline(torch.nn.Module):
"""Example audio process pipeline.
This example load waveform from a file then apply effects and save it to a file.
"""
def __init__(self, rir_path: str):
super().__init__()
rir, sample_rate = torchaudio.load(rir_path)
self.register_buffer('rir', rir)
self.rir_sample_rate: int = sample_rate
def forward(self, input_path: str, output_path: str):
torchaudio.sox_effects.init_sox_effects()
# 1. load audio
waveform, sample_rate = torchaudio.load(input_path)
# 2. Add background noise
alpha = 0.01
waveform = alpha * torch.randn_like(waveform) + (1 - alpha) * waveform
        # 3. Resample the RIR filter to match the audio sample rate
rir, _ = torchaudio.sox_effects.apply_effects_tensor(
self.rir, self.rir_sample_rate, effects=[["rate", str(sample_rate)]])
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
# 4. Apply RIR filter
waveform = torch.nn.functional.pad(waveform, (rir.shape[1] - 1, 0))
waveform = torch.nn.functional.conv1d(waveform[None, ...], rir[None, ...])[0]
# Save
torchaudio.save(output_path, waveform, sample_rate)
def _create_jit_pipeline(rir_path, output_path):
module = torch.jit.script(Pipeline(rir_path))
print("*" * 40)
print("* Pipeline code")
print("*" * 40)
print()
print(module.code)
print("*" * 40)
module.save(output_path)
def _get_path(*paths):
return os.path.join(os.path.dirname(__file__), *paths)
def _parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--rir-path",
default=_get_path("..", "data", "rir.wav"),
help="Audio dara for room impulse response."
)
parser.add_argument(
"--output-path",
default=_get_path("pipeline.zip"),
help="Output JIT file."
)
return parser.parse_args()
def _main():
args = _parse_args()
_create_jit_pipeline(args.rir_path, args.output_path)
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python
"""Parse a directory contains VoxForge dataset.
Recursively search for "PROMPTS" file in the given directory and print out
`<ID>\\t<AUDIO_PATH>\\t<TRANSCRIPTION>`
example: python parse_voxforge.py voxforge/de/Helge-20150608-aku
de5-001\t/datasets/voxforge/de/guenter-20140214-afn/wav/de5-001.wav\tES SOLL ETWA FÜNFZIGTAUSEND VERSCHIEDENE SORTEN GEBEN
...
Dataset can be obtained from http://www.repository.voxforge1.org/downloads/de/Trunk/Audio/Main/16kHz_16bit/
""" # noqa: E501
import os
import argparse
from pathlib import Path
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'input_dir',
type=Path,
        help='Directory where `PROMPTS` files are searched.'
)
return parser.parse_args()
def _parse_prompts(path):
base_dir = path.parent.parent
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if not line:
continue
id_, transcript = line.split(' ', maxsplit=1)
if not transcript:
continue
transcript = transcript.upper()
filename = id_.split('/')[-1]
audio_path = base_dir / 'wav' / f'{filename}.wav'
if os.path.exists(audio_path):
yield id_, audio_path, transcript
def _parse_directory(root_dir: Path):
for prompt_file in root_dir.glob('**/PROMPTS'):
try:
yield from _parse_prompts(prompt_file)
except UnicodeDecodeError:
pass
def _main():
args = _parse_args()
for id_, path, transcription in _parse_directory(args.input_dir):
print(f'{id_}\t{path}\t{transcription}')
if __name__ == '__main__':
_main()
|
import torch
class Decoder(torch.nn.Module):
def __init__(self, labels):
super().__init__()
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = ''
for i in best_path:
char = self.labels[i]
if char in ['<s>', '<pad>']:
continue
if char == '|':
char = ' '
hypothesis += char
return hypothesis
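

# A minimal usage sketch (hypothetical labels and logits, not part of the original module):
#
#   labels = ['<s>', '<pad>', '</s>', '<unk>', '|', 'A', 'B', 'C']
#   decoder = Decoder(labels)
#   logits = torch.randn(50, len(labels))  # [num_seq, num_label]
#   transcript = decoder(logits)           # greedy best path, with <s>/<pad> removed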
|
#!/usr/bin/env python3
"""Build Speech Recognition pipeline based on fairseq's wav2vec2.0 and dump it to TorchScript file.
To use this script, you need `fairseq`.
"""
import os
import argparse
import logging
from typing import Tuple
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
import torchaudio
from torchaudio.models.wav2vec2.utils.import_fairseq import import_fairseq_model
import fairseq
from greedy_decoder import Decoder
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION >= (1, 10):
import torch.ao.quantization as tq
else:
import torch.quantization as tq
_LG = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--model-file',
required=True,
help='Path to the input pretrained weight file.'
)
parser.add_argument(
'--dict-dir',
help=(
'Path to the directory in which `dict.ltr.txt` file is found. '
'Required only when the model is finetuned.'
)
)
parser.add_argument(
'--output-path',
        help='Path to the directory where the TorchScript-ed pipelines are saved.',
)
parser.add_argument(
'--test-file',
help='Path to a test audio file.',
)
parser.add_argument(
'--debug',
action='store_true',
help=(
'When enabled, individual components are separately tested '
            'for numerical compatibility and TorchScript compatibility.'
)
)
parser.add_argument(
'--quantize',
action='store_true',
help='Apply quantization to model.'
)
parser.add_argument(
'--optimize-for-mobile',
action='store_true',
        help='Apply optimization for mobile.'
)
return parser.parse_args()
class Loader(torch.nn.Module):
def forward(self, audio_path: str) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(audio_path)
if sample_rate != 16000:
waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
return waveform
class Encoder(torch.nn.Module):
def __init__(self, encoder: torch.nn.Module):
super().__init__()
self.encoder = encoder
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
result, _ = self.encoder(waveform)
return result[0]
def _get_decoder():
labels = [
"<s>",
"<pad>",
"</s>",
"<unk>",
"|",
"E",
"T",
"A",
"O",
"N",
"I",
"H",
"S",
"R",
"D",
"L",
"U",
"M",
"W",
"C",
"F",
"G",
"Y",
"P",
"B",
"V",
"K",
"'",
"X",
"J",
"Q",
"Z",
]
return Decoder(labels)
def _load_fairseq_model(input_file, data_dir=None):
overrides = {}
if data_dir:
overrides['data'] = data_dir
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[input_file], arg_overrides=overrides
)
model = model[0]
return model
def _get_model(model_file, dict_dir):
original = _load_fairseq_model(model_file, dict_dir)
model = import_fairseq_model(original.w2v_encoder)
return model
def _main():
args = _parse_args()
_init_logging(args.debug)
loader = Loader()
model = _get_model(args.model_file, args.dict_dir).eval()
encoder = Encoder(model)
decoder = _get_decoder()
_LG.info(encoder)
if args.quantize:
_LG.info('Quantizing the model')
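        # The convolutional positional embedding uses weight normalization, which is implemented
        # with a forward hook that does not play well with TorchScript; `__prepare_scriptable__`
        # is assumed here to fold it into plain parameters before quantization and scripting.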
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
# test
if args.test_file:
_LG.info('Testing with %s', args.test_file)
waveform = loader(args.test_file)
emission = encoder(waveform)
transcript = decoder(emission)
_LG.info(transcript)
torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
scripted = torch.jit.script(encoder)
if args.optimize_for_mobile:
scripted = optimize_for_mobile(scripted)
scripted.save(os.path.join(args.output_path, 'encoder.zip'))
def _init_logging(debug=False):
level = logging.DEBUG if debug else logging.INFO
format_ = (
'%(message)s' if not debug else
'%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
)
logging.basicConfig(level=level, format=format_)
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
"""Parse a directory contains Librispeech dataset.
Recursively search for "*.trans.txt" file in the given directory and print out
`<ID>\\t<AUDIO_PATH>\\t<TRANSCRIPTION>`
example: python parse_librispeech.py LibriSpeech/test-clean
1089-134691-0000\t/LibriSpeech/test-clean/1089/134691/1089-134691-0000.flac\tHE COULD WAIT NO LONGER
...
Dataset can be obtained from https://www.openslr.org/12
"""
import argparse
from pathlib import Path
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'input_dir',
type=Path,
help='Directory where `*.trans.txt` files are searched.'
)
return parser.parse_args()
def _parse_transcript(path):
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if line:
yield line.split(' ', maxsplit=1)
def _parse_directory(root_dir: Path):
for trans_file in root_dir.glob('**/*.trans.txt'):
trans_dir = trans_file.parent
for id_, transcription in _parse_transcript(trans_file):
audio_path = trans_dir / f'{id_}.flac'
yield id_, audio_path, transcription
def _main():
args = _parse_args()
for id_, path, transcription in _parse_directory(args.input_dir):
print(f'{id_}\t{path}\t{transcription}')
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
import argparse
import logging
import os
from typing import Tuple
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from greedy_decoder import Decoder
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION >= (1, 10):
import torch.ao.quantization as tq
else:
import torch.quantization as tq
_LG = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--model',
required=True,
help='Path to the input pretrained weight file.'
)
parser.add_argument(
'--output-path',
        help='Path to the directory where the TorchScript-ed pipelines are saved.',
)
parser.add_argument(
'--test-file',
help='Path to a test audio file.',
)
parser.add_argument(
'--quantize',
action='store_true',
help='Quantize the model.',
)
parser.add_argument(
'--debug',
action='store_true',
help=(
'When enabled, individual components are separately tested '
            'for numerical compatibility and TorchScript compatibility.'
)
)
return parser.parse_args()
class Loader(torch.nn.Module):
def forward(self, audio_path: str) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(audio_path)
if sample_rate != 16000:
waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
return waveform
class Encoder(torch.nn.Module):
def __init__(self, encoder: torch.nn.Module):
super().__init__()
self.encoder = encoder
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
result, _ = self.encoder(waveform)
return result[0]
def _get_model(model_id):
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
tokenizer = Wav2Vec2Processor.from_pretrained(model_id).tokenizer
labels = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda kv: kv[1])]
original = Wav2Vec2ForCTC.from_pretrained(model_id)
model = import_huggingface_model(original)
return model.eval(), labels
def _get_decoder(labels):
return Decoder(labels)
def _main():
args = _parse_args()
_init_logging(args.debug)
_LG.info('Loading model: %s', args.model)
model, labels = _get_model(args.model)
_LG.info('Labels: %s', labels)
_LG.info('Building pipeline')
loader = Loader()
encoder = Encoder(model)
decoder = _get_decoder(labels)
_LG.info(encoder)
if args.quantize:
_LG.info('Quantizing the model')
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
# test
if args.test_file:
_LG.info('Testing with %s', args.test_file)
waveform = loader(args.test_file)
emission = encoder(waveform)
transcript = decoder(emission)
_LG.info(transcript)
torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
torch.jit.script(encoder).save(os.path.join(args.output_path, 'encoder.zip'))
torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
def _init_logging(debug=False):
level = logging.DEBUG if debug else logging.INFO
format_ = (
'%(message)s' if not debug else
'%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
)
logging.basicConfig(level=level, format=format_)
if __name__ == '__main__':
_main()
|
import argparse
import logging
import os
import unittest
from interactive_asr.utils import setup_asr, transcribe_file
class ASRTest(unittest.TestCase):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
arguments_dict = {
"path": "/scratch/jamarshon/downloads/model.pt",
"input_file": "/scratch/jamarshon/audio/examples/interactive_asr/data/sample.wav",
"data": "/scratch/jamarshon/downloads",
"user_dir": "/scratch/jamarshon/fairseq-py/examples/speech_recognition",
"no_progress_bar": False,
"log_interval": 1000,
"log_format": None,
"tensorboard_logdir": "",
"tbmf_wrapper": False,
"seed": 1,
"cpu": True,
"fp16": False,
"memory_efficient_fp16": False,
"fp16_init_scale": 128,
"fp16_scale_window": None,
"fp16_scale_tolerance": 0.0,
"min_loss_scale": 0.0001,
"threshold_loss_scale": None,
"criterion": "cross_entropy",
"tokenizer": None,
"bpe": None,
"optimizer": "nag",
"lr_scheduler": "fixed",
"task": "speech_recognition",
"num_workers": 0,
"skip_invalid_size_inputs_valid_test": False,
"max_tokens": 10000000,
"max_sentences": None,
"required_batch_size_multiple": 8,
"dataset_impl": None,
"gen_subset": "test",
"num_shards": 1,
"shard_id": 0,
"remove_bpe": None,
"quiet": False,
"model_overrides": "{}",
"results_path": None,
"beam": 40,
"nbest": 1,
"max_len_a": 0,
"max_len_b": 200,
"min_len": 1,
"match_source_len": False,
"no_early_stop": False,
"unnormalized": False,
"no_beamable_mm": False,
"lenpen": 1,
"unkpen": 0,
"replace_unk": None,
"sacrebleu": False,
"score_reference": False,
"prefix_size": 0,
"no_repeat_ngram_size": 0,
"sampling": False,
"sampling_topk": -1,
"sampling_topp": -1.0,
"temperature": 1.0,
"diverse_beam_groups": -1,
"diverse_beam_strength": 0.5,
"print_alignment": False,
"ctc": False,
"rnnt": False,
"kspmodel": None,
"wfstlm": None,
"rnnt_decoding_type": "greedy",
"lm_weight": 0.2,
"rnnt_len_penalty": -0.5,
"momentum": 0.99,
"weight_decay": 0.0,
"force_anneal": None,
"lr_shrink": 0.1,
"warmup_updates": 0,
}
arguments_dict["path"] = os.environ.get("ASR_MODEL_PATH", None)
arguments_dict["input_file"] = os.environ.get("ASR_INPUT_FILE", None)
arguments_dict["data"] = os.environ.get("ASR_DATA_PATH", None)
arguments_dict["user_dir"] = os.environ.get("ASR_USER_DIR", None)
args = argparse.Namespace(**arguments_dict)
def test_transcribe_file(self):
task, generator, models, sp, tgt_dict = setup_asr(self.args, self.logger)
_, transcription = transcribe_file(
self.args, task, generator, models, sp, tgt_dict
)
expected_transcription = [["THE QUICK BROWN FOX JUMPS OVER THE LAZY DOG"]]
self.assertEqual(transcription, expected_transcription, msg=str(transcription))
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/env python3
"""This is the preprocessing script for HuBERT model training.
The script includes:
- File list creation
- MFCC/HuBERT feature extraction
- KMeans clustering model training
- Pseudo-label generation
"""
import logging
from argparse import ArgumentParser, RawTextHelpFormatter
from multiprocessing import Pool
from pathlib import Path
import torch
from utils import (
create_tsv,
dump_features,
learn_kmeans,
get_km_label,
)
def _init_logger(debug=False):
message_fmt = (
"%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
)
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {message_fmt}",
)
def _parse_args():
parser = ArgumentParser(
description=__doc__,
formatter_class=RawTextHelpFormatter,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
parser.add_argument("--dataset", default="librispeech", type=str, choices=["librispeech", "librilight"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``LibriSpeech`` or ``LibriLight`` is stored.",
)
parser.add_argument("--num-rank", default=5, type=int)
parser.add_argument("--feat-type", default="mfcc", type=str)
parser.add_argument("--use-gpu", default=False, type=bool)
parser.add_argument(
"--exp-dir",
type=Path,
help="The directory to store the experiment outputs.",
)
parser.add_argument(
"--num-cluster",
default=100,
type=int,
help="The number of clusters for KMeans clustering.",
)
args = parser.parse_args()
return args
def main(args):
_init_logger(args.debug)
if not args.exp_dir.exists():
args.exp_dir.mkdir()
tsv_dir = args.exp_dir / "tsv"
feat_dir = args.exp_dir / args.feat_type
km_dir = args.exp_dir / "km_model"
label_dir = args.exp_dir / "label"
if args.use_gpu:
device = torch.device("cuda")
else:
device = torch.device("cpu")
# Create file lists for training and validation (optional)
create_tsv(args.root_dir, tsv_dir)
# Extract features for KMeans clustering
if not feat_dir.exists():
feat_dir.mkdir()
for split in ["train", "valid"]:
p = Pool(args.num_rank)
inputs = [(
tsv_dir / f"{args.dataset}_{split}.tsv",
feat_dir,
split,
rank,
args.num_rank,
device,
args.feat_type,
16_000,)
for rank in range(args.num_rank)
]
_ = p.starmap(dump_features, inputs)
p.close()
p.join()
# Fit KMeans clustering model
learn_kmeans(
feat_dir,
"train",
args.num_rank,
km_dir,
args.num_cluster,
)
# Predict labels for MFCC features
for split in ["train", "valid"]:
get_km_label(
feat_dir,
km_dir,
label_dir,
split,
args.num_rank,
device,
)
if __name__ == "__main__":
main(_parse_args())
|
from .common_utils import create_tsv
from .feature_utils import dump_features
from .kmeans import learn_kmeans, get_km_label
__all__ = [
"create_tsv",
"dump_features",
"learn_kmeans",
"get_km_label",
]
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import (
Tuple,
)
import joblib
import torch
from sklearn.cluster import MiniBatchKMeans
from torch import Tensor
from .common_utils import _get_feat_lens_paths, _get_model_path
_LG = logging.getLogger(__name__)
def load_feature(
feat_dir: Path,
split: str,
num_rank: int,
) -> Tuple[Tensor, Tensor]:
r"""Loading features from pre-saved `.pt` files.
Args:
feat_dir (Path): The directory that stores the feature files.
split (str): The split of data. Options: [``train``, ``valid``].
num_rank (int): The number of ranks for multi-processing in feature extraction.
Returns:
(Tensor, Tensor)
Tensor: The concatenated feature tensor of shape `(frame, feature_dim)`.
Tensor: The lengths tensor of shape `(num_utterance,)`.
"""
feats = []
lens = []
for rank in range(num_rank):
feat_path, len_path = _get_feat_lens_paths(feat_dir, split, rank, num_rank)
feat = torch.load(feat_path)
length = torch.load(len_path)
feats.append(feat)
lens.append(length)
feats = torch.cat(feats)
lens = torch.cat(lens)
return feats, lens
def learn_kmeans(
feat_dir: Path,
split: str,
num_rank: int,
km_dir: Path,
n_clusters: int,
init: str = "k-means++",
max_iter: int = 100,
batch_size: int = 10000,
tol: float = 0.0,
n_init: int = 20,
reassignment_ratio: float = 0.0,
max_no_improvement: int = 100,
) -> None:
r"""Build and train the KMeans clustering model. The model is saved in "{km_dir}/model.pt"
Args:
feat_dir (Path): The directory that stores the feature files.
split (str): The split of data. Options: [``train``, ``valid``].
num_rank (int): The number of ranks for multi-processing in feature extraction.
km_dir (Path): The directory to store the KMeans clustering model.
n_clusters (int): The number of clusters.
init (str, optional): Method for initialization. Options: [``k-means++``, ``random``].
(Default: ``k-means++``)
max_iter (int, optional): Maximum number of iterations over the complete dataset. (Default: 100)
batch_size (int, optional): Batch size for training the KMeans clustering model. (Default: 10000)
        tol (float, optional): Control early stopping based on the relative center changes, as measured by a smoothed,
            variance-normalized mean of the squared center position changes. (Default: 0.0)
n_init (int, optional): Number of random initializations that are tried. (Default: 20)
reassignment_ratio (float, optional): Control the fraction of the maximum number of counts for a center
to be reassigned. A higher value means that low count centers are more easily reassigned. (Default: 0.0)
        max_no_improvement (int, optional): Control early stopping based on the number of consecutive mini-batches
            that do not yield an improvement of the smoothed inertia. (Default: 100)
Returns:
None
"""
if not km_dir.exists():
km_dir.mkdir()
km_model = MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=0,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
feats, _ = load_feature(
feat_dir,
split,
num_rank,
)
feats = feats.numpy()
km_model.fit(feats)
km_path = _get_model_path(km_dir)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feats) / len(feats)
_LG.info("Total intertia: %.5f", inertia)
_LG.info("Finished training the KMeans clustering model successfully")
class ApplyKmeans(object):
def __init__(self, km_path, device):
self.km_model = joblib.load(km_path)
self.C_np = self.km_model.cluster_centers_.transpose()
self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
self.C = torch.from_numpy(self.C_np).to(device)
self.Cnorm = torch.from_numpy(self.Cnorm_np).to(device)
def __call__(self, x):
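        # Squared Euclidean distance to every centroid, expanded as
        # ||x - c||^2 = ||x||^2 - 2 * x.c + ||c||^2; argmin over centroids gives the cluster label.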
dist = (
x.pow(2).sum(1, keepdim=True)
- 2 * torch.matmul(x, self.C)
+ self.Cnorm
)
return dist.argmin(dim=1).cpu().numpy()
def get_km_label(
feat_dir: Path,
km_dir: Path,
label_dir: Path,
split: str,
num_rank: int,
device: torch.device,
) -> None:
r"""Predict the labels by the KMeans clustering model.
Args:
feat_dir (Path): The directory that stores the dumped features.
km_dir (Path): The directory that stores the KMeans model.
label_dir (Path): The directory to save the predicted labels.
split (str): The split of data. Options: [``train``, ``valid``].
num_rank (int): The number of ranks for multi-processing in feature extraction.
device (torch.device): The location to allocate for PyTorch Tensors.
            Options: [``torch.device('cpu')``, ``torch.device('cuda')``].
Returns:
None
"""
if not label_dir.exists():
label_dir.mkdir()
km_path = _get_model_path(km_dir)
label_path = label_dir / f"label_{split}.pt"
apply_kmeans = ApplyKmeans(km_path, device)
feats, lens = load_feature(
feat_dir,
split,
num_rank,
)
    lens = lens.long()
offset = 0
assert feats.shape[0] == lens.sum()
with open(label_path, "w") as f:
for i in range(lens.shape[0]):
feat = feats[offset:offset + lens[i]].to(device)
offset += lens[i]
label = apply_kmeans(feat).tolist()
f.write(" ".join(map(str, label)) + "\n")
_LG.info("Finished predicting labels successfully")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
import logging
from pathlib import Path
from typing import (
Tuple,
Union,
)
import torch
import torchaudio
from torch import Tensor
from .common_utils import _get_feat_lens_paths
_LG = logging.getLogger(__name__)
def get_shard_range(
num_lines: int,
num_rank: int,
rank: int
) -> Tuple[int, int]:
r"""Get the range of indices for the current rank in multi-processing.
Args:
num_lines (int): The number of lines to process.
num_rank (int): The number of ranks for multi-processing in feature extraction.
rank (int): The rank in the multi-processing.
Returns:
(int, int):
int: The start index for the current rank.
int: The end index for the current rank.
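    Example:
        With ``num_lines=10`` and ``num_rank=3``, the shards are ``(0, 3)``, ``(3, 7)`` and ``(7, 10)``.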
"""
assert 0 <= rank < num_rank, f"invalid rank/num_rank {rank}/{num_rank}"
assert num_lines > 0, f"Found {num_lines} files, make sure you specify the correct root directory"
start = round(num_lines / num_rank * rank)
end = round(num_lines / num_rank * (rank + 1))
_LG.info(
f"rank {rank} of {num_rank}, process {end-start} "
f"({start}-{end}) out of {num_lines}"
)
return start, end
def extract_feature(
path: str,
device: torch.device,
feature_type: str,
sample_rate: int,
) -> Tensor:
r"""Extract features for KMeans clustering and pseudo label prediction.
Args:
path (str): The file path of the audio.
device (torch.device): The location to allocate for PyTorch Tensors.
            Options: [``torch.device('cpu')``, ``torch.device('cuda')``].
feature_type (str): The type of the desired feature. Options: [``mfcc``, ``hubert``].
sample_rate (int): The sample rate of the audio.
Returns:
Tensor: The desired feature tensor of the given audio file.
"""
waveform, sr = torchaudio.load(path)
assert sr == sample_rate
waveform = waveform[0].to(device)
if feature_type == "mfcc":
feature_extractor = torchaudio.transforms.MFCC(
sample_rate=sample_rate,
n_mfcc=13,
melkwargs={'n_fft': 400, 'hop_length': 160, 'center': False}
).to(device)
mfccs = feature_extractor(waveform) # (freq, time)
# mfccs = torchaudio.compliance.kaldi.mfcc(
# waveform=waveform,
# sample_frequency=sample_rate,
# use_energy=False,
# ) # (time, freq)
# mfccs = mfccs.transpose(0, 1) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
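        # shape: (3 * n_mfcc, time) = (39, time) -- MFCCs plus first- and second-order deltas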
concat = concat.transpose(0, 1) # (time, freq)
return concat
def dump_features(
tsv_file: Union[str, Path],
out_dir: Union[str, Path],
split: str,
rank: int,
num_rank: int,
device: torch.device,
feature_type: str = "mfcc",
sample_rate: int = 16_000,
) -> None:
r"""Dump the feature tensors given a ``.tsv`` file list. The feature and lengths tensors
will be stored under ``out_dir`` directory.
Args:
tsv_file (str or Path): The path of the tsv file.
out_dir (str or Path): The directory to store the feature tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
device (torch.device): The location to allocate for PyTorch Tensors.
            Options: [``torch.device('cpu')``, ``torch.device('cuda')``].
feature_type (str, optional): The type of the desired feature. Options: [``mfcc``, ``hubert``].
(Default: ``mfcc``)
sample_rate (int, optional): The sample rate of the audio. (Default: 16000)
Returns:
None
"""
if feature_type not in ["mfcc", "hubert"]:
raise ValueError("Unexpected feature type.")
features = []
lens = []
out_dir = Path(out_dir)
feat_path, len_path = _get_feat_lens_paths(out_dir, split, rank, num_rank)
with open(tsv_file, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
start, end = get_shard_range(len(lines), num_rank, rank)
lines = lines[start:end]
for line in lines:
path, nsample = line.split("\t")
path = f"{root}/{path}"
nsample = int(nsample)
feature = extract_feature(path, device, feature_type, sample_rate)
features.append(feature.cpu())
lens.append(feature.shape[0])
features = torch.cat(features)
lens = torch.Tensor(lens)
torch.save(features, feat_path)
torch.save(lens, len_path)
_LG.info(f"Finished dumping features for rank {rank} of {num_rank} successfully")
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# https://github.com/pytorch/fairseq/blob/265df7144c79446f5ea8d835bda6e727f54dad9d/LICENSE
"""
Data pre-processing: create tsv files for training (and validation).
"""
import logging
import re
from pathlib import Path
from typing import (
Tuple,
Union,
)
import torch
import torchaudio
_LG = logging.getLogger(__name__)
def create_tsv(
root_dir: Union[str, Path],
out_dir: Union[str, Path],
dataset: str = "librispeech",
valid_percent: float = 0.01,
seed: int = 0,
extension: str = "flac",
) -> None:
"""Create file lists for training and validation.
Args:
root_dir (str or Path): The directory of the dataset.
out_dir (str or Path): The directory to store the file lists.
dataset (str, optional): The dataset to use. Options:
[``librispeech``, ``libri-light``]. (Default: ``librispeech``)
valid_percent (float, optional): The percentage of data for validation. (Default: 0.01)
        seed (int, optional): The seed for randomly selecting the validation files. (Default: 0)
        extension (str, optional): The extension of audio files. (Default: ``flac``)
Returns:
None
"""
assert valid_percent >= 0 and valid_percent <= 1.0
torch.manual_seed(seed)
root_dir = Path(root_dir)
out_dir = Path(out_dir)
if not out_dir.exists():
out_dir.mkdir()
valid_f = (
open(out_dir / f"{dataset}_valid.tsv", "w")
if valid_percent > 0
else None
)
search_pattern = ".*train.*"
with open(out_dir / f"{dataset}_train.tsv", "w") as train_f:
print(root_dir, file=train_f)
if valid_f is not None:
print(root_dir, file=valid_f)
for fname in root_dir.glob(f"**/*.{extension}"):
if re.match(search_pattern, str(fname)):
frames = torchaudio.info(fname).num_frames
dest = train_f if torch.rand(1) > valid_percent else valid_f
print(
f"{fname.relative_to(root_dir)}\t{frames}", file=dest
)
if valid_f is not None:
valid_f.close()
_LG.info("Finished creating the file lists successfully")
def _get_feat_lens_paths(
feat_dir: Path,
split: str,
rank: int,
num_rank: int
) -> Tuple[Path, Path]:
r"""Get the feature and lengths paths based on feature directory,
data split, rank, and number of ranks.
Args:
feat_dir (Path): The directory that stores the feature and lengths tensors.
split (str): The split of data. Options: [``train``, ``valid``].
rank (int): The rank in the multi-processing.
num_rank (int): The number of ranks for multi-processing in feature extraction.
Returns:
(Path, Path)
Path: The file path of the feature tensor for the current rank.
Path: The file path of the lengths tensor for the current rank.
"""
feat_path = feat_dir / f"{split}_{rank}_{num_rank}.pt"
len_path = feat_dir / f"len_{split}_{rank}_{num_rank}.pt"
return feat_path, len_path
def _get_model_path(
km_dir: Path
) -> Path:
r"""Get the file path of the KMeans clustering model
Args:
km_dir (Path): The directory to store the KMeans clustering model.
Returns:
Path: The file path of the model.
"""
return km_dir / "model.pt"
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Tuple, Callable, List
import torch
from torch import Tensor
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LJSPEECH
class SpectralNormalization(torch.nn.Module):
def forward(self, input):
return torch.log(torch.clamp(input, min=1e-5))
class InverseSpectralNormalization(torch.nn.Module):
def forward(self, input):
return torch.exp(input)
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms, text_preprocessor):
self.dataset = dataset
self.transforms = transforms
self.text_preprocessor = text_preprocessor
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
melspec = self.transforms(item[0])
text_norm = torch.IntTensor(self.text_preprocessor(item[2]))
return text_norm, torch.squeeze(melspec, 0)
def split_process_dataset(dataset: str,
file_path: str,
val_ratio: float,
transforms: Callable,
text_preprocessor: Callable[[str], List[int]],
) -> Tuple[torch.utils.data.Dataset, torch.utils.data.Dataset]:
"""Returns the Training and validation datasets.
Args:
        dataset (str): The dataset to use. Available options: [`'ljspeech'`]
        file_path (str): Path to the data.
        val_ratio (float): The ratio of the dataset reserved for validation.
        transforms (callable): A function/transform that takes in a waveform and
            returns a transformed waveform (mel spectrogram in this example).
        text_preprocessor (callable): A function that takes in a string and
            returns a list of integers representing each symbol in the string.
Returns:
train_dataset (`torch.utils.data.Dataset`): The training set.
val_dataset (`torch.utils.data.Dataset`): The validation set.
"""
if dataset == 'ljspeech':
data = LJSPEECH(root=file_path, download=False)
val_length = int(len(data) * val_ratio)
lengths = [len(data) - val_length, val_length]
train_dataset, val_dataset = random_split(data, lengths)
else:
raise ValueError(f"Expected datasets: `ljspeech`, but found {dataset}")
train_dataset = Processed(train_dataset, transforms, text_preprocessor)
val_dataset = Processed(val_dataset, transforms, text_preprocessor)
train_dataset = MapMemoryCache(train_dataset)
val_dataset = MapMemoryCache(val_dataset)
return train_dataset, val_dataset
def text_mel_collate_fn(batch: Tuple[Tensor, Tensor],
n_frames_per_step: int = 1) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
"""The collate function padding and adjusting the data based on `n_frames_per_step`.
Modified from https://github.com/NVIDIA/DeepLearningExamples
Args:
        batch (list of tuples): A list of (text, mel spectrogram) pairs, where the text is
            a 1-D integer tensor and the mel spectrogram has shape (n_mels, n_frames).
n_frames_per_step (int, optional): The number of frames to advance every step.
Returns:
text_padded (Tensor): The input text to Tacotron2 with shape (n_batch, max of ``text_lengths``).
text_lengths (Tensor): The length of each text with shape (n_batch).
mel_specgram_padded (Tensor): The target mel spectrogram
with shape (n_batch, n_mels, max of ``mel_specgram_lengths``)
mel_specgram_lengths (Tensor): The length of each mel spectrogram with shape (n_batch).
gate_padded (Tensor): The ground truth gate output
with shape (n_batch, max of ``mel_specgram_lengths``)
"""
text_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]), dim=0, descending=True)
max_input_len = text_lengths[0]
text_padded = torch.zeros((len(batch), max_input_len), dtype=torch.int64)
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % n_frames_per_step != 0:
max_target_len += n_frames_per_step - max_target_len % n_frames_per_step
assert max_target_len % n_frames_per_step == 0
# include mel padded and gate padded
mel_specgram_padded = torch.zeros((len(batch), num_mels, max_target_len), dtype=torch.float32)
gate_padded = torch.zeros((len(batch), max_target_len), dtype=torch.float32)
mel_specgram_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_specgram_padded[i, :, :mel.size(1)] = mel
mel_specgram_lengths[i] = mel.size(1)
gate_padded[i, mel.size(1) - 1:] = 1
return text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths, gate_padded
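# Shape sketch (illustrative values): for a batch of two (text, mel) pairs with text
# lengths 5 and 3, mel frame counts 100 and 80, n_mels=80 and n_frames_per_step=1,
# the returned tensors have shapes
#   text_padded: (2, 5), text_lengths: (2,),
#   mel_specgram_padded: (2, 80, 100), mel_specgram_lengths: (2,),
#   gate_padded: (2, 100)
# with entries ordered by decreasing text length.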
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Tuple
from torch import nn, Tensor
class Tacotron2Loss(nn.Module):
"""Tacotron2 loss function modified from:
https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/tacotron2/loss_function.py
"""
def __init__(self):
super().__init__()
self.mse_loss = nn.MSELoss(reduction="mean")
self.bce_loss = nn.BCEWithLogitsLoss(reduction="mean")
def forward(
self,
model_outputs: Tuple[Tensor, Tensor, Tensor],
targets: Tuple[Tensor, Tensor],
) -> Tuple[Tensor, Tensor, Tensor]:
r"""Pass the input through the Tacotron2 loss.
The original implementation was introduced in
*Natural TTS Synthesis by Conditioning WaveNet on Mel Spectrogram Predictions*
[:footcite:`shen2018natural`].
Args:
model_outputs (tuple of three Tensors): The outputs of the
Tacotron2. These outputs should include three items:
(1) the predicted mel spectrogram before the postnet (``mel_specgram``)
with shape (batch, mel, time).
(2) predicted mel spectrogram after the postnet (``mel_specgram_postnet``)
with shape (batch, mel, time), and
(3) the stop token prediction (``gate_out``) with shape (batch, ).
targets (tuple of two Tensors): The ground truth mel spectrogram (batch, mel, time) and
stop token with shape (batch, ).
Returns:
mel_loss (Tensor): The mean MSE of the mel_specgram and ground truth mel spectrogram
with shape ``torch.Size([])``.
mel_postnet_loss (Tensor): The mean MSE of the mel_specgram_postnet and
ground truth mel spectrogram with shape ``torch.Size([])``.
gate_loss (Tensor): The mean binary cross entropy loss of
the prediction on the stop token with shape ``torch.Size([])``.
"""
mel_target, gate_target = targets[0], targets[1]
gate_target = gate_target.view(-1, 1)
mel_specgram, mel_specgram_postnet, gate_out = model_outputs
gate_out = gate_out.view(-1, 1)
mel_loss = self.mse_loss(mel_specgram, mel_target)
mel_postnet_loss = self.mse_loss(mel_specgram_postnet, mel_target)
gate_loss = self.bce_loss(gate_out, gate_target)
return mel_loss, mel_postnet_loss, gate_loss
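# Minimal usage sketch with random tensors (shapes follow the docstring above; the
# values are meaningless and only illustrate the call signature):
#   loss_fn = Tacotron2Loss()
#   mel = torch.rand(2, 80, 100)
#   gate = torch.rand(2, 100)
#   mel_loss, mel_postnet_loss, gate_loss = loss_fn((mel, mel, gate), (mel, gate))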
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import logging
import os
import shutil
from typing import List, Tuple, Callable
import torch
from torch import Tensor
def save_checkpoint(state, is_best, filename):
r"""Save the model to a temporary file first, then copy it to filename,
in case signals interrupt the torch.save() process.
"""
torch.save(state, filename)
logging.info(f"Checkpoint saved to {filename}")
if is_best:
path, best_filename = os.path.split(filename)
best_filename = os.path.join(path, "best_" + best_filename)
shutil.copyfile(filename, best_filename)
logging.info(f"Current best checkpoint saved to {best_filename}")
def pad_sequences(batch: List[Tensor]) -> Tuple[Tensor, Tensor]:
r"""Right zero-pad all one-hot text sequences to max input length.
Modified from https://github.com/NVIDIA/DeepLearningExamples.
"""
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x) for x in batch]), dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, :text.size(0)] = text
return text_padded, input_lengths
def prepare_input_sequence(texts: List[str],
text_processor: Callable[[str], List[int]]) -> Tuple[Tensor, Tensor]:
d = []
for text in texts:
d.append(torch.IntTensor(text_processor(text)[:]))
text_padded, input_lengths = pad_sequences(d)
return text_padded, input_lengths
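# Usage sketch (the text_processor here is whatever character-to-ID mapping the
# caller provides): for texts = ["hi", "hello"], prepare_input_sequence returns a
# text_padded tensor of shape (2, 5), right zero-padded to the longest sequence,
# and input_lengths sorted in decreasing order, i.e. tensor([5, 2]).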
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
"""
Modified from
https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/train.py
"""
import argparse
from datetime import datetime
from functools import partial
import logging
import random
import os
from time import time
import torch
import torchaudio
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
from torch.optim import Adam
from torchaudio.models import Tacotron2
from tqdm import tqdm
import matplotlib.pyplot as plt
plt.switch_backend('agg')
from datasets import text_mel_collate_fn, split_process_dataset, SpectralNormalization
from utils import save_checkpoint
from loss import Tacotron2Loss
from text.text_preprocessing import (
available_symbol_set,
available_phonemizers,
get_symbol_list,
text_to_sequence,
)
logging.basicConfig(format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(os.path.basename(__file__))
def parse_args(parser):
"""Parse commandline arguments."""
parser.add_argument("--dataset", default="ljspeech", choices=["ljspeech"], type=str,
help="select dataset to train with")
parser.add_argument('--logging-dir', type=str, default=None,
help='directory to save the log files')
parser.add_argument('--dataset-path', type=str, default='./',
help='path to dataset')
parser.add_argument("--val-ratio", default=0.1, type=float,
help="the ratio of waveforms for validation")
parser.add_argument('--anneal-steps', nargs='*',
help='epochs after which decrease learning rate')
parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,
help='factor for annealing learning rate')
parser.add_argument('--master-addr', default=None, type=str,
help='the address to use for distributed training')
parser.add_argument('--master-port', default=None, type=str,
help='the port to use for distributed training')
preprocessor = parser.add_argument_group('text preprocessor setup')
preprocessor.add_argument('--text-preprocessor', default='english_characters', type=str,
choices=available_symbol_set,
help='select text preprocessor to use.')
preprocessor.add_argument('--phonemizer', type=str, choices=available_phonemizers,
help='select phonemizer to use, only used when text-preprocessor is "english_phonemes"')
preprocessor.add_argument('--phonemizer-checkpoint', type=str,
help='the path or name of the checkpoint for the phonemizer, '
'only used when text-preprocessor is "english_phonemes"')
preprocessor.add_argument('--cmudict-root', default="./", type=str,
help='the root directory for storing cmudictionary files')
# training
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', type=int, required=True,
help='number of total epochs to run')
training.add_argument('--checkpoint-path', type=str, default='',
help='checkpoint path. If a file exists, '
'the program will load it and resume training.')
training.add_argument('--workers', default=8, type=int,
help="number of data loading workers")
training.add_argument("--validate-and-checkpoint-freq", default=10, type=int, metavar="N",
help="validation and saving checkpoint frequency in epochs",)
training.add_argument("--logging-freq", default=10, type=int, metavar="N",
help="logging frequency in epochs")
optimization = parser.add_argument_group('optimization setup')
optimization.add_argument('--learning-rate', default=1e-3, type=float,
                              help='initial learning rate')
optimization.add_argument('--weight-decay', default=1e-6, type=float,
help='weight decay')
optimization.add_argument('--batch-size', default=32, type=int,
help='batch size per GPU')
optimization.add_argument('--grad-clip', default=5.0, type=float,
help='clipping gradient with maximum gradient norm value')
# model parameters
model = parser.add_argument_group('model parameters')
model.add_argument('--mask-padding', action='store_true', default=False,
help='use mask padding')
model.add_argument('--symbols-embedding-dim', default=512, type=int,
help='input embedding dimension')
# encoder
model.add_argument('--encoder-embedding-dim', default=512, type=int,
help='encoder embedding dimension')
model.add_argument('--encoder-n-convolution', default=3, type=int,
help='number of encoder convolutions')
model.add_argument('--encoder-kernel-size', default=5, type=int,
help='encoder kernel size')
# decoder
model.add_argument('--n-frames-per-step', default=1, type=int,
help='number of frames processed per step (currently only 1 is supported)')
model.add_argument('--decoder-rnn-dim', default=1024, type=int,
help='number of units in decoder LSTM')
model.add_argument('--decoder-dropout', default=0.1, type=float,
help='dropout probability for decoder LSTM')
model.add_argument('--decoder-max-step', default=2000, type=int,
help='maximum number of output mel spectrograms')
model.add_argument('--decoder-no-early-stopping', action='store_true', default=False,
help='stop decoding only when all samples are finished')
# attention model
model.add_argument('--attention-hidden-dim', default=128, type=int,
help='dimension of attention hidden representation')
model.add_argument('--attention-rnn-dim', default=1024, type=int,
help='number of units in attention LSTM')
model.add_argument('--attention-location-n-filter', default=32, type=int,
help='number of filters for location-sensitive attention')
model.add_argument('--attention-location-kernel-size', default=31, type=int,
help='kernel size for location-sensitive attention')
model.add_argument('--attention-dropout', default=0.1, type=float,
help='dropout probability for attention LSTM')
model.add_argument('--prenet-dim', default=256, type=int,
help='number of ReLU units in prenet layers')
# mel-post processing network parameters
    model.add_argument('--postnet-n-convolution', default=5, type=int,
                       help='number of postnet convolutions')
    model.add_argument('--postnet-kernel-size', default=5, type=int,
                       help='postnet kernel size')
    model.add_argument('--postnet-embedding-dim', default=512, type=int,
                       help='postnet embedding dimension')
model.add_argument('--gate-threshold', default=0.5, type=float,
help='probability threshold for stop token')
# audio parameters
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--sample-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--n-fft', default=1024, type=int,
help='Filter length for STFT')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--n-mels', default=80, type=int,
                       help='Number of mel filterbanks')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
return parser
def adjust_learning_rate(epoch, optimizer, learning_rate,
anneal_steps, anneal_factor):
"""Adjust learning rate base on the initial setting."""
p = 0
if anneal_steps is not None:
for _, a_step in enumerate(anneal_steps):
if epoch >= int(a_step):
p = p + 1
if anneal_factor == 0.3:
lr = learning_rate * ((0.1 ** (p // 2)) * (1.0 if p % 2 == 0 else 0.3))
else:
lr = learning_rate * (anneal_factor ** p)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
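# Worked example (illustrative numbers): with learning_rate=1e-3,
# anneal_steps=[500, 1000] and anneal_factor=0.1, an epoch of 750 has crossed one
# anneal step, so p=1 and the learning rate is set to 1e-3 * 0.1**1 = 1e-4.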
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return x
def batch_to_gpu(batch):
text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths, gate_padded = batch
text_padded = to_gpu(text_padded).long()
text_lengths = to_gpu(text_lengths).long()
mel_specgram_padded = to_gpu(mel_specgram_padded).float()
gate_padded = to_gpu(gate_padded).float()
mel_specgram_lengths = to_gpu(mel_specgram_lengths).long()
x = (text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
y = (mel_specgram_padded, gate_padded)
return x, y
def training_step(model, train_batch, batch_idx):
(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths), y = batch_to_gpu(train_batch)
y_pred = model(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
y[0].requires_grad = False
y[1].requires_grad = False
losses = Tacotron2Loss()(y_pred[:3], y)
return losses[0] + losses[1] + losses[2], losses
def validation_step(model, val_batch, batch_idx):
(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths), y = batch_to_gpu(val_batch)
y_pred = model(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
losses = Tacotron2Loss()(y_pred[:3], y)
return losses[0] + losses[1] + losses[2], losses
def reduce_tensor(tensor, world_size):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if rt.is_floating_point():
rt = rt / world_size
else:
rt = rt // world_size
return rt
def log_additional_info(writer, model, loader, epoch):
model.eval()
data = next(iter(loader))
with torch.no_grad():
(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths), _ = batch_to_gpu(data)
y_pred = model(text_padded, text_lengths, mel_specgram_padded, mel_specgram_lengths)
mel_out, mel_out_postnet, gate_out, alignment = y_pred
fig = plt.figure()
ax = plt.gca()
ax.imshow(mel_out[0].cpu().numpy())
writer.add_figure("trn/mel_out", fig, epoch)
fig = plt.figure()
ax = plt.gca()
ax.imshow(mel_out_postnet[0].cpu().numpy())
writer.add_figure("trn/mel_out_postnet", fig, epoch)
writer.add_image("trn/gate_out", torch.tile(gate_out[:1], (10, 1)), epoch, dataformats="HW")
writer.add_image("trn/alignment", alignment[0], epoch, dataformats="HW")
def get_datasets(args):
text_preprocessor = partial(
text_to_sequence,
symbol_list=args.text_preprocessor,
phonemizer=args.phonemizer,
checkpoint=args.phonemizer_checkpoint,
cmudict_root=args.cmudict_root,
)
transforms = torch.nn.Sequential(
torchaudio.transforms.MelSpectrogram(
sample_rate=args.sample_rate,
n_fft=args.n_fft,
win_length=args.win_length,
hop_length=args.hop_length,
f_min=args.mel_fmin,
f_max=args.mel_fmax,
n_mels=args.n_mels,
mel_scale='slaney',
normalized=False,
power=1,
norm='slaney',
),
SpectralNormalization()
)
trainset, valset = split_process_dataset(
args.dataset, args.dataset_path, args.val_ratio, transforms, text_preprocessor)
return trainset, valset
def train(rank, world_size, args):
dist.init_process_group("nccl", rank=rank, world_size=world_size)
if rank == 0 and args.logging_dir:
if not os.path.isdir(args.logging_dir):
os.makedirs(args.logging_dir)
filehandler = logging.FileHandler(os.path.join(args.logging_dir, 'train.log'))
filehandler.setLevel(logging.INFO)
logger.addHandler(filehandler)
writer = SummaryWriter(log_dir=args.logging_dir)
else:
writer = None
torch.manual_seed(0)
torch.cuda.set_device(rank)
symbols = get_symbol_list(args.text_preprocessor)
model = Tacotron2(
mask_padding=args.mask_padding,
n_mels=args.n_mels,
n_symbol=len(symbols),
n_frames_per_step=args.n_frames_per_step,
symbol_embedding_dim=args.symbols_embedding_dim,
encoder_embedding_dim=args.encoder_embedding_dim,
encoder_n_convolution=args.encoder_n_convolution,
encoder_kernel_size=args.encoder_kernel_size,
decoder_rnn_dim=args.decoder_rnn_dim,
decoder_max_step=args.decoder_max_step,
decoder_dropout=args.decoder_dropout,
decoder_early_stopping=(not args.decoder_no_early_stopping),
attention_rnn_dim=args.attention_rnn_dim,
attention_hidden_dim=args.attention_hidden_dim,
attention_location_n_filter=args.attention_location_n_filter,
attention_location_kernel_size=args.attention_location_kernel_size,
attention_dropout=args.attention_dropout,
prenet_dim=args.prenet_dim,
postnet_n_convolution=args.postnet_n_convolution,
postnet_kernel_size=args.postnet_kernel_size,
postnet_embedding_dim=args.postnet_embedding_dim,
gate_threshold=args.gate_threshold,
).cuda(rank)
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[rank])
optimizer = Adam(model.parameters(), lr=args.learning_rate)
best_loss = float("inf")
start_epoch = 0
if args.checkpoint_path and os.path.isfile(args.checkpoint_path):
logger.info(f"Checkpoint: loading '{args.checkpoint_path}'")
map_location = {'cuda:%d' % 0: 'cuda:%d' % rank}
checkpoint = torch.load(args.checkpoint_path, map_location=map_location)
start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logger.info(
f"Checkpoint: loaded '{args.checkpoint_path}' at epoch {checkpoint['epoch']}"
)
trainset, valset = get_datasets(args)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset,
shuffle=True,
num_replicas=world_size,
rank=rank,
)
val_sampler = torch.utils.data.distributed.DistributedSampler(
valset,
shuffle=False,
num_replicas=world_size,
rank=rank,
)
loader_params = {
"batch_size": args.batch_size,
"num_workers": args.workers,
"prefetch_factor": 1024,
'persistent_workers': True,
"shuffle": False,
"pin_memory": True,
"drop_last": False,
"collate_fn": partial(text_mel_collate_fn, n_frames_per_step=args.n_frames_per_step),
}
train_loader = DataLoader(trainset, sampler=train_sampler, **loader_params)
val_loader = DataLoader(valset, sampler=val_sampler, **loader_params)
dist.barrier()
for epoch in range(start_epoch, args.epochs):
start = time()
model.train()
trn_loss, counts = 0, 0
if rank == 0:
iterator = tqdm(enumerate(train_loader), desc=f"Epoch {epoch}", total=len(train_loader))
else:
iterator = enumerate(train_loader)
for i, batch in iterator:
adjust_learning_rate(epoch, optimizer, args.learning_rate,
args.anneal_steps, args.anneal_factor)
model.zero_grad()
loss, losses = training_step(model, batch, i)
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip)
optimizer.step()
if rank == 0 and writer:
global_iters = epoch * len(train_loader)
writer.add_scalar("trn/mel_loss", losses[0], global_iters)
writer.add_scalar("trn/mel_postnet_loss", losses[1], global_iters)
writer.add_scalar("trn/gate_loss", losses[2], global_iters)
trn_loss += loss * len(batch[0])
counts += len(batch[0])
trn_loss = trn_loss / counts
trn_loss = reduce_tensor(trn_loss, world_size)
if rank == 0:
logger.info(f"[Epoch: {epoch}] time: {time()-start}; trn_loss: {trn_loss}")
if writer:
writer.add_scalar("trn_loss", trn_loss, epoch)
if ((epoch + 1) % args.validate_and_checkpoint_freq == 0) or (epoch == args.epochs - 1):
val_start_time = time()
model.eval()
val_loss, counts = 0, 0
iterator = tqdm(enumerate(val_loader), desc=f"[Rank: {rank}; Epoch: {epoch}; Eval]", total=len(val_loader))
with torch.no_grad():
for val_batch_idx, val_batch in iterator:
val_loss = val_loss + validation_step(model, val_batch, val_batch_idx)[0] * len(val_batch[0])
counts = counts + len(val_batch[0])
val_loss = val_loss / counts
val_loss = reduce_tensor(val_loss, world_size)
if rank == 0 and writer:
writer.add_scalar("val_loss", val_loss, epoch)
log_additional_info(writer, model, val_loader, epoch)
if rank == 0:
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
logger.info(f"[Rank: {rank}, Epoch: {epoch}; Eval] time: {time()-val_start_time}; val_loss: {val_loss}")
logger.info(f"[Epoch: {epoch}] Saving checkpoint to {args.checkpoint_path}")
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
},
is_best,
args.checkpoint_path,
)
dist.destroy_process_group()
def main(args):
logger.info("Start time: {}".format(str(datetime.now())))
torch.manual_seed(0)
random.seed(0)
if args.master_addr is not None:
os.environ['MASTER_ADDR'] = args.master_addr
elif 'MASTER_ADDR' not in os.environ:
os.environ['MASTER_ADDR'] = 'localhost'
if args.master_port is not None:
os.environ['MASTER_PORT'] = args.master_port
elif 'MASTER_PORT' not in os.environ:
os.environ['MASTER_PORT'] = '17778'
device_counts = torch.cuda.device_count()
logger.info(f"# available GPUs: {device_counts}")
    # download dataset if not already downloaded
if args.dataset == 'ljspeech':
if not os.path.exists(os.path.join(args.dataset_path, 'LJSpeech-1.1')):
from torchaudio.datasets import LJSPEECH
LJSPEECH(root=args.dataset_path, download=True)
if device_counts == 1:
train(0, 1, args)
else:
mp.spawn(train, args=(device_counts, args, ),
nprocs=device_counts, join=True)
logger.info(f"End time: {datetime.now()}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
main(args)
|
"""
Text-to-speech pipeline using Tacotron2.
"""
from functools import partial
import argparse
import os
import random
import sys
import torch
import torchaudio
import numpy as np
from torchaudio.models import Tacotron2
from torchaudio.models import tacotron2 as pretrained_tacotron2
from utils import prepare_input_sequence
from datasets import InverseSpectralNormalization
from text.text_preprocessing import (
available_symbol_set,
available_phonemizers,
get_symbol_list,
text_to_sequence,
)
def parse_args():
r"""
Parse commandline arguments.
"""
from torchaudio.models.tacotron2 import _MODEL_CONFIG_AND_URLS as tacotron2_config_and_urls
from torchaudio.models.wavernn import _MODEL_CONFIG_AND_URLS as wavernn_config_and_urls
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--checkpoint-name',
type=str,
default=None,
choices=list(tacotron2_config_and_urls.keys()),
help='[string] The name of the checkpoint to load.'
)
parser.add_argument(
'--checkpoint-path',
type=str,
default=None,
help='[string] Path to the checkpoint file.'
)
parser.add_argument(
'--output-path',
type=str,
default="./audio.wav",
help='[string] Path to the output .wav file.'
)
parser.add_argument(
'--input-text',
'-i',
type=str,
default="Hello world",
help='[string] Type in something here and TTS will generate it!'
)
parser.add_argument(
'--vocoder',
default='nvidia_waveglow',
choices=['griffin_lim', 'wavernn', 'nvidia_waveglow'],
type=str,
help="Select the vocoder to use.",
)
parser.add_argument(
"--jit",
default=False,
action="store_true",
help="If used, the model and inference function is jitted."
)
preprocessor = parser.add_argument_group('text preprocessor setup')
preprocessor.add_argument(
'--text-preprocessor',
default='english_characters',
type=str,
choices=available_symbol_set,
help='select text preprocessor to use.'
)
preprocessor.add_argument(
'--phonemizer',
default="DeepPhonemizer",
type=str,
choices=available_phonemizers,
help='select phonemizer to use, only used when text-preprocessor is "english_phonemes"'
)
preprocessor.add_argument(
'--phonemizer-checkpoint',
default="./en_us_cmudict_forward.pt",
type=str,
help='the path or name of the checkpoint for the phonemizer, '
'only used when text-preprocessor is "english_phonemes"'
)
preprocessor.add_argument(
'--cmudict-root',
default="./",
type=str,
help='the root directory for storing CMU dictionary files'
)
audio = parser.add_argument_group('audio parameters')
audio.add_argument(
'--sample-rate',
default=22050,
type=int,
help='Sampling rate'
)
audio.add_argument(
'--n-fft',
default=1024,
type=int,
help='Filter length for STFT'
)
audio.add_argument(
'--n-mels',
default=80,
type=int,
        help='Number of mel filterbanks'
)
audio.add_argument(
'--mel-fmin',
default=0.0,
type=float,
help='Minimum mel frequency'
)
audio.add_argument(
'--mel-fmax',
default=8000.0,
type=float,
help='Maximum mel frequency'
)
# parameters for WaveRNN
wavernn = parser.add_argument_group('WaveRNN parameters')
wavernn.add_argument(
'--wavernn-checkpoint-name',
default="wavernn_10k_epochs_8bits_ljspeech",
choices=list(wavernn_config_and_urls.keys()),
help="Select the WaveRNN checkpoint."
)
wavernn.add_argument(
"--wavernn-loss",
default="crossentropy",
choices=["crossentropy"],
type=str,
help="The type of loss the WaveRNN pretrained model is trained on.",
)
wavernn.add_argument(
"--wavernn-no-batch-inference",
default=False,
action="store_true",
help="Don't use batch inference for WaveRNN inference."
)
wavernn.add_argument(
"--wavernn-no-mulaw",
default=False,
action="store_true",
help="Don't use mulaw decoder to decode the signal."
)
wavernn.add_argument(
"--wavernn-batch-timesteps",
default=11000,
type=int,
help="The time steps for each batch. Only used when batch inference is used",
)
wavernn.add_argument(
"--wavernn-batch-overlap",
default=550,
type=int,
help="The overlapping time steps between batches. Only used when batch inference is used",
)
return parser
def unwrap_distributed(state_dict):
r"""torch.distributed.DistributedDataParallel wraps the model with an additional "module.".
This function unwraps this layer so that the weights can be loaded on models with a single GPU.
Args:
state_dict: Original state_dict.
Return:
unwrapped_state_dict: Unwrapped state_dict.
"""
return {k.replace('module.', ''): v for k, v in state_dict.items()}
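# Example (keys are illustrative): a DDP-saved state_dict such as
#   {"module.encoder.lstm.weight": w, "module.decoder.linear.bias": b}
# becomes
#   {"encoder.lstm.weight": w, "decoder.linear.bias": b}
# so the weights can be loaded into a non-distributed Tacotron2 instance.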
def nvidia_waveglow_vocode(mel_specgram, device, jit=False):
waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub', 'nvidia_waveglow', model_math='fp16')
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow = waveglow.to(device)
waveglow.eval()
    if jit:
        raise ValueError("Vocoder option `nvidia_waveglow` is not jittable.")
with torch.no_grad():
waveform = waveglow.infer(mel_specgram).cpu()
return waveform
def wavernn_vocode(mel_specgram, wavernn_checkpoint_name, wavernn_loss, wavernn_no_mulaw,
wavernn_no_batch_inference, wavernn_batch_timesteps, wavernn_batch_overlap,
device, jit):
from torchaudio.models import wavernn
sys.path.append(os.path.join(os.path.dirname(__file__), "../pipeline_wavernn"))
from wavernn_inference_wrapper import WaveRNNInferenceWrapper
from processing import NormalizeDB
wavernn_model = wavernn(wavernn_checkpoint_name).eval().to(device)
wavernn_inference_model = WaveRNNInferenceWrapper(wavernn_model)
if jit:
wavernn_inference_model = torch.jit.script(wavernn_inference_model)
# WaveRNN spectro setting for default checkpoint
# n_fft = 2048
# n_mels = 80
# win_length = 1100
# hop_length = 275
# f_min = 40
# f_max = 11025
transforms = torch.nn.Sequential(
InverseSpectralNormalization(),
NormalizeDB(min_level_db=-100, normalization=True),
)
mel_specgram = transforms(mel_specgram.cpu())
with torch.no_grad():
waveform = wavernn_inference_model(mel_specgram.to(device),
loss_name=wavernn_loss,
mulaw=(not wavernn_no_mulaw),
batched=(not wavernn_no_batch_inference),
timesteps=wavernn_batch_timesteps,
overlap=wavernn_batch_overlap,)
return waveform.unsqueeze(0)
def griffin_lim_vocode(mel_specgram, n_fft, n_mels, sample_rate, mel_fmin, mel_fmax, jit, ):
from torchaudio.transforms import GriffinLim, InverseMelScale
inv_norm = InverseSpectralNormalization()
inv_mel = InverseMelScale(
n_stft=(n_fft // 2 + 1),
n_mels=n_mels,
sample_rate=sample_rate,
f_min=mel_fmin,
f_max=mel_fmax,
mel_scale="slaney",
norm='slaney',
)
griffin_lim = GriffinLim(
n_fft=n_fft,
power=1,
hop_length=256,
win_length=1024,
)
vocoder = torch.nn.Sequential(
inv_norm,
inv_mel,
griffin_lim
)
if jit:
vocoder = torch.jit.script(vocoder)
waveform = vocoder(mel_specgram.cpu())
return waveform
def main(args):
torch.manual_seed(0)
random.seed(0)
np.random.seed(0)
device = "cuda" if torch.cuda.is_available() else "cpu"
if args.checkpoint_path is None and args.checkpoint_name is None:
raise ValueError("Either --checkpoint-path or --checkpoint-name must be specified.")
elif args.checkpoint_path is not None and args.checkpoint_name is not None:
raise ValueError("Both --checkpoint-path and --checkpoint-name are specified, "
"can only specify one.")
n_symbols = len(get_symbol_list(args.text_preprocessor))
text_preprocessor = partial(
text_to_sequence,
symbol_list=args.text_preprocessor,
phonemizer=args.phonemizer,
checkpoint=args.phonemizer_checkpoint,
cmudict_root=args.cmudict_root,
)
if args.checkpoint_path is not None:
tacotron2 = Tacotron2(n_symbol=n_symbols)
tacotron2.load_state_dict(
unwrap_distributed(torch.load(args.checkpoint_path, map_location=device)['state_dict']))
tacotron2 = tacotron2.to(device).eval()
elif args.checkpoint_name is not None:
tacotron2 = pretrained_tacotron2(args.checkpoint_name).to(device).eval()
if n_symbols != tacotron2.n_symbols:
raise ValueError("the number of symbols for text_preprocessor ({n_symbols}) "
"should match the number of symbols for the"
"pretrained tacotron2 ({tacotron2.n_symbols}).")
if args.jit:
tacotron2 = torch.jit.script(tacotron2)
sequences, lengths = prepare_input_sequence([args.input_text],
text_processor=text_preprocessor)
sequences, lengths = sequences.long().to(device), lengths.long().to(device)
with torch.no_grad():
mel_specgram, _, _ = tacotron2.infer(sequences, lengths)
if args.vocoder == "nvidia_waveglow":
waveform = nvidia_waveglow_vocode(mel_specgram=mel_specgram, device=device, jit=args.jit)
elif args.vocoder == "wavernn":
waveform = wavernn_vocode(mel_specgram=mel_specgram,
wavernn_checkpoint_name=args.wavernn_checkpoint_name,
wavernn_loss=args.wavernn_loss,
wavernn_no_mulaw=args.wavernn_no_mulaw,
wavernn_no_batch_inference=args.wavernn_no_batch_inference,
wavernn_batch_timesteps=args.wavernn_batch_timesteps,
wavernn_batch_overlap=args.wavernn_batch_overlap,
device=device,
jit=args.jit)
elif args.vocoder == "griffin_lim":
waveform = griffin_lim_vocode(mel_specgram=mel_specgram,
n_fft=args.n_fft,
n_mels=args.n_mels,
sample_rate=args.sample_rate,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
jit=args.jit)
torchaudio.save(args.output_path, waveform, args.sample_rate)
if __name__ == "__main__":
parser = parse_args()
args, _ = parser.parse_known_args()
main(args)
|
# *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# *****************************************************************************
"""
Modified from https://github.com/keithito/tacotron
"""
from typing import List, Union, Optional
import re
from unidecode import unidecode
from torchaudio.datasets import CMUDict
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters)
_phonemizer = None
available_symbol_set = set(["english_characters", "english_phonemes"])
available_phonemizers = set(["DeepPhonemizer"])
def get_symbol_list(symbol_list: str = "english_characters",
cmudict_root: Optional[str] = "./") -> List[str]:
if symbol_list == "english_characters":
return [_pad] + list(_special) + list(_punctuation) + list(_letters)
elif symbol_list == "english_phonemes":
return [_pad] + list(_special) + list(_punctuation) + CMUDict(cmudict_root).symbols
else:
raise ValueError(f"The `symbol_list` {symbol_list} is not supported."
f"Supported `symbol_list` includes {available_symbol_set}.")
def word_to_phonemes(sent: str, phonemizer: str, checkpoint: str) -> List[str]:
if phonemizer == "DeepPhonemizer":
from dp.phonemizer import Phonemizer
global _phonemizer
_other_symbols = ''.join(list(_special) + list(_punctuation))
_phone_symbols_re = r'(\[[A-Z]+?\]|' + '[' + _other_symbols + '])' # [\[([A-Z]+?)\]|[-!'(),.:;? ]]
if _phonemizer is None:
            # using a global variable so that we don't have to reload the checkpoint
            # every time this function is called
_phonemizer = Phonemizer.from_checkpoint(checkpoint)
# Example:
# sent = "hello world!"
# '[HH][AH][L][OW] [W][ER][L][D]!'
sent = _phonemizer(sent, lang='en_us')
# ['[HH]', '[AH]', '[L]', '[OW]', ' ', '[W]', '[ER]', '[L]', '[D]', '!']
ret = re.findall(_phone_symbols_re, sent)
# ['HH', 'AH', 'L', 'OW', ' ', 'W', 'ER', 'L', 'D', '!']
ret = [r.replace("[", "").replace("]", "") for r in ret]
return ret
else:
raise ValueError(f"The `phonemizer` {phonemizer} is not supported. "
"Supported `symbol_list` includes `'DeepPhonemizer'`.")
def text_to_sequence(sent: str,
symbol_list: Union[str, List[str]] = "english_characters",
phonemizer: Optional[str] = "DeepPhonemizer",
checkpoint: Optional[str] = "./en_us_cmudict_forward.pt",
cmudict_root: Optional[str] = "./") -> List[int]:
r'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
Args:
sent (str): The input sentence to convert to a sequence.
symbol_list (str or List of string, optional): When the input is a string, available options include
"english_characters" and "english_phonemes". When the input is a list of string, ``symbol_list`` will
directly be used as the symbol to encode. (Default: "english_characters")
phonemizer (str or None, optional): The phonemizer to use. Only used when ``symbol_list`` is "english_phonemes".
Available options include "DeepPhonemizer". (Default: "DeepPhonemizer")
checkpoint (str or None, optional): The path to the checkpoint of the phonemizer. Only used when
``symbol_list`` is "english_phonemes". (Default: "./en_us_cmudict_forward.pt")
cmudict_root (str or None, optional): The path to the directory where the CMUDict dataset is found or
downloaded. Only used when ``symbol_list`` is "english_phonemes". (Default: "./")
Returns:
List of integers corresponding to the symbols in the sentence.
Examples:
>>> text_to_sequence("hello world!", "english_characters")
[19, 16, 23, 23, 26, 11, 34, 26, 29, 23, 15, 2]
>>> text_to_sequence("hello world!", "english_phonemes")
[54, 20, 65, 69, 11, 92, 44, 65, 38, 2]
'''
if symbol_list == "english_phonemes":
if any(param is None for param in [phonemizer, checkpoint, cmudict_root]):
raise ValueError(
"When `symbol_list` is 'english_phonemes', "
"all of `phonemizer`, `checkpoint`, and `cmudict_root` must be provided.")
sent = unidecode(sent) # convert to ascii
sent = sent.lower() # lower case
sent = normalize_numbers(sent) # expand numbers
for regex, replacement in _abbreviations: # expand abbreviations
sent = re.sub(regex, replacement, sent)
sent = re.sub(_whitespace_re, ' ', sent) # collapse whitespace
if isinstance(symbol_list, list):
symbols = symbol_list
elif isinstance(symbol_list, str):
symbols = get_symbol_list(symbol_list, cmudict_root=cmudict_root)
if symbol_list == "english_phonemes":
sent = word_to_phonemes(sent, phonemizer=phonemizer, checkpoint=checkpoint)
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
return [_symbol_to_id[s] for s in sent if s in _symbol_to_id]
|
# *****************************************************************************
# Copyright (c) 2017 Keith Ito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# *****************************************************************************
"""
Modified from https://github.com/keithito/tacotron
"""
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(text: str) -> str:
return re.sub(_comma_number_re, lambda m: m.group(1).replace(',', ''), text)
def _expand_pounds(text: str) -> str:
return re.sub(_pounds_re, r'\1 pounds', text)
def _expand_dollars_repl_fn(m):
"""The replacement function for expanding dollars."""
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
if len(parts) > 1 and parts[1]:
if len(parts[1]) == 1:
# handle the case where we have one digit after the decimal point
cents = int(parts[1]) * 10
else:
cents = int(parts[1])
else:
cents = 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_dollars(text: str) -> str:
return re.sub(_dollars_re, _expand_dollars_repl_fn, text)
def _expand_decimal_point(text: str) -> str:
return re.sub(_decimal_number_re, lambda m: m.group(1).replace('.', ' point '), text)
def _expand_ordinal(text: str) -> str:
return re.sub(_ordinal_re, lambda m: _inflect.number_to_words(m.group(0)), text)
def _expand_number_repl_fn(m):
"""The replacement function for expanding number."""
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def _expand_number(text: str) -> str:
return re.sub(_number_re, _expand_number_repl_fn, text)
def normalize_numbers(text: str) -> str:
text = _remove_commas(text)
text = _expand_pounds(text)
text = _expand_dollars(text)
text = _expand_decimal_point(text)
text = _expand_ordinal(text)
text = _expand_number(text)
return text
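# Expected behaviour (a sketch based on the rules above; the exact wording of the
# spelled-out numbers comes from the inflect package):
#   normalize_numbers("$1.50") -> "one dollar, fifty cents"
#   normalize_numbers("$3")    -> "three dollars"
#   normalize_numbers("£100")  -> "one hundred pounds"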
|
from . import utils, vad
__all__ = ['utils', 'vad']
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Run inference for pre-processed data with a trained model.
"""
import datetime as dt
import logging
from fairseq import options
from interactive_asr.utils import add_asr_eval_argument, setup_asr, get_microphone_transcription, transcribe_file
def main(args):
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
task, generator, models, sp, tgt_dict = setup_asr(args, logger)
print("READY!")
if args.input_file:
transcription_time, transcription = transcribe_file(args, task, generator, models, sp, tgt_dict)
print("transcription:", transcription)
print("transcription_time:", transcription_time)
else:
for transcription in get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
print(
"{}: {}".format(
dt.datetime.now().strftime("%H:%M:%S"), transcription[0][0]
)
)
def cli_main():
parser = options.get_generation_parser()
parser = add_asr_eval_argument(parser)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == "__main__":
cli_main()
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import os
import sys
import time
import torch
import torchaudio
import sentencepiece as spm
from fairseq import tasks
from fairseq.utils import load_ensemble_for_inference, import_user_module
from interactive_asr.vad import get_microphone_chunks
def add_asr_eval_argument(parser):
parser.add_argument("--input_file", help="input file")
parser.add_argument("--ctc", action="store_true", help="decode a ctc model")
parser.add_argument("--rnnt", default=False, help="decode a rnnt model")
parser.add_argument("--kspmodel", default=None, help="sentence piece model")
parser.add_argument(
"--wfstlm", default=None, help="wfstlm on dictonary output units"
)
parser.add_argument(
"--rnnt_decoding_type",
default="greedy",
help="wfstlm on dictonary output units",
)
parser.add_argument(
"--lm_weight",
default=0.2,
help="weight for wfstlm while interpolating with neural score",
)
parser.add_argument(
"--rnnt_len_penalty", default=-0.5, help="rnnt length penalty on word level"
)
return parser
def check_args(args):
assert args.path is not None, "--path required for generation!"
assert (
not args.sampling or args.nbest == args.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
args.replace_unk is None or args.raw_text
), "--replace-unk requires a raw text dataset (--raw-text)"
def process_predictions(args, hypos, sp, tgt_dict):
res = []
device = torch.device("cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu")
for hypo in hypos[: min(len(hypos), args.nbest)]:
hyp_pieces = tgt_dict.string(hypo["tokens"].int().to(device))
hyp_words = sp.DecodePieces(hyp_pieces.split())
res.append(hyp_words)
return res
def optimize_models(args, use_cuda, models):
"""Optimize ensemble for generation
"""
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
def calc_mean_invstddev(feature):
if len(feature.shape) != 2:
raise ValueError("We expect the input feature to be 2-D tensor")
mean = torch.mean(feature, dim=0)
var = torch.var(feature, dim=0)
# avoid division by ~zero
if (var < sys.float_info.epsilon).any():
return mean, 1.0 / (torch.sqrt(var) + sys.float_info.epsilon)
return mean, 1.0 / torch.sqrt(var)
def calcMN(features):
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def transcribe(waveform, args, task, generator, models, sp, tgt_dict):
num_features = 80
output = torchaudio.compliance.kaldi.fbank(waveform, num_mel_bins=num_features)
device = torch.device("cuda:0" if torch.cuda.is_available() and not args.cpu else "cpu")
output_cmvn = calcMN(output.to(device).detach())
# size (m, n)
source = output_cmvn
frames_lengths = torch.LongTensor([source.size(0)])
# size (1, m, n). In general, if source is (x, m, n), then hypos is (x, ...)
source.unsqueeze_(0)
sample = {"net_input": {"src_tokens": source, "src_lengths": frames_lengths}}
hypos = task.inference_step(generator, models, sample)
assert len(hypos) == 1
transcription = []
for i in range(len(hypos)):
# Process top predictions
hyp_words = process_predictions(args, hypos[i], sp, tgt_dict)
transcription.append(hyp_words)
return transcription
def setup_asr(args, logger):
check_args(args)
import_user_module(args)
if args.max_tokens is None and args.batch_size is None:
args.max_tokens = 30000
logger.info(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
# Set dictionary
tgt_dict = task.target_dictionary
if args.ctc or args.rnnt:
tgt_dict.add_symbol("<ctc_blank>")
if args.ctc:
logger.info("| decoding a ctc model")
if args.rnnt:
logger.info("| decoding a rnnt model")
# Load ensemble
logger.info("| loading model(s) from {}".format(args.path))
models, _model_args = load_ensemble_for_inference(
args.path.split(":"),
task,
model_arg_overrides=eval(args.model_overrides), # noqa
)
optimize_models(args, use_cuda, models)
# Initialize generator
generator = task.build_generator(models, args)
sp = spm.SentencePieceProcessor()
sp.Load(os.path.join(args.data, "spm.model"))
return task, generator, models, sp, tgt_dict
def transcribe_file(args, task, generator, models, sp, tgt_dict):
path = args.input_file
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
waveform, sample_rate = torchaudio.load_wav(path)
waveform = waveform.mean(0, True)
waveform = torchaudio.transforms.Resample(
orig_freq=sample_rate, new_freq=16000
)(waveform)
start = time.time()
transcription = transcribe(
waveform, args, task, generator, models, sp, tgt_dict
)
transcription_time = time.time() - start
return transcription_time, transcription
def get_microphone_transcription(args, task, generator, models, sp, tgt_dict):
for (waveform, sample_rate) in get_microphone_chunks():
waveform = torchaudio.transforms.Resample(
orig_freq=sample_rate, new_freq=16000
)(waveform.reshape(1, -1))
transcription = transcribe(
waveform, args, task, generator, models, sp, tgt_dict
)
yield transcription
|
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Following `a simple but efficient real-time voice activity detection algorithm
<https://www.eurasip.org/Proceedings/Eusipco/Eusipco2009/contents/papers/1569192958.pdf>`__.
There are three criteria to decide if a frame contains speech: energy, most
dominant frequency, and spectral flatness. If any two of those are higher than
a minimum plus a threshold, then the frame contains speech. In the offline
case, the list of frames is postprocessed to remove too short silence and
speech sequences. In the online case here, inertia is added before switching
from speech to silence or vice versa.
"""
from collections import deque
import numpy as np
import torch
import queue
import librosa
import pyaudio
import torchaudio
def compute_spectral_flatness(frame, epsilon=0.01):
# epsilon protects against log(0)
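    # The flatness measure is reported in dB and negated: a flat (noise-like) spectrum
    # yields a value near zero, while a tonal frame yields a large positive value.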
geometric_mean = torch.exp((frame + epsilon).log().mean(-1)) - epsilon
arithmetic_mean = frame.mean(-1)
return -10 * torch.log10(epsilon + geometric_mean / arithmetic_mean)
class VoiceActivityDetection:
def __init__(
self,
num_init_frames=30,
ignore_silent_count=4,
ignore_speech_count=1,
energy_prim_thresh=60,
frequency_prim_thresh=10,
spectral_flatness_prim_thresh=3,
verbose=False,
):
self.num_init_frames = num_init_frames
self.ignore_silent_count = ignore_silent_count
self.ignore_speech_count = ignore_speech_count
self.energy_prim_thresh = energy_prim_thresh
self.frequency_prim_thresh = frequency_prim_thresh
self.spectral_flatness_prim_thresh = spectral_flatness_prim_thresh
self.verbose = verbose
self.speech_mark = True
self.silence_mark = False
self.silent_count = 0
self.speech_count = 0
self.n = 0
if self.verbose:
self.energy_list = []
self.frequency_list = []
self.spectral_flatness_list = []
def iter(self, frame):
frame_fft = torch.rfft(frame, 1)
amplitudes = torchaudio.functional.complex_norm(frame_fft)
# Compute frame energy
energy = frame.pow(2).sum(-1)
# Most dominant frequency component
frequency = amplitudes.argmax()
# Spectral flatness measure
spectral_flatness = compute_spectral_flatness(amplitudes)
if self.verbose:
self.energy_list.append(energy)
self.frequency_list.append(frequency)
self.spectral_flatness_list.append(spectral_flatness)
if self.n == 0:
self.min_energy = energy
self.min_frequency = frequency
self.min_spectral_flatness = spectral_flatness
elif self.n < self.num_init_frames:
self.min_energy = min(energy, self.min_energy)
self.min_frequency = min(frequency, self.min_frequency)
self.min_spectral_flatness = min(
spectral_flatness, self.min_spectral_flatness
)
self.n += 1
# Add 1. to avoid log(0)
thresh_energy = self.energy_prim_thresh * torch.log(1.0 + self.min_energy)
thresh_frequency = self.frequency_prim_thresh
thresh_spectral_flatness = self.spectral_flatness_prim_thresh
# Check all three conditions
counter = 0
if energy - self.min_energy >= thresh_energy:
counter += 1
if frequency - self.min_frequency >= thresh_frequency:
counter += 1
if spectral_flatness - self.min_spectral_flatness >= thresh_spectral_flatness:
counter += 1
# Detection
if counter > 1:
# Speech detected
self.speech_count += 1
# Inertia against switching
if (
self.n >= self.num_init_frames
and self.speech_count <= self.ignore_speech_count
):
# Too soon to change
return self.silence_mark
else:
self.silent_count = 0
return self.speech_mark
else:
# Silence detected
self.min_energy = ((self.silent_count * self.min_energy) + energy) / (
self.silent_count + 1
)
self.silent_count += 1
# Inertia against switching
if (
self.n >= self.num_init_frames
and self.silent_count <= self.ignore_silent_count
):
# Too soon to change
return self.speech_mark
else:
self.speech_count = 0
return self.silence_mark
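# A minimal usage sketch (illustrative only): feed fixed-size frames of a 1-D waveform
# tensor to the detector one at a time and collect the per-frame speech/silence decisions.
#
#   vad = VoiceActivityDetection()
#   decisions = [vad.iter(frame) for frame in waveform.split(2205, dim=-1)]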
class MicrophoneStream:
"""Opens a recording stream as a generator yielding the audio chunks."""
def __init__(self, device=None, rate=22050, chunk=2205):
"""
        The default rate of 22050 Hz is the librosa default, which is what our models were
        trained on. The ratio chunk / rate is the duration of each audio chunk - for
        example, with these defaults, an audio fragment is processed every tenth of
        a second.
"""
self._rate = rate
self._chunk = chunk
self._device = device
# Create a thread-safe buffer of audio data
self._buff = queue.Queue()
self.closed = True
def __enter__(self):
self._audio_interface = pyaudio.PyAudio()
self._audio_stream = self._audio_interface.open(
# format=pyaudio.paInt16,
format=pyaudio.paFloat32,
# The API currently only supports 1-channel (mono) audio
# https://goo.gl/z757pE
channels=1,
rate=self._rate,
input=True,
frames_per_buffer=self._chunk,
input_device_index=self._device,
# Run the audio stream asynchronously to fill the buffer object.
# This is necessary so that the input device's buffer doesn't
# overflow while the calling thread makes network requests, etc.
stream_callback=self._fill_buffer,
)
self.closed = False
return self
def __exit__(self, type, value, traceback):
self._audio_stream.stop_stream()
self._audio_stream.close()
self.closed = True
# Signal the generator to terminate so that the client's
# streaming_recognize method will not block the process termination.
self._buff.put(None)
self._audio_interface.terminate()
def _fill_buffer(self, in_data, frame_count, time_info, status_flags):
"""Continuously collect data from the audio stream, into the buffer."""
self._buff.put(in_data)
return None, pyaudio.paContinue
def generator(self):
while not self.closed:
# Use a blocking get() to ensure there's at least one chunk of
# data, and stop iteration if the chunk is None, indicating the
# end of the audio stream.
chunk = self._buff.get()
if chunk is None:
return
data = [chunk]
# Now consume whatever other data's still buffered.
while True:
try:
chunk = self._buff.get(block=False)
if chunk is None:
return
data.append(chunk)
except queue.Empty:
break
            ans = np.frombuffer(b"".join(data), dtype=np.float32)
# yield uniform-sized chunks
ans = np.split(ans, np.shape(ans)[0] / self._chunk)
# Resample the audio to 22050, librosa default
for chunk in ans:
yield librosa.core.resample(chunk, self._rate, 22050)
def get_microphone_chunks(
min_to_cumulate=5, # 0.5 seconds
max_to_cumulate=100, # 10 seconds
precumulate=5,
max_to_visualize=100,
):
vad = VoiceActivityDetection()
cumulated = []
precumulated = deque(maxlen=precumulate)
with MicrophoneStream() as stream:
audio_generator = stream.generator()
chunk_length = stream._chunk
waveform = torch.zeros(max_to_visualize * chunk_length)
for chunk in audio_generator:
# Is speech?
chunk = torch.tensor(chunk)
is_speech = vad.iter(chunk)
# Cumulate speech
if is_speech or cumulated:
cumulated.append(chunk)
else:
precumulated.append(chunk)
if (not is_speech and len(cumulated) >= min_to_cumulate) or (
len(cumulated) > max_to_cumulate
):
waveform = torch.cat(list(precumulated) + cumulated, -1)
yield (waveform * stream._rate, stream._rate)
cumulated = []
precumulated = deque(maxlen=precumulate)
|
#!/usr/bin/env python3
"""Launch souce separation training.
This script runs training in Distributed Data Parallel (DDP) framework and has two major
operation modes. This behavior depends on if `--worker-id` argument is given or not.
1. (`--worker-id` is not given) Launchs worker subprocesses that performs the actual training.
2. (`--worker-id` is given) Performs the training as a part of distributed training.
When launching the script without any distributed trainig parameters (operation mode 1),
this script will check the number of GPUs available on the local system and spawns the same
number of training subprocesses (as operaiton mode 2). You can reduce the number of GPUs with
`--num-workers`. If there is no GPU available, only one subprocess is launched.
When launching the script as a worker process of a distributed training, you need to configure
the coordination of the workers.
"""
import sys
import logging
import argparse
import subprocess
import torch
from utils import dist_utils
_LG = dist_utils.getLogger(__name__)
def _parse_args(args=None):
max_world_size = torch.cuda.device_count() or 1
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument("--debug", action="store_true", help="Enable debug log")
group = parser.add_argument_group("Distributed Training")
group.add_argument(
"--worker-id",
type=int,
help=(
"If not provided, the launched process serves as a master process of "
"single-node, multi-worker training and spawns the worker subprocesses. "
"If provided, the launched process serves as a worker process, which "
"performs the actual training. The valid value is [0, --num-workers)."
),
)
group.add_argument(
"--device-id",
type=int,
help="The CUDA device ID. Allowed only when --worker-id is provided.",
)
group.add_argument(
"--num-workers",
type=int,
default=max_world_size,
help=(
"The size of distributed trainig workers. "
"If launching a training as single-node, multi-worker training, "
"(i.e. --worker-id is not provided) then this value should not exceed "
"the number of available GPUs. "
"If launching the training process as a multi-node, multi-gpu training, "
"(i.e. --worker-id is provided) then the value has to match "
f"the number of workers across nodes. (default: {max_world_size})"
),
)
group.add_argument(
"--sync-protocol",
type=str,
default="env://",
help=(
"Synchronization protocol for distributed training. "
"This value is passed as `init_method` argument of "
"`torch.distributed.init_process_group` function."
'If you are using `"env://"`, you can additionally configure '
'environment variables "MASTER_ADDR" and "MASTER_PORT". '
'If you are using `"file://..."`, then the process has to have '
"the access to the designated file. "
"See the documentation for `torch.distributed` for the detail. "
'If you are running the training in a single node, `"env://"` '
"should do. If you are running the training in multiple nodes, "
"you need to provide the file location where all the nodes have "
'access, using `"file://..."` protocol. (default: "env://")'
),
)
group.add_argument(
"--random-seed",
type=int,
help="Set random seed value. (default: None)",
)
parser.add_argument(
"rest", nargs=argparse.REMAINDER, help="Model-specific arguments."
)
namespace = parser.parse_args(args)
if namespace.worker_id is None:
if namespace.device_id is not None:
raise ValueError(
"`--device-id` cannot be provided when runing as master process."
)
if namespace.num_workers > max_world_size:
raise ValueError(
"--num-workers ({num_workers}) cannot exceed {device_count}."
)
if namespace.rest[:1] == ["--"]:
namespace.rest = namespace.rest[1:]
return namespace
def _main(cli_args):
args = _parse_args(cli_args)
if any(arg in ["--help", "-h"] for arg in args.rest):
_run_training(args.rest)
_init_logger(args.worker_id, args.debug)
if args.worker_id is None:
_run_training_subprocesses(args.num_workers, cli_args)
else:
dist_utils.setup_distributed(
world_size=args.num_workers,
rank=args.worker_id,
local_rank=args.device_id,
backend='nccl' if torch.cuda.is_available() else 'gloo',
init_method=args.sync_protocol,
)
if args.random_seed is not None:
torch.manual_seed(args.random_seed)
if torch.cuda.is_available():
torch.cuda.set_device(args.device_id)
_LG.info("CUDA device set to %s", args.device_id)
_run_training(args.rest)
def _run_training_subprocesses(num_workers, original_args):
workers = []
_LG.info("Spawning %s workers", num_workers)
for i in range(num_workers):
worker_arg = ["--worker-id", f"{i}", "--num-workers", f"{num_workers}"]
device_arg = ["--device-id", f"{i}"] if torch.cuda.is_available() else []
command = (
[sys.executable, "-u", sys.argv[0]]
+ worker_arg
+ device_arg
+ original_args
)
_LG.info("Launching worker %s: `%s`", i, " ".join(command))
worker = subprocess.Popen(command)
workers.append(worker)
num_failed = 0
for worker in workers:
worker.wait()
if worker.returncode != 0:
num_failed += 1
sys.exit(num_failed)
def _run_training(args):
import conv_tasnet.train
conv_tasnet.train.train(args)
def _init_logger(rank=None, debug=False):
worker_fmt = "[master]" if rank is None else f"[worker {rank:2d}]"
message_fmt = (
"%(levelname)5s: %(funcName)10s: %(message)s" if debug else "%(message)s"
)
logging.basicConfig(
level=logging.DEBUG if debug else logging.INFO,
format=f"%(asctime)s: {worker_fmt} {message_fmt}",
)
if __name__ == "__main__":
_main(sys.argv[1:])
|
#!/usr/bin/env python3
# pyre-strict
from pathlib import Path
from argparse import ArgumentParser
from typing import (
Any,
Callable,
Dict,
Mapping,
List,
Optional,
Tuple,
TypedDict,
Union,
)
import torch
import torchaudio
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.plugins import DDPPlugin
from torch import nn
from torch.optim.lr_scheduler import _LRScheduler
from torch.utils.data import DataLoader
from utils import metrics
from utils.dataset import utils as dataset_utils
class Batch(TypedDict):
mix: torch.Tensor # (batch, time)
src: torch.Tensor # (batch, source, time)
mask: torch.Tensor # (batch, source, time)
def sisdri_metric(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: torch.Tensor
) -> torch.Tensor:
"""Compute the improvement of scale-invariant SDR. (SI-SDRi).
Args:
estimate (torch.Tensor): Estimated source signals.
Tensor of dimension (batch, speakers, time)
reference (torch.Tensor): Reference (original) source signals.
Tensor of dimension (batch, speakers, time)
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Tensor of dimension (batch, speakers == 1, time)
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Tensor of dimension (batch, 1, time)
Returns:
        float: Improved SI-SDR, averaged over the batch.
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
with torch.no_grad():
estimate = estimate - estimate.mean(axis=2, keepdim=True)
reference = reference - reference.mean(axis=2, keepdim=True)
mix = mix - mix.mean(axis=2, keepdim=True)
si_sdri = metrics.sdri(estimate, reference, mix, mask=mask)
return si_sdri.mean().item()
def sdri_metric(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: torch.Tensor,
) -> torch.Tensor:
"""Compute the improvement of SDR. (SDRi).
Args:
estimate (torch.Tensor): Estimated source signals.
Tensor of dimension (batch, speakers, time)
reference (torch.Tensor): Reference (original) source signals.
Tensor of dimension (batch, speakers, time)
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Tensor of dimension (batch, speakers == 1, time)
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Tensor of dimension (batch, 1, time)
Returns:
        float: Improved SDR, averaged over the batch.
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
with torch.no_grad():
sdri = metrics.sdri(estimate, reference, mix, mask=mask)
return sdri.mean().item()
def si_sdr_loss(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: torch.Tensor
) -> torch.Tensor:
"""Compute the Si-SDR loss.
Args:
estimate (torch.Tensor): Estimated source signals.
Tensor of dimension (batch, speakers, time)
reference (torch.Tensor): Reference (original) source signals.
Tensor of dimension (batch, speakers, time)
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Tensor of dimension (batch, 1, time)
Returns:
        torch.Tensor: Scalar Si-SDR loss (the negative SI-SDR, averaged over the batch).
"""
estimate = estimate - estimate.mean(axis=2, keepdim=True)
reference = reference - reference.mean(axis=2, keepdim=True)
si_sdri = metrics.sdr_pit(estimate, reference, mask=mask)
return -si_sdri.mean()
class ConvTasNetModule(LightningModule):
"""
The Lightning Module for speech separation.
Args:
        model (Any): The model to use for the source separation task.
train_loader (DataLoader): the training dataloader.
val_loader (DataLoader or None): the validation dataloader.
loss (Any): The loss function to use.
optim (Any): The optimizer to use.
        metrics (Dict[str, Callable]): The metrics to track, which will be used for both train and validation.
lr_scheduler (Any or None): The LR Scheduler.
"""
def __init__(
self,
model: Any,
train_loader: DataLoader,
val_loader: Optional[DataLoader],
loss: Any,
optim: Any,
metrics: List[Any],
lr_scheduler: Optional[Any] = None,
) -> None:
super().__init__()
self.model: nn.Module = model
self.loss: nn.Module = loss
self.optim: torch.optim.Optimizer = optim
self.lr_scheduler: Optional[_LRScheduler] = None
if lr_scheduler:
self.lr_scheduler = lr_scheduler
self.metrics: Mapping[str, Callable] = metrics
self.train_metrics: Dict = {}
self.val_metrics: Dict = {}
self.test_metrics: Dict = {}
self.save_hyperparameters()
self.train_loader = train_loader
self.val_loader = val_loader
def setup(self, stage: Optional[str] = None) -> None:
if stage == "fit":
self.train_metrics.update(self.metrics)
self.val_metrics.update(self.metrics)
else:
self.test_metrics.update(self.metrics)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""
Forward defines the prediction/inference actions.
"""
return self.model(x)
def training_step(
self, batch: Batch, batch_idx: int, *args: Any, **kwargs: Any
) -> Dict[str, Any]:
return self._step(batch, batch_idx, "train")
def validation_step(
self, batch: Batch, batch_idx: int, *args: Any, **kwargs: Any
) -> Dict[str, Any]:
"""
Operates on a single batch of data from the validation set.
"""
return self._step(batch, batch_idx, "val")
def test_step(
self, batch: Batch, batch_idx: int, *args: Any, **kwargs: Any
) -> Optional[Dict[str, Any]]:
"""
Operates on a single batch of data from the test set.
"""
return self._step(batch, batch_idx, "test")
def _step(self, batch: Batch, batch_idx: int, phase_type: str) -> Dict[str, Any]:
"""
Common step for training, validation, and testing.
"""
mix, src, mask = batch
pred = self.model(mix)
loss = self.loss(pred, src, mask)
self.log(f"Losses/{phase_type}_loss", loss.item(), on_step=True, on_epoch=True)
metrics_result = self._compute_metrics(pred, src, mix, mask, phase_type)
self.log_dict(metrics_result, on_epoch=True)
return loss
def configure_optimizers(
self,
) -> Tuple[Any]:
lr_scheduler = self.lr_scheduler
if not lr_scheduler:
return self.optim
epoch_schedulers = {
'scheduler': lr_scheduler,
'monitor': 'Losses/val_loss',
'interval': 'epoch'
}
return [self.optim], [epoch_schedulers]
def _compute_metrics(
self,
pred: torch.Tensor,
label: torch.Tensor,
inputs: torch.Tensor,
mask: torch.Tensor,
phase_type: str,
) -> Dict[str, torch.Tensor]:
metrics_dict = getattr(self, f"{phase_type}_metrics")
metrics_result = {}
for name, metric in metrics_dict.items():
metrics_result[f"Metrics/{phase_type}/{name}"] = metric(pred, label, inputs, mask)
return metrics_result
def train_dataloader(self):
"""Training dataloader"""
return self.train_loader
def val_dataloader(self):
"""Validation dataloader"""
return self.val_loader
def _get_model(
num_sources,
enc_kernel_size=16,
enc_num_feats=512,
msk_kernel_size=3,
msk_num_feats=128,
msk_num_hidden_feats=512,
msk_num_layers=8,
msk_num_stacks=3,
msk_activate="relu",
):
model = torchaudio.models.ConvTasNet(
num_sources=num_sources,
enc_kernel_size=enc_kernel_size,
enc_num_feats=enc_num_feats,
msk_kernel_size=msk_kernel_size,
msk_num_feats=msk_num_feats,
msk_num_hidden_feats=msk_num_hidden_feats,
msk_num_layers=msk_num_layers,
msk_num_stacks=msk_num_stacks,
msk_activate=msk_activate,
)
return model
def _get_dataloader(
dataset_type: str,
root_dir: Union[str, Path],
num_speakers: int = 2,
sample_rate: int = 8000,
batch_size: int = 6,
num_workers: int = 4,
librimix_task: Optional[str] = None,
librimix_tr_split: Optional[str] = None,
) -> Tuple[DataLoader]:
"""Get dataloaders for training, validation, and testing.
Args:
dataset_type (str): the dataset to use.
root_dir (str or Path): the root directory of the dataset.
num_speakers (int, optional): the number of speakers in the mixture. (Default: 2)
sample_rate (int, optional): the sample rate of the audio. (Default: 8000)
batch_size (int, optional): the batch size of the dataset. (Default: 6)
num_workers (int, optional): the number of workers for each dataloader. (Default: 4)
librimix_task (str or None, optional): the task in LibriMix dataset.
librimix_tr_split (str or None, optional): the training split in LibriMix dataset.
Returns:
tuple: (train_loader, valid_loader, eval_loader)
"""
train_dataset, valid_dataset, eval_dataset = dataset_utils.get_dataset(
dataset_type, root_dir, num_speakers, sample_rate, librimix_task, librimix_tr_split
)
train_collate_fn = dataset_utils.get_collate_fn(
dataset_type, mode='train', sample_rate=sample_rate, duration=3
)
test_collate_fn = dataset_utils.get_collate_fn(dataset_type, mode='test', sample_rate=sample_rate)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
collate_fn=train_collate_fn,
num_workers=num_workers,
drop_last=True,
)
valid_loader = DataLoader(
valid_dataset,
batch_size=batch_size,
collate_fn=test_collate_fn,
num_workers=num_workers,
drop_last=True,
)
eval_loader = DataLoader(
eval_dataset,
batch_size=batch_size,
collate_fn=test_collate_fn,
num_workers=num_workers,
)
return train_loader, valid_loader, eval_loader
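# A minimal usage sketch (hypothetical root directory): build the three dataloaders for a
# 2-speaker LibriMix setup and peek at the shapes of one training batch.
#
#   train_loader, valid_loader, eval_loader = _get_dataloader(
#       "librimix", Path("/data/LibriMix"), num_speakers=2, sample_rate=8000,
#       librimix_task="sep_clean", librimix_tr_split="train-360",
#   )
#   batch = next(iter(train_loader))
#   # batch.mix: [6, 1, 24000], batch.src: [6, 2, 24000], batch.mask: [6, 1, 24000]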
def cli_main():
parser = ArgumentParser()
parser.add_argument("--batch-size", default=6, type=int)
parser.add_argument("--dataset", default="librimix", type=str, choices=["wsj0-mix", "librimix"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored.",
)
parser.add_argument(
"--librimix-tr-split",
default="train-360",
choices=["train-360", "train-100"],
help="The training partition of librimix dataset. (default: ``train-360``)",
)
parser.add_argument(
"--librimix-task",
default="sep_clean",
type=str,
choices=["sep_clean", "sep_noisy", "enh_single", "enh_both"],
help="The task to perform (separation or enhancement, noisy or clean). (default: ``sep_clean``)",
)
parser.add_argument(
"--num-speakers", default=2, type=int, help="The number of speakers in the mixture. (default: 2)"
)
parser.add_argument(
"--sample-rate",
default=8000,
type=int,
help="Sample rate of audio files in the given dataset. (default: 8000)",
)
parser.add_argument(
"--exp-dir",
default=Path("./exp"),
type=Path,
help="The directory to save checkpoints and logs."
)
parser.add_argument(
"--epochs",
metavar="NUM_EPOCHS",
default=200,
type=int,
help="The number of epochs to train. (default: 200)",
)
parser.add_argument(
"--learning-rate",
default=1e-3,
type=float,
help="Initial learning rate. (default: 1e-3)",
)
parser.add_argument(
"--num-gpu",
default=1,
type=int,
help="The number of GPUs for training. (default: 1)",
)
parser.add_argument(
"--num-node",
default=1,
type=int,
help="The number of nodes in the cluster for training. (default: 1)",
)
parser.add_argument(
"--num-workers",
default=4,
type=int,
help="The number of workers for dataloader. (default: 4)",
)
args = parser.parse_args()
model = _get_model(num_sources=args.num_speakers)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="min", factor=0.5, patience=5
)
train_loader, valid_loader, eval_loader = _get_dataloader(
args.dataset,
args.root_dir,
args.num_speakers,
args.sample_rate,
args.batch_size,
args.num_workers,
args.librimix_task,
args.librimix_tr_split,
)
loss = si_sdr_loss
metric_dict = {
"sdri": sdri_metric,
"sisdri": sisdri_metric,
}
model = ConvTasNetModule(
model=model,
train_loader=train_loader,
val_loader=valid_loader,
loss=loss,
optim=optimizer,
metrics=metric_dict,
lr_scheduler=lr_scheduler,
)
checkpoint_dir = args.exp_dir / "checkpoints"
checkpoint = ModelCheckpoint(
checkpoint_dir,
monitor="Losses/val_loss",
mode="min",
save_top_k=5,
save_weights_only=True,
verbose=True
)
callbacks = [
checkpoint,
EarlyStopping(monitor="Losses/val_loss", mode="min", patience=30, verbose=True),
]
trainer = Trainer(
default_root_dir=args.exp_dir,
max_epochs=args.epochs,
gpus=args.num_gpu,
num_nodes=args.num_node,
accelerator="ddp",
        plugins=DDPPlugin(find_unused_parameters=False),  # make sure there are no unused params
        limit_train_batches=1.0,  # reduce this value for quick experiments
gradient_clip_val=5.0,
callbacks=callbacks,
)
trainer.fit(model)
model.load_from_checkpoint(checkpoint.best_model_path)
state_dict = torch.load(checkpoint.best_model_path, map_location="cpu")
state_dict = {k.replace("model.", ""): v for k, v in state_dict["state_dict"].items()}
torch.save(state_dict, args.exp_dir / "best_model.pth")
trainer.test(model, eval_loader)
if __name__ == "__main__":
cli_main()
|
from argparse import ArgumentParser
from pathlib import Path
from lightning_train import _get_model, _get_dataloader, sisdri_metric
import mir_eval
import torch
def _eval(model, data_loader, device):
results = torch.zeros(4)
with torch.no_grad():
for _, batch in enumerate(data_loader):
mix, src, mask = batch
mix, src, mask = mix.to(device), src.to(device), mask.to(device)
est = model(mix)
sisdri = sisdri_metric(est, src, mix, mask)
src = src.cpu().detach().numpy()
est = est.cpu().detach().numpy()
mix = mix.repeat(1, src.shape[1], 1).cpu().detach().numpy()
sdr, sir, sar, _ = mir_eval.separation.bss_eval_sources(src[0], est[0])
sdr_mix, sir_mix, sar_mix, _ = mir_eval.separation.bss_eval_sources(src[0], mix[0])
results += torch.tensor([
sdr.mean() - sdr_mix.mean(),
sisdri,
sir.mean() - sir_mix.mean(),
sar.mean() - sar_mix.mean()
])
results /= len(data_loader)
print("SDR improvement: ", results[0].item())
print("Si-SDR improvement: ", results[1].item())
print("SIR improvement: ", results[2].item())
print("SAR improvement: ", results[3].item())
def cli_main():
parser = ArgumentParser()
parser.add_argument("--dataset", default="librimix", type=str, choices=["wsj0-mix", "librimix"])
parser.add_argument(
"--root-dir",
type=Path,
help="The path to the directory where the directory ``Libri2Mix`` or ``Libri3Mix`` is stored.",
)
parser.add_argument(
"--librimix-tr-split",
default="train-360",
choices=["train-360", "train-100"],
help="The training partition of librimix dataset. (default: ``train-360``)",
)
parser.add_argument(
"--librimix-task",
default="sep_clean",
type=str,
choices=["sep_clean", "sep_noisy", "enh_single", "enh_both"],
help="The task to perform (separation or enhancement, noisy or clean). (default: ``sep_clean``)",
)
parser.add_argument(
"--num-speakers", default=2, type=int, help="The number of speakers in the mixture. (default: 2)"
)
parser.add_argument(
"--sample-rate",
default=8000,
type=int,
help="Sample rate of audio files in the given dataset. (default: 8000)",
)
parser.add_argument(
"--exp-dir",
default=Path("./exp"),
type=Path,
help="The directory to save checkpoints and logs."
)
parser.add_argument(
"--gpu-device",
default=-1,
type=int,
help="The gpu device for model inference. (default: -1)"
)
args = parser.parse_args()
    model = _get_model(num_sources=args.num_speakers)
state_dict = torch.load(args.exp_dir / 'best_model.pth')
model.load_state_dict(state_dict)
if args.gpu_device != -1:
device = torch.device('cuda:' + str(args.gpu_device))
else:
device = torch.device('cpu')
model = model.to(device)
_, _, eval_loader = _get_dataloader(
args.dataset,
        args.root_dir,
args.num_speakers,
args.sample_rate,
1, # batch size is set to 1 to avoid masking
0, # set num_workers to 0
args.librimix_task,
args.librimix_tr_split,
)
_eval(model, eval_loader, device)
if __name__ == "__main__":
cli_main()
|
import math
from typing import Optional
from itertools import permutations
import torch
def sdr(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Computes source-to-distortion ratio.
    1. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
2. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L34-L56
"""
reference_pow = reference.pow(2).mean(axis=2, keepdim=True)
mix_pow = (estimate * reference).mean(axis=2, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
if mask is None:
reference_pow = reference_pow.mean(axis=2)
error_pow = error_pow.mean(axis=2)
else:
denom = mask.sum(axis=2)
reference_pow = (mask * reference_pow).sum(axis=2) / denom
error_pow = (mask * error_pow).sum(axis=2) / denom
return 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
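# A minimal shape sketch (random values, purely illustrative):
#
#   estimate = torch.randn(4, 2, 8000)   # [batch, speakers, time frame]
#   reference = torch.randn(4, 2, 8000)
#   ratios = sdr(estimate, reference)    # -> Tensor of shape [4, 2], in dB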
class PIT(torch.nn.Module):
"""Applies utterance-level speaker permutation
    Computes the maximum possible value of the given utility function
over the permutations of the speakers.
Args:
utility_func (function):
Function that computes the utility (opposite of loss) with signature of
            (estimate: torch.Tensor, reference: torch.Tensor) -> torch.Tensor
where input Tensors are shape of [batch, speakers, frame] and
the output Tensor is shape of [batch, speakers].
References:
- Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of
Deep Recurrent Neural Networks
Morten Kolbæk, Dong Yu, Zheng-Hua Tan and Jesper Jensen
https://arxiv.org/abs/1703.06284
"""
def __init__(self, utility_func):
super().__init__()
self.utility_func = utility_func
def forward(
self,
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8
) -> torch.Tensor:
"""Compute utterance-level PIT Loss
Args:
estimate (torch.Tensor): Estimated source signals.
                Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Maximum criterion over the speaker permutation.
Shape: [batch, ]
"""
assert estimate.shape == reference.shape
batch_size, num_speakers = reference.shape[:2]
num_permute = math.factorial(num_speakers)
util_mat = torch.zeros(
batch_size, num_permute, dtype=estimate.dtype, device=estimate.device
)
for i, idx in enumerate(permutations(range(num_speakers))):
util = self.utility_func(estimate, reference[:, idx, :], mask=mask, epsilon=epsilon)
util_mat[:, i] = util.mean(dim=1) # take the average over speaker dimension
return util_mat.max(dim=1).values
_sdr_pit = PIT(utility_func=sdr)
def sdr_pit(
estimate: torch.Tensor,
reference: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8):
"""Computes scale-invariant source-to-distortion ratio.
1. adjust both estimate and reference to have 0-mean
    2. scale the reference signal with power(s_est * s_ref) / power(s_ref * s_ref)
3. compute SNR between adjusted estimate and reference.
Args:
        estimate (torch.Tensor): Estimated signal.
Shape: [batch, speakers (can be 1), time frame]
reference (torch.Tensor): Reference signal.
Shape: [batch, speakers, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: scale-invariant source-to-distortion ratio.
Shape: [batch, speaker]
References:
- Single-channel multi-speaker separation using deep clustering
Y. Isik, J. Le Roux, Z. Chen, S. Watanabe, and J. R. Hershey,
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
Notes:
This function is tested to produce the exact same result as the reference implementation,
*when the inputs have 0-mean*
https://github.com/naplab/Conv-TasNet/blob/e66d82a8f956a69749ec8a4ae382217faa097c5c/utility/sdr.py#L107-L153
"""
return _sdr_pit(estimate, reference, mask, epsilon)
def sdri(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: Optional[torch.Tensor] = None,
epsilon: float = 1e-8,
) -> torch.Tensor:
"""Compute the improvement of SDR (SDRi).
    This function computes how much the SDR improves when the estimate is changed from
    the original mixture signal to the actual estimated source signals. That is,
    ``SDR(estimate, reference) - SDR(mix, reference)``.
    For computing ``SDR(estimate, reference)``, PIT (permutation invariant training) is applied,
    so that the best matching between the reference signals and the estimated signals
    is picked.
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor or None, optional): Binary mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
epsilon (float, optional): constant value used to stabilize division.
Returns:
torch.Tensor: Improved SDR. Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
sdr_ = sdr_pit(estimate, reference, mask=mask, epsilon=epsilon) # [batch, ]
base_sdr = sdr(mix, reference, mask=mask, epsilon=epsilon) # [batch, speaker]
return sdr_ - base_sdr.mean(dim=1)
|
from . import (
dataset,
dist_utils,
metrics,
)
__all__ = ['dataset', 'dist_utils', 'metrics']
|
import os
import csv
import types
import logging
import torch
import torch.distributed as dist
def _info_on_master(self, *args, **kwargs):
if dist.get_rank() == 0:
self.info(*args, **kwargs)
def getLogger(name):
"""Get logging.Logger module with additional ``info_on_master`` method."""
logger = logging.getLogger(name)
logger.info_on_master = types.MethodType(_info_on_master, logger)
return logger
_LG = getLogger(__name__)
def setup_distributed(
world_size, rank, local_rank, backend="nccl", init_method="env://"
):
"""Perform env setup and initialization for distributed training"""
if init_method == "env://":
_set_env_vars(world_size, rank, local_rank)
if world_size > 1 and "OMP_NUM_THREADS" not in os.environ:
_LG.info("Setting OMP_NUM_THREADS == 1")
os.environ["OMP_NUM_THREADS"] = "1"
params = {
"backend": backend,
"init_method": init_method,
"world_size": world_size,
"rank": rank,
}
_LG.info("Initializing distributed process group with %s", params)
dist.init_process_group(**params)
_LG.info("Initialized distributed process group.")
def _set_env_vars(world_size, rank, local_rank):
for key, default in [("MASTER_ADDR", "127.0.0.1"), ("MASTER_PORT", "29500")]:
if key not in os.environ:
os.environ[key] = default
os.environ["WORLD_SIZE"] = str(world_size)
os.environ["RANK"] = str(rank)
os.environ["LOCAL_RANK"] = str(local_rank)
def save_on_master(path, obj):
if dist.get_rank() == 0:
_LG.info("Saving %s", path)
torch.save(obj, path)
def write_csv_on_master(path, *rows):
if dist.get_rank() == 0:
with open(path, "a", newline="") as fileobj:
writer = csv.writer(fileobj)
for row in rows:
writer.writerow(row)
def synchronize_params(path, device, *modules):
if dist.get_world_size() < 2:
return
rank = dist.get_rank()
if rank == 0:
_LG.info("[Parameter Sync]: Saving parameters to a temp file...")
torch.save({f"{i}": m.state_dict() for i, m in enumerate(modules)}, path)
dist.barrier()
if rank != 0:
_LG.info("[Parameter Sync]: Loading parameters...")
data = torch.load(path, map_location=device)
for i, m in enumerate(modules):
m.load_state_dict(data[f"{i}"])
dist.barrier()
if rank == 0:
_LG.info("[Parameter Sync]: Removing the temp file...")
os.remove(path)
_LG.info_on_master("[Parameter Sync]: Complete.")
|
from . import utils, wsj0mix
__all__ = ['utils', 'wsj0mix']
|
from typing import List
from functools import partial
from collections import namedtuple
from torchaudio.datasets import LibriMix
import torch
from . import wsj0mix
Batch = namedtuple("Batch", ["mix", "src", "mask"])
def get_dataset(dataset_type, root_dir, num_speakers, sample_rate, task=None, librimix_tr_split=None):
if dataset_type == "wsj0mix":
train = wsj0mix.WSJ0Mix(root_dir / "tr", num_speakers, sample_rate)
validation = wsj0mix.WSJ0Mix(root_dir / "cv", num_speakers, sample_rate)
evaluation = wsj0mix.WSJ0Mix(root_dir / "tt", num_speakers, sample_rate)
elif dataset_type == "librimix":
train = LibriMix(root_dir, librimix_tr_split, num_speakers, sample_rate, task)
validation = LibriMix(root_dir, "dev", num_speakers, sample_rate, task)
evaluation = LibriMix(root_dir, "test", num_speakers, sample_rate, task)
else:
raise ValueError(f"Unexpected dataset: {dataset_type}")
return train, validation, evaluation
def _fix_num_frames(sample: wsj0mix.SampleType, target_num_frames: int, sample_rate: int, random_start=False):
"""Ensure waveform has exact number of frames by slicing or padding"""
mix = sample[1] # [1, time]
src = torch.cat(sample[2], 0) # [num_sources, time]
num_channels, num_frames = src.shape
num_seconds = torch.div(num_frames, sample_rate, rounding_mode='floor')
target_seconds = torch.div(target_num_frames, sample_rate, rounding_mode='floor')
if num_frames >= target_num_frames:
if random_start and num_frames > target_num_frames:
start_frame = torch.randint(num_seconds - target_seconds + 1, [1]) * sample_rate
mix = mix[:, start_frame:]
src = src[:, start_frame:]
mix = mix[:, :target_num_frames]
src = src[:, :target_num_frames]
mask = torch.ones_like(mix)
else:
num_padding = target_num_frames - num_frames
pad = torch.zeros([1, num_padding], dtype=mix.dtype, device=mix.device)
mix = torch.cat([mix, pad], 1)
src = torch.cat([src, pad.expand(num_channels, -1)], 1)
mask = torch.ones_like(mix)
mask[..., num_frames:] = 0
return mix, src, mask
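# Illustrative behaviour: a sample longer than ``target_num_frames`` is cropped to the target
# length (with a random whole-second start offset when ``random_start=True``) and the mask is
# all ones; a shorter sample is zero-padded up to the target and the mask is zero over the
# padded region.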
def collate_fn_wsj0mix_train(samples: List[wsj0mix.SampleType], sample_rate, duration):
target_num_frames = int(duration * sample_rate)
mixes, srcs, masks = [], [], []
for sample in samples:
mix, src, mask = _fix_num_frames(sample, target_num_frames, sample_rate, random_start=True)
mixes.append(mix)
srcs.append(src)
masks.append(mask)
return Batch(torch.stack(mixes, 0), torch.stack(srcs, 0), torch.stack(masks, 0))
def collate_fn_wsj0mix_test(samples: List[wsj0mix.SampleType], sample_rate):
max_num_frames = max(s[1].shape[-1] for s in samples)
mixes, srcs, masks = [], [], []
for sample in samples:
mix, src, mask = _fix_num_frames(sample, max_num_frames, sample_rate, random_start=False)
mixes.append(mix)
srcs.append(src)
masks.append(mask)
return Batch(torch.stack(mixes, 0), torch.stack(srcs, 0), torch.stack(masks, 0))
def get_collate_fn(dataset_type, mode, sample_rate=None, duration=4):
assert mode in ["train", "test"]
if dataset_type in ["wsj0mix", "librimix"]:
if mode == 'train':
if sample_rate is None:
raise ValueError("sample_rate is not given.")
return partial(collate_fn_wsj0mix_train, sample_rate=sample_rate, duration=duration)
return partial(collate_fn_wsj0mix_test, sample_rate=sample_rate)
raise ValueError(f"Unexpected dataset: {dataset_type}")
|
from pathlib import Path
from typing import Union, Tuple, List
import torch
from torch.utils.data import Dataset
import torchaudio
SampleType = Tuple[int, torch.Tensor, List[torch.Tensor]]
class WSJ0Mix(Dataset):
"""Create a Dataset for wsj0-mix.
Args:
root (str or Path): Path to the directory where the dataset is found.
num_speakers (int): The number of speakers, which determines the directories
to traverse. The Dataset will traverse ``s1`` to ``sN`` directories to collect
N source audios.
sample_rate (int): Expected sample rate of audio files. If any of the audio has a
different sample rate, raises ``ValueError``.
audio_ext (str, optional): The extension of audio files to find. (default: ".wav")
"""
def __init__(
self,
root: Union[str, Path],
num_speakers: int,
sample_rate: int,
audio_ext: str = ".wav",
):
self.root = Path(root)
self.sample_rate = sample_rate
self.mix_dir = (self.root / "mix").resolve()
self.src_dirs = [(self.root / f"s{i+1}").resolve() for i in range(num_speakers)]
self.files = [p.name for p in self.mix_dir.glob(f"*{audio_ext}")]
self.files.sort()
def _load_audio(self, path) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(path)
if sample_rate != self.sample_rate:
raise ValueError(
f"The dataset contains audio file of sample rate {sample_rate}, "
f"but the requested sample rate is {self.sample_rate}."
)
return waveform
def _load_sample(self, filename) -> SampleType:
mixed = self._load_audio(str(self.mix_dir / filename))
srcs = []
for i, dir_ in enumerate(self.src_dirs):
src = self._load_audio(str(dir_ / filename))
if mixed.shape != src.shape:
raise ValueError(
f"Different waveform shapes. mixed: {mixed.shape}, src[{i}]: {src.shape}"
)
srcs.append(src)
return self.sample_rate, mixed, srcs
def __len__(self) -> int:
return len(self.files)
def __getitem__(self, key: int) -> SampleType:
"""Load the n-th sample from the dataset.
Args:
key (int): The index of the sample to be loaded
Returns:
tuple: ``(sample_rate, mix_waveform, list_of_source_waveforms)``
"""
return self._load_sample(self.files[key])
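# A minimal usage sketch (hypothetical dataset location):
#
#   dataset = WSJ0Mix("/data/wsj0-mix/2speakers/wav8k/min/tr", num_speakers=2, sample_rate=8000)
#   sample_rate, mixture, sources = dataset[0]  # mixture: [1, time]; sources: list of [1, time]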
|
from . import (
train,
trainer
)
__all__ = ['train', 'trainer']
|
#!/usr/bin/env python3
"""Train Conv-TasNet"""
import time
import pathlib
import argparse
import torch
import torchaudio
import torchaudio.models
import conv_tasnet
from utils import dist_utils
from utils.dataset import utils as dataset_utils
_LG = dist_utils.getLogger(__name__)
def _parse_args(args):
parser = argparse.ArgumentParser(description=__doc__,)
parser.add_argument(
"--debug",
action="store_true",
help="Enable debug behavior. Each epoch will end with just one batch.")
group = parser.add_argument_group("Model Options")
group.add_argument(
"--num-speakers", required=True, type=int, help="The number of speakers."
)
group = parser.add_argument_group("Dataset Options")
group.add_argument(
"--sample-rate",
required=True,
type=int,
help="Sample rate of audio files in the given dataset.",
)
group.add_argument(
"--dataset",
default="wsj0mix",
choices=["wsj0mix"],
help='Dataset type. (default: "wsj0mix")',
)
group.add_argument(
"--dataset-dir",
required=True,
type=pathlib.Path,
help=(
"Directory where dataset is found. "
'If the dataset type is "wsj9mix", then this is the directory where '
'"cv", "tt" and "tr" subdirectories are found.'
),
)
group = parser.add_argument_group("Save Options")
group.add_argument(
"--save-dir",
required=True,
type=pathlib.Path,
help=(
"Directory where the checkpoints and logs are saved. "
"Though, only the worker 0 saves checkpoint data, "
"all the worker processes must have access to the directory."
),
)
group = parser.add_argument_group("Dataloader Options")
group.add_argument(
"--batch-size",
type=int,
help="Batch size. (default: 16 // world_size)",
)
group = parser.add_argument_group("Training Options")
group.add_argument(
"--epochs",
metavar="NUM_EPOCHS",
default=100,
type=int,
help="The number of epochs to train. (default: 100)",
)
group.add_argument(
"--learning-rate",
default=1e-3,
type=float,
help="Initial learning rate. (default: 1e-3)",
)
group.add_argument(
"--grad-clip",
metavar="CLIP_VALUE",
default=5.0,
type=float,
help="Gradient clip value (l2 norm). (default: 5.0)",
)
group.add_argument(
"--resume",
metavar="CHECKPOINT_PATH",
help="Previous checkpoint file from which the training is resumed.",
)
args = parser.parse_args(args)
    # Delaying the default value initialization until parse_args is done because
# if `--help` is given, distributed training is not enabled.
if args.batch_size is None:
args.batch_size = 16 // torch.distributed.get_world_size()
return args
def _get_model(
num_sources,
enc_kernel_size=16,
enc_num_feats=512,
msk_kernel_size=3,
msk_num_feats=128,
msk_num_hidden_feats=512,
msk_num_layers=8,
msk_num_stacks=3,
):
model = torchaudio.models.ConvTasNet(
num_sources=num_sources,
enc_kernel_size=enc_kernel_size,
enc_num_feats=enc_num_feats,
msk_kernel_size=msk_kernel_size,
msk_num_feats=msk_num_feats,
msk_num_hidden_feats=msk_num_hidden_feats,
msk_num_layers=msk_num_layers,
msk_num_stacks=msk_num_stacks,
)
_LG.info_on_master("Model Configuration:")
_LG.info_on_master(" - N: %d", enc_num_feats)
_LG.info_on_master(" - L: %d", enc_kernel_size)
_LG.info_on_master(" - B: %d", msk_num_feats)
_LG.info_on_master(" - H: %d", msk_num_hidden_feats)
_LG.info_on_master(" - Sc: %d", msk_num_feats)
_LG.info_on_master(" - P: %d", msk_kernel_size)
_LG.info_on_master(" - X: %d", msk_num_layers)
_LG.info_on_master(" - R: %d", msk_num_stacks)
_LG.info_on_master(
" - Receptive Field: %s [samples]", model.mask_generator.receptive_field,
)
return model
def _get_dataloader(dataset_type, dataset_dir, num_speakers, sample_rate, batch_size, task=None):
train_dataset, valid_dataset, eval_dataset = dataset_utils.get_dataset(
dataset_type, dataset_dir, num_speakers, sample_rate, task
)
train_collate_fn = dataset_utils.get_collate_fn(
dataset_type, mode='train', sample_rate=sample_rate, duration=4
)
    test_collate_fn = dataset_utils.get_collate_fn(dataset_type, mode='test', sample_rate=sample_rate)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
sampler=torch.utils.data.distributed.DistributedSampler(train_dataset),
collate_fn=train_collate_fn,
pin_memory=True,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=batch_size,
sampler=torch.utils.data.distributed.DistributedSampler(valid_dataset),
collate_fn=test_collate_fn,
pin_memory=True,
)
eval_loader = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size,
sampler=torch.utils.data.distributed.DistributedSampler(eval_dataset),
collate_fn=test_collate_fn,
pin_memory=True,
)
return train_loader, valid_loader, eval_loader
def _write_header(log_path, args):
rows = [
[f"# torch: {torch.__version__}", ],
[f"# torchaudio: {torchaudio.__version__}", ]
]
rows.append(["# arguments"])
for key, item in vars(args).items():
rows.append([f"# {key}: {item}"])
dist_utils.write_csv_on_master(log_path, *rows)
def train(args):
args = _parse_args(args)
_LG.info("%s", args)
args.save_dir.mkdir(parents=True, exist_ok=True)
if "sox_io" in torchaudio.list_audio_backends():
torchaudio.set_audio_backend("sox_io")
start_epoch = 1
if args.resume:
checkpoint = torch.load(args.resume)
if args.sample_rate != checkpoint["sample_rate"]:
            raise ValueError(
                f"The provided sample rate ({args.sample_rate}) does not match "
                f"the sample rate from the checkpoint ({checkpoint['sample_rate']})."
            )
if args.num_speakers != checkpoint["num_speakers"]:
            raise ValueError(
                f"The provided number of speakers ({args.num_speakers}) does not match "
                f"the number of speakers from the checkpoint ({checkpoint['num_speakers']})."
            )
start_epoch = checkpoint["epoch"]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
_LG.info("Using: %s", device)
model = _get_model(num_sources=args.num_speakers)
model.to(device)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[device] if torch.cuda.is_available() else None
)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
if args.resume:
_LG.info("Loading parameters from the checkpoint...")
model.module.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
else:
dist_utils.synchronize_params(
str(args.save_dir / "tmp.pt"), device, model, optimizer
)
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer, mode="max", factor=0.5, patience=3
)
train_loader, valid_loader, eval_loader = _get_dataloader(
args.dataset,
args.dataset_dir,
args.num_speakers,
args.sample_rate,
args.batch_size,
)
num_train_samples = len(train_loader.dataset)
num_valid_samples = len(valid_loader.dataset)
num_eval_samples = len(eval_loader.dataset)
_LG.info_on_master("Datasets:")
_LG.info_on_master(" - Train: %s", num_train_samples)
_LG.info_on_master(" - Valid: %s", num_valid_samples)
_LG.info_on_master(" - Eval: %s", num_eval_samples)
trainer = conv_tasnet.trainer.Trainer(
model,
optimizer,
train_loader,
valid_loader,
eval_loader,
args.grad_clip,
device,
debug=args.debug,
)
log_path = args.save_dir / "log.csv"
_write_header(log_path, args)
dist_utils.write_csv_on_master(
log_path,
[
"epoch",
"learning_rate",
"valid_si_snri",
"valid_sdri",
"eval_si_snri",
"eval_sdri",
],
)
_LG.info_on_master("Running %s epochs", args.epochs)
for epoch in range(start_epoch, start_epoch + args.epochs):
_LG.info_on_master("=" * 70)
_LG.info_on_master("Epoch: %s", epoch)
_LG.info_on_master("Learning rate: %s", optimizer.param_groups[0]["lr"])
_LG.info_on_master("=" * 70)
t0 = time.monotonic()
trainer.train_one_epoch()
train_sps = num_train_samples / (time.monotonic() - t0)
_LG.info_on_master("-" * 70)
t0 = time.monotonic()
valid_metric = trainer.validate()
valid_sps = num_valid_samples / (time.monotonic() - t0)
_LG.info_on_master("Valid: %s", valid_metric)
_LG.info_on_master("-" * 70)
t0 = time.monotonic()
eval_metric = trainer.evaluate()
eval_sps = num_eval_samples / (time.monotonic() - t0)
_LG.info_on_master(" Eval: %s", eval_metric)
_LG.info_on_master("-" * 70)
_LG.info_on_master("Train: Speed: %6.2f [samples/sec]", train_sps)
_LG.info_on_master("Valid: Speed: %6.2f [samples/sec]", valid_sps)
_LG.info_on_master(" Eval: Speed: %6.2f [samples/sec]", eval_sps)
_LG.info_on_master("-" * 70)
dist_utils.write_csv_on_master(
log_path,
[
epoch,
optimizer.param_groups[0]["lr"],
valid_metric.si_snri,
valid_metric.sdri,
eval_metric.si_snri,
eval_metric.sdri,
],
)
lr_scheduler.step(valid_metric.si_snri)
save_path = args.save_dir / f"epoch_{epoch}.pt"
dist_utils.save_on_master(
save_path,
{
"model": model.module.state_dict(),
"optimizer": optimizer.state_dict(),
"num_speakers": args.num_speakers,
"sample_rate": args.sample_rate,
"epoch": epoch,
},
)
|
import time
from typing import Tuple
from collections import namedtuple
import torch
import torch.distributed as dist
from utils import dist_utils, metrics
_LG = dist_utils.getLogger(__name__)
Metric = namedtuple("SNR", ["si_snri", "sdri"])
Metric.__str__ = (
lambda self: f"SI-SNRi: {self.si_snri:10.3e}, SDRi: {self.sdri:10.3e}"
)
def si_sdr_improvement(
estimate: torch.Tensor,
reference: torch.Tensor,
mix: torch.Tensor,
mask: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Compute the improvement of scale-invariant SDR. (SI-SNRi) and bare SDR (SDRi).
Args:
estimate (torch.Tensor): Estimated source signals.
Shape: [batch, speakers, time frame]
reference (torch.Tensor): Reference (original) source signals.
Shape: [batch, speakers, time frame]
        mix (torch.Tensor): Mixed source signals, from which the estimated signals were generated.
Shape: [batch, speakers == 1, time frame]
mask (torch.Tensor): Mask to indicate padded value (0) or valid value (1).
Shape: [batch, 1, time frame]
Returns:
        torch.Tensor: Improvement of scale-invariant SDR (SI-SNRi). Shape: [batch, ]
        torch.Tensor: Improvement of bare SDR (SDRi). Shape: [batch, ]
References:
- Conv-TasNet: Surpassing Ideal Time--Frequency Magnitude Masking for Speech Separation
Luo, Yi and Mesgarani, Nima
https://arxiv.org/abs/1809.07454
"""
with torch.no_grad():
sdri = metrics.sdri(estimate, reference, mix, mask=mask)
estimate = estimate - estimate.mean(axis=2, keepdim=True)
reference = reference - reference.mean(axis=2, keepdim=True)
mix = mix - mix.mean(axis=2, keepdim=True)
si_sdri = metrics.sdri(estimate, reference, mix, mask=mask)
return si_sdri, sdri
class OccasionalLogger:
"""Simple helper class to log once in a while or when progress is quick enough"""
def __init__(self, time_interval=180, progress_interval=0.1):
self.time_interval = time_interval
self.progress_interval = progress_interval
self.last_time = 0.0
self.last_progress = 0.0
def log(self, metric, progress, force=False):
now = time.monotonic()
if (
force
or now > self.last_time + self.time_interval
or progress > self.last_progress + self.progress_interval
):
self.last_time = now
self.last_progress = progress
_LG.info_on_master("train: %s [%3d%%]", metric, 100 * progress)
class Trainer:
def __init__(
self,
model,
optimizer,
train_loader,
valid_loader,
eval_loader,
grad_clip,
device,
*,
debug,
):
self.model = model
self.optimizer = optimizer
self.train_loader = train_loader
self.valid_loader = valid_loader
self.eval_loader = eval_loader
self.grad_clip = grad_clip
self.device = device
self.debug = debug
def train_one_epoch(self):
self.model.train()
logger = OccasionalLogger()
num_batches = len(self.train_loader)
for i, batch in enumerate(self.train_loader, start=1):
mix = batch.mix.to(self.device)
src = batch.src.to(self.device)
mask = batch.mask.to(self.device)
estimate = self.model(mix)
si_snri, sdri = si_sdr_improvement(estimate, src, mix, mask)
si_snri = si_snri.mean()
sdri = sdri.mean()
loss = -si_snri
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(
self.model.parameters(), self.grad_clip, norm_type=2.0
)
self.optimizer.step()
metric = Metric(si_snri.item(), sdri.item())
logger.log(metric, progress=i / num_batches, force=i == num_batches)
if self.debug:
break
def evaluate(self):
with torch.no_grad():
return self._test(self.eval_loader)
def validate(self):
with torch.no_grad():
return self._test(self.valid_loader)
def _test(self, loader):
self.model.eval()
total_si_snri = torch.zeros(1, dtype=torch.float32, device=self.device)
total_sdri = torch.zeros(1, dtype=torch.float32, device=self.device)
for batch in loader:
mix = batch.mix.to(self.device)
src = batch.src.to(self.device)
mask = batch.mask.to(self.device)
estimate = self.model(mix)
si_snri, sdri = si_sdr_improvement(estimate, src, mix, mask)
total_si_snri += si_snri.sum()
total_sdri += sdri.sum()
if self.debug:
break
dist.all_reduce(total_si_snri, dist.ReduceOp.SUM)
dist.all_reduce(total_sdri, dist.ReduceOp.SUM)
num_samples = len(loader.dataset)
metric = Metric(total_si_snri.item() / num_samples, total_sdri.item() / num_samples)
return metric
|
import torch
class Normalize(torch.nn.Module):
def forward(self, tensor):
return (tensor - tensor.mean(-1, keepdim=True)) / tensor.std(-1, keepdim=True)
class UnsqueezeFirst(torch.nn.Module):
def forward(self, tensor):
return tensor.unsqueeze(0)
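# A minimal usage sketch (illustration only, not part of the training pipeline):
# both transforms are stateless ``torch.nn.Module``s, so they compose with
# ``torch.nn.Sequential``.
if __name__ == "__main__":
    waveform = torch.randn(1, 16000)  # [channel, time], e.g. one second at 16 kHz
    normalized = torch.nn.Sequential(Normalize())(waveform)
    print(normalized.mean(-1), normalized.std(-1))  # roughly 0 and 1 per channel
    # UnsqueezeFirst adds a leading dimension, e.g. [time] -> [1, time]
    print(UnsqueezeFirst()(torch.randn(16000)).shape)  # torch.Size([1, 16000])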
|
import torch
from torchaudio.datasets import LIBRISPEECH
class MapMemoryCache(torch.utils.data.Dataset):
"""
Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms, encode):
self.dataset = dataset
self.transforms = transforms
self.encode = encode
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
transformed = item[0]
target = item[2].lower()
transformed = self.transforms(transformed)
transformed = transformed[0, ...].transpose(0, -1)
target = self.encode(target)
target = torch.tensor(target, dtype=torch.long, device=transformed.device)
return transformed, target
def split_process_librispeech(
datasets, transforms, language_model, root, folder_in_archive,
):
def create(tags, cache=True):
if isinstance(tags, str):
tags = [tags]
if isinstance(transforms, list):
transform_list = transforms
else:
transform_list = [transforms]
data = torch.utils.data.ConcatDataset(
[
Processed(
LIBRISPEECH(
root, tag, folder_in_archive=folder_in_archive, download=False,
),
transform,
language_model.encode,
)
for tag, transform in zip(tags, transform_list)
]
)
data = MapMemoryCache(data)
return data
# For performance, we cache all datasets
return tuple(create(dataset) for dataset in datasets)
def collate_factory(model_length_function, transforms=None):
if transforms is None:
transforms = torch.nn.Sequential()
def collate_fn(batch):
tensors = [transforms(b[0]) for b in batch if b]
tensors_lengths = torch.tensor(
[model_length_function(t) for t in tensors],
dtype=torch.long,
device=tensors[0].device,
)
tensors = torch.nn.utils.rnn.pad_sequence(tensors, batch_first=True)
tensors = tensors.transpose(1, -1)
targets = [b[1] for b in batch if b]
target_lengths = torch.tensor(
[target.shape[0] for target in targets],
dtype=torch.long,
device=tensors.device,
)
targets = torch.nn.utils.rnn.pad_sequence(targets, batch_first=True)
return tensors, targets, tensors_lengths, target_lengths
return collate_fn
|
import json
import logging
import os
import shutil
from collections import defaultdict
import torch
class MetricLogger(defaultdict):
def __init__(self, name, print_freq=1, disable=False):
super().__init__(lambda: 0.0)
self.disable = disable
self.print_freq = print_freq
self._iter = 0
self["name"] = name
def __str__(self):
return json.dumps(self)
def __call__(self):
self._iter = (self._iter + 1) % self.print_freq
if not self.disable and not self._iter:
print(self, flush=True)
def save_checkpoint(state, is_best, filename, disable):
"""
    Save the model to a temporary file first,
    then rename it to ``filename``, so that an interrupted
    torch.save() does not leave a corrupted checkpoint.
"""
if disable:
return
if filename == "":
return
tempfile = filename + ".temp"
    # Remove tempfile in case of interruption during the copy from tempfile to filename
if os.path.isfile(tempfile):
os.remove(tempfile)
torch.save(state, tempfile)
if os.path.isfile(tempfile):
os.rename(tempfile, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
logging.warning("Checkpoint: saved")
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
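# A minimal usage sketch for MetricLogger (illustration only): it behaves like a
# defaultdict whose missing values start at 0.0, and calling the instance prints
# the accumulated metrics as one JSON line per iteration.
if __name__ == "__main__":
    metric = MetricLogger("demo")
    for step in range(3):
        metric["iteration"] += 1
        metric["loss"] = 0.1 * step
        metric()  # prints e.g. {"name": "demo", "iteration": 1.0, "loss": 0.0}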
|
from torch import topk
class GreedyDecoder:
def __call__(self, outputs):
"""Greedy Decoder. Returns highest probability of class labels for each timestep
Args:
outputs (torch.Tensor): shape (input length, batch size, number of classes (including blank))
Returns:
torch.Tensor: class labels per time step.
"""
_, indices = topk(outputs, k=1, dim=-1)
return indices[..., 0]
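# A minimal usage sketch (illustration only): feed per-frame class scores and get
# the index of the most likely class for every time step and batch element.
if __name__ == "__main__":
    import torch
    outputs = torch.randn(100, 4, 29)  # (input length, batch size, number of classes)
    decoder = GreedyDecoder()
    indices = decoder(outputs)
    print(indices.shape)  # torch.Size([100, 4])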
|
import collections
import itertools
class LanguageModel:
def __init__(self, labels, char_blank, char_space):
self.char_space = char_space
self.char_blank = char_blank
labels = list(labels)
self.length = len(labels)
enumerated = list(enumerate(labels))
flipped = [(sub[1], sub[0]) for sub in enumerated]
d1 = collections.OrderedDict(enumerated)
d2 = collections.OrderedDict(flipped)
self.mapping = {**d1, **d2}
def encode(self, iterable):
if isinstance(iterable, list):
return [self.encode(i) for i in iterable]
else:
return [self.mapping[i] + self.mapping[self.char_blank] for i in iterable]
def decode(self, tensor):
if len(tensor) > 0 and isinstance(tensor[0], list):
return [self.decode(t) for t in tensor]
else:
            # not idempotent: the string is cleaned up (repeats collapsed, blanks removed)
x = (self.mapping[i] for i in tensor)
x = "".join(i for i, _ in itertools.groupby(x))
x = x.replace(self.char_blank, "")
# x = x.strip()
return x
def __len__(self):
return self.length
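# A minimal usage sketch (illustration only), using the same label set as the
# training script: blank, space, apostrophe, then the lowercase letters. Note
# that decode() collapses consecutive repeats (CTC-style), so it is not an
# exact inverse of encode().
if __name__ == "__main__":
    import string
    labels = "* '" + string.ascii_lowercase
    lm = LanguageModel(labels, char_blank="*", char_space=" ")
    ids = lm.encode("hello world")
    print(ids)
    print(lm.decode(ids))  # prints "helo world": the repeated "l" is collapsed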
|
import argparse
import logging
import os
import string
from datetime import datetime
from time import time
import torch
import torchaudio
from torch.optim import SGD, Adadelta, Adam, AdamW
from torch.optim.lr_scheduler import ExponentialLR, ReduceLROnPlateau
from torch.utils.data import DataLoader
from torchaudio.datasets.utils import bg_iterator
from torchaudio.functional import edit_distance
from torchaudio.models.wav2letter import Wav2Letter
from ctc_decoders import GreedyDecoder
from datasets import collate_factory, split_process_librispeech
from languagemodels import LanguageModel
from transforms import Normalize, UnsqueezeFirst
from utils import MetricLogger, count_parameters, save_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--type",
metavar="T",
default="mfcc",
choices=["waveform", "mfcc"],
help="input type for model",
)
parser.add_argument(
"--freq-mask",
default=0,
type=int,
metavar="N",
help="maximal width of frequency mask",
)
parser.add_argument(
"--win-length",
default=400,
type=int,
metavar="N",
help="width of spectrogram window",
)
parser.add_argument(
"--hop-length",
default=160,
type=int,
metavar="N",
help="width of spectrogram window",
)
parser.add_argument(
"--time-mask",
default=0,
type=int,
metavar="N",
help="maximal width of time mask",
)
parser.add_argument(
"--workers",
default=0,
type=int,
metavar="N",
help="number of data loading workers",
)
parser.add_argument(
"--checkpoint",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint",
)
parser.add_argument(
"--epochs",
default=200,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch", default=0, type=int, metavar="N", help="manual epoch number"
)
parser.add_argument(
"--reduce-lr-valid",
action="store_true",
help="reduce learning rate based on validation loss",
)
parser.add_argument(
"--normalize", action="store_true", help="normalize model input"
)
parser.add_argument(
"--progress-bar", action="store_true", help="use progress bar while training"
)
parser.add_argument(
"--decoder",
metavar="D",
default="greedy",
choices=["greedy"],
help="decoder to use",
)
parser.add_argument(
"--batch-size", default=128, type=int, metavar="N", help="mini-batch size"
)
parser.add_argument(
"--n-bins",
default=13,
type=int,
metavar="N",
help="number of bins in transforms",
)
parser.add_argument(
"--optimizer",
metavar="OPT",
default="adadelta",
choices=["sgd", "adadelta", "adam", "adamw"],
help="optimizer to use",
)
parser.add_argument(
"--scheduler",
metavar="S",
default="reduceonplateau",
choices=["exponential", "reduceonplateau"],
help="optimizer to use",
)
parser.add_argument(
"--learning-rate",
default=0.6,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"--gamma",
default=0.99,
type=float,
metavar="GAMMA",
help="learning rate exponential decay constant",
)
parser.add_argument(
"--momentum", default=0.8, type=float, metavar="M", help="momentum"
)
parser.add_argument(
"--weight-decay", default=1e-5, type=float, metavar="W", help="weight decay"
)
parser.add_argument("--eps", metavar="EPS", type=float, default=1e-8)
parser.add_argument("--rho", metavar="RHO", type=float, default=0.95)
parser.add_argument("--clip-grad", metavar="NORM", type=float, default=0.0)
parser.add_argument(
"--dataset-root",
type=str,
help="specify dataset root folder",
)
parser.add_argument(
"--dataset-folder-in-archive",
type=str,
help="specify dataset folder in archive",
)
parser.add_argument(
"--dataset-train",
default=["train-clean-100"],
nargs="+",
type=str,
help="select which part of librispeech to train with",
)
parser.add_argument(
"--dataset-valid",
default=["dev-clean"],
nargs="+",
type=str,
help="select which part of librispeech to validate with",
)
parser.add_argument(
"--distributed", action="store_true", help="enable DistributedDataParallel"
)
parser.add_argument("--seed", type=int, default=0, help="random seed")
parser.add_argument(
"--world-size", type=int, default=8, help="the world size to initiate DPP"
)
parser.add_argument("--jit", action="store_true", help="if used, model is jitted")
args = parser.parse_args()
logging.info(args)
return args
def setup_distributed(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
torch.distributed.init_process_group("nccl", rank=rank, world_size=world_size)
def model_length_function(tensor):
if tensor.shape[1] == 1:
# waveform mode
return int(tensor.shape[0]) // 160 // 2 + 1
return int(tensor.shape[0]) // 2 + 1
def compute_error_rates(outputs, targets, decoder, language_model, metric):
output = outputs.transpose(0, 1).to("cpu")
output = decoder(output)
# Compute CER
output = language_model.decode(output.tolist())
target = language_model.decode(targets.tolist())
print_length = 20
for i in range(2):
# Print a few examples
output_print = output[i].ljust(print_length)[:print_length]
target_print = target[i].ljust(print_length)[:print_length]
logging.info("Target: %s Output: %s", target_print, output_print)
cers = [edit_distance(t, o) for t, o in zip(target, output)]
cers = sum(cers)
n = sum(len(t) for t in target)
metric["batch char error"] = cers
metric["batch char total"] = n
metric["batch char error rate"] = cers / n
metric["epoch char error"] += cers
metric["epoch char total"] += n
metric["epoch char error rate"] = metric["epoch char error"] / metric["epoch char total"]
# Compute WER
output = [o.split(language_model.char_space) for o in output]
target = [t.split(language_model.char_space) for t in target]
wers = [edit_distance(t, o) for t, o in zip(target, output)]
wers = sum(wers)
n = sum(len(t) for t in target)
metric["batch word error"] = wers
metric["batch word total"] = n
metric["batch word error rate"] = wers / n
metric["epoch word error"] += wers
metric["epoch word total"] += n
metric["epoch word error rate"] = metric["epoch word error"] / metric["epoch word total"]
def train_one_epoch(
model,
criterion,
optimizer,
scheduler,
data_loader,
decoder,
language_model,
device,
epoch,
clip_grad,
disable_logger=False,
reduce_lr_on_plateau=False,
):
model.train()
metric = MetricLogger("train", disable=disable_logger)
metric["epoch"] = epoch
for inputs, targets, tensors_lengths, target_lengths in bg_iterator(
data_loader, maxsize=2
):
start = time()
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# keep batch first for data parallel
outputs = model(inputs).transpose(-1, -2).transpose(0, 1)
# CTC
# outputs: input length, batch size, number of classes (including blank)
# targets: batch size, max target length
# input_lengths: batch size
# target_lengths: batch size
loss = criterion(outputs, targets, tensors_lengths, target_lengths)
optimizer.zero_grad()
loss.backward()
if clip_grad > 0:
metric["gradient"] = torch.nn.utils.clip_grad_norm_(
model.parameters(), clip_grad
)
optimizer.step()
compute_error_rates(outputs, targets, decoder, language_model, metric)
try:
metric["lr"] = scheduler.get_last_lr()[0]
except AttributeError:
metric["lr"] = optimizer.param_groups[0]["lr"]
metric["batch size"] = len(inputs)
metric["n_channel"] = inputs.shape[1]
metric["n_time"] = inputs.shape[-1]
metric["dataset length"] += metric["batch size"]
metric["iteration"] += 1
metric["loss"] = loss.item()
metric["cumulative loss"] += metric["loss"]
metric["average loss"] = metric["cumulative loss"] / metric["iteration"]
metric["iteration time"] = time() - start
metric["epoch time"] += metric["iteration time"]
metric()
if reduce_lr_on_plateau and isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(metric["average loss"])
elif not isinstance(scheduler, ReduceLROnPlateau):
scheduler.step()
def evaluate(
model,
criterion,
data_loader,
decoder,
language_model,
device,
epoch,
disable_logger=False,
):
with torch.no_grad():
model.eval()
start = time()
metric = MetricLogger("validation", disable=disable_logger)
metric["epoch"] = epoch
for inputs, targets, tensors_lengths, target_lengths in bg_iterator(
data_loader, maxsize=2
):
inputs = inputs.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
# keep batch first for data parallel
outputs = model(inputs).transpose(-1, -2).transpose(0, 1)
# CTC
# outputs: input length, batch size, number of classes (including blank)
# targets: batch size, max target length
# input_lengths: batch size
# target_lengths: batch size
metric["cumulative loss"] += criterion(
outputs, targets, tensors_lengths, target_lengths
).item()
metric["dataset length"] += len(inputs)
metric["iteration"] += 1
compute_error_rates(outputs, targets, decoder, language_model, metric)
metric["average loss"] = metric["cumulative loss"] / metric["iteration"]
metric["validation time"] = time() - start
metric()
return metric["average loss"]
def main(rank, args):
# Distributed setup
if args.distributed:
setup_distributed(rank, args.world_size)
not_main_rank = args.distributed and rank != 0
logging.info("Start time: %s", datetime.now())
# Explicitly set seed to make sure models created in separate processes
# start from same random weights and biases
torch.manual_seed(args.seed)
# Empty CUDA cache
torch.cuda.empty_cache()
# Change backend for flac files
torchaudio.set_audio_backend("soundfile")
# Transforms
melkwargs = {
"n_fft": args.win_length,
"n_mels": args.n_bins,
"hop_length": args.hop_length,
}
sample_rate_original = 16000
if args.type == "mfcc":
transforms = torch.nn.Sequential(
torchaudio.transforms.MFCC(
sample_rate=sample_rate_original,
n_mfcc=args.n_bins,
melkwargs=melkwargs,
),
)
num_features = args.n_bins
elif args.type == "waveform":
transforms = torch.nn.Sequential(UnsqueezeFirst())
num_features = 1
else:
raise ValueError("Model type not supported")
if args.normalize:
transforms = torch.nn.Sequential(transforms, Normalize())
augmentations = torch.nn.Sequential()
if args.freq_mask:
augmentations = torch.nn.Sequential(
augmentations,
torchaudio.transforms.FrequencyMasking(freq_mask_param=args.freq_mask),
)
if args.time_mask:
augmentations = torch.nn.Sequential(
augmentations,
torchaudio.transforms.TimeMasking(time_mask_param=args.time_mask),
)
# Text preprocessing
char_blank = "*"
char_space = " "
char_apostrophe = "'"
labels = char_blank + char_space + char_apostrophe + string.ascii_lowercase
language_model = LanguageModel(labels, char_blank, char_space)
# Dataset
training, validation = split_process_librispeech(
[args.dataset_train, args.dataset_valid],
[transforms, transforms],
language_model,
root=args.dataset_root,
folder_in_archive=args.dataset_folder_in_archive,
)
# Decoder
if args.decoder == "greedy":
decoder = GreedyDecoder()
else:
raise ValueError("Selected decoder not supported")
# Model
model = Wav2Letter(
num_classes=language_model.length,
input_type=args.type,
num_features=num_features,
)
if args.jit:
model = torch.jit.script(model)
if args.distributed:
n = torch.cuda.device_count() // args.world_size
devices = list(range(rank * n, (rank + 1) * n))
model = model.to(devices[0])
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=devices)
else:
devices = ["cuda" if torch.cuda.is_available() else "cpu"]
model = model.to(devices[0], non_blocking=True)
model = torch.nn.DataParallel(model)
n = count_parameters(model)
logging.info("Number of parameters: %s", n)
# Optimizer
if args.optimizer == "adadelta":
optimizer = Adadelta(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
eps=args.eps,
rho=args.rho,
)
elif args.optimizer == "sgd":
optimizer = SGD(
model.parameters(),
lr=args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
elif args.optimizer == "adam":
optimizer = Adam(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
)
elif args.optimizer == "adamw":
optimizer = AdamW(
model.parameters(),
lr=args.learning_rate,
weight_decay=args.weight_decay,
)
else:
raise ValueError("Selected optimizer not supported")
if args.scheduler == "exponential":
scheduler = ExponentialLR(optimizer, gamma=args.gamma)
elif args.scheduler == "reduceonplateau":
scheduler = ReduceLROnPlateau(optimizer, patience=10, threshold=1e-3)
else:
raise ValueError("Selected scheduler not supported")
criterion = torch.nn.CTCLoss(
blank=language_model.mapping[char_blank], zero_infinity=False
)
# Data Loader
collate_fn_train = collate_factory(model_length_function, augmentations)
collate_fn_valid = collate_factory(model_length_function)
loader_training_params = {
"num_workers": args.workers,
"pin_memory": True,
"shuffle": True,
"drop_last": True,
}
loader_validation_params = loader_training_params.copy()
loader_validation_params["shuffle"] = False
loader_training = DataLoader(
training,
batch_size=args.batch_size,
collate_fn=collate_fn_train,
**loader_training_params,
)
loader_validation = DataLoader(
validation,
batch_size=args.batch_size,
collate_fn=collate_fn_valid,
**loader_validation_params,
)
# Setup checkpoint
best_loss = 1.0
load_checkpoint = args.checkpoint and os.path.isfile(args.checkpoint)
if args.distributed:
torch.distributed.barrier()
if load_checkpoint:
logging.info("Checkpoint: loading %s", args.checkpoint)
checkpoint = torch.load(args.checkpoint)
args.start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
logging.info(
"Checkpoint: loaded '%s' at epoch %s", args.checkpoint, checkpoint["epoch"]
)
else:
logging.info("Checkpoint: not found")
save_checkpoint(
{
"epoch": args.start_epoch,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
False,
args.checkpoint,
not_main_rank,
)
if args.distributed:
torch.distributed.barrier()
torch.autograd.set_detect_anomaly(False)
for epoch in range(args.start_epoch, args.epochs):
logging.info("Epoch: %s", epoch)
train_one_epoch(
model,
criterion,
optimizer,
scheduler,
loader_training,
decoder,
language_model,
devices[0],
epoch,
args.clip_grad,
not_main_rank,
not args.reduce_lr_valid,
)
loss = evaluate(
model,
criterion,
loader_validation,
decoder,
language_model,
devices[0],
epoch,
not_main_rank,
)
if args.reduce_lr_valid and isinstance(scheduler, ReduceLROnPlateau):
scheduler.step(loss)
is_best = loss < best_loss
best_loss = min(loss, best_loss)
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_best,
args.checkpoint,
not_main_rank,
)
logging.info("End time: %s", datetime.now())
if args.distributed:
torch.distributed.destroy_process_group()
def spawn_main(main, args):
if args.distributed:
torch.multiprocessing.spawn(
main, args=(args,), nprocs=args.world_size, join=True
)
else:
main(0, args)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parse_args()
spawn_main(main, args)
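# An example invocation (a sketch; the script name and dataset path are placeholders):
#
#   python main.py --dataset-root /path/to/datasets \
#       --dataset-folder-in-archive LibriSpeech \
#       --type mfcc --optimizer adadelta --scheduler reduceonplateau \
#       --learning-rate 0.6 --batch-size 128 --reduce-lr-valid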
|
# -*- coding: utf-8 -*-
"""
Audio Resampling
================
Here, we will walk through resampling audio waveforms using ``torchaudio``.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import math
import time
import librosa
import matplotlib.pyplot as plt
from IPython.display import Audio, display
import pandas as pd
DEFAULT_OFFSET = 201
SWEEP_MAX_SAMPLE_RATE = 48000
DEFAULT_LOWPASS_FILTER_WIDTH = 6
DEFAULT_ROLLOFF = 0.99
DEFAULT_RESAMPLING_METHOD = "sinc_interpolation"
def _get_log_freq(sample_rate, max_sweep_rate, offset):
"""Get freqs evenly spaced out in log-scale, between [0, max_sweep_rate // 2]
    The offset is used to avoid negative infinity in `log(offset + x)`.
"""
start, stop = math.log(offset), math.log(offset + max_sweep_rate // 2)
return (
torch.exp(torch.linspace(start, stop, sample_rate, dtype=torch.double)) - offset
)
def _get_inverse_log_freq(freq, sample_rate, offset):
"""Find the time where the given frequency is given by _get_log_freq"""
half = sample_rate // 2
return sample_rate * (math.log(1 + freq / offset) / math.log(1 + half / offset))
def _get_freq_ticks(sample_rate, offset, f_max):
# Given the original sample rate used for generating the sweep,
# find the x-axis value where the log-scale major frequency values fall in
time, freq = [], []
for exp in range(2, 5):
for v in range(1, 10):
f = v * 10 ** exp
if f < sample_rate // 2:
t = _get_inverse_log_freq(f, sample_rate, offset) / sample_rate
time.append(t)
freq.append(f)
t_max = _get_inverse_log_freq(f_max, sample_rate, offset) / sample_rate
time.append(t_max)
freq.append(f_max)
return time, freq
def get_sine_sweep(sample_rate, offset=DEFAULT_OFFSET):
max_sweep_rate = sample_rate
freq = _get_log_freq(sample_rate, max_sweep_rate, offset)
delta = 2 * math.pi * freq / sample_rate
cummulative = torch.cumsum(delta, dim=0)
signal = torch.sin(cummulative).unsqueeze(dim=0)
return signal
def plot_sweep(
waveform,
sample_rate,
title,
max_sweep_rate=SWEEP_MAX_SAMPLE_RATE,
offset=DEFAULT_OFFSET,
):
x_ticks = [100, 500, 1000, 5000, 10000, 20000, max_sweep_rate // 2]
y_ticks = [1000, 5000, 10000, 20000, sample_rate // 2]
time, freq = _get_freq_ticks(max_sweep_rate, offset, sample_rate // 2)
freq_x = [f if f in x_ticks and f <= max_sweep_rate // 2 else None for f in freq]
freq_y = [f for f in freq if f >= 1000 and f in y_ticks and f <= sample_rate // 2]
figure, axis = plt.subplots(1, 1)
axis.specgram(waveform[0].numpy(), Fs=sample_rate)
plt.xticks(time, freq_x)
plt.yticks(freq_y, freq_y)
axis.set_xlabel("Original Signal Frequency (Hz, log scale)")
axis.set_ylabel("Waveform Frequency (Hz)")
axis.xaxis.grid(True, alpha=0.67)
axis.yaxis.grid(True, alpha=0.67)
figure.suptitle(f"{title} (sample rate: {sample_rate} Hz)")
plt.show(block=True)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def benchmark_resample(
method,
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=DEFAULT_LOWPASS_FILTER_WIDTH,
rolloff=DEFAULT_ROLLOFF,
resampling_method=DEFAULT_RESAMPLING_METHOD,
beta=None,
librosa_type=None,
iters=5,
):
if method == "functional":
begin = time.time()
for _ in range(iters):
F.resample(
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=lowpass_filter_width,
rolloff=rolloff,
resampling_method=resampling_method,
)
elapsed = time.time() - begin
return elapsed / iters
elif method == "transforms":
resampler = T.Resample(
sample_rate,
resample_rate,
lowpass_filter_width=lowpass_filter_width,
rolloff=rolloff,
resampling_method=resampling_method,
dtype=waveform.dtype,
)
begin = time.time()
for _ in range(iters):
resampler(waveform)
elapsed = time.time() - begin
return elapsed / iters
elif method == "librosa":
waveform_np = waveform.squeeze().numpy()
begin = time.time()
for _ in range(iters):
librosa.resample(
waveform_np, sample_rate, resample_rate, res_type=librosa_type
)
elapsed = time.time() - begin
return elapsed / iters
######################################################################
# To resample an audio waveform from one frequency to another, you can use
# ``transforms.Resample`` or ``functional.resample``.
# ``transforms.Resample`` precomputes and caches the kernel used for
# resampling, while ``functional.resample`` computes it on the fly, so
# using ``transforms.Resample`` will result in a speedup when resampling
# multiple waveforms using the same parameters (see Benchmarking section).
#
# Both resampling methods use `bandlimited sinc
# interpolation <https://ccrma.stanford.edu/~jos/resample/>`__ to compute
# signal values at arbitrary time steps. The implementation involves
# convolution, so we can take advantage of GPU / multithreading for
# performance improvements. When using resampling in multiple
# subprocesses, such as data loading with multiple worker processes, your
# application might create more threads than your system can handle
# efficiently. Setting ``torch.set_num_threads(1)`` might help in this
# case.
#
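# As a minimal sketch (assuming a hypothetical ``MyDataset`` that resamples in
# ``__getitem__``), the thread count can be limited per worker like this:
#
# ::
#
#    def worker_init_fn(worker_id):
#        torch.set_num_threads(1)
#
#    loader = torch.utils.data.DataLoader(
#        MyDataset(), num_workers=4, worker_init_fn=worker_init_fn)
#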
# Because a finite number of samples can only represent a finite number of
# frequencies, resampling does not produce perfect results, and a variety
# of parameters can be used to control for its quality and computational
# speed. We demonstrate these properties through resampling a logarithmic
# sine sweep, which is a sine wave that increases exponentially in
# frequency over time.
#
# The spectrograms below show the frequency representation of the signal,
# where the x-axis corresponds to the frequency of the original
# waveform (in log scale), y-axis the frequency of the
# plotted waveform, and color intensity the amplitude.
#
sample_rate = 48000
resample_rate = 32000
waveform = get_sine_sweep(sample_rate)
plot_sweep(waveform, sample_rate, title="Original Waveform")
play_audio(waveform, sample_rate)
resampler = T.Resample(sample_rate, resample_rate, dtype=waveform.dtype)
resampled_waveform = resampler(waveform)
plot_sweep(resampled_waveform, resample_rate, title="Resampled Waveform")
play_audio(waveform, sample_rate)
######################################################################
# Controlling resampling quality with parameters
# ----------------------------------------------
#
# Lowpass filter width
# ~~~~~~~~~~~~~~~~~~~~
#
# Because the filter used for interpolation extends infinitely, the
# ``lowpass_filter_width`` parameter is used to control for the width of
# the filter to use to window the interpolation. It is also referred to as
# the number of zero crossings, since the interpolation passes through
# zero at every time unit. Using a larger ``lowpass_filter_width``
# provides a sharper, more precise filter, but is more computationally
# expensive.
#
sample_rate = 48000
resample_rate = 32000
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, lowpass_filter_width=6
)
plot_sweep(resampled_waveform, resample_rate, title="lowpass_filter_width=6")
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, lowpass_filter_width=128
)
plot_sweep(resampled_waveform, resample_rate, title="lowpass_filter_width=128")
######################################################################
# Rolloff
# ~~~~~~~
#
# The ``rolloff`` parameter is represented as a fraction of the Nyquist
# frequency, which is the maximal frequency representable by a given
# finite sample rate. ``rolloff`` determines the lowpass filter cutoff and
# controls the degree of aliasing, which takes place when frequencies
# higher than the Nyquist are mapped to lower frequencies. A lower rolloff
# will therefore reduce the amount of aliasing, but it will also reduce
# some of the higher frequencies.
#
sample_rate = 48000
resample_rate = 32000
resampled_waveform = F.resample(waveform, sample_rate, resample_rate, rolloff=0.99)
plot_sweep(resampled_waveform, resample_rate, title="rolloff=0.99")
resampled_waveform = F.resample(waveform, sample_rate, resample_rate, rolloff=0.8)
plot_sweep(resampled_waveform, resample_rate, title="rolloff=0.8")
######################################################################
# Window function
# ~~~~~~~~~~~~~~~
#
# By default, ``torchaudio``’s resample uses the Hann window filter, which is
# a weighted cosine function. It additionally supports the Kaiser window,
# which is a near-optimal window function that contains an additional
# ``beta`` parameter that allows for designing the smoothness of the
# filter and the width of its impulse response. This can be controlled using the
# ``resampling_method`` parameter.
#
sample_rate = 48000
resample_rate = 32000
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, resampling_method="sinc_interpolation"
)
plot_sweep(resampled_waveform, resample_rate, title="Hann Window Default")
resampled_waveform = F.resample(
waveform, sample_rate, resample_rate, resampling_method="kaiser_window"
)
plot_sweep(resampled_waveform, resample_rate, title="Kaiser Window Default")
######################################################################
# Comparison against librosa
# --------------------------
#
# ``torchaudio``’s resample function can be used to produce results similar to
# those of librosa (resampy)’s kaiser window resampling, with some noise.
#
sample_rate = 48000
resample_rate = 32000
# kaiser_best
resampled_waveform = F.resample(
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="kaiser_window",
beta=14.769656459379492,
)
plot_sweep(resampled_waveform, resample_rate, title="Kaiser Window Best (torchaudio)")
librosa_resampled_waveform = torch.from_numpy(
librosa.resample(
waveform.squeeze().numpy(), sample_rate, resample_rate, res_type="kaiser_best"
)
).unsqueeze(0)
plot_sweep(
librosa_resampled_waveform, resample_rate, title="Kaiser Window Best (librosa)"
)
mse = torch.square(resampled_waveform - librosa_resampled_waveform).mean().item()
print("torchaudio and librosa kaiser best MSE:", mse)
# kaiser_fast
resampled_waveform = F.resample(
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=16,
rolloff=0.85,
resampling_method="kaiser_window",
beta=8.555504641634386,
)
plot_specgram(
resampled_waveform, resample_rate, title="Kaiser Window Fast (torchaudio)"
)
librosa_resampled_waveform = torch.from_numpy(
librosa.resample(
waveform.squeeze().numpy(), sample_rate, resample_rate, res_type="kaiser_fast"
)
).unsqueeze(0)
plot_sweep(
librosa_resampled_waveform, resample_rate, title="Kaiser Window Fast (librosa)"
)
mse = torch.square(resampled_waveform - librosa_resampled_waveform).mean().item()
print("torchaudio and librosa kaiser fast MSE:", mse)
######################################################################
# Performance Benchmarking
# ------------------------
#
# Below are benchmarks for downsampling and upsampling waveforms between
# two pairs of sampling rates. We demonstrate the performance implications
# that the ``lowpass_filter_width``, window type, and sample rates can
# have. Additionally, we provide a comparison against ``librosa``\ ’s
# ``kaiser_best`` and ``kaiser_fast`` using their corresponding parameters
# in ``torchaudio``.
#
# To elaborate on the results:
#
# - a larger ``lowpass_filter_width`` results in a larger resampling kernel,
# and therefore increases computation time for both the kernel computation
# and convolution
# - using ``kaiser_window`` results in longer computation times than the default
#   ``sinc_interpolation`` because it is more complex to compute the intermediate
#   window values
# - a large GCD between the sample and resample rate will result in a
#   simplification that allows for a smaller kernel and faster kernel computation.
#
configs = {
"downsample (48 -> 44.1 kHz)": [48000, 44100],
"downsample (16 -> 8 kHz)": [16000, 8000],
"upsample (44.1 -> 48 kHz)": [44100, 48000],
"upsample (8 -> 16 kHz)": [8000, 16000],
}
for label in configs:
times, rows = [], []
sample_rate = configs[label][0]
resample_rate = configs[label][1]
waveform = get_sine_sweep(sample_rate)
# sinc 64 zero-crossings
f_time = benchmark_resample(
"functional", waveform, sample_rate, resample_rate, lowpass_filter_width=64
)
t_time = benchmark_resample(
"transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=64
)
times.append([None, 1000 * f_time, 1000 * t_time])
rows.append("sinc (width 64)")
    # sinc 16 zero-crossings
f_time = benchmark_resample(
"functional", waveform, sample_rate, resample_rate, lowpass_filter_width=16
)
t_time = benchmark_resample(
"transforms", waveform, sample_rate, resample_rate, lowpass_filter_width=16
)
times.append([None, 1000 * f_time, 1000 * t_time])
rows.append("sinc (width 16)")
# kaiser best
lib_time = benchmark_resample(
"librosa", waveform, sample_rate, resample_rate, librosa_type="kaiser_best"
)
f_time = benchmark_resample(
"functional",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="kaiser_window",
beta=14.769656459379492,
)
t_time = benchmark_resample(
"transforms",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=64,
rolloff=0.9475937167399596,
resampling_method="kaiser_window",
beta=14.769656459379492,
)
times.append([1000 * lib_time, 1000 * f_time, 1000 * t_time])
rows.append("kaiser_best")
# kaiser fast
lib_time = benchmark_resample(
"librosa", waveform, sample_rate, resample_rate, librosa_type="kaiser_fast"
)
f_time = benchmark_resample(
"functional",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=16,
rolloff=0.85,
resampling_method="kaiser_window",
beta=8.555504641634386,
)
t_time = benchmark_resample(
"transforms",
waveform,
sample_rate,
resample_rate,
lowpass_filter_width=16,
rolloff=0.85,
resampling_method="kaiser_window",
beta=8.555504641634386,
)
times.append([1000 * lib_time, 1000 * f_time, 1000 * t_time])
rows.append("kaiser_fast")
df = pd.DataFrame(
times, columns=["librosa", "functional", "transforms"], index=rows
)
df.columns = pd.MultiIndex.from_product([[f"{label} time (ms)"], df.columns])
display(df.round(2))
|
"""
Speech Recognition with Wav2Vec2
================================
**Author**: `Moto Hira <[email protected]>`__
This tutorial shows how to perform speech recognition using
pre-trained models from wav2vec 2.0
[`paper <https://arxiv.org/abs/2006.11477>`__].
"""
######################################################################
# Overview
# --------
#
# The process of speech recognition looks like the following.
#
# 1. Extract the acoustic features from audio waveform
#
# 2. Estimate the class of the acoustic features frame-by-frame
#
# 3. Generate hypothesis from the sequence of the class probabilities
#
# Torchaudio provides easy access to the pre-trained weights and
# associated information, such as the expected sample rate and class
# labels. They are bundled together and available under
# ``torchaudio.pipelines`` module.
#
######################################################################
# Preparation
# -----------
#
# First we import the necessary packages, and fetch data that we work on.
#
# %matplotlib inline
import os
import torch
import torchaudio
import requests
import matplotlib
import matplotlib.pyplot as plt
import IPython
matplotlib.rcParams["figure.figsize"] = [16.0, 4.8]
torch.random.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.__version__)
print(torchaudio.__version__)
print(device)
SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SPEECH_FILE = "_assets/speech.wav"
if not os.path.exists(SPEECH_FILE):
os.makedirs("_assets", exist_ok=True)
with open(SPEECH_FILE, "wb") as file:
file.write(requests.get(SPEECH_URL).content)
######################################################################
# Creating a pipeline
# -------------------
#
# First, we will create a Wav2Vec2 model that performs the feature
# extraction and the classification.
#
# There are two types of Wav2Vec2 pre-trained weights available in
# torchaudio: the ones fine-tuned for the ASR task, and the ones not
# fine-tuned.
#
# Wav2Vec2 (and HuBERT) models are trained in a self-supervised manner. They
# are first trained on audio alone for representation learning, and then
# fine-tuned for a specific task with additional labels.
#
# The pre-trained weights without fine-tuning can be fine-tuned
# for other downstream tasks as well, but this tutorial does not
# cover that.
#
# We will use :py:func:`torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H` here.
#
# There are multiple models available as
# :py:mod:`torchaudio.pipelines`. Please check the documentation for
# the details of how they are trained.
#
# The bundle object provides the interface to instantiate the model and other
# information. The sampling rate and the class labels are found as follows.
#
bundle = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H
print("Sample Rate:", bundle.sample_rate)
print("Labels:", bundle.get_labels())
######################################################################
# The model can be constructed as follows. This process automatically
# fetches the pre-trained weights and loads them into the model.
#
model = bundle.get_model().to(device)
print(model.__class__)
######################################################################
# Loading data
# ------------
#
# We will use the speech data from `VOiCES
# dataset <https://iqtlabs.github.io/voices/>`__, which is licensed under
# Creative Commons BY 4.0.
#
IPython.display.Audio(SPEECH_FILE)
######################################################################
# To load data, we use :py:func:`torchaudio.load`.
#
# If the sampling rate is different from what the pipeline expects, then
# we can use :py:func:`torchaudio.functional.resample` for resampling.
#
# .. note::
#
# - :py:func:`torchaudio.functional.resample` works on CUDA tensors as well.
# - When performing resampling multiple times on the same set of sample rates,
# using :py:func:`torchaudio.transforms.Resample` might improve the performance.
#
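# A minimal sketch of the ``Resample``-transform variant (assuming ``waveform``
# and ``sample_rate`` come from ``torchaudio.load`` as below):
#
# ::
#
#    resampler = torchaudio.transforms.Resample(sample_rate, bundle.sample_rate)
#    waveform = resampler(waveform)
#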
waveform, sample_rate = torchaudio.load(SPEECH_FILE)
waveform = waveform.to(device)
if sample_rate != bundle.sample_rate:
waveform = torchaudio.functional.resample(waveform, sample_rate, bundle.sample_rate)
######################################################################
# Extracting acoustic features
# ----------------------------
#
# The next step is to extract acoustic features from the audio.
#
# .. note::
# Wav2Vec2 models fine-tuned for ASR task can perform feature
# extraction and classification with one step, but for the sake of the
# tutorial, we also show how to perform feature extraction here.
#
with torch.inference_mode():
features, _ = model.extract_features(waveform)
######################################################################
# The returned ``features`` is a list of tensors. Each tensor is the output of
# a transformer layer.
#
fig, ax = plt.subplots(len(features), 1, figsize=(16, 4.3 * len(features)))
for i, feats in enumerate(features):
ax[i].imshow(feats[0].cpu())
ax[i].set_title(f"Feature from transformer layer {i+1}")
ax[i].set_xlabel("Feature dimension")
ax[i].set_ylabel("Frame (time-axis)")
plt.tight_layout()
plt.show()
######################################################################
# Feature classification
# ----------------------
#
# Once the acoustic features are extracted, the next step is to classify
# them into a set of categories.
#
# The Wav2Vec2 model provides a method to perform the feature extraction and
# classification in one step.
#
with torch.inference_mode():
emission, _ = model(waveform)
######################################################################
# The output is in the form of logits, not probabilities.
#
# Let’s visualize this.
#
plt.imshow(emission[0].cpu().T)
plt.title("Classification result")
plt.xlabel("Frame (time-axis)")
plt.ylabel("Class")
plt.show()
print("Class labels:", bundle.get_labels())
######################################################################
# We can see that there are strong indications for certain labels across
# the timeline.
#
######################################################################
# Generating transcripts
# ----------------------
#
# From the sequence of label probabilities, now we want to generate
# transcripts. The process to generate hypotheses is often called
# “decoding”.
#
# Decoding is more elaborate than simple classification because
# decoding at a certain time step can be affected by surrounding
# observations.
#
# For example, take words like ``night`` and ``knight``. Even if their
# prior probability distributions are different (in typical conversations,
# ``night`` would occur way more often than ``knight``), to accurately
# generate transcripts with ``knight``, such as ``a knight with a sword``,
# the decoding process has to postpone the final decision until it sees
# enough context.
#
# There are many decoding techniques proposed, and they require external
# resources, such as word dictionary and language models.
#
# In this tutorial, for the sake of simplicity, we will perform greedy
# decoding which does not depend on such external components, and simply
# pick up the best hypothesis at each time step. Therefore, the context
# information is not used, and only one transcript can be generated.
#
# We start by defining the greedy decoding algorithm.
#
class GreedyCTCDecoder(torch.nn.Module):
def __init__(self, labels, blank=0):
super().__init__()
self.labels = labels
self.blank = blank
def forward(self, emission: torch.Tensor) -> str:
"""Given a sequence emission over labels, get the best path string
Args:
emission (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
indices = torch.argmax(emission, dim=-1) # [num_seq,]
indices = torch.unique_consecutive(indices, dim=-1)
indices = [i for i in indices if i != self.blank]
return "".join([self.labels[i] for i in indices])
######################################################################
# Now create the decoder object and decode the transcript.
#
decoder = GreedyCTCDecoder(labels=bundle.get_labels())
transcript = decoder(emission[0])
######################################################################
# Let’s check the result and listen again to the audio.
#
print(transcript)
IPython.display.Audio(SPEECH_FILE)
######################################################################
# The ASR model is fine-tuned using a loss function called Connectionist Temporal Classification (CTC).
# The details of the CTC loss are explained
# `here <https://distill.pub/2017/ctc/>`__. In CTC, a blank token (ϵ) is a
# special token which represents no emission; it also separates repetitions
# of the same symbol. In decoding, blank tokens are simply removed.
#
######################################################################
# Conclusion
# ----------
#
# In this tutorial, we looked at how to use :py:mod:`torchaudio.pipelines` to
# perform acoustic feature extraction and speech recognition. Constructing
# a model and getting the emission is as short as two lines.
#
# ::
#
# model = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H.get_model()
# emission = model(waveforms, ...)
#
|
# -*- coding: utf-8 -*-
"""
Audio I/O
=========
``torchaudio`` integrates ``libsox`` and provides a rich set of audio I/O.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio boto3
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
import io
import os
import requests
import tarfile
import boto3
from botocore import UNSIGNED
from botocore.config import Config
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav"
SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav")
SAMPLE_MP3_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.mp3"
SAMPLE_MP3_PATH = os.path.join(_SAMPLE_DIR, "steam.mp3")
SAMPLE_GSM_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.gsm"
SAMPLE_GSM_PATH = os.path.join(_SAMPLE_DIR, "steam.gsm")
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
SAMPLE_TAR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit.tar.gz"
SAMPLE_TAR_PATH = os.path.join(_SAMPLE_DIR, "sample.tar.gz")
SAMPLE_TAR_ITEM = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
S3_BUCKET = "pytorch-tutorial-assets"
S3_KEY = "VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
def _fetch_data():
os.makedirs(_SAMPLE_DIR, exist_ok=True)
uri = [
(SAMPLE_WAV_URL, SAMPLE_WAV_PATH),
(SAMPLE_MP3_URL, SAMPLE_MP3_PATH),
(SAMPLE_GSM_URL, SAMPLE_GSM_PATH),
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
(SAMPLE_TAR_URL, SAMPLE_TAR_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_PATH, resample=resample)
def inspect_file(path):
print("-" * 10)
print("Source:", path)
print("-" * 10)
print(f" - File size: {os.path.getsize(path)} bytes")
print(f" - {torchaudio.info(path)}")
######################################################################
# Querying audio metadata
# -----------------------
#
# Function ``torchaudio.info`` fetches audio metadata. You can provide
# a path-like object or file-like object.
#
metadata = torchaudio.info(SAMPLE_WAV_PATH)
print(metadata)
######################################################################
# Where
#
# - ``sample_rate`` is the sampling rate of the audio
# - ``num_channels`` is the number of channels
# - ``num_frames`` is the number of frames per channel
# - ``bits_per_sample`` is bit depth
# - ``encoding`` is the sample coding format
#
# ``encoding`` can take on one of the following values:
#
# - ``"PCM_S"``: Signed integer linear PCM
# - ``"PCM_U"``: Unsigned integer linear PCM
# - ``"PCM_F"``: Floating point linear PCM
# - ``"FLAC"``: Flac, `Free Lossless Audio
# Codec <https://xiph.org/flac/>`__
# - ``"ULAW"``: Mu-law,
# [`wikipedia <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`__]
# - ``"ALAW"``: A-law
# [`wikipedia <https://en.wikipedia.org/wiki/A-law_algorithm>`__]
# - ``"MP3"`` : MP3, MPEG-1 Audio Layer III
# - ``"VORBIS"``: OGG Vorbis [`xiph.org <https://xiph.org/vorbis/>`__]
# - ``"AMR_NB"``: Adaptive Multi-Rate
# [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_audio_codec>`__]
# - ``"AMR_WB"``: Adaptive Multi-Rate Wideband
# [`wikipedia <https://en.wikipedia.org/wiki/Adaptive_Multi-Rate_Wideband>`__]
# - ``"OPUS"``: Opus [`opus-codec.org <https://opus-codec.org/>`__]
# - ``"GSM"``: GSM-FR
# [`wikipedia <https://en.wikipedia.org/wiki/Full_Rate>`__]
# - ``"UNKNOWN"`` None of above
#
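# These fields are available as attributes on the returned metadata object.
# A minimal sketch:
#
# ::
#
#    metadata = torchaudio.info(SAMPLE_WAV_PATH)
#    print(metadata.encoding)   # e.g. "PCM_S" for a signed 16-bit PCM WAV file
#    print(metadata.bits_per_sample)
#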
######################################################################
# **Note**
#
# - ``bits_per_sample`` can be ``0`` for formats with compression and/or
# variable bit rate (such as MP3).
# - ``num_frames`` can be ``0`` for GSM-FR format.
#
metadata = torchaudio.info(SAMPLE_MP3_PATH)
print(metadata)
metadata = torchaudio.info(SAMPLE_GSM_PATH)
print(metadata)
######################################################################
# Querying file-like object
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ``info`` works on file-like objects.
#
print("Source:", SAMPLE_WAV_URL)
with requests.get(SAMPLE_WAV_URL, stream=True) as response:
metadata = torchaudio.info(response.raw)
print(metadata)
######################################################################
# **Note** When passing a file-like object, ``info`` does not read
# all of the underlying data; rather, it reads only a portion
# of the data from the beginning.
# Therefore, for a given audio format, it may not be able to retrieve the
# correct metadata, including the format itself.
# The following example illustrates this.
#
# - Use argument ``format`` to specify the audio format of the input.
# - The returned metadata has ``num_frames = 0``
#
print("Source:", SAMPLE_MP3_URL)
with requests.get(SAMPLE_MP3_URL, stream=True) as response:
metadata = torchaudio.info(response.raw, format="mp3")
print(f"Fetched {response.raw.tell()} bytes.")
print(metadata)
######################################################################
# Loading audio data into Tensor
# ------------------------------
#
# To load audio data, you can use ``torchaudio.load``.
#
# This function accepts a path-like object or file-like object as input.
#
# The returned value is a tuple of waveform (``Tensor``) and sample rate
# (``int``).
#
# By default, the resulting tensor object has ``dtype=torch.float32`` and
# its value range is normalized within ``[-1.0, 1.0]``.
#
# For the list of supported format, please refer to `the torchaudio
# documentation <https://pytorch.org/audio>`__.
#
waveform, sample_rate = torchaudio.load(SAMPLE_WAV_SPEECH_PATH)
print_stats(waveform, sample_rate=sample_rate)
plot_waveform(waveform, sample_rate)
plot_specgram(waveform, sample_rate)
play_audio(waveform, sample_rate)
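######################################################################
# As a quick check of the statement above, the loaded tensor is
# ``float32`` with values in ``[-1.0, 1.0]`` by default. For WAV files,
# passing ``normalize=False`` keeps the native integer type instead
# (this sketch assumes the sample file is 16-bit PCM, as is the case here).
#
print(waveform.dtype, waveform.min().item(), waveform.max().item())
waveform_int, _ = torchaudio.load(SAMPLE_WAV_SPEECH_PATH, normalize=False)
print(waveform_int.dtype)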
######################################################################
# Loading from file-like object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# ``torchaudio``\ ’s I/O functions now support file-like objects. This
# allows for fetching and decoding audio data from locations
# within and beyond the local file system.
# The following examples illustrate this.
#
# Load audio data as HTTP request
with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response:
waveform, sample_rate = torchaudio.load(response.raw)
plot_specgram(waveform, sample_rate, title="HTTP datasource")
# Load audio from tar file
with tarfile.open(SAMPLE_TAR_PATH, mode="r") as tarfile_:
fileobj = tarfile_.extractfile(SAMPLE_TAR_ITEM)
waveform, sample_rate = torchaudio.load(fileobj)
plot_specgram(waveform, sample_rate, title="TAR file")
# Load audio from S3
client = boto3.client("s3", config=Config(signature_version=UNSIGNED))
response = client.get_object(Bucket=S3_BUCKET, Key=S3_KEY)
waveform, sample_rate = torchaudio.load(response["Body"])
plot_specgram(waveform, sample_rate, title="From S3")
######################################################################
# Tips on slicing
# ~~~~~~~~~~~~~~~
#
# Providing ``num_frames`` and ``frame_offset`` arguments restricts
# decoding to the corresponding segment of the input.
#
# The same result can be achieved using vanilla Tensor slicing,
# (i.e. ``waveform[:, frame_offset:frame_offset+num_frames]``). However,
# providing ``num_frames`` and ``frame_offset`` arguments is more
# efficient.
#
# This is because the function will end data acquisition and decoding
# once it finishes decoding the requested frames. This is advantageous
# when the audio data are transferred over a network, as the transfer will
# stop as soon as the necessary amount of data has been fetched.
#
# The following example illustrates this.
#
# Illustration of two different decoding methods.
# The first one will fetch all the data and decode them, while
# the second one will stop fetching data once it completes decoding.
# The resulting waveforms are identical.
frame_offset, num_frames = 16000, 16000  # Fetch and decode the segment from 1 to 2 seconds
print("Fetching all the data...")
with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response:
waveform1, sample_rate1 = torchaudio.load(response.raw)
waveform1 = waveform1[:, frame_offset: frame_offset + num_frames]
print(f" - Fetched {response.raw.tell()} bytes")
print("Fetching until the requested frames are available...")
with requests.get(SAMPLE_WAV_SPEECH_URL, stream=True) as response:
waveform2, sample_rate2 = torchaudio.load(
response.raw, frame_offset=frame_offset, num_frames=num_frames
)
print(f" - Fetched {response.raw.tell()} bytes")
print("Checking the resulting waveform ... ", end="")
assert (waveform1 == waveform2).all()
print("matched!")
######################################################################
# Saving audio to file
# --------------------
#
# To save audio data in formats interpretable by common applications,
# you can use ``torchaudio.save``.
#
# This function accepts a path-like object or file-like object.
#
# When passing a file-like object, you also need to provide argument ``format``
# so that the function knows which format it should use. In the
# case of a path-like object, the function will infer the format from
# the extension. If you are saving to a file without an extension, you need
# to provide argument ``format``.
#
# When saving WAV-formatted data, the default encoding for ``float32`` Tensor
# is 32-bit floating-point PCM. You can provide arguments ``encoding`` and
# ``bits_per_sample`` to change this behavior. For example, to save data
# in 16-bit signed integer PCM, you can do the following.
#
# **Note** Saving data in encodings with lower bit depth reduces the
# resulting file size but also precision.
#
waveform, sample_rate = get_sample()
print_stats(waveform, sample_rate=sample_rate)
# Save without any encoding option.
# The function will pick an encoding that fits the provided data.
path = f"{_SAMPLE_DIR}/save_example_default.wav"
torchaudio.save(path, waveform, sample_rate)
inspect_file(path)
# Save as 16-bit signed integer Linear PCM
# The resulting file occupies half the storage but loses precision
path = f"{_SAMPLE_DIR}/save_example_PCM_S16.wav"
torchaudio.save(path, waveform, sample_rate, encoding="PCM_S", bits_per_sample=16)
inspect_file(path)
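######################################################################
# As noted above, when the output path has no extension, the format cannot
# be inferred, so ``format`` must be given explicitly. A brief sketch
# (the extension-less filename here is just an example):
#
path = f"{_SAMPLE_DIR}/save_example_no_extension"
torchaudio.save(path, waveform, sample_rate, format="wav")
inspect_file(path)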
######################################################################
# ``torchaudio.save`` can also handle other formats. To name a few:
#
waveform, sample_rate = get_sample(resample=8000)
formats = [
"mp3",
"flac",
"vorbis",
"sph",
"amb",
"amr-nb",
"gsm",
]
for format in formats:
path = f"{_SAMPLE_DIR}/save_example.{format}"
torchaudio.save(path, waveform, sample_rate, format=format)
inspect_file(path)
######################################################################
# Saving to file-like object
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Similar to the other I/O functions, you can save audio to file-like
# objects. When saving to a file-like object, argument ``format`` is
# required.
#
waveform, sample_rate = get_sample()
# Saving to bytes buffer
buffer_ = io.BytesIO()
torchaudio.save(buffer_, waveform, sample_rate, format="wav")
buffer_.seek(0)
print(buffer_.read(16))
|
"""
Forced Alignment with Wav2Vec2
==============================
**Author** `Moto Hira <[email protected]>`__
This tutorial shows how to align a transcript to speech with
``torchaudio``, using the CTC segmentation algorithm described in
`CTC-Segmentation of Large Corpora for German End-to-end Speech
Recognition <https://arxiv.org/abs/2007.09127>`__.
"""
######################################################################
# Overview
# --------
#
# The process of alignment looks like the following.
#
# 1. Estimate the frame-wise label probability from the audio waveform.
# 2. Generate the trellis matrix, which represents the probability of
#    each label being aligned at each time step.
# 3. Find the most likely path through the trellis matrix.
#
# In this example, we use ``torchaudio``\ ’s ``Wav2Vec2`` model for
# acoustic feature extraction.
#
######################################################################
# Preparation
# -----------
#
# First we import the necessary packages and fetch the data that we work on.
#
# %matplotlib inline
import os
from dataclasses import dataclass
import torch
import torchaudio
import requests
import matplotlib
import matplotlib.pyplot as plt
import IPython
matplotlib.rcParams["figure.figsize"] = [16.0, 4.8]
torch.random.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.__version__)
print(torchaudio.__version__)
print(device)
SPEECH_URL = "https://download.pytorch.org/torchaudio/tutorial-assets/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav"
SPEECH_FILE = "_assets/speech.wav"
if not os.path.exists(SPEECH_FILE):
os.makedirs("_assets", exist_ok=True)
with open(SPEECH_FILE, "wb") as file:
file.write(requests.get(SPEECH_URL).content)
######################################################################
# Generate frame-wise label probability
# -------------------------------------
#
# The first step is to generate the label class probability for each audio
# frame. We can use a Wav2Vec2 model that is trained for ASR. Here we use
# :py:func:`torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H`.
#
# ``torchaudio`` provides easy access to pretrained models with associated
# labels.
#
# .. note::
#
# In the subsequent sections, we will compute the probability in
# log-domain to avoid numerical instability. For this purpose, we
# normalize the ``emission`` with :py:func:`torch.log_softmax`.
#
bundle = torchaudio.pipelines.WAV2VEC2_ASR_BASE_960H
model = bundle.get_model().to(device)
labels = bundle.get_labels()
with torch.inference_mode():
waveform, _ = torchaudio.load(SPEECH_FILE)
emissions, _ = model(waveform.to(device))
emissions = torch.log_softmax(emissions, dim=-1)
emission = emissions[0].cpu().detach()
################################################################################
# Visualization
################################################################################
print(labels)
plt.imshow(emission.T)
plt.colorbar()
plt.title("Frame-wise class probability")
plt.xlabel("Time")
plt.ylabel("Labels")
plt.show()
######################################################################
# Generate alignment probability (trellis)
# ----------------------------------------
#
# From the emission matrix, we next generate the trellis, which represents
# the probability of each transcript label occurring at each time frame.
#
# The trellis is a 2D matrix with a time axis and a label axis. The label
# axis represents the transcript that we are aligning. In the following, we
# use :math:`t` to denote the index on the time axis and :math:`j` to denote
# the index on the label axis. :math:`c_j` represents the label at label
# index :math:`j`.
#
# To generate the probability at time step :math:`t+1`, we look at the
# trellis at time step :math:`t` and the emission at time step :math:`t+1`.
# There are two paths to reach time step :math:`t+1` with label
# :math:`c_{j+1}`. The first is the case where the label was already
# :math:`c_{j+1}` at :math:`t` and there was no label change from
# :math:`t` to :math:`t+1`. The other is the case where the label was
# :math:`c_j` at :math:`t` and it transitioned to the next label
# :math:`c_{j+1}` at :math:`t+1`.
#
# The following diagram illustrates this transition.
#
# .. image:: https://download.pytorch.org/torchaudio/tutorial-assets/ctc-forward.png
#
# Since we are looking for the most likely transitions, we take the more
# likely path for the value of :math:`k_{(t+1, j+1)}`, that is
#
# :math:`k_{(t+1, j+1)} = \max( k_{(t, j)} p(t+1, c_{j+1}), k_{(t, j+1)} p(t+1, repeat) )`
#
# where :math:`k` represents the trellis matrix, and :math:`p(t, c_j)`
# represents the probability of label :math:`c_j` at time step :math:`t`.
# :math:`repeat` represents the blank token from the CTC formulation. (For
# details of the CTC algorithm, please refer to *Sequence Modeling with CTC*
# [`distill.pub <https://distill.pub/2017/ctc/>`__].)
#
transcript = "I|HAD|THAT|CURIOSITY|BESIDE|ME|AT|THIS|MOMENT"
dictionary = {c: i for i, c in enumerate(labels)}
tokens = [dictionary[c] for c in transcript]
print(list(zip(transcript, tokens)))
def get_trellis(emission, tokens, blank_id=0):
num_frame = emission.size(0)
num_tokens = len(tokens)
    # The trellis has extra dimensions for both the time axis and the tokens.
# The extra dim for tokens represents <SoS> (start-of-sentence)
# The extra dim for time axis is for simplification of the code.
trellis = torch.full((num_frame + 1, num_tokens + 1), -float("inf"))
trellis[:, 0] = 0
for t in range(num_frame):
trellis[t + 1, 1:] = torch.maximum(
# Score for staying at the same token
trellis[t, 1:] + emission[t, blank_id],
# Score for changing to the next token
trellis[t, :-1] + emission[t, tokens],
)
return trellis
trellis = get_trellis(emission, tokens)
################################################################################
# Visualization
################################################################################
plt.imshow(trellis[1:, 1:].T, origin="lower")
plt.annotate("- Inf", (trellis.size(1) / 5, trellis.size(1) / 1.5))
plt.colorbar()
plt.show()
######################################################################
# In the above visualization, we can see that there is a trace of high
# probability crossing the matrix diagonally.
#
######################################################################
# Find the most likely path (backtracking)
# ----------------------------------------
#
# Once the trellis is generated, we will traverse it following the
# elements with high probability.
#
# We start from the last label index at the time step with the highest
# probability; then we traverse back in time, picking stay
# (:math:`c_j \rightarrow c_j`) or transition
# (:math:`c_j \rightarrow c_{j+1}`) based on the post-transition
# probability :math:`k_{t, j} p(t+1, c_{j+1})` or
# :math:`k_{t, j+1} p(t+1, repeat)`.
#
# The traversal is finished once the label index reaches the beginning of
# the transcript.
#
# The trellis matrix is used for path-finding, but for the final
# probability of each segment, we take the frame-wise probability from the
# emission matrix.
#
@dataclass
class Point:
token_index: int
time_index: int
score: float
def backtrack(trellis, emission, tokens, blank_id=0):
# Note:
# j and t are indices for trellis, which has extra dimensions
# for time and tokens at the beginning.
# When referring to time frame index `T` in trellis,
# the corresponding index in emission is `T-1`.
# Similarly, when referring to token index `J` in trellis,
# the corresponding index in transcript is `J-1`.
j = trellis.size(1) - 1
t_start = torch.argmax(trellis[:, j]).item()
path = []
for t in range(t_start, 0, -1):
# 1. Figure out if the current position was stay or change
# Note (again):
# `emission[J-1]` is the emission at time frame `J` of trellis dimension.
        # Score for staying at the same token from time frame t-1 to t.
stayed = trellis[t - 1, j] + emission[t - 1, blank_id]
        # Score for transitioning from token j-1 at time frame t-1 to token j at t.
changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]]
# 2. Store the path with frame-wise probability.
prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item()
# Return token index and time index in non-trellis coordinate.
path.append(Point(j - 1, t - 1, prob))
# 3. Update the token
if changed > stayed:
j -= 1
if j == 0:
break
else:
raise ValueError("Failed to align")
return path[::-1]
path = backtrack(trellis, emission, tokens)
print(path)
################################################################################
# Visualization
################################################################################
def plot_trellis_with_path(trellis, path):
# To plot trellis with path, we take advantage of 'nan' value
trellis_with_path = trellis.clone()
for _, p in enumerate(path):
trellis_with_path[p.time_index, p.token_index] = float("nan")
plt.imshow(trellis_with_path[1:, 1:].T, origin="lower")
plot_trellis_with_path(trellis, path)
plt.title("The path found by backtracking")
plt.show()
######################################################################
# Looking good. This path contains repetitions of the same labels, so
# let’s merge them to make it closer to the original transcript.
#
# When merging multiple path points, we simply take the average
# probability of the merged segments.
#
# Merge the labels
@dataclass
class Segment:
label: str
start: int
end: int
score: float
def __repr__(self):
return f"{self.label}\t({self.score:4.2f}): [{self.start:5d}, {self.end:5d})"
@property
def length(self):
return self.end - self.start
def merge_repeats(path):
i1, i2 = 0, 0
segments = []
while i1 < len(path):
while i2 < len(path) and path[i1].token_index == path[i2].token_index:
i2 += 1
score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1)
segments.append(
Segment(
transcript[path[i1].token_index],
path[i1].time_index,
path[i2 - 1].time_index + 1,
score,
)
)
i1 = i2
return segments
segments = merge_repeats(path)
for seg in segments:
print(seg)
################################################################################
# Visualization
################################################################################
def plot_trellis_with_segments(trellis, segments, transcript):
# To plot trellis with path, we take advantage of 'nan' value
trellis_with_path = trellis.clone()
for i, seg in enumerate(segments):
if seg.label != "|":
trellis_with_path[seg.start + 1: seg.end + 1, i + 1] = float("nan")
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9.5))
ax1.set_title("Path, label and probability for each label")
ax1.imshow(trellis_with_path.T, origin="lower")
ax1.set_xticks([])
for i, seg in enumerate(segments):
if seg.label != "|":
ax1.annotate(seg.label, (seg.start + 0.7, i + 0.3), weight="bold")
ax1.annotate(f"{seg.score:.2f}", (seg.start - 0.3, i + 4.3))
ax2.set_title("Label probability with and without repetation")
xs, hs, ws = [], [], []
for seg in segments:
if seg.label != "|":
xs.append((seg.end + seg.start) / 2 + 0.4)
hs.append(seg.score)
ws.append(seg.end - seg.start)
ax2.annotate(seg.label, (seg.start + 0.8, -0.07), weight="bold")
ax2.bar(xs, hs, width=ws, color="gray", alpha=0.5, edgecolor="black")
xs, hs = [], []
for p in path:
label = transcript[p.token_index]
if label != "|":
xs.append(p.time_index + 1)
hs.append(p.score)
ax2.bar(xs, hs, width=0.5, alpha=0.5)
ax2.axhline(0, color="black")
ax2.set_xlim(ax1.get_xlim())
ax2.set_ylim(-0.1, 1.1)
plot_trellis_with_segments(trellis, segments, transcript)
plt.tight_layout()
plt.show()
######################################################################
# Looks good. Now let’s merge the words. The Wav2Vec2 model uses ``'|'``
# as the word boundary, so we merge the segments before each occurrence of
# ``'|'``.
#
# Finally, we slice the original audio into word segments and listen to
# them to check whether the segmentation is correct.
#
# Merge words
def merge_words(segments, separator="|"):
words = []
i1, i2 = 0, 0
while i1 < len(segments):
if i2 >= len(segments) or segments[i2].label == separator:
if i1 != i2:
segs = segments[i1:i2]
word = "".join([seg.label for seg in segs])
score = sum(seg.score * seg.length for seg in segs) / sum(
seg.length for seg in segs
)
words.append(
Segment(word, segments[i1].start, segments[i2 - 1].end, score)
)
i1 = i2 + 1
i2 = i1
else:
i2 += 1
return words
word_segments = merge_words(segments)
for word in word_segments:
print(word)
################################################################################
# Visualization
################################################################################
def plot_alignments(trellis, segments, word_segments, waveform):
trellis_with_path = trellis.clone()
for i, seg in enumerate(segments):
if seg.label != "|":
trellis_with_path[seg.start + 1: seg.end + 1, i + 1] = float("nan")
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9.5))
ax1.imshow(trellis_with_path[1:, 1:].T, origin="lower")
ax1.set_xticks([])
ax1.set_yticks([])
for word in word_segments:
ax1.axvline(word.start - 0.5)
ax1.axvline(word.end - 0.5)
for i, seg in enumerate(segments):
if seg.label != "|":
ax1.annotate(seg.label, (seg.start, i + 0.3))
ax1.annotate(f"{seg.score:.2f}", (seg.start, i + 4), fontsize=8)
# The original waveform
ratio = waveform.size(0) / (trellis.size(0) - 1)
ax2.plot(waveform)
for word in word_segments:
x0 = ratio * word.start
x1 = ratio * word.end
ax2.axvspan(x0, x1, alpha=0.1, color="red")
ax2.annotate(f"{word.score:.2f}", (x0, 0.8))
for seg in segments:
if seg.label != "|":
ax2.annotate(seg.label, (seg.start * ratio, 0.9))
xticks = ax2.get_xticks()
plt.xticks(xticks, xticks / bundle.sample_rate)
ax2.set_xlabel("time [second]")
ax2.set_yticks([])
ax2.set_ylim(-1.0, 1.0)
ax2.set_xlim(0, waveform.size(-1))
plot_alignments(
trellis,
segments,
word_segments,
waveform[0],
)
plt.show()
# A trick to embed the resulting audio in the generated file.
# `IPython.display.Audio` has to be the last call in a cell,
# and there should be only one call per cell.
def display_segment(i):
ratio = waveform.size(1) / (trellis.size(0) - 1)
word = word_segments[i]
x0 = int(ratio * word.start)
x1 = int(ratio * word.end)
filename = f"_assets/{i}_{word.label}.wav"
torchaudio.save(filename, waveform[:, x0:x1], bundle.sample_rate)
print(
f"{word.label} ({word.score:.2f}): {x0 / bundle.sample_rate:.3f} - {x1 / bundle.sample_rate:.3f} sec"
)
return IPython.display.Audio(filename)
######################################################################
#
# Generate the audio for each segment
print(transcript)
IPython.display.Audio(SPEECH_FILE)
######################################################################
#
display_segment(0)
######################################################################
#
display_segment(1)
######################################################################
#
display_segment(2)
######################################################################
#
display_segment(3)
######################################################################
#
display_segment(4)
######################################################################
#
display_segment(5)
######################################################################
#
display_segment(6)
######################################################################
#
display_segment(7)
######################################################################
#
display_segment(8)
######################################################################
# Conclusion
# ----------
#
# In this tutorial, we looked at how to use torchaudio’s Wav2Vec2 model to
# perform CTC segmentation for forced alignment.
#
|
# -*- coding: utf-8 -*-
"""
Audio Feature Augmentation
==========================
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import requests
import librosa
import matplotlib.pyplot as plt
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def get_spectrogram(
n_fft=400,
win_len=None,
hop_len=None,
power=2.0,
):
waveform, _ = get_speech_sample()
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_len,
hop_length=hop_len,
center=True,
pad_mode="reflect",
power=power,
)
return spectrogram(waveform)
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
######################################################################
# SpecAugment
# -----------
#
# `SpecAugment <https://ai.googleblog.com/2019/04/specaugment-new-data-augmentation.html>`__
# is a popular spectrogram augmentation technique.
#
# ``torchaudio`` implements ``TimeStretch``, ``TimeMasking`` and
# ``FrequencyMasking``.
#
# TimeStretch
# ~~~~~~~~~~~
#
spec = get_spectrogram(power=None)
stretch = T.TimeStretch()
rate = 1.2
spec_ = stretch(spec, rate)
plot_spectrogram(
torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304
)
plot_spectrogram(torch.abs(spec[0]), title="Original", aspect="equal", xmax=304)
rate = 0.9
spec_ = stretch(spec, rate)
plot_spectrogram(
torch.abs(spec_[0]), title=f"Stretched x{rate}", aspect="equal", xmax=304
)
######################################################################
# TimeMasking
# ~~~~~~~~~~~
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.TimeMasking(time_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along time axis")
######################################################################
# FrequencyMasking
# ~~~~~~~~~~~~~~~~
#
torch.random.manual_seed(4)
spec = get_spectrogram()
plot_spectrogram(spec[0], title="Original")
masking = T.FrequencyMasking(freq_mask_param=80)
spec = masking(spec)
plot_spectrogram(spec[0], title="Masked along frequency axis")
|
"""
Text-to-Speech with Tacotron2
=============================
**Author** `Yao-Yuan Yang <https://github.com/yangarbiter>`__,
`Moto Hira <[email protected]>`__
"""
######################################################################
# Overview
# --------
#
# This tutorial shows how to build a text-to-speech pipeline, using the
# pretrained Tacotron2 in torchaudio.
#
# The text-to-speech pipeline goes as follows:
#
# 1. Text preprocessing
#
# First, the input text is encoded into a list of symbols. In this
# tutorial, we will use English characters and phonemes as the symbols.
#
# 2. Spectrogram generation
#
# From the encoded text, a spectrogram is generated. We use ``Tacotron2``
# model for this.
#
# 3. Time-domain conversion
#
#    The last step is converting the spectrogram into a waveform. The
#    process of generating speech from a spectrogram is also called vocoding,
#    and the module that performs it is called a vocoder.
#    In this tutorial, three different vocoders are used:
# `WaveRNN <https://pytorch.org/audio/stable/models/wavernn.html>`__,
# `Griffin-Lim <https://pytorch.org/audio/stable/transforms.html#griffinlim>`__,
# and
# `Nvidia's WaveGlow <https://pytorch.org/hub/nvidia_deeplearningexamples_tacotron2/>`__.
#
#
# The following figure illustrates the whole process.
#
# .. image:: https://download.pytorch.org/torchaudio/tutorial-assets/tacotron2_tts_pipeline.png
#
# All the related components are bundled in :py:func:`torchaudio.pipelines.Tacotron2TTSBundle`,
# but this tutorial will also cover the process under the hood.
######################################################################
# Preparation
# -----------
#
# First, we install the necessary dependencies. In addition to
# ``torchaudio``, ``DeepPhonemizer`` is required to perform phoneme-based
# encoding.
#
# When running this example in a notebook, install DeepPhonemizer
# !pip3 install deep_phonemizer
import torch
import torchaudio
import matplotlib
import matplotlib.pyplot as plt
import IPython
matplotlib.rcParams["figure.figsize"] = [16.0, 4.8]
torch.random.manual_seed(0)
device = "cuda" if torch.cuda.is_available() else "cpu"
print(torch.__version__)
print(torchaudio.__version__)
print(device)
######################################################################
# Text Processing
# ---------------
#
######################################################################
# Character-based encoding
# ~~~~~~~~~~~~~~~~~~~~~~~~
#
# In this section, we will go through how the character-based encoding
# works.
#
# Since the pretrained Tacotron2 model expects a specific set of symbol
# tables, the same functionality is available in ``torchaudio``. This
# section is more of an explanation of the basis of the encoding.
#
# Firstly, we define the set of symbols. For example, we can use
# ``'_-!\'(),.:;? abcdefghijklmnopqrstuvwxyz'``. Then, we will map
# each character of the input text to the index of the corresponding
# symbol in the table.
#
# The following is an example of such processing. In the example, symbols
# that are not in the table are ignored.
#
symbols = "_-!'(),.:;? abcdefghijklmnopqrstuvwxyz"
look_up = {s: i for i, s in enumerate(symbols)}
symbols = set(symbols)
def text_to_sequence(text):
text = text.lower()
return [look_up[s] for s in text if s in symbols]
text = "Hello world! Text to speech!"
print(text_to_sequence(text))
######################################################################
# As mentioned above, the symbol table and indices must match
# what the pretrained Tacotron2 model expects. ``torchaudio`` provides the
# transform along with the pretrained model. For example, you can
# instantiate and use the transform as follows.
#
processor = torchaudio.pipelines.TACOTRON2_WAVERNN_CHAR_LJSPEECH.get_text_processor()
text = "Hello world! Text to speech!"
processed, lengths = processor(text)
print(processed)
print(lengths)
######################################################################
# The ``processor`` object takes either a text or a list of texts as input.
# When a list of texts is provided, the returned ``lengths`` variable
# represents the valid length of each processed token sequence in the
# output batch.
#
# The intermediate representation can be retrieved as follows.
#
print([processor.tokens[i] for i in processed[0, : lengths[0]]])
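######################################################################
# As mentioned above, the processor also accepts a list of texts, in which
# case the output is a padded batch and ``lengths`` holds the valid length
# of each entry. A minimal sketch (the texts here are arbitrary examples):
#
texts = ["Hello world!", "Text to speech!"]
processed_batch, lengths_batch = processor(texts)
print(processed_batch.shape)
print(lengths_batch)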
######################################################################
# Phoneme-based encoding
# ~~~~~~~~~~~~~~~~~~~~~~
#
# Phoneme-based encoding is similar to character-based encoding, but it
# uses a symbol table based on phonemes and a G2P (Grapheme-to-Phoneme)
# model.
#
# The details of the G2P model are out of the scope of this tutorial; we
# will just look at what the conversion looks like.
#
# Similar to the case of character-based encoding, the encoding process is
# expected to match what a pretrained Tacotron2 model is trained on.
# ``torchaudio`` has an interface to create the process.
#
# The following code illustrates how to make and use the processor. Behind
# the scenes, a G2P model is created using the ``DeepPhonemizer`` package,
# and the pretrained weights published by the author of ``DeepPhonemizer``
# are fetched.
#
bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
processor = bundle.get_text_processor()
text = "Hello world! Text to speech!"
with torch.inference_mode():
processed, lengths = processor(text)
print(processed)
print(lengths)
######################################################################
# Notice that the encoded values are different from the example of
# character-based encoding.
#
# The intermediate representation looks like the following.
#
print([processor.tokens[i] for i in processed[0, : lengths[0]]])
######################################################################
# Spectrogram Generation
# ----------------------
#
# ``Tacotron2`` is the model we use to generate spectrogram from the
# encoded text. For the detail of the model, please refer to `the
# paper <https://arxiv.org/abs/1712.05884>`__.
#
# It is easy to instantiate a Tacotron2 model with pretrained weights;
# however, note that the input to Tacotron2 models needs to be processed
# by the matching text processor.
#
# :py:func:`torchaudio.pipelines.Tacotron2TTSBundle` bundles the matching
# models and processors together so that it is easy to create the pipeline.
#
# For the available bundles and their usage, please refer to :py:mod:`torchaudio.pipelines`.
#
bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
processor = bundle.get_text_processor()
tacotron2 = bundle.get_tacotron2().to(device)
text = "Hello world! Text to speech!"
with torch.inference_mode():
processed, lengths = processor(text)
processed = processed.to(device)
lengths = lengths.to(device)
spec, _, _ = tacotron2.infer(processed, lengths)
plt.imshow(spec[0].cpu().detach())
######################################################################
# Note that the ``Tacotron2.infer`` method performs multinomial sampling;
# therefore, the process of generating the spectrogram involves randomness.
#
fig, ax = plt.subplots(3, 1, figsize=(16, 4.3 * 3))
for i in range(3):
with torch.inference_mode():
spec, spec_lengths, _ = tacotron2.infer(processed, lengths)
print(spec[0].shape)
ax[i].imshow(spec[0].cpu().detach())
plt.show()
######################################################################
# Waveform Generation
# -------------------
#
# Once the spectrogram is generated, the last process is to recover the
# waveform from the spectrogram.
#
# ``torchaudio`` provides vocoders based on ``GriffinLim`` and
# ``WaveRNN``.
#
######################################################################
# WaveRNN
# ~~~~~~~
#
# Continuing from the previous section, we can instantiate the matching
# WaveRNN model from the same bundle.
#
bundle = torchaudio.pipelines.TACOTRON2_WAVERNN_PHONE_LJSPEECH
processor = bundle.get_text_processor()
tacotron2 = bundle.get_tacotron2().to(device)
vocoder = bundle.get_vocoder().to(device)
text = "Hello world! Text to speech!"
with torch.inference_mode():
processed, lengths = processor(text)
processed = processed.to(device)
lengths = lengths.to(device)
spec, spec_lengths, _ = tacotron2.infer(processed, lengths)
waveforms, lengths = vocoder(spec, spec_lengths)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9))
ax1.imshow(spec[0].cpu().detach())
ax2.plot(waveforms[0].cpu().detach())
torchaudio.save(
"_assets/output_wavernn.wav", waveforms[0:1].cpu(), sample_rate=vocoder.sample_rate
)
IPython.display.Audio("_assets/output_wavernn.wav")
######################################################################
# Griffin-Lim
# ~~~~~~~~~~~
#
# Using the Griffin-Lim vocoder is the same as with WaveRNN. You can
# instantiate the vocoder object with the ``get_vocoder`` method and pass the spectrogram.
#
bundle = torchaudio.pipelines.TACOTRON2_GRIFFINLIM_PHONE_LJSPEECH
processor = bundle.get_text_processor()
tacotron2 = bundle.get_tacotron2().to(device)
vocoder = bundle.get_vocoder().to(device)
with torch.inference_mode():
processed, lengths = processor(text)
processed = processed.to(device)
lengths = lengths.to(device)
spec, spec_lengths, _ = tacotron2.infer(processed, lengths)
waveforms, lengths = vocoder(spec, spec_lengths)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9))
ax1.imshow(spec[0].cpu().detach())
ax2.plot(waveforms[0].cpu().detach())
torchaudio.save(
"_assets/output_griffinlim.wav",
waveforms[0:1].cpu(),
sample_rate=vocoder.sample_rate,
)
IPython.display.Audio("_assets/output_griffinlim.wav")
######################################################################
# Waveglow
# ~~~~~~~~
#
# WaveGlow is a vocoder published by Nvidia. The pretrained weights are
# published on Torch Hub. One can instantiate the model using the
# ``torch.hub`` module.
#
# Workaround to load model mapped on GPU
# https://stackoverflow.com/a/61840832
waveglow = torch.hub.load(
"NVIDIA/DeepLearningExamples:torchhub",
"nvidia_waveglow",
model_math="fp32",
pretrained=False,
)
checkpoint = torch.hub.load_state_dict_from_url(
"https://api.ngc.nvidia.com/v2/models/nvidia/waveglowpyt_fp32/versions/1/files/nvidia_waveglowpyt_fp32_20190306.pth", # noqa: E501
progress=False,
map_location=device,
)
state_dict = {
key.replace("module.", ""): value for key, value in checkpoint["state_dict"].items()
}
waveglow.load_state_dict(state_dict)
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow = waveglow.to(device)
waveglow.eval()
with torch.no_grad():
waveforms = waveglow.infer(spec)
fig, [ax1, ax2] = plt.subplots(2, 1, figsize=(16, 9))
ax1.imshow(spec[0].cpu().detach())
ax2.plot(waveforms[0].cpu().detach())
torchaudio.save("_assets/output_waveglow.wav", waveforms[0:1].cpu(), sample_rate=22050)
IPython.display.Audio("_assets/output_waveglow.wav")
|
# -*- coding: utf-8 -*-
"""
Audio Feature Extractions
=========================
``torchaudio`` implements feature extractions commonly used in the audio
domain. They are available in ``torchaudio.functional`` and
``torchaudio.transforms``.
``functional`` implements features as standalone functions.
They are stateless.
``transforms`` implements features as objects,
using implementations from ``functional`` and ``torch.nn.Module``. Because all
transforms are subclasses of ``torch.nn.Module``, they can be serialized
using TorchScript.
For the complete list of available features, please refer to the
documentation. In this tutorial, we will look into converting between the
time domain and frequency domain (``Spectrogram``, ``GriffinLim``,
``MelSpectrogram``).
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio librosa
import torch
import torchaudio
import torchaudio.functional as F
import torchaudio.transforms as T
print(torch.__version__)
print(torchaudio.__version__)
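######################################################################
# Because the transforms are ``torch.nn.Module`` subclasses, they can be
# serialized with TorchScript, as mentioned above. A minimal sketch
# (``n_fft=512`` is an arbitrary choice for illustration):
#
scripted_spectrogram = torch.jit.script(T.Spectrogram(n_fft=512))
print(type(scripted_spectrogram))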
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import os
import requests
import librosa
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_spectrogram(spec, title=None, ylabel="freq_bin", aspect="auto", xmax=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Spectrogram (db)")
axs.set_ylabel(ylabel)
axs.set_xlabel("frame")
im = axs.imshow(librosa.power_to_db(spec), origin="lower", aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
plt.show(block=False)
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def plot_mel_fbank(fbank, title=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or "Filter bank")
axs.imshow(fbank, aspect="auto")
axs.set_ylabel("frequency bin")
axs.set_xlabel("mel bin")
plt.show(block=False)
def plot_pitch(waveform, sample_rate, pitch):
figure, axis = plt.subplots(1, 1)
axis.set_title("Pitch Feature")
axis.grid(True)
end_time = waveform.shape[1] / sample_rate
time_axis = torch.linspace(0, end_time, waveform.shape[1])
axis.plot(time_axis, waveform[0], linewidth=1, color="gray", alpha=0.3)
axis2 = axis.twinx()
time_axis = torch.linspace(0, end_time, pitch.shape[1])
axis2.plot(time_axis, pitch[0], linewidth=2, label="Pitch", color="green")
axis2.legend(loc=0)
plt.show(block=False)
def plot_kaldi_pitch(waveform, sample_rate, pitch, nfcc):
figure, axis = plt.subplots(1, 1)
axis.set_title("Kaldi Pitch Feature")
axis.grid(True)
end_time = waveform.shape[1] / sample_rate
time_axis = torch.linspace(0, end_time, waveform.shape[1])
axis.plot(time_axis, waveform[0], linewidth=1, color="gray", alpha=0.3)
time_axis = torch.linspace(0, end_time, pitch.shape[1])
ln1 = axis.plot(time_axis, pitch[0], linewidth=2, label="Pitch", color="green")
axis.set_ylim((-1.3, 1.3))
axis2 = axis.twinx()
time_axis = torch.linspace(0, end_time, nfcc.shape[1])
ln2 = axis2.plot(
time_axis, nfcc[0], linewidth=2, label="NFCC", color="blue", linestyle="--"
)
lns = ln1 + ln2
labels = [l.get_label() for l in lns]
axis.legend(lns, labels, loc=0)
plt.show(block=False)
######################################################################
# Spectrogram
# -----------
#
# To get the frequency make-up of an audio signal as it varies with time,
# you can use ``Spectrogram``.
#
waveform, sample_rate = get_speech_sample()
n_fft = 1024
win_length = None
hop_length = 512
# define transformation
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
)
# Perform transformation
spec = spectrogram(waveform)
print_stats(spec)
plot_spectrogram(spec[0], title="torchaudio")
######################################################################
# GriffinLim
# ----------
#
# To recover a waveform from a spectrogram, you can use ``GriffinLim``.
#
torch.random.manual_seed(0)
waveform, sample_rate = get_speech_sample()
plot_waveform(waveform, sample_rate, title="Original")
play_audio(waveform, sample_rate)
n_fft = 1024
win_length = None
hop_length = 512
spec = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
)(waveform)
griffin_lim = T.GriffinLim(
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
)
waveform = griffin_lim(spec)
plot_waveform(waveform, sample_rate, title="Reconstructed")
play_audio(waveform, sample_rate)
######################################################################
# Mel Filter Bank
# ---------------
#
# ``torchaudio.functional.melscale_fbanks`` generates the filter bank
# for converting frequency bins to mel-scale bins.
#
# Since this function does not require input audio/features, there is no
# equivalent transform in ``torchaudio.transforms``.
#
n_fft = 256
n_mels = 64
sample_rate = 6000
mel_filters = F.melscale_fbanks(
int(n_fft // 2 + 1),
n_mels=n_mels,
f_min=0.0,
f_max=sample_rate / 2.0,
sample_rate=sample_rate,
norm="slaney",
)
plot_mel_fbank(mel_filters, "Mel Filter Bank - torchaudio")
######################################################################
# Comparison against librosa
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For reference, here is the equivalent way to get the mel filter bank
# with ``librosa``.
#
mel_filters_librosa = librosa.filters.mel(
sample_rate,
n_fft,
n_mels=n_mels,
fmin=0.0,
fmax=sample_rate / 2.0,
norm="slaney",
htk=True,
).T
plot_mel_fbank(mel_filters_librosa, "Mel Filter Bank - librosa")
mse = torch.square(mel_filters - mel_filters_librosa).mean().item()
print("Mean Square Difference: ", mse)
######################################################################
# MelSpectrogram
# --------------
#
# Generating a mel-scale spectrogram involves generating a spectrogram
# and performing mel-scale conversion. In ``torchaudio``, ``MelSpectrogram`` provides
# this functionality.
#
waveform, sample_rate = get_speech_sample()
n_fft = 1024
win_length = None
hop_length = 512
n_mels = 128
mel_spectrogram = T.MelSpectrogram(
sample_rate=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
center=True,
pad_mode="reflect",
power=2.0,
norm="slaney",
onesided=True,
n_mels=n_mels,
mel_scale="htk",
)
melspec = mel_spectrogram(waveform)
plot_spectrogram(melspec[0], title="MelSpectrogram - torchaudio", ylabel="mel freq")
######################################################################
# Comparison against librosa
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For reference, here is the equivalent means of generating mel-scale
# spectrograms with ``librosa``.
#
melspec_librosa = librosa.feature.melspectrogram(
waveform.numpy()[0],
sr=sample_rate,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
center=True,
pad_mode="reflect",
power=2.0,
n_mels=n_mels,
norm="slaney",
htk=True,
)
plot_spectrogram(melspec_librosa, title="MelSpectrogram - librosa", ylabel="mel freq")
mse = torch.square(melspec - melspec_librosa).mean().item()
print("Mean Square Difference: ", mse)
######################################################################
# MFCC
# ----
#
waveform, sample_rate = get_speech_sample()
n_fft = 2048
win_length = None
hop_length = 512
n_mels = 256
n_mfcc = 256
mfcc_transform = T.MFCC(
sample_rate=sample_rate,
n_mfcc=n_mfcc,
melkwargs={
"n_fft": n_fft,
"n_mels": n_mels,
"hop_length": hop_length,
"mel_scale": "htk",
},
)
mfcc = mfcc_transform(waveform)
plot_spectrogram(mfcc[0])
######################################################################
# Comparing against librosa
# ~~~~~~~~~~~~~~~~~~~~~~~~~
#
melspec = librosa.feature.melspectrogram(
y=waveform.numpy()[0],
sr=sample_rate,
n_fft=n_fft,
win_length=win_length,
hop_length=hop_length,
n_mels=n_mels,
htk=True,
norm=None,
)
mfcc_librosa = librosa.feature.mfcc(
S=librosa.core.spectrum.power_to_db(melspec),
n_mfcc=n_mfcc,
dct_type=2,
norm="ortho",
)
plot_spectrogram(mfcc_librosa)
mse = torch.square(mfcc - mfcc_librosa).mean().item()
print("Mean Square Difference: ", mse)
######################################################################
# Pitch
# -----
#
waveform, sample_rate = get_speech_sample()
pitch = F.detect_pitch_frequency(waveform, sample_rate)
plot_pitch(waveform, sample_rate, pitch)
play_audio(waveform, sample_rate)
######################################################################
# Kaldi Pitch (beta)
# ------------------
#
# Kaldi Pitch feature [1] is a pitch detection mechanism tuned for automatic
# speech recognition (ASR) applications. This is a beta feature in ``torchaudio``,
# and it is available only in ``functional``.
#
# 1. A pitch extraction algorithm tuned for automatic speech recognition
#
# Ghahremani, B. BabaAli, D. Povey, K. Riedhammer, J. Trmal and S.
# Khudanpur
#
# 2014 IEEE International Conference on Acoustics, Speech and Signal
# Processing (ICASSP), Florence, 2014, pp. 2494-2498, doi:
# 10.1109/ICASSP.2014.6854049.
# [`abstract <https://ieeexplore.ieee.org/document/6854049>`__],
# [`paper <https://danielpovey.com/files/2014_icassp_pitch.pdf>`__]
#
waveform, sample_rate = get_speech_sample(resample=16000)
pitch_feature = F.compute_kaldi_pitch(waveform, sample_rate)
pitch, nfcc = pitch_feature[..., 0], pitch_feature[..., 1]
plot_kaldi_pitch(waveform, sample_rate, pitch, nfcc)
play_audio(waveform, sample_rate)
|
"""
MVDR with torchaudio
====================
**Author** `Zhaoheng Ni <[email protected]>`__
"""
######################################################################
# Overview
# --------
#
# This is a tutorial on how to apply MVDR beamforming by using `torchaudio <https://github.com/pytorch/audio>`__.
#
# Steps
#
# - Ideal Ratio Mask (IRM) is generated by dividing the clean/noise
# magnitude by the mixture magnitude.
# - We test all three solutions (``ref_channel``, ``stv_evd``, ``stv_power``)
# of torchaudio's MVDR module.
# - We test the single-channel and multi-channel masks for MVDR beamforming.
# The multi-channel mask is averaged along channel dimension when computing
# the covariance matrices of speech and noise, respectively.
######################################################################
# Preparation
# -----------
#
# First, we import the necessary packages and retrieve the data.
#
# The multi-channel audio example is selected from
# `ConferencingSpeech <https://github.com/ConferencingSpeech/ConferencingSpeech2021>`__
# dataset.
#
# The original filename is
#
# ``SSB07200001\#noise-sound-bible-0038\#7.86_6.16_3.00_3.14_4.84_134.5285_191.7899_0.4735\#15217\#25.16333303751458\#0.2101221178590021.wav``
#
# which was generated with:
#
# - ``SSB07200001.wav`` from `AISHELL-3 <https://www.openslr.org/93/>`__ (Apache License v.2.0)
# - ``noise-sound-bible-0038.wav`` from `MUSAN <http://www.openslr.org/17/>`__ (Attribution 4.0 International — CC BY 4.0) # noqa: E501
#
import os
import requests
import torch
import torchaudio
import IPython.display as ipd
torch.random.manual_seed(0)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(torch.__version__)
print(torchaudio.__version__)
print(device)
filenames = [
"mix.wav",
"reverb_clean.wav",
"clean.wav",
]
base_url = "https://download.pytorch.org/torchaudio/tutorial-assets/mvdr"
for filename in filenames:
os.makedirs("_assets", exist_ok=True)
if not os.path.exists(filename):
with open(f"_assets/{filename}", "wb") as file:
file.write(requests.get(f"{base_url}/{filename}").content)
######################################################################
# Generate the Ideal Ratio Mask (IRM)
# -----------------------------------
#
######################################################################
# Loading audio data
# ~~~~~~~~~~~~~~~~~~
#
mix, sr = torchaudio.load("_assets/mix.wav")
reverb_clean, sr2 = torchaudio.load("_assets/reverb_clean.wav")
clean, sr3 = torchaudio.load("_assets/clean.wav")
assert sr == sr2
noise = mix - reverb_clean
######################################################################
#
# .. note::
#    The MVDR module requires the ``torch.cdouble`` dtype for the noisy STFT,
#    so we need to convert the dtype of the waveforms to ``torch.double``.
#
mix = mix.to(torch.double)
noise = noise.to(torch.double)
clean = clean.to(torch.double)
reverb_clean = reverb_clean.to(torch.double)
######################################################################
# Compute STFT
# ~~~~~~~~~~~~
#
stft = torchaudio.transforms.Spectrogram(
n_fft=1024,
hop_length=256,
power=None,
)
istft = torchaudio.transforms.InverseSpectrogram(n_fft=1024, hop_length=256)
spec_mix = stft(mix)
spec_clean = stft(clean)
spec_reverb_clean = stft(reverb_clean)
spec_noise = stft(noise)
######################################################################
# Generate the Ideal Ratio Mask (IRM)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# .. note::
#    We found that using the mask directly performs better than using its
#    square root. This is slightly different from the definition of the IRM.
#
def get_irms(spec_clean, spec_noise):
mag_clean = spec_clean.abs() ** 2
mag_noise = spec_noise.abs() ** 2
irm_speech = mag_clean / (mag_clean + mag_noise)
irm_noise = mag_noise / (mag_clean + mag_noise)
return irm_speech, irm_noise
######################################################################
# .. note::
#    We use the reverberant clean speech as the target here;
#    you can also set it to the dry clean speech.
irm_speech, irm_noise = get_irms(spec_reverb_clean, spec_noise)
######################################################################
# Apply MVDR
# ----------
#
######################################################################
# Apply MVDR beamforming by using multi-channel masks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
results_multi = {}
for solution in ["ref_channel", "stv_evd", "stv_power"]:
mvdr = torchaudio.transforms.MVDR(ref_channel=0, solution=solution, multi_mask=True)
stft_est = mvdr(spec_mix, irm_speech, irm_noise)
est = istft(stft_est, length=mix.shape[-1])
results_multi[solution] = est
######################################################################
# Apply MVDR beamforming by using single-channel masks
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We use the first channel as an example.
# The channel selection may depend on the design of the microphone array.
results_single = {}
for solution in ["ref_channel", "stv_evd", "stv_power"]:
mvdr = torchaudio.transforms.MVDR(
ref_channel=0, solution=solution, multi_mask=False
)
stft_est = mvdr(spec_mix, irm_speech[0], irm_noise[0])
est = istft(stft_est, length=mix.shape[-1])
results_single[solution] = est
######################################################################
# Compute Si-SDR scores
# ~~~~~~~~~~~~~~~~~~~~~
#
def si_sdr(estimate, reference, epsilon=1e-8):
estimate = estimate - estimate.mean()
reference = reference - reference.mean()
reference_pow = reference.pow(2).mean(axis=1, keepdim=True)
mix_pow = (estimate * reference).mean(axis=1, keepdim=True)
scale = mix_pow / (reference_pow + epsilon)
reference = scale * reference
error = estimate - reference
reference_pow = reference.pow(2)
error_pow = error.pow(2)
reference_pow = reference_pow.mean(axis=1)
error_pow = error_pow.mean(axis=1)
sisdr = 10 * torch.log10(reference_pow) - 10 * torch.log10(error_pow)
return sisdr.item()
######################################################################
# Results
# -------
#
######################################################################
# Single-channel mask results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
for solution in results_single:
print(
solution + ": ", si_sdr(results_single[solution][None, ...], reverb_clean[0:1])
)
######################################################################
# Multi-channel mask results
# ~~~~~~~~~~~~~~~~~~~~~~~~~~
#
for solution in results_multi:
print(
solution + ": ", si_sdr(results_multi[solution][None, ...], reverb_clean[0:1])
)
######################################################################
# Original audio
# --------------
#
######################################################################
# Mixture speech
# ~~~~~~~~~~~~~~
#
ipd.Audio(mix[0], rate=16000)
######################################################################
# Noise
# ~~~~~
#
ipd.Audio(noise[0], rate=16000)
######################################################################
# Clean speech
# ~~~~~~~~~~~~
#
ipd.Audio(clean[0], rate=16000)
######################################################################
# Enhanced audio
# --------------
#
######################################################################
# Multi-channel mask, ref_channel solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_multi["ref_channel"], rate=16000)
######################################################################
# Multi-channel mask, stv_evd solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_multi["stv_evd"], rate=16000)
######################################################################
# Multi-channel mask, stv_power solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_multi["stv_power"], rate=16000)
######################################################################
# Single-channel mask, ref_channel solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_single["ref_channel"], rate=16000)
######################################################################
# Single-channel mask, stv_evd solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_single["stv_evd"], rate=16000)
######################################################################
# Single-channel mask, stv_power solution
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
ipd.Audio(results_single["stv_power"], rate=16000)
|
# -*- coding: utf-8 -*-
"""
Audio Datasets
==============
``torchaudio`` provides easy access to common, publicly accessible
datasets. Please refer to the official documentation for the list of
available datasets.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import multiprocessing
import os
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
YESNO_DATASET_PATH = os.path.join(_SAMPLE_DIR, "yes_no")
os.makedirs(YESNO_DATASET_PATH, exist_ok=True)
def _download_yesno():
if os.path.exists(os.path.join(YESNO_DATASET_PATH, "waves_yesno.tar.gz")):
return
torchaudio.datasets.YESNO(root=YESNO_DATASET_PATH, download=True)
YESNO_DOWNLOAD_PROCESS = multiprocessing.Process(target=_download_yesno)
YESNO_DOWNLOAD_PROCESS.start()
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
######################################################################
# Here, we show how to use the ``YESNO`` dataset.
#
YESNO_DOWNLOAD_PROCESS.join()
dataset = torchaudio.datasets.YESNO(YESNO_DATASET_PATH, download=True)
for i in [1, 3, 5]:
waveform, sample_rate, label = dataset[i]
plot_specgram(waveform, sample_rate, title=f"Sample {i}: {label}")
play_audio(waveform, sample_rate)
|
# -*- coding: utf-8 -*-
"""
Audio Data Augmentation
=======================
``torchaudio`` provides a variety of ways to augment audio data.
"""
# When running this tutorial in Google Colab, install the required packages
# with the following.
# !pip install torchaudio
import torch
import torchaudio
import torchaudio.functional as F
print(torch.__version__)
print(torchaudio.__version__)
######################################################################
# Preparing data and utility functions (skip this section)
# --------------------------------------------------------
#
# @title Prepare data and utility functions. {display-mode: "form"}
# @markdown
# @markdown You do not need to look into this cell.
# @markdown Just execute once and you are good to go.
# @markdown
# @markdown In this tutorial, we will use speech data from the [VOiCES dataset](https://iqtlabs.github.io/voices/),
# @markdown which is licensed under Creative Commons BY 4.0.
# -------------------------------------------------------------------------------
# Preparation of data and helper functions.
# -------------------------------------------------------------------------------
import math
import os
import requests
import matplotlib.pyplot as plt
from IPython.display import Audio, display
_SAMPLE_DIR = "_assets"
SAMPLE_WAV_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/steam-train-whistle-daniel_simon.wav"
SAMPLE_WAV_PATH = os.path.join(_SAMPLE_DIR, "steam.wav")
SAMPLE_RIR_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/room-response/rm1/impulse/Lab41-SRI-VOiCES-rm1-impulse-mc01-stu-clo.wav" # noqa: E501
SAMPLE_RIR_PATH = os.path.join(_SAMPLE_DIR, "rir.wav")
SAMPLE_WAV_SPEECH_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/source-16k/train/sp0307/Lab41-SRI-VOiCES-src-sp0307-ch127535-sg0042.wav" # noqa: E501
SAMPLE_WAV_SPEECH_PATH = os.path.join(_SAMPLE_DIR, "speech.wav")
SAMPLE_NOISE_URL = "https://pytorch-tutorial-assets.s3.amazonaws.com/VOiCES_devkit/distant-16k/distractors/rm1/babb/Lab41-SRI-VOiCES-rm1-babb-mc01-stu-clo.wav" # noqa: E501
SAMPLE_NOISE_PATH = os.path.join(_SAMPLE_DIR, "bg.wav")
os.makedirs(_SAMPLE_DIR, exist_ok=True)
def _fetch_data():
uri = [
(SAMPLE_WAV_URL, SAMPLE_WAV_PATH),
(SAMPLE_RIR_URL, SAMPLE_RIR_PATH),
(SAMPLE_WAV_SPEECH_URL, SAMPLE_WAV_SPEECH_PATH),
(SAMPLE_NOISE_URL, SAMPLE_NOISE_PATH),
]
for url, path in uri:
with open(path, "wb") as file_:
file_.write(requests.get(url).content)
_fetch_data()
def _get_sample(path, resample=None):
effects = [["remix", "1"]]
if resample:
effects.extend(
[
["lowpass", f"{resample // 2}"],
["rate", f"{resample}"],
]
)
return torchaudio.sox_effects.apply_effects_file(path, effects=effects)
def get_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_PATH, resample=resample)
def get_speech_sample(*, resample=None):
return _get_sample(SAMPLE_WAV_SPEECH_PATH, resample=resample)
def plot_waveform(waveform, sample_rate, title="Waveform", xlim=None, ylim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
time_axis = torch.arange(0, num_frames) / sample_rate
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].plot(time_axis, waveform[c], linewidth=1)
axes[c].grid(True)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
if ylim:
axes[c].set_ylim(ylim)
figure.suptitle(title)
plt.show(block=False)
def print_stats(waveform, sample_rate=None, src=None):
if src:
print("-" * 10)
print("Source:", src)
print("-" * 10)
if sample_rate:
print("Sample Rate:", sample_rate)
print("Shape:", tuple(waveform.shape))
print("Dtype:", waveform.dtype)
print(f" - Max: {waveform.max().item():6.3f}")
print(f" - Min: {waveform.min().item():6.3f}")
print(f" - Mean: {waveform.mean().item():6.3f}")
print(f" - Std Dev: {waveform.std().item():6.3f}")
print()
print(waveform)
print()
def plot_specgram(waveform, sample_rate, title="Spectrogram", xlim=None):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
figure, axes = plt.subplots(num_channels, 1)
if num_channels == 1:
axes = [axes]
for c in range(num_channels):
axes[c].specgram(waveform[c], Fs=sample_rate)
if num_channels > 1:
axes[c].set_ylabel(f"Channel {c+1}")
if xlim:
axes[c].set_xlim(xlim)
figure.suptitle(title)
plt.show(block=False)
def play_audio(waveform, sample_rate):
waveform = waveform.numpy()
num_channels, num_frames = waveform.shape
if num_channels == 1:
display(Audio(waveform[0], rate=sample_rate))
elif num_channels == 2:
display(Audio((waveform[0], waveform[1]), rate=sample_rate))
else:
raise ValueError("Waveform with more than 2 channels are not supported.")
def get_rir_sample(*, resample=None, processed=False):
rir_raw, sample_rate = _get_sample(SAMPLE_RIR_PATH, resample=resample)
if not processed:
return rir_raw, sample_rate
rir = rir_raw[:, int(sample_rate * 1.01): int(sample_rate * 1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
return rir, sample_rate
def get_noise_sample(*, resample=None):
return _get_sample(SAMPLE_NOISE_PATH, resample=resample)
######################################################################
# Applying effects and filtering
# ------------------------------
#
# ``torchaudio.sox_effects`` allows for directly applying filters similar to
# those available in ``sox`` to Tensor objects and file object audio sources.
#
# There are two functions for this:
#
# - ``torchaudio.sox_effects.apply_effects_tensor`` for applying effects
# to Tensor.
# - ``torchaudio.sox_effects.apply_effects_file`` for applying effects to
# other audio sources.
#
# Both functions accept effect definitions in the form
# ``List[List[str]]``.
# This is mostly consistent with how the ``sox`` command works, but one caveat is
# that ``sox`` adds some effects automatically, whereas ``torchaudio``’s
# implementation does not.
#
# For the list of available effects, please refer to `the sox
# documentation <http://sox.sourceforge.net/sox.html>`__.
#
# **Tip** If you need to load and resample your audio data on the fly,
# then you can use ``torchaudio.sox_effects.apply_effects_file`` with
# effect ``"rate"`` (a short sketch of this is shown after the example below).
#
# **Note** ``apply_effects_file`` accepts a file-like object or path-like
# object. Similar to ``torchaudio.load``, when the audio format cannot be
# inferred from either the file extension or header, you can provide
# argument ``format`` to specify the format of the audio source.
#
# **Note** This process is not differentiable.
#
# Load the data
waveform1, sample_rate1 = get_sample(resample=16000)
# Define effects
effects = [
["lowpass", "-1", "300"], # apply single-pole lowpass filter
["speed", "0.8"], # reduce the speed
    # The `speed` effect only changes the sample rate, so it is necessary to
    # add the `rate` effect with the original sample rate after this.
    ["rate", f"{sample_rate1}"],
    ["reverb", "-w"],  # Reverberation gives some dramatic feeling
]
# Apply effects
waveform2, sample_rate2 = torchaudio.sox_effects.apply_effects_tensor(
waveform1, sample_rate1, effects
)
plot_waveform(waveform1, sample_rate1, title="Original", xlim=(-0.1, 3.2))
plot_waveform(waveform2, sample_rate2, title="Effects Applied", xlim=(-0.1, 3.2))
print_stats(waveform1, sample_rate=sample_rate1, src="Original")
print_stats(waveform2, sample_rate=sample_rate2, src="Effects Applied")
######################################################################
# Note that the number of frames and number of channels are different from
# those of the original after the effects are applied. Let’s listen to the
# audio. Doesn’t it sound more dramatic?
#
plot_specgram(waveform1, sample_rate1, title="Original", xlim=(0, 3.04))
play_audio(waveform1, sample_rate1)
plot_specgram(waveform2, sample_rate2, title="Effects Applied", xlim=(0, 3.04))
play_audio(waveform2, sample_rate2)
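######################################################################
# As mentioned in the tip above, ``apply_effects_file`` can load and resample
# audio in one step. The following is a minimal sketch added for illustration
# (it reuses the speech sample prepared earlier and introduces new variable names):
#
waveform3, sample_rate3 = torchaudio.sox_effects.apply_effects_file(
    SAMPLE_WAV_SPEECH_PATH,
    effects=[
        ["lowpass", "4000"],  # optional anti-aliasing filter before downsampling
        ["rate", "8000"],  # resample to 8000 Hz while loading
    ],
)
print_stats(waveform3, sample_rate=sample_rate3, src="Loaded and resampled on the fly")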
######################################################################
# Simulating room reverberation
# -----------------------------
#
# `Convolution
# reverb <https://en.wikipedia.org/wiki/Convolution_reverb>`__ is a
# technique that's used to make clean audio sound as though it has been
# produced in a different environment.
#
# Using Room Impulse Response (RIR), for instance, we can make clean speech
# sound as though it has been uttered in a conference room.
#
# For this process, we need RIR data. The following data are from the VOiCES
# dataset, but you can record your own — just turn on your microphone
# and clap your hands.
#
sample_rate = 8000
rir_raw, _ = get_rir_sample(resample=sample_rate)
plot_waveform(rir_raw, sample_rate, title="Room Impulse Response (raw)", ylim=None)
plot_specgram(rir_raw, sample_rate, title="Room Impulse Response (raw)")
play_audio(rir_raw, sample_rate)
######################################################################
# First, we need to clean up the RIR. We extract the main impulse, normalize
# the signal power, then flip along the time axis.
#
rir = rir_raw[:, int(sample_rate * 1.01): int(sample_rate * 1.3)]
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
print_stats(rir)
plot_waveform(rir, sample_rate, title="Room Impulse Response", ylim=None)
######################################################################
# Then, we convolve the speech signal with the RIR filter.
#
speech, _ = get_speech_sample(resample=sample_rate)
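# Note: ``torch.nn.functional.conv1d`` computes cross-correlation, which is why the
# RIR was flipped along the time axis above; padding the front of the signal by
# ``rir.shape[1] - 1`` keeps the output length equal to the input length.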
speech_ = torch.nn.functional.pad(speech, (rir.shape[1] - 1, 0))
augmented = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]
plot_waveform(speech, sample_rate, title="Original", ylim=None)
plot_waveform(augmented, sample_rate, title="RIR Applied", ylim=None)
plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)
plot_specgram(augmented, sample_rate, title="RIR Applied")
play_audio(augmented, sample_rate)
######################################################################
# Adding background noise
# -----------------------
#
# To add background noise to audio data, you can simply add a noise Tensor to
# the Tensor representing the audio data. A common method to adjust the
# intensity of noise is changing the Signal-to-Noise Ratio (SNR).
# [`wikipedia <https://en.wikipedia.org/wiki/Signal-to-noise_ratio>`__]
#
# \begin{align}\mathrm{SNR} = \frac{P_{\mathrm{signal}}}{P_{\mathrm{noise}}}\end{align}
#
# \begin{align}\mathrm{SNR_{dB}} = 10 \log_{10}\left(\mathrm{SNR}\right)\end{align}
#
sample_rate = 8000
speech, _ = get_speech_sample(resample=sample_rate)
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, : speech.shape[1]]
plot_waveform(noise, sample_rate, title="Background noise")
plot_specgram(noise, sample_rate, title="Background noise")
play_audio(noise, sample_rate)
speech_power = speech.norm(p=2)
noise_power = noise.norm(p=2)
for snr_db in [20, 10, 3]:
    # ``norm(p=2)`` is an amplitude-like quantity, so convert the desired SNR
    # (a power ratio in dB) to an amplitude ratio: 10 ** (snr_db / 20)
    snr = 10 ** (snr_db / 20)
scale = snr * noise_power / speech_power
noisy_speech = (scale * speech + noise) / 2
plot_waveform(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
plot_specgram(noisy_speech, sample_rate, title=f"SNR: {snr_db} [dB]")
play_audio(noisy_speech, sample_rate)
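######################################################################
# As a quick sanity check (an addition, not part of the original recipe), we can
# measure the ratio between the scaled speech and the noise in the last mixture
# generated above (``snr_db = 3``) and confirm that it matches the requested value.
#
measured_snr_db = 10 * torch.log10((scale * speech).pow(2).sum() / noise.pow(2).sum())
print(f"Requested SNR: 3 dB / Measured SNR: {measured_snr_db.item():.2f} dB")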
######################################################################
# Applying codec to Tensor object
# -------------------------------
#
# ``torchaudio.functional.apply_codec`` can apply codecs to a Tensor object.
#
# **Note** This process is not differentiable.
#
waveform, sample_rate = get_speech_sample(resample=8000)
plot_specgram(waveform, sample_rate, title="Original")
play_audio(waveform, sample_rate)
configs = [
({"format": "wav", "encoding": "ULAW", "bits_per_sample": 8}, "8 bit mu-law"),
({"format": "gsm"}, "GSM-FR"),
({"format": "mp3", "compression": -9}, "MP3"),
({"format": "vorbis", "compression": -1}, "Vorbis"),
]
for param, title in configs:
augmented = F.apply_codec(waveform, sample_rate, **param)
plot_specgram(augmented, sample_rate, title=title)
play_audio(augmented, sample_rate)
######################################################################
# Simulating a phone recording
# ----------------------------
#
# Combining the previous techniques, we can simulate audio that sounds
# like a person talking over a phone in an echoey room with people talking
# in the background.
#
sample_rate = 16000
speech, _ = get_speech_sample(resample=sample_rate)
plot_specgram(speech, sample_rate, title="Original")
play_audio(speech, sample_rate)
# Apply RIR
rir, _ = get_rir_sample(resample=sample_rate, processed=True)
speech_ = torch.nn.functional.pad(speech, (rir.shape[1] - 1, 0))
speech = torch.nn.functional.conv1d(speech_[None, ...], rir[None, ...])[0]
plot_specgram(speech, sample_rate, title="RIR Applied")
play_audio(speech, sample_rate)
# Add background noise
# Because the noise is recorded in the actual environment, we consider that
# the noise contains the acoustic features of the environment. Therefore, we add
# the noise after RIR application.
noise, _ = get_noise_sample(resample=sample_rate)
noise = noise[:, : speech.shape[1]]
snr_db = 8
# Convert the desired SNR (a power ratio in dB) to an amplitude ratio for the L2 norms
scale = 10 ** (snr_db / 20) * noise.norm(p=2) / speech.norm(p=2)
speech = (scale * speech + noise) / 2
plot_specgram(speech, sample_rate, title="BG noise added")
play_audio(speech, sample_rate)
# Apply filtering and change sample rate
speech, sample_rate = torchaudio.sox_effects.apply_effects_tensor(
speech,
sample_rate,
effects=[
["lowpass", "4000"],
[
"compand",
"0.02,0.05",
"-60,-60,-30,-10,-20,-8,-5,-8,-2,-8",
"-8",
"-7",
"0.05",
],
["rate", "8000"],
],
)
plot_specgram(speech, sample_rate, title="Filtered")
play_audio(speech, sample_rate)
# Apply telephony codec
speech = F.apply_codec(speech, sample_rate, format="gsm")
plot_specgram(speech, sample_rate, title="GSM Codec Applied")
play_audio(speech, sample_rate)
|
import random
import torch
from torch.utils.data.dataset import random_split
from torchaudio.datasets import LJSPEECH, LIBRITTS
from torchaudio.transforms import MuLawEncoding
from processing import bits_to_normalized_waveform, normalized_waveform_to_bits
class MapMemoryCache(torch.utils.data.Dataset):
r"""Wrap a dataset so that, whenever a new item is returned, it is saved to memory.
"""
def __init__(self, dataset):
self.dataset = dataset
self._cache = [None] * len(dataset)
def __getitem__(self, n):
if self._cache[n] is not None:
return self._cache[n]
item = self.dataset[n]
self._cache[n] = item
return item
def __len__(self):
return len(self.dataset)
class Processed(torch.utils.data.Dataset):
def __init__(self, dataset, transforms):
self.dataset = dataset
self.transforms = transforms
def __getitem__(self, key):
item = self.dataset[key]
return self.process_datapoint(item)
def __len__(self):
return len(self.dataset)
def process_datapoint(self, item):
specgram = self.transforms(item[0])
return item[0].squeeze(0), specgram
def split_process_dataset(args, transforms):
if args.dataset == 'ljspeech':
data = LJSPEECH(root=args.file_path, download=False)
val_length = int(len(data) * args.val_ratio)
lengths = [len(data) - val_length, val_length]
train_dataset, val_dataset = random_split(data, lengths)
elif args.dataset == 'libritts':
train_dataset = LIBRITTS(root=args.file_path, url='train-clean-100', download=False)
val_dataset = LIBRITTS(root=args.file_path, url='dev-clean', download=False)
else:
raise ValueError(f"Expected dataset: `ljspeech` or `libritts`, but found {args.dataset}")
train_dataset = Processed(train_dataset, transforms)
val_dataset = Processed(val_dataset, transforms)
train_dataset = MapMemoryCache(train_dataset)
val_dataset = MapMemoryCache(val_dataset)
return train_dataset, val_dataset
def collate_factory(args):
def raw_collate(batch):
pad = (args.kernel_size - 1) // 2
# input waveform length
wave_length = args.hop_length * args.seq_len_factor
# input spectrogram length
spec_length = args.seq_len_factor + pad * 2
        # max start position in spectrogram
max_offsets = [x[1].shape[-1] - (spec_length + pad * 2) for x in batch]
        # random start position in spectrogram
spec_offsets = [random.randint(0, offset) for offset in max_offsets]
        # random start position in waveform
wave_offsets = [(offset + pad) * args.hop_length for offset in spec_offsets]
waveform_combine = [
x[0][wave_offsets[i]: wave_offsets[i] + wave_length + 1]
for i, x in enumerate(batch)
]
specgram = [
x[1][:, spec_offsets[i]: spec_offsets[i] + spec_length]
for i, x in enumerate(batch)
]
specgram = torch.stack(specgram)
waveform_combine = torch.stack(waveform_combine)
waveform = waveform_combine[:, :wave_length]
target = waveform_combine[:, 1:]
# waveform: [-1, 1], target: [0, 2**bits-1] if loss = 'crossentropy'
if args.loss == "crossentropy":
if args.mulaw:
mulaw_encode = MuLawEncoding(2 ** args.n_bits)
waveform = mulaw_encode(waveform)
target = mulaw_encode(target)
waveform = bits_to_normalized_waveform(waveform, args.n_bits)
else:
target = normalized_waveform_to_bits(target, args.n_bits)
return waveform.unsqueeze(1), specgram.unsqueeze(1), target.unsqueeze(1)
return raw_collate
|
# *****************************************************************************
# Copyright (c) 2019 fatchord (https://github.com/fatchord)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# *****************************************************************************
from torchaudio.models.wavernn import WaveRNN
import torch
import torchaudio
from torch import Tensor
from processing import normalized_waveform_to_bits
def _fold_with_overlap(x: Tensor, timesteps: int, overlap: int) -> Tensor:
r'''Fold the tensor with overlap for quick batched inference.
Overlap will be used for crossfading in xfade_and_unfold().
x = [[h1, h2, ... hn]]
Where each h is a vector of conditioning channels
    Eg: timesteps=2, overlap=1 with x.size(2)=10
folded = [[h1, h2, h3, h4],
[h4, h5, h6, h7],
[h7, h8, h9, h10]]
Args:
        x (tensor): Upsampled conditioning channels of size (1, channel, timesteps).
timesteps (int): Timesteps for each index of batch.
overlap (int): Timesteps for both xfade and rnn warmup.
Return:
        folded (tensor): folded tensor of size (n_folds, channel, timesteps + 2 * overlap).
'''
_, channels, total_len = x.size()
# Calculate variables needed
n_folds = (total_len - overlap) // (timesteps + overlap)
extended_len = n_folds * (overlap + timesteps) + overlap
remaining = total_len - extended_len
    # Pad if some time steps are poking out
if remaining != 0:
n_folds += 1
padding = timesteps + 2 * overlap - remaining
x = torch.nn.functional.pad(x, (0, padding))
folded = torch.zeros((n_folds, channels, timesteps + 2 * overlap), device=x.device)
# Get the values for the folded tensor
for i in range(n_folds):
start = i * (timesteps + overlap)
end = start + timesteps + 2 * overlap
folded[i] = x[0, :, start:end]
return folded
def _xfade_and_unfold(y: Tensor, overlap: int) -> Tensor:
r'''Applies a crossfade and unfolds into a 1d array.
y = [[seq1],
[seq2],
[seq3]]
Apply a gain envelope at both ends of the sequences
y = [[seq1_in, seq1_timesteps, seq1_out],
[seq2_in, seq2_timesteps, seq2_out],
[seq3_in, seq3_timesteps, seq3_out]]
Stagger and add up the groups of samples:
[seq1_in, seq1_timesteps, (seq1_out + seq2_in), seq2_timesteps, ...]
Args:
y (Tensor): Batched sequences of audio samples of size
(num_folds, channels, timesteps + 2 * overlap).
overlap (int): Timesteps for both xfade and rnn warmup.
Returns:
        unfolded waveform (Tensor): waveform tensor of size (channels, total_len).
'''
num_folds, channels, length = y.shape
timesteps = length - 2 * overlap
total_len = num_folds * (timesteps + overlap) + overlap
# Need some silence for the rnn warmup
silence_len = overlap // 2
fade_len = overlap - silence_len
silence = torch.zeros((silence_len), dtype=y.dtype, device=y.device)
linear = torch.ones((silence_len), dtype=y.dtype, device=y.device)
# Equal power crossfade
t = torch.linspace(-1, 1, fade_len, dtype=y.dtype, device=y.device)
fade_in = torch.sqrt(0.5 * (1 + t))
fade_out = torch.sqrt(0.5 * (1 - t))
# Concat the silence to the fades
fade_in = torch.cat([silence, fade_in])
fade_out = torch.cat([linear, fade_out])
# Apply the gain to the overlap samples
y[:, :, :overlap] *= fade_in
y[:, :, -overlap:] *= fade_out
unfolded = torch.zeros((channels, total_len), dtype=y.dtype, device=y.device)
# Loop to add up all the samples
for i in range(num_folds):
start = i * (timesteps + overlap)
end = start + timesteps + 2 * overlap
unfolded[:, start:end] += y[i]
return unfolded
class WaveRNNInferenceWrapper(torch.nn.Module):
def __init__(self, wavernn: WaveRNN):
super().__init__()
self.wavernn_model = wavernn
def forward(self,
specgram: Tensor,
mulaw: bool = True,
batched: bool = True,
timesteps: int = 100,
overlap: int = 5) -> Tensor:
r"""Inference function for WaveRNN.
Based on the implementation from
https://github.com/fatchord/WaveRNN/blob/master/models/fatchord_version.py.
Currently only supports multinomial sampling.
Args:
specgram (Tensor): spectrogram of size (n_mels, n_time)
mulaw (bool, optional): Whether to perform mulaw decoding (Default: ``True``).
batched (bool, optional): Whether to perform batch prediction. Using batch prediction
will significantly increase the inference speed (Default: ``True``).
timesteps (int, optional): The time steps for each batch. Only used when `batched`
is set to True (Default: ``100``).
overlap (int, optional): The overlapping time steps between batches. Only used when
`batched` is set to True (Default: ``5``).
Returns:
            waveform (Tensor): Reconstructed waveform of size (1, n_time),
                where 1 represents a single channel.
"""
specgram = specgram.unsqueeze(0)
if batched:
specgram = _fold_with_overlap(specgram, timesteps, overlap)
output = self.wavernn_model.infer(specgram).cpu()
if mulaw:
output = normalized_waveform_to_bits(output, self.wavernn_model.n_bits)
output = torchaudio.functional.mu_law_decoding(output, self.wavernn_model.n_classes)
if batched:
output = _xfade_and_unfold(output, overlap)
else:
output = output[0]
return output
|
import logging
import os
import shutil
from collections import defaultdict, deque
import torch
class MetricLogger:
r"""Logger for model metrics
"""
def __init__(self, group, print_freq=1):
self.print_freq = print_freq
self._iter = 0
self.data = defaultdict(lambda: deque(maxlen=self.print_freq))
self.data["group"].append(group)
def __setitem__(self, key, value):
self.data[key].append(value)
def _get_last(self):
return {k: v[-1] for k, v in self.data.items()}
def __str__(self):
return str(self._get_last())
def __call__(self):
self._iter = (self._iter + 1) % self.print_freq
if not self._iter:
print(self, flush=True)
def save_checkpoint(state, is_best, filename):
r"""Save the model to a temporary file first,
then copy it to filename, in case the signal interrupts
the torch.save() process.
"""
if filename == "":
return
tempfile = filename + ".temp"
    # Remove tempfile in case of an interruption during the copy from tempfile to filename
if os.path.isfile(tempfile):
os.remove(tempfile)
torch.save(state, tempfile)
if os.path.isfile(tempfile):
os.rename(tempfile, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
logging.info("Checkpoint: saved")
def count_parameters(model):
r"""Count the total number of parameters in the model
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
|
import argparse
import torch
import torchaudio
from torchaudio.transforms import MelSpectrogram
from torchaudio.models import wavernn
from torchaudio.models.wavernn import _MODEL_CONFIG_AND_URLS
from torchaudio.datasets import LJSPEECH
from wavernn_inference_wrapper import WaveRNNInferenceWrapper
from processing import NormalizeDB
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--output-wav-path", default="./output.wav", type=str, metavar="PATH",
help="The path to output the reconstructed wav file.",
)
parser.add_argument(
"--jit", default=False, action="store_true",
help="If used, the model and inference function is jitted."
)
parser.add_argument(
"--no-batch-inference", default=False, action="store_true",
help="Don't use batch inference."
)
parser.add_argument(
"--no-mulaw", default=False, action="store_true",
help="Don't use mulaw decoder to decoder the signal."
)
parser.add_argument(
"--checkpoint-name", default="wavernn_10k_epochs_8bits_ljspeech",
choices=list(_MODEL_CONFIG_AND_URLS.keys()),
help="Select the WaveRNN checkpoint."
)
parser.add_argument(
"--batch-timesteps", default=100, type=int,
help="The time steps for each batch. Only used when batch inference is used",
)
parser.add_argument(
"--batch-overlap", default=5, type=int,
help="The overlapping time steps between batches. Only used when batch inference is used",
)
args = parser.parse_args()
return args
def main(args):
device = "cuda" if torch.cuda.is_available() else "cpu"
waveform, sample_rate, _, _ = LJSPEECH("./", download=True)[0]
mel_kwargs = {
'sample_rate': sample_rate,
'n_fft': 2048,
'f_min': 40.,
'n_mels': 80,
'win_length': 1100,
'hop_length': 275,
'mel_scale': 'slaney',
'norm': 'slaney',
'power': 1,
}
transforms = torch.nn.Sequential(
MelSpectrogram(**mel_kwargs),
NormalizeDB(min_level_db=-100, normalization=True),
)
mel_specgram = transforms(waveform)
wavernn_model = wavernn(args.checkpoint_name).eval().to(device)
wavernn_inference_model = WaveRNNInferenceWrapper(wavernn_model)
if args.jit:
wavernn_inference_model = torch.jit.script(wavernn_inference_model)
with torch.no_grad():
output = wavernn_inference_model(mel_specgram.to(device),
mulaw=(not args.no_mulaw),
batched=(not args.no_batch_inference),
timesteps=args.batch_timesteps,
overlap=args.batch_overlap,)
torchaudio.save(args.output_wav_path, output, sample_rate=sample_rate)
if __name__ == "__main__":
args = parse_args()
main(args)
|
import math
import torch
from torch import nn as nn
from torch.nn import functional as F
class LongCrossEntropyLoss(nn.Module):
r""" CrossEntropy loss
"""
def __init__(self):
super(LongCrossEntropyLoss, self).__init__()
def forward(self, output, target):
output = output.transpose(1, 2)
target = target.long()
criterion = nn.CrossEntropyLoss()
return criterion(output, target)
class MoLLoss(nn.Module):
r""" Discretized mixture of logistic distributions loss
Adapted from wavenet vocoder
(https://github.com/r9y9/wavenet_vocoder/blob/master/wavenet_vocoder/mixture.py)
Explanation of loss (https://github.com/Rayhane-mamah/Tacotron-2/issues/155)
Args:
y_hat (Tensor): Predicted output (n_batch x n_time x n_channel)
y (Tensor): Target (n_batch x n_time x 1)
num_classes (int): Number of classes
log_scale_min (float): Log scale minimum value
reduce (bool): If True, the losses are averaged or summed for each minibatch
Returns
Tensor: loss
"""
def __init__(self, num_classes=65536, log_scale_min=None, reduce=True):
super(MoLLoss, self).__init__()
self.num_classes = num_classes
self.log_scale_min = log_scale_min
self.reduce = reduce
def forward(self, y_hat, y):
y = y.unsqueeze(-1)
if self.log_scale_min is None:
self.log_scale_min = math.log(1e-14)
assert y_hat.dim() == 3
assert y_hat.size(-1) % 3 == 0
nr_mix = y_hat.size(-1) // 3
# unpack parameters (n_batch, n_time, num_mixtures) x 3
logit_probs = y_hat[:, :, :nr_mix]
means = y_hat[:, :, nr_mix: 2 * nr_mix]
log_scales = torch.clamp(
y_hat[:, :, 2 * nr_mix: 3 * nr_mix], min=self.log_scale_min
)
# (n_batch x n_time x 1) to (n_batch x n_time x num_mixtures)
y = y.expand_as(means)
centered_y = y - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_y + 1.0 / (self.num_classes - 1))
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_y - 1.0 / (self.num_classes - 1))
cdf_min = torch.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
# equivalent: torch.log(F.sigmoid(plus_in))
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
# equivalent: (1 - F.sigmoid(min_in)).log()
log_one_minus_cdf_min = -F.softplus(min_in)
# probability for all other cases
cdf_delta = cdf_plus - cdf_min
mid_in = inv_stdv * centered_y
# log probability in the center of the bin, to be used in extreme cases
log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(
torch.clamp(cdf_delta, min=1e-12)
) + (1.0 - inner_inner_cond) * (
log_pdf_mid - math.log((self.num_classes - 1) / 2)
)
inner_cond = (y > 0.999).float()
inner_out = (
inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out
)
cond = (y < -0.999).float()
log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
log_probs = log_probs + F.log_softmax(logit_probs, -1)
if self.reduce:
return -torch.mean(_log_sum_exp(log_probs))
else:
return -_log_sum_exp(log_probs).unsqueeze(-1)
def _log_sum_exp(x):
r""" Numerically stable log_sum_exp implementation that prevents overflow
"""
axis = len(x.size()) - 1
m, _ = torch.max(x, dim=axis)
m2, _ = torch.max(x, dim=axis, keepdim=True)
return m + torch.log(torch.sum(torch.exp(x - m2), dim=axis))
|
import torch
import torch.nn as nn
class NormalizeDB(nn.Module):
r"""Normalize the spectrogram with a minimum db value
"""
def __init__(self, min_level_db, normalization):
super().__init__()
self.min_level_db = min_level_db
self.normalization = normalization
def forward(self, specgram):
specgram = torch.log10(torch.clamp(specgram.squeeze(0), min=1e-5))
if self.normalization:
return torch.clamp(
(self.min_level_db - 20 * specgram) / self.min_level_db, min=0, max=1
)
return specgram
def normalized_waveform_to_bits(waveform: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform waveform [-1, 1] to label [0, 2 ** bits - 1]
"""
assert abs(waveform).max() <= 1.0
waveform = (waveform + 1.0) * (2 ** bits - 1) / 2
return torch.clamp(waveform, 0, 2 ** bits - 1).int()
def bits_to_normalized_waveform(label: torch.Tensor, bits: int) -> torch.Tensor:
r"""Transform label [0, 2 ** bits - 1] to waveform [-1, 1]
"""
return 2 * label / (2 ** bits - 1.0) - 1.0
|
import argparse
import logging
import os
from collections import defaultdict
from datetime import datetime
from time import time
from typing import List
import torch
import torchaudio
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchaudio.datasets.utils import bg_iterator
from torchaudio.models.wavernn import WaveRNN
from datasets import collate_factory, split_process_dataset
from losses import LongCrossEntropyLoss, MoLLoss
from processing import NormalizeDB
from utils import MetricLogger, count_parameters, save_checkpoint
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--workers",
default=4,
type=int,
metavar="N",
help="number of data loading workers",
)
parser.add_argument(
"--checkpoint",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint",
)
parser.add_argument(
"--epochs",
default=8000,
type=int,
metavar="N",
help="number of total epochs to run",
)
parser.add_argument(
"--start-epoch", default=0, type=int, metavar="N", help="manual epoch number"
)
parser.add_argument(
"--print-freq",
default=10,
type=int,
metavar="N",
help="print frequency in epochs",
)
parser.add_argument(
"--dataset",
default="ljspeech",
choices=["ljspeech", "libritts"],
type=str,
help="select dataset to train with",
)
parser.add_argument(
"--batch-size", default=256, type=int, metavar="N", help="mini-batch size"
)
parser.add_argument(
"--learning-rate", default=1e-4, type=float, metavar="LR", help="learning rate",
)
parser.add_argument("--clip-grad", metavar="NORM", type=float, default=4.0)
parser.add_argument(
"--mulaw",
default=True,
action="store_true",
help="if used, waveform is mulaw encoded",
)
parser.add_argument(
"--jit", default=False, action="store_true", help="if used, model is jitted"
)
parser.add_argument(
"--upsample-scales",
default=[5, 5, 11],
        nargs="+",
        type=int,
help="the list of upsample scales",
)
parser.add_argument(
"--n-bits", default=8, type=int, help="the bits of output waveform",
)
parser.add_argument(
"--sample-rate",
default=22050,
type=int,
help="the rate of audio dimensions (samples per second)",
)
parser.add_argument(
"--hop-length",
default=275,
type=int,
help="the number of samples between the starts of consecutive frames",
)
parser.add_argument(
"--win-length", default=1100, type=int, help="the length of the STFT window",
)
parser.add_argument(
"--f-min", default=40.0, type=float, help="the minimum frequency",
)
parser.add_argument(
"--min-level-db",
default=-100,
type=float,
help="the minimum db value for spectrogam normalization",
)
parser.add_argument(
"--n-res-block", default=10, type=int, help="the number of ResBlock in stack",
)
parser.add_argument(
"--n-rnn", default=512, type=int, help="the dimension of RNN layer",
)
parser.add_argument(
"--n-fc", default=512, type=int, help="the dimension of fully connected layer",
)
parser.add_argument(
"--kernel-size",
default=5,
type=int,
help="the number of kernel size in the first Conv1d layer",
)
parser.add_argument(
"--n-freq", default=80, type=int, help="the number of spectrogram bins to use",
)
parser.add_argument(
"--n-hidden-melresnet",
default=128,
type=int,
help="the number of hidden dimensions of resblock in melresnet",
)
parser.add_argument(
"--n-output-melresnet", default=128, type=int, help="the output dimension of melresnet",
)
parser.add_argument(
"--n-fft", default=2048, type=int, help="the number of Fourier bins",
)
parser.add_argument(
"--loss",
default="crossentropy",
choices=["crossentropy", "mol"],
type=str,
help="the type of loss",
)
parser.add_argument(
"--seq-len-factor",
default=5,
type=int,
help="the length of each waveform to process per batch = hop_length * seq_len_factor",
)
parser.add_argument(
"--val-ratio",
default=0.1,
type=float,
help="the ratio of waveforms for validation",
)
parser.add_argument(
"--file-path", default="", type=str, help="the path of audio files",
)
parser.add_argument(
"--normalization", default=True, action="store_true", help="if True, spectrogram is normalized",
)
args = parser.parse_args()
return args
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch):
model.train()
sums = defaultdict(lambda: 0.0)
start1 = time()
metric = MetricLogger("train_iteration")
metric["epoch"] = epoch
for waveform, specgram, target in bg_iterator(data_loader, maxsize=2):
start2 = time()
waveform = waveform.to(device)
specgram = specgram.to(device)
target = target.to(device)
output = model(waveform, specgram)
output, target = output.squeeze(1), target.squeeze(1)
loss = criterion(output, target)
loss_item = loss.item()
sums["loss"] += loss_item
metric["loss"] = loss_item
optimizer.zero_grad()
loss.backward()
if args.clip_grad > 0:
gradient = torch.nn.utils.clip_grad_norm_(
model.parameters(), args.clip_grad
)
sums["gradient"] += gradient.item()
metric["gradient"] = gradient.item()
optimizer.step()
metric["iteration"] = sums["iteration"]
metric["time"] = time() - start2
metric()
sums["iteration"] += 1
avg_loss = sums["loss"] / len(data_loader)
metric = MetricLogger("train_epoch")
metric["epoch"] = epoch
metric["loss"] = sums["loss"] / len(data_loader)
metric["gradient"] = avg_loss
metric["time"] = time() - start1
metric()
def validate(model, criterion, data_loader, device, epoch):
with torch.no_grad():
model.eval()
sums = defaultdict(lambda: 0.0)
start = time()
for waveform, specgram, target in bg_iterator(data_loader, maxsize=2):
waveform = waveform.to(device)
specgram = specgram.to(device)
target = target.to(device)
output = model(waveform, specgram)
output, target = output.squeeze(1), target.squeeze(1)
loss = criterion(output, target)
sums["loss"] += loss.item()
avg_loss = sums["loss"] / len(data_loader)
metric = MetricLogger("validation")
metric["epoch"] = epoch
metric["loss"] = avg_loss
metric["time"] = time() - start
metric()
return avg_loss
def main(args):
devices = ["cuda" if torch.cuda.is_available() else "cpu"]
logging.info("Start time: {}".format(str(datetime.now())))
melkwargs = {
"n_fft": args.n_fft,
"power": 1,
"hop_length": args.hop_length,
"win_length": args.win_length,
}
transforms = torch.nn.Sequential(
torchaudio.transforms.MelSpectrogram(
sample_rate=args.sample_rate,
n_mels=args.n_freq,
f_min=args.f_min,
mel_scale='slaney',
norm='slaney',
**melkwargs,
),
NormalizeDB(min_level_db=args.min_level_db, normalization=args.normalization),
)
train_dataset, val_dataset = split_process_dataset(args, transforms)
loader_training_params = {
"num_workers": args.workers,
"pin_memory": False,
"shuffle": True,
"drop_last": False,
}
loader_validation_params = loader_training_params.copy()
loader_validation_params["shuffle"] = False
collate_fn = collate_factory(args)
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
collate_fn=collate_fn,
**loader_training_params,
)
val_loader = DataLoader(
val_dataset,
batch_size=args.batch_size,
collate_fn=collate_fn,
**loader_validation_params,
)
n_classes = 2 ** args.n_bits if args.loss == "crossentropy" else 30
model = WaveRNN(
upsample_scales=args.upsample_scales,
n_classes=n_classes,
hop_length=args.hop_length,
n_res_block=args.n_res_block,
n_rnn=args.n_rnn,
n_fc=args.n_fc,
kernel_size=args.kernel_size,
n_freq=args.n_freq,
n_hidden=args.n_hidden_melresnet,
n_output=args.n_output_melresnet,
)
if args.jit:
model = torch.jit.script(model)
model = torch.nn.DataParallel(model)
model = model.to(devices[0], non_blocking=True)
n = count_parameters(model)
logging.info(f"Number of parameters: {n}")
# Optimizer
optimizer_params = {
"lr": args.learning_rate,
}
optimizer = Adam(model.parameters(), **optimizer_params)
criterion = LongCrossEntropyLoss() if args.loss == "crossentropy" else MoLLoss()
best_loss = 10.0
if args.checkpoint and os.path.isfile(args.checkpoint):
logging.info(f"Checkpoint: loading '{args.checkpoint}'")
checkpoint = torch.load(args.checkpoint)
args.start_epoch = checkpoint["epoch"]
best_loss = checkpoint["best_loss"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
f"Checkpoint: loaded '{args.checkpoint}' at epoch {checkpoint['epoch']}"
)
else:
logging.info("Checkpoint: not found")
save_checkpoint(
{
"epoch": args.start_epoch,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
},
False,
args.checkpoint,
)
for epoch in range(args.start_epoch, args.epochs):
train_one_epoch(
model, criterion, optimizer, train_loader, devices[0], epoch,
)
if not (epoch + 1) % args.print_freq or epoch == args.epochs - 1:
sum_loss = validate(model, criterion, val_loader, devices[0], epoch)
is_best = sum_loss < best_loss
best_loss = min(sum_loss, best_loss)
save_checkpoint(
{
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_loss": best_loss,
"optimizer": optimizer.state_dict(),
},
is_best,
args.checkpoint,
)
logging.info(f"End time: {datetime.now()}")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parse_args()
main(args)
|
"""
This script finds the merger responsible for labeling a PR by a commit SHA. It is used by the workflow in
'.github/workflows/pr-labels.yml'. If there exists no PR associated with the commit or the PR is properly labeled,
this script is a no-op.
Note: we ping the merger only, not the reviewers, as the reviewers can sometimes be external to torchaudio
with no labeling responsibility, so we don't want to bother them.
"""
import sys
from typing import Any, Optional, Set, Tuple
import requests
# For a PR to be properly labeled it should have one primary label and one secondary label
# For a PR with primary label "other", it does not require an additional secondary label
PRIMARY_LABELS = {
"BC-breaking",
"deprecation",
"bug fix",
"new feature",
"improvement",
"example",
"prototype",
"other",
}
SECONDARY_LABELS = {
"module: I/O",
"module: ops",
"module: models",
"module: pipelines",
"module: datasets",
"module: docs",
"module: tests",
"build",
"style",
"perf",
"other",
}
def query_torchaudio(cmd: str, *, accept) -> Any:
response = requests.get(f"https://api.github.com/repos/pytorch/audio/{cmd}", headers=dict(Accept=accept))
return response.json()
def get_pr_number(commit_hash: str) -> Optional[int]:
# See https://docs.github.com/en/rest/reference/repos#list-pull-requests-associated-with-a-commit
data = query_torchaudio(f"commits/{commit_hash}/pulls", accept="application/vnd.github.groot-preview+json")
if not data:
return None
return data[0]["number"]
def get_pr_merger_and_labels(pr_number: int) -> Tuple[str, Set[str]]:
# See https://docs.github.com/en/rest/reference/pulls#get-a-pull-request
data = query_torchaudio(f"pulls/{pr_number}", accept="application/vnd.github.v3+json")
merger = data["merged_by"]["login"]
labels = {label["name"] for label in data["labels"]}
return merger, labels
if __name__ == "__main__":
commit_hash = sys.argv[1]
pr_number = get_pr_number(commit_hash)
if not pr_number:
sys.exit(0)
merger, labels = get_pr_merger_and_labels(pr_number)
is_properly_labeled = bool(PRIMARY_LABELS.intersection(labels) and SECONDARY_LABELS.intersection(labels))
if not is_properly_labeled:
print(f"@{merger}")
|
# -*- coding: utf-8 -*-
import math
import warnings
from typing import Callable, Optional
import torch
from torch import Tensor
from torchaudio import functional as F
from .functional.functional import (
_get_sinc_resample_kernel,
_apply_sinc_resample_kernel,
)
__all__ = [
'Spectrogram',
'InverseSpectrogram',
'GriffinLim',
'AmplitudeToDB',
'MelScale',
'InverseMelScale',
'MelSpectrogram',
'MFCC',
'LFCC',
'MuLawEncoding',
'MuLawDecoding',
'Resample',
'TimeStretch',
'Fade',
'FrequencyMasking',
'TimeMasking',
'SlidingWindowCmn',
'Vad',
'SpectralCentroid',
'Vol',
'ComputeDeltas',
'PitchShift',
'RNNTLoss',
'PSD',
'MVDR',
]
class Spectrogram(torch.nn.Module):
r"""Create a spectrogram from a audio signal.
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float or None, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc.
If None, then the complex spectrum is returned instead. (Default: ``2``)
normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether to return half of results to
avoid redundancy (Default: ``True``)
return_complex (bool, optional):
Deprecated and not used.
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.Spectrogram(n_fft=800)
>>> spectrogram = transform(waveform)
"""
__constants__ = ['n_fft', 'win_length', 'hop_length', 'pad', 'power', 'normalized']
def __init__(self,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: Optional[float] = 2.,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
return_complex: Optional[bool] = None) -> None:
super(Spectrogram, self).__init__()
self.n_fft = n_fft
# number of FFT bins. the returned STFT result will have n_fft // 2 + 1
# number of frequencies due to onesided=True in torch.stft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
self.power = power
self.normalized = normalized
self.center = center
self.pad_mode = pad_mode
self.onesided = onesided
if return_complex is not None:
warnings.warn(
"`return_complex` argument is now deprecated and is not effective."
"`torchaudio.transforms.Spectrogram(power=None)` always returns a tensor with "
"complex dtype. Please remove the argument in the function call."
)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Dimension (..., freq, time), where freq is
``n_fft // 2 + 1`` where ``n_fft`` is the number of
Fourier bins, and time is the number of window hops (n_frame).
"""
return F.spectrogram(
waveform,
self.pad,
self.window,
self.n_fft,
self.hop_length,
self.win_length,
self.power,
self.normalized,
self.center,
self.pad_mode,
self.onesided,
)
class InverseSpectrogram(torch.nn.Module):
r"""Create an inverse spectrogram to recover an audio signal from a spectrogram.
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
normalized (bool, optional): Whether the spectrogram was normalized by magnitude after stft.
(Default: ``False``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether the signal in spectrogram was padded on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
        onesided (bool, optional): controls whether the spectrogram was computed with ``onesided=True``,
            i.e. with half of the frequency bins, to avoid redundancy (Default: ``True``)
Example
>>> batch, freq, time = 2, 257, 100
>>> length = 25344
>>> spectrogram = torch.randn(batch, freq, time, dtype=torch.cdouble)
>>> transform = transforms.InverseSpectrogram(n_fft=512)
>>> waveform = transform(spectrogram, length)
"""
__constants__ = ['n_fft', 'win_length', 'hop_length', 'pad', 'power', 'normalized']
def __init__(self,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True) -> None:
super(InverseSpectrogram, self).__init__()
self.n_fft = n_fft
# number of FFT bins. the returned STFT result will have n_fft // 2 + 1
# number of frequencies due to onesided=True in torch.stft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
self.normalized = normalized
self.center = center
self.pad_mode = pad_mode
self.onesided = onesided
def forward(self, spectrogram: Tensor, length: Optional[int] = None) -> Tensor:
r"""
Args:
spectrogram (Tensor): Complex tensor of audio of dimension (..., freq, time).
length (int or None, optional): The output length of the waveform.
Returns:
Tensor: Dimension (..., time), Least squares estimation of the original signal.
"""
return F.inverse_spectrogram(
spectrogram,
length,
self.pad,
self.window,
self.n_fft,
self.hop_length,
self.win_length,
self.normalized,
self.center,
self.pad_mode,
self.onesided,
)
class GriffinLim(torch.nn.Module):
r"""Compute waveform from a linear scale magnitude spectrogram using the Griffin-Lim transformation.
Implementation ported from
*librosa* [:footcite:`brian_mcfee-proc-scipy-2015`], *A fast Griffin-Lim algorithm* [:footcite:`6701851`]
and *Signal estimation from modified short-time Fourier transform* [:footcite:`1172092`].
Args:
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
n_iter (int, optional): Number of iteration for phase recovery process. (Default: ``32``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
momentum (float, optional): The momentum parameter for fast Griffin-Lim.
Setting this to 0 recovers the original Griffin-Lim method.
Values near 1 can lead to faster convergence, but above 1 may not converge. (Default: ``0.99``)
length (int, optional): Array length of the expected output. (Default: ``None``)
rand_init (bool, optional): Initializes phase randomly if True and to zero otherwise. (Default: ``True``)
Example
>>> batch, freq, time = 2, 257, 100
>>> spectrogram = torch.randn(batch, freq, time)
>>> transform = transforms.GriffinLim(n_fft=512)
>>> waveform = transform(spectrogram)
"""
__constants__ = ['n_fft', 'n_iter', 'win_length', 'hop_length', 'power',
'length', 'momentum', 'rand_init']
def __init__(self,
n_fft: int = 400,
n_iter: int = 32,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: float = 2.,
wkwargs: Optional[dict] = None,
momentum: float = 0.99,
length: Optional[int] = None,
rand_init: bool = True) -> None:
super(GriffinLim, self).__init__()
assert momentum < 1, 'momentum={} > 1 can be unstable'.format(momentum)
assert momentum >= 0, 'momentum={} < 0'.format(momentum)
self.n_fft = n_fft
self.n_iter = n_iter
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.length = length
self.power = power
self.momentum = momentum / (1 + momentum)
self.rand_init = rand_init
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor):
A magnitude-only STFT spectrogram of dimension (..., freq, frames)
where freq is ``n_fft // 2 + 1``.
Returns:
Tensor: waveform of (..., time), where time equals the ``length`` parameter if given.
"""
return F.griffinlim(specgram, self.window, self.n_fft, self.hop_length, self.win_length, self.power,
self.n_iter, self.momentum, self.length, self.rand_init)
class AmplitudeToDB(torch.nn.Module):
r"""Turn a tensor from the power/amplitude scale to the decibel scale.
This output depends on the maximum value in the input tensor, and so
    may return different values for an audio clip split into snippets vs. a
    full clip.
Args:
stype (str, optional): scale of input tensor (``'power'`` or ``'magnitude'``). The
power being the elementwise square of the magnitude. (Default: ``'power'``)
top_db (float or None, optional): minimum negative cut-off in decibels. A reasonable
number is 80. (Default: ``None``)
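    Example
        >>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
        >>> specgram = torchaudio.transforms.Spectrogram()(waveform)  # power spectrogram
        >>> transform = torchaudio.transforms.AmplitudeToDB(stype='power', top_db=80)
        >>> specgram_db = transform(specgram)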
"""
__constants__ = ['multiplier', 'amin', 'ref_value', 'db_multiplier']
def __init__(self, stype: str = 'power', top_db: Optional[float] = None) -> None:
super(AmplitudeToDB, self).__init__()
self.stype = stype
if top_db is not None and top_db < 0:
raise ValueError('top_db must be positive value')
self.top_db = top_db
self.multiplier = 10.0 if stype == 'power' else 20.0
self.amin = 1e-10
self.ref_value = 1.0
self.db_multiplier = math.log10(max(self.amin, self.ref_value))
def forward(self, x: Tensor) -> Tensor:
r"""Numerically stable implementation from Librosa.
https://librosa.org/doc/latest/generated/librosa.amplitude_to_db.html
Args:
x (Tensor): Input tensor before being converted to decibel scale.
Returns:
Tensor: Output tensor in decibel scale.
"""
return F.amplitude_to_DB(x, self.multiplier, self.amin, self.db_multiplier, self.top_db)
class MelScale(torch.nn.Module):
r"""Turn a normal STFT into a mel frequency STFT, using a conversion
matrix. This uses triangular filter banks.
Args:
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
n_stft (int, optional): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`. (Default: ``201``)
norm (str or None, optional): If ``'slaney'``, divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
See also:
:py:func:`torchaudio.functional.melscale_fbanks` - The function used to
generate the filter banks.
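    Example
        >>> specgram = torch.randn(1, 201, 100)  # (channel, n_stft, time)
        >>> transform = transforms.MelScale(n_stft=201)
        >>> mel_specgram = transform(specgram)  # (channel, n_mels, time)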
"""
__constants__ = ['n_mels', 'sample_rate', 'f_min', 'f_max']
def __init__(self,
n_mels: int = 128,
sample_rate: int = 16000,
f_min: float = 0.,
f_max: Optional[float] = None,
n_stft: int = 201,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(MelScale, self).__init__()
self.n_mels = n_mels
self.sample_rate = sample_rate
self.f_max = f_max if f_max is not None else float(sample_rate // 2)
self.f_min = f_min
self.norm = norm
self.mel_scale = mel_scale
        assert f_min <= self.f_max, 'Require f_min: {} <= f_max: {}'.format(f_min, self.f_max)
fb = F.melscale_fbanks(
n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate, self.norm,
self.mel_scale)
self.register_buffer('fb', fb)
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): A spectrogram STFT of dimension (..., freq, time).
Returns:
Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
"""
# (..., time, freq) dot (freq, n_mels) -> (..., n_mels, time)
mel_specgram = torch.matmul(specgram.transpose(-1, -2), self.fb).transpose(-1, -2)
return mel_specgram
class InverseMelScale(torch.nn.Module):
r"""Solve for a normal STFT from a mel frequency STFT, using a conversion
matrix. This uses triangular filter banks.
It minimizes the Euclidean norm between the input mel-spectrogram and the product of
the estimated spectrogram and the filter banks using SGD.
Args:
n_stft (int): Number of bins in STFT. See ``n_fft`` in :class:`Spectrogram`.
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``sample_rate // 2``)
max_iter (int, optional): Maximum number of optimization iterations. (Default: ``100000``)
tolerance_loss (float, optional): Value of loss to stop optimization at. (Default: ``1e-5``)
tolerance_change (float, optional): Difference in losses to stop optimization at. (Default: ``1e-8``)
sgdargs (dict or None, optional): Arguments for the SGD optimizer. (Default: ``None``)
norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
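Example
>>> # Illustrative sketch; ``mel_specgram`` is assumed to be a `(channel, n_mels, time)` tensor
>>> # produced with ``n_fft=400`` (i.e. 201 STFT bins).
>>> transform = torchaudio.transforms.InverseMelScale(n_stft=201, n_mels=128)
>>> specgram = transform(mel_specgram)  # (channel, 201, time), estimated by SGD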
"""
__constants__ = ['n_stft', 'n_mels', 'sample_rate', 'f_min', 'f_max', 'max_iter', 'tolerance_loss',
'tolerance_change', 'sgdargs']
def __init__(self,
n_stft: int,
n_mels: int = 128,
sample_rate: int = 16000,
f_min: float = 0.,
f_max: Optional[float] = None,
max_iter: int = 100000,
tolerance_loss: float = 1e-5,
tolerance_change: float = 1e-8,
sgdargs: Optional[dict] = None,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(InverseMelScale, self).__init__()
self.n_mels = n_mels
self.sample_rate = sample_rate
self.f_max = f_max or float(sample_rate // 2)
self.f_min = f_min
self.max_iter = max_iter
self.tolerance_loss = tolerance_loss
self.tolerance_change = tolerance_change
self.sgdargs = sgdargs or {'lr': 0.1, 'momentum': 0.9}
assert f_min <= self.f_max, 'Require f_min: {} <= f_max: {}'.format(f_min, self.f_max)
fb = F.melscale_fbanks(n_stft, self.f_min, self.f_max, self.n_mels, self.sample_rate,
norm, mel_scale)
self.register_buffer('fb', fb)
def forward(self, melspec: Tensor) -> Tensor:
r"""
Args:
melspec (Tensor): A Mel frequency spectrogram of dimension (..., ``n_mels``, time)
Returns:
Tensor: Linear scale spectrogram of size (..., freq, time)
"""
# pack batch
shape = melspec.size()
melspec = melspec.view(-1, shape[-2], shape[-1])
n_mels, time = shape[-2], shape[-1]
freq, _ = self.fb.size() # (freq, n_mels)
melspec = melspec.transpose(-1, -2)
assert self.n_mels == n_mels
specgram = torch.rand(melspec.size()[0], time, freq, requires_grad=True,
dtype=melspec.dtype, device=melspec.device)
optim = torch.optim.SGD([specgram], **self.sgdargs)
loss = float('inf')
for _ in range(self.max_iter):
optim.zero_grad()
diff = melspec - specgram.matmul(self.fb)
new_loss = diff.pow(2).sum(axis=-1).mean()
# take sum over mel-frequency then average over other dimensions
# so that loss threshold is applied per unit time frame
new_loss.backward()
optim.step()
specgram.data = specgram.data.clamp(min=0)
new_loss = new_loss.item()
if new_loss < self.tolerance_loss or abs(loss - new_loss) < self.tolerance_change:
break
loss = new_loss
specgram.requires_grad_(False)
specgram = specgram.clamp(min=0).transpose(-1, -2)
# unpack batch
specgram = specgram.view(shape[:-2] + (freq, time))
return specgram
class MelSpectrogram(torch.nn.Module):
r"""Create MelSpectrogram for a raw audio signal.
This is a composition of :py:func:`torchaudio.transforms.Spectrogram` and
:py:func:`torchaudio.transforms.MelScale`.
Sources
* https://gist.github.com/kastnerkyle/179d6e9a88202ab0a2fe
* https://timsainb.github.io/spectrograms-mfccs-and-inversion-in-python.html
* http://haythamfayek.com/2016/04/21/speech-processing-for-machine-learning.html
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``None``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
n_mels (int, optional): Number of mel filterbanks. (Default: ``128``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
power (float, optional): Exponent for the magnitude spectrogram,
(must be > 0) e.g., 1 for energy, 2 for power, etc. (Default: ``2``)
normalized (bool, optional): Whether to normalize by magnitude after stft. (Default: ``False``)
wkwargs (Dict[..., ...] or None, optional): Arguments for window function. (Default: ``None``)
center (bool, optional): whether to pad :attr:`waveform` on both sides so
that the :math:`t`-th frame is centered at time :math:`t \times \text{hop\_length}`.
(Default: ``True``)
pad_mode (string, optional): controls the padding method used when
:attr:`center` is ``True``. (Default: ``"reflect"``)
onesided (bool, optional): controls whether to return half of results to
avoid redundancy. (Default: ``True``)
norm (str or None, optional): If 'slaney', divide the triangular mel weights by the width of the mel band
(area normalization). (Default: ``None``)
mel_scale (str, optional): Scale to use: ``htk`` or ``slaney``. (Default: ``htk``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.MelSpectrogram(sample_rate)
>>> mel_specgram = transform(waveform) # (channel, n_mels, time)
See also:
:py:func:`torchaudio.functional.melscale_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad', 'n_mels', 'f_min']
def __init__(self,
sample_rate: int = 16000,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
f_min: float = 0.,
f_max: Optional[float] = None,
pad: int = 0,
n_mels: int = 128,
window_fn: Callable[..., Tensor] = torch.hann_window,
power: float = 2.,
normalized: bool = False,
wkwargs: Optional[dict] = None,
center: bool = True,
pad_mode: str = "reflect",
onesided: bool = True,
norm: Optional[str] = None,
mel_scale: str = "htk") -> None:
super(MelSpectrogram, self).__init__()
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
self.pad = pad
self.power = power
self.normalized = normalized
self.n_mels = n_mels # number of mel frequency bins
self.f_max = f_max
self.f_min = f_min
self.spectrogram = Spectrogram(n_fft=self.n_fft, win_length=self.win_length,
hop_length=self.hop_length,
pad=self.pad, window_fn=window_fn, power=self.power,
normalized=self.normalized, wkwargs=wkwargs,
center=center, pad_mode=pad_mode, onesided=onesided)
self.mel_scale = MelScale(
self.n_mels,
self.sample_rate,
self.f_min,
self.f_max,
self.n_fft // 2 + 1,
norm,
mel_scale
)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Mel frequency spectrogram of size (..., ``n_mels``, time).
"""
specgram = self.spectrogram(waveform)
mel_specgram = self.mel_scale(specgram)
return mel_specgram
class MFCC(torch.nn.Module):
r"""Create the Mel-frequency cepstrum coefficients from an audio signal.
By default, this calculates the MFCC on the DB-scaled Mel spectrogram.
This is not the textbook implementation, but is implemented here to
give consistency with librosa.
This output depends on the maximum value in the input spectrogram, and so
may return different values for an audio clip split into snippets vs. a
full clip.
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_mfcc (int, optional): Number of mfc coefficients to retain. (Default: ``40``)
dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
norm (str, optional): norm to use. (Default: ``'ortho'``)
log_mels (bool, optional): whether to use log-mel spectrograms instead of db-scaled. (Default: ``False``)
melkwargs (dict or None, optional): arguments for MelSpectrogram. (Default: ``None``)
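Example
>>> # Illustrative sketch; ``waveform`` is assumed to be a `(channel, time)` tensor at 16 kHz.
>>> transform = torchaudio.transforms.MFCC(sample_rate=16000, n_mfcc=13,
>>>                                        melkwargs={'n_fft': 400, 'n_mels': 64})
>>> mfcc = transform(waveform)  # (channel, 13, time)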
See also:
:py:func:`torchaudio.functional.melscale_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['sample_rate', 'n_mfcc', 'dct_type', 'top_db', 'log_mels']
def __init__(self,
sample_rate: int = 16000,
n_mfcc: int = 40,
dct_type: int = 2,
norm: str = 'ortho',
log_mels: bool = False,
melkwargs: Optional[dict] = None) -> None:
super(MFCC, self).__init__()
supported_dct_types = [2]
if dct_type not in supported_dct_types:
raise ValueError('DCT type not supported: {}'.format(dct_type))
self.sample_rate = sample_rate
self.n_mfcc = n_mfcc
self.dct_type = dct_type
self.norm = norm
self.top_db = 80.0
self.amplitude_to_DB = AmplitudeToDB('power', self.top_db)
melkwargs = melkwargs or {}
self.MelSpectrogram = MelSpectrogram(sample_rate=self.sample_rate, **melkwargs)
if self.n_mfcc > self.MelSpectrogram.n_mels:
raise ValueError('Cannot select more MFCC coefficients than # mel bins')
dct_mat = F.create_dct(self.n_mfcc, self.MelSpectrogram.n_mels, self.norm)
self.register_buffer('dct_mat', dct_mat)
self.log_mels = log_mels
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: specgram_mel_db of size (..., ``n_mfcc``, time).
"""
mel_specgram = self.MelSpectrogram(waveform)
if self.log_mels:
log_offset = 1e-6
mel_specgram = torch.log(mel_specgram + log_offset)
else:
mel_specgram = self.amplitude_to_DB(mel_specgram)
# (..., time, n_mels) dot (n_mels, n_mfcc) -> (..., n_mfcc, time)
mfcc = torch.matmul(mel_specgram.transpose(-1, -2), self.dct_mat).transpose(-1, -2)
return mfcc
class LFCC(torch.nn.Module):
r"""Create the linear-frequency cepstrum coefficients from an audio signal.
By default, this calculates the LFCC on the DB-scaled linear filtered spectrogram.
This is not the textbook implementation, but is implemented here to
give consistency with librosa.
This output depends on the maximum value in the input spectrogram, and so
may return different values for an audio clip split into snippets vs. a
full clip.
Args:
sample_rate (int, optional): Sample rate of audio signal. (Default: ``16000``)
n_filter (int, optional): Number of linear filters to apply. (Default: ``128``)
n_lfcc (int, optional): Number of lfc coefficients to retain. (Default: ``40``)
f_min (float, optional): Minimum frequency. (Default: ``0.``)
f_max (float or None, optional): Maximum frequency. (Default: ``None``)
dct_type (int, optional): type of DCT (discrete cosine transform) to use. (Default: ``2``)
norm (str, optional): norm to use. (Default: ``'ortho'``)
log_lf (bool, optional): whether to use log-lf spectrograms instead of db-scaled. (Default: ``False``)
speckwargs (dict or None, optional): arguments for Spectrogram. (Default: ``None``)
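Example
>>> # Illustrative sketch; ``waveform`` is assumed to be a `(channel, time)` tensor at 16 kHz.
>>> transform = torchaudio.transforms.LFCC(sample_rate=16000, n_lfcc=20,
>>>                                        speckwargs={'n_fft': 400})
>>> lfcc = transform(waveform)  # (channel, 20, time)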
See also:
:py:func:`torchaudio.functional.linear_fbanks` - The function used to
generate the filter banks.
"""
__constants__ = ['sample_rate', 'n_filter', 'n_lfcc', 'dct_type', 'top_db', 'log_lf']
def __init__(self,
sample_rate: int = 16000,
n_filter: int = 128,
f_min: float = 0.,
f_max: Optional[float] = None,
n_lfcc: int = 40,
dct_type: int = 2,
norm: str = 'ortho',
log_lf: bool = False,
speckwargs: Optional[dict] = None) -> None:
super(LFCC, self).__init__()
supported_dct_types = [2]
if dct_type not in supported_dct_types:
raise ValueError('DCT type not supported: {}'.format(dct_type))
self.sample_rate = sample_rate
self.f_min = f_min
self.f_max = f_max if f_max is not None else float(sample_rate // 2)
self.n_filter = n_filter
self.n_lfcc = n_lfcc
self.dct_type = dct_type
self.norm = norm
self.top_db = 80.0
self.amplitude_to_DB = AmplitudeToDB('power', self.top_db)
speckwargs = speckwargs or {}
self.Spectrogram = Spectrogram(**speckwargs)
if self.n_lfcc > self.Spectrogram.n_fft:
raise ValueError('Cannot select more LFCC coefficients than # fft bins')
filter_mat = F.linear_fbanks(
n_freqs=self.Spectrogram.n_fft // 2 + 1,
f_min=self.f_min,
f_max=self.f_max,
n_filter=self.n_filter,
sample_rate=self.sample_rate,
)
self.register_buffer("filter_mat", filter_mat)
dct_mat = F.create_dct(self.n_lfcc, self.n_filter, self.norm)
self.register_buffer('dct_mat', dct_mat)
self.log_lf = log_lf
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Linear Frequency Cepstral Coefficients of size (..., ``n_lfcc``, time).
"""
specgram = self.Spectrogram(waveform)
# (..., time, freq) dot (freq, n_filter) -> (..., n_filter, time)
specgram = torch.matmul(specgram.transpose(-1, -2), self.filter_mat).transpose(-1, -2)
if self.log_lf:
log_offset = 1e-6
specgram = torch.log(specgram + log_offset)
else:
specgram = self.amplitude_to_DB(specgram)
# (..., time, n_filter) dot (n_filter, n_lfcc) -> (..., n_lfcc, time)
lfcc = torch.matmul(specgram.transpose(-1, -2), self.dct_mat).transpose(-1, -2)
return lfcc
class MuLawEncoding(torch.nn.Module):
r"""Encode signal based on mu-law companding. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This algorithm assumes the signal has been scaled to between -1 and 1 and
returns a signal encoded with values from 0 to ``quantization_channels - 1``.
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.MuLawEncoding(quantization_channels=512)
>>> mulawtrans = transform(waveform)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: int = 256) -> None:
super(MuLawEncoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x: Tensor) -> Tensor:
r"""
Args:
x (Tensor): A signal to be encoded.
Returns:
Tensor: An encoded signal.
"""
return F.mu_law_encoding(x, self.quantization_channels)
class MuLawDecoding(torch.nn.Module):
r"""Decode mu-law encoded signal. For more info see the
`Wikipedia Entry <https://en.wikipedia.org/wiki/%CE%9C-law_algorithm>`_
This expects an input with values between 0 and ``quantization_channels - 1``
and returns a signal scaled between -1 and 1.
Args:
quantization_channels (int, optional): Number of channels. (Default: ``256``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = torchaudio.transforms.MuLawDecoding(quantization_channels=512)
>>> mulawtrans = transform(waveform)
"""
__constants__ = ['quantization_channels']
def __init__(self, quantization_channels: int = 256) -> None:
super(MuLawDecoding, self).__init__()
self.quantization_channels = quantization_channels
def forward(self, x_mu: Tensor) -> Tensor:
r"""
Args:
x_mu (Tensor): A mu-law encoded signal which needs to be decoded.
Returns:
Tensor: The signal decoded.
"""
return F.mu_law_decoding(x_mu, self.quantization_channels)
class Resample(torch.nn.Module):
r"""Resample a signal from one frequency to another. A resampling method can be given.
Note:
If resampling on waveforms of higher precision than float32, there may be a small loss of precision
because the kernel is cached once as float32. If high precision resampling is important for your application,
the functional form will retain higher precision, but run slower because it does not cache the kernel.
Alternatively, you could write your own transform that caches a higher precision kernel.
Args:
orig_freq (int, optional): The original frequency of the signal. (Default: ``16000``)
new_freq (int, optional): The desired frequency. (Default: ``16000``)
resampling_method (str, optional): The resampling method to use.
Options: [``sinc_interpolation``, ``kaiser_window``] (Default: ``'sinc_interpolation'``)
lowpass_filter_width (int, optional): Controls the sharpness of the filter, more == sharper
but less efficient. (Default: ``6``)
rolloff (float, optional): The roll-off frequency of the filter, as a fraction of the Nyquist.
Lower values reduce anti-aliasing, but also reduce some of the highest frequencies. (Default: ``0.99``)
beta (float or None, optional): The shape parameter used for kaiser window.
dtype (torch.dtype, optional):
Determines the precision with which the resampling kernel is pre-computed and cached. If not provided,
the kernel is computed with ``torch.float64`` then cached as ``torch.float32``.
If you need higher precision, provide ``torch.float64``, and the pre-computed kernel is computed and
cached as ``torch.float64``. If you use resample with lower precision, then instead of providing this
argument, please use ``Resample.to(dtype)``, so that the kernel generation is still
carried out on ``torch.float64``.
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.Resample(sample_rate, sample_rate/10)
>>> waveform = transform(waveform)
"""
def __init__(
self,
orig_freq: int = 16000,
new_freq: int = 16000,
resampling_method: str = 'sinc_interpolation',
lowpass_filter_width: int = 6,
rolloff: float = 0.99,
beta: Optional[float] = None,
*,
dtype: Optional[torch.dtype] = None,
) -> None:
super().__init__()
self.orig_freq = orig_freq
self.new_freq = new_freq
self.gcd = math.gcd(int(self.orig_freq), int(self.new_freq))
self.resampling_method = resampling_method
self.lowpass_filter_width = lowpass_filter_width
self.rolloff = rolloff
self.beta = beta
if self.orig_freq != self.new_freq:
kernel, self.width = _get_sinc_resample_kernel(
self.orig_freq, self.new_freq, self.gcd,
self.lowpass_filter_width, self.rolloff,
self.resampling_method, beta, dtype=dtype)
self.register_buffer('kernel', kernel)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension (..., time).
Returns:
Tensor: Output signal of dimension (..., time).
"""
if self.orig_freq == self.new_freq:
return waveform
return _apply_sinc_resample_kernel(
waveform, self.orig_freq, self.new_freq, self.gcd,
self.kernel, self.width)
class ComputeDeltas(torch.nn.Module):
r"""Compute delta coefficients of a tensor, usually a spectrogram.
See `torchaudio.functional.compute_deltas` for more details.
Args:
win_length (int, optional): The window length used for computing delta. (Default: ``5``)
mode (str, optional): Mode parameter passed to padding. (Default: ``'replicate'``)
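Example
>>> # Illustrative sketch; ``specgram`` is assumed to be a `(channel, freq, time)` tensor.
>>> transform = torchaudio.transforms.ComputeDeltas(win_length=5)
>>> deltas = transform(specgram)       # first-order deltas, same shape as the input
>>> delta_deltas = transform(deltas)   # second-order deltas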
"""
__constants__ = ['win_length']
def __init__(self, win_length: int = 5, mode: str = "replicate") -> None:
super(ComputeDeltas, self).__init__()
self.win_length = win_length
self.mode = mode
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of audio of dimension (..., freq, time).
Returns:
Tensor: Tensor of deltas of dimension (..., freq, time).
"""
return F.compute_deltas(specgram, win_length=self.win_length, mode=self.mode)
class TimeStretch(torch.nn.Module):
r"""Stretch stft in time without modifying pitch for a given rate.
Proposed in *SpecAugment* [:footcite:`specaugment`].
Args:
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
n_freq (int, optional): number of filter banks from stft. (Default: ``201``)
fixed_rate (float or None, optional): rate to speed up or slow down by.
If None is provided, rate must be passed to the forward method. (Default: ``None``)
Example
>>> spectrogram = torchaudio.transforms.Spectrogram()
>>> stretch = torchaudio.transforms.TimeStretch()
>>>
>>> original = spectrogram(waveform)
>>> stretched_1_2 = stretch(original, 1.2)
>>> stretched_0_9 = stretch(original, 0.9)
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_stretch_1.png
:width: 600
:alt: Spectrogram stretched by 1.2
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_stretch_2.png
:width: 600
:alt: The original spectrogram
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_stretch_3.png
:width: 600
:alt: Spectrogram stretched by 0.9
"""
__constants__ = ['fixed_rate']
def __init__(self,
hop_length: Optional[int] = None,
n_freq: int = 201,
fixed_rate: Optional[float] = None) -> None:
super(TimeStretch, self).__init__()
self.fixed_rate = fixed_rate
n_fft = (n_freq - 1) * 2
hop_length = hop_length if hop_length is not None else n_fft // 2
self.register_buffer('phase_advance', torch.linspace(0, math.pi * hop_length, n_freq)[..., None])
def forward(self, complex_specgrams: Tensor, overriding_rate: Optional[float] = None) -> Tensor:
r"""
Args:
complex_specgrams (Tensor):
A tensor of dimension `(..., freq, num_frame)` with complex dtype.
overriding_rate (float or None, optional): speed up to apply to this batch.
If no rate is passed, use ``self.fixed_rate``. (Default: ``None``)
Returns:
Tensor:
Stretched spectrogram. The resulting tensor is of the same dtype as the input
spectrogram, but the number of frames is changed to ``ceil(num_frame / rate)``.
"""
if overriding_rate is None:
if self.fixed_rate is None:
raise ValueError(
"If no fixed_rate is specified, must pass a valid rate to the forward method.")
rate = self.fixed_rate
else:
rate = overriding_rate
return F.phase_vocoder(complex_specgrams, rate, self.phase_advance)
class Fade(torch.nn.Module):
r"""Add a fade in and/or fade out to an waveform.
Args:
fade_in_len (int, optional): Length of fade-in (time frames). (Default: ``0``)
fade_out_len (int, optional): Length of fade-out (time frames). (Default: ``0``)
fade_shape (str, optional): Shape of fade. Must be one of: ``"quarter_sine"``,
``"half_sine"``, ``"linear"``, ``"logarithmic"``, ``"exponential"``.
(Default: ``"linear"``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.Fade(fade_in_len=sample_rate, fade_out_len=2 * sample_rate, fade_shape='linear')
>>> faded_waveform = transform(waveform)
"""
def __init__(self,
fade_in_len: int = 0,
fade_out_len: int = 0,
fade_shape: str = "linear") -> None:
super(Fade, self).__init__()
self.fade_in_len = fade_in_len
self.fade_out_len = fade_out_len
self.fade_shape = fade_shape
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: Tensor of audio of dimension `(..., time)`.
"""
waveform_length = waveform.size()[-1]
device = waveform.device
return (
self._fade_in(waveform_length, device)
* self._fade_out(waveform_length, device)
* waveform
)
def _fade_in(self, waveform_length: int, device: torch.device) -> Tensor:
fade = torch.linspace(0, 1, self.fade_in_len, device=device)
ones = torch.ones(waveform_length - self.fade_in_len, device=device)
if self.fade_shape == "linear":
fade = fade
if self.fade_shape == "exponential":
fade = torch.pow(2, (fade - 1)) * fade
if self.fade_shape == "logarithmic":
fade = torch.log10(.1 + fade) + 1
if self.fade_shape == "quarter_sine":
fade = torch.sin(fade * math.pi / 2)
if self.fade_shape == "half_sine":
fade = torch.sin(fade * math.pi - math.pi / 2) / 2 + 0.5
return torch.cat((fade, ones)).clamp_(0, 1)
def _fade_out(self, waveform_length: int, device: torch.device) -> Tensor:
fade = torch.linspace(0, 1, self.fade_out_len, device=device)
ones = torch.ones(waveform_length - self.fade_out_len, device=device)
if self.fade_shape == "linear":
fade = - fade + 1
if self.fade_shape == "exponential":
fade = torch.pow(2, - fade) * (1 - fade)
if self.fade_shape == "logarithmic":
fade = torch.log10(1.1 - fade) + 1
if self.fade_shape == "quarter_sine":
fade = torch.sin(fade * math.pi / 2 + math.pi / 2)
if self.fade_shape == "half_sine":
fade = torch.sin(fade * math.pi + math.pi / 2) / 2 + 0.5
return torch.cat((ones, fade)).clamp_(0, 1)
class _AxisMasking(torch.nn.Module):
r"""Apply masking to a spectrogram.
Args:
mask_param (int): Maximum possible length of the mask.
axis (int): What dimension the mask is applied on.
iid_masks (bool): Applies iid masks to each of the examples in the batch dimension.
This option is applicable only when the input tensor is 4D.
"""
__constants__ = ['mask_param', 'axis', 'iid_masks']
def __init__(self, mask_param: int, axis: int, iid_masks: bool) -> None:
super(_AxisMasking, self).__init__()
self.mask_param = mask_param
self.axis = axis
self.iid_masks = iid_masks
def forward(self, specgram: Tensor, mask_value: float = 0.) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of dimension `(..., freq, time)`.
mask_value (float): Value to assign to the masked columns.
Returns:
Tensor: Masked spectrogram of dimensions `(..., freq, time)`.
"""
# if iid_masks flag marked and specgram has a batch dimension
if self.iid_masks and specgram.dim() == 4:
return F.mask_along_axis_iid(specgram, self.mask_param, mask_value, self.axis + 1)
else:
return F.mask_along_axis(specgram, self.mask_param, mask_value, self.axis)
class FrequencyMasking(_AxisMasking):
r"""Apply masking to a spectrogram in the frequency domain.
Proposed in *SpecAugment* [:footcite:`specaugment`].
Args:
freq_mask_param (int): maximum possible length of the mask.
Indices uniformly sampled from [0, freq_mask_param).
iid_masks (bool, optional): whether to apply different masks to each
example/channel in the batch. (Default: ``False``)
This option is applicable only when the input tensor is 4D.
Example
>>> spectrogram = torchaudio.transforms.Spectrogram()
>>> masking = torchaudio.transforms.FrequencyMasking(freq_mask_param=80)
>>>
>>> original = spectrogram(waveform)
>>> masked = masking(original)
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_freq_masking1.png
:alt: The original spectrogram
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_freq_masking2.png
:alt: The spectrogram masked along frequency axis
"""
def __init__(self, freq_mask_param: int, iid_masks: bool = False) -> None:
super(FrequencyMasking, self).__init__(freq_mask_param, 1, iid_masks)
class TimeMasking(_AxisMasking):
r"""Apply masking to a spectrogram in the time domain.
Proposed in *SpecAugment* [:footcite:`specaugment`].
Args:
time_mask_param (int): maximum possible length of the mask.
Indices uniformly sampled from [0, time_mask_param).
iid_masks (bool, optional): whether to apply different masks to each
example/channel in the batch. (Default: ``False``)
This option is applicable only when the input tensor is 4D.
Example
>>> spectrogram = torchaudio.transforms.Spectrogram()
>>> masking = torchaudio.transforms.TimeMasking(time_mask_param=80)
>>>
>>> original = spectrogram(waveform)
>>> masked = masking(original)
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_masking1.png
:alt: The original spectrogram
.. image:: https://download.pytorch.org/torchaudio/doc-assets/specaugment_time_masking2.png
:alt: The spectrogram masked along time axis
"""
def __init__(self, time_mask_param: int, iid_masks: bool = False) -> None:
super(TimeMasking, self).__init__(time_mask_param, 2, iid_masks)
class Vol(torch.nn.Module):
r"""Add a volume to an waveform.
Args:
gain (float): Interpreted according to the given gain_type:
If ``gain_type`` = ``amplitude``, ``gain`` is a positive amplitude ratio.
If ``gain_type`` = ``power``, ``gain`` is a power (voltage squared).
If ``gain_type`` = ``db``, ``gain`` is in decibels.
gain_type (str, optional): Type of gain. One of: ``amplitude``, ``power``, ``db`` (Default: ``amplitude``)
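Example
>>> # Illustrative sketch; ``waveform`` is assumed to be a `(channel, time)` tensor scaled to [-1, 1].
>>> transform = torchaudio.transforms.Vol(gain=0.5, gain_type='amplitude')
>>> quieter_waveform = transform(waveform)  # output is clamped to [-1, 1]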
"""
def __init__(self, gain: float, gain_type: str = 'amplitude'):
super(Vol, self).__init__()
self.gain = gain
self.gain_type = gain_type
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError("If gain_type = amplitude or power, gain must be positive.")
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: Tensor of audio of dimension `(..., time)`.
"""
if self.gain_type == "amplitude":
waveform = waveform * self.gain
if self.gain_type == "db":
waveform = F.gain(waveform, self.gain)
if self.gain_type == "power":
waveform = F.gain(waveform, 10 * math.log10(self.gain))
return torch.clamp(waveform, -1, 1)
class SlidingWindowCmn(torch.nn.Module):
r"""
Apply sliding-window cepstral mean (and optionally variance) normalization per utterance.
Args:
cmn_window (int, optional): Window in frames for running average CMN computation (int, default = 600)
min_cmn_window (int, optional): Minimum CMN window used at start of decoding (adds latency only at start).
Only applicable if center == false, ignored if center == true (int, default = 100)
center (bool, optional): If true, use a window centered on the current frame
(to the extent possible, modulo end effects). If false, window is to the left. (bool, default = false)
norm_vars (bool, optional): If true, normalize variance to one. (bool, default = false)
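Example
>>> # Illustrative sketch; ``specgram`` is assumed to be a `(channel, time, freq)` feature tensor.
>>> transform = torchaudio.transforms.SlidingWindowCmn(cmn_window=600, center=True, norm_vars=False)
>>> cmn_specgram = transform(specgram)  # same shape, mean-normalized over the sliding window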
"""
def __init__(self,
cmn_window: int = 600,
min_cmn_window: int = 100,
center: bool = False,
norm_vars: bool = False) -> None:
super().__init__()
self.cmn_window = cmn_window
self.min_cmn_window = min_cmn_window
self.center = center
self.norm_vars = norm_vars
def forward(self, specgram: Tensor) -> Tensor:
r"""
Args:
specgram (Tensor): Tensor of spectrogram of dimension `(..., time, freq)`.
Returns:
Tensor: Tensor of spectrogram of dimension `(..., time, freq)`.
"""
cmn_specgram = F.sliding_window_cmn(
specgram, self.cmn_window, self.min_cmn_window, self.center, self.norm_vars)
return cmn_specgram
class Vad(torch.nn.Module):
r"""Voice Activity Detector. Similar to SoX implementation.
Attempts to trim silence and quiet background sounds from the ends of recordings of speech.
The algorithm currently uses a simple cepstral power measurement to detect voice,
so may be fooled by other things, especially music.
The effect can trim only from the front of the audio,
so in order to trim from the back, the reverse effect must also be used.
Args:
sample_rate (int): Sample rate of audio signal.
trigger_level (float, optional): The measurement level used to trigger activity detection.
This may need to be changed depending on the noise level, signal level,
and other characteristics of the input audio. (Default: 7.0)
trigger_time (float, optional): The time constant (in seconds)
used to help ignore short bursts of sound. (Default: 0.25)
search_time (float, optional): The amount of audio (in seconds)
to search for quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 1.0)
allowed_gap (float, optional): The allowed gap (in seconds) between
quieter/shorter bursts of audio to include prior
to the detected trigger point. (Default: 0.25)
pre_trigger_time (float, optional): The amount of audio (in seconds) to preserve
before the trigger point and any found quieter/shorter bursts. (Default: 0.0)
boot_time (float, optional): The algorithm (internally) uses adaptive noise
estimation/reduction in order to detect the start of the wanted audio.
This option sets the time for the initial noise estimate. (Default: 0.35)
noise_up_time (float, optional): Time constant used by the adaptive noise estimator
for when the noise level is increasing. (Default: 0.1)
noise_down_time (float, optional): Time constant used by the adaptive noise estimator
for when the noise level is decreasing. (Default: 0.01)
noise_reduction_amount (float, optional): Amount of noise reduction to use in
the detection algorithm (e.g. 0, 0.5, ...). (Default: 1.35)
measure_freq (float, optional): Frequency of the algorithm’s
processing/measurements. (Default: 20.0)
measure_duration (float or None, optional): Measurement duration.
(Default: Twice the measurement period; i.e. with overlap.)
measure_smooth_time (float, optional): Time constant used to smooth
spectral measurements. (Default: 0.4)
hp_filter_freq (float, optional): "Brick-wall" frequency of high-pass filter applied
at the input to the detector algorithm. (Default: 50.0)
lp_filter_freq (float, optional): "Brick-wall" frequency of low-pass filter applied
at the input to the detector algorithm. (Default: 6000.0)
hp_lifter_freq (float, optional): "Brick-wall" frequency of high-pass lifter used
in the detector algorithm. (Default: 150.0)
lp_lifter_freq (float, optional): "Brick-wall" frequency of low-pass lifter used
in the detector algorithm. (Default: 2000.0)
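Example
>>> # Illustrative sketch; 'test.wav' is a hypothetical recording of speech.
>>> waveform, sample_rate = torchaudio.load('test.wav')
>>> transform = torchaudio.transforms.Vad(sample_rate=sample_rate, trigger_level=7.0)
>>> trimmed = transform(waveform)  # leading silence removed; reverse and re-apply to trim the end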
Reference:
- http://sox.sourceforge.net/sox.html
"""
def __init__(self,
sample_rate: int,
trigger_level: float = 7.0,
trigger_time: float = 0.25,
search_time: float = 1.0,
allowed_gap: float = 0.25,
pre_trigger_time: float = 0.0,
boot_time: float = .35,
noise_up_time: float = .1,
noise_down_time: float = .01,
noise_reduction_amount: float = 1.35,
measure_freq: float = 20.0,
measure_duration: Optional[float] = None,
measure_smooth_time: float = .4,
hp_filter_freq: float = 50.,
lp_filter_freq: float = 6000.,
hp_lifter_freq: float = 150.,
lp_lifter_freq: float = 2000.) -> None:
super().__init__()
self.sample_rate = sample_rate
self.trigger_level = trigger_level
self.trigger_time = trigger_time
self.search_time = search_time
self.allowed_gap = allowed_gap
self.pre_trigger_time = pre_trigger_time
self.boot_time = boot_time
self.noise_up_time = noise_up_time
self.noise_down_time = noise_down_time
self.noise_reduction_amount = noise_reduction_amount
self.measure_freq = measure_freq
self.measure_duration = measure_duration
self.measure_smooth_time = measure_smooth_time
self.hp_filter_freq = hp_filter_freq
self.lp_filter_freq = lp_filter_freq
self.hp_lifter_freq = hp_lifter_freq
self.lp_lifter_freq = lp_lifter_freq
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(channels, time)` or `(time)`
Tensor of shape `(channels, time)` is treated as a multi-channel recording
of the same event and the resulting output will be trimmed to the earliest
voice activity in any channel.
"""
return F.vad(
waveform=waveform,
sample_rate=self.sample_rate,
trigger_level=self.trigger_level,
trigger_time=self.trigger_time,
search_time=self.search_time,
allowed_gap=self.allowed_gap,
pre_trigger_time=self.pre_trigger_time,
boot_time=self.boot_time,
noise_up_time=self.noise_up_time,
noise_down_time=self.noise_down_time,
noise_reduction_amount=self.noise_reduction_amount,
measure_freq=self.measure_freq,
measure_duration=self.measure_duration,
measure_smooth_time=self.measure_smooth_time,
hp_filter_freq=self.hp_filter_freq,
lp_filter_freq=self.lp_filter_freq,
hp_lifter_freq=self.hp_lifter_freq,
lp_lifter_freq=self.lp_lifter_freq,
)
class SpectralCentroid(torch.nn.Module):
r"""Compute the spectral centroid for each channel along the time axis.
The spectral centroid is defined as the weighted average of the
frequency values, weighted by their magnitude.
Args:
sample_rate (int): Sample rate of audio signal.
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins. (Default: ``400``)
win_length (int or None, optional): Window size. (Default: ``n_fft``)
hop_length (int or None, optional): Length of hop between STFT windows. (Default: ``win_length // 2``)
pad (int, optional): Two sided padding of signal. (Default: ``0``)
window_fn (Callable[..., Tensor], optional): A function to create a window tensor
that is applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
wkwargs (dict or None, optional): Arguments for window function. (Default: ``None``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.SpectralCentroid(sample_rate)
>>> spectral_centroid = transform(waveform) # (channel, time)
"""
__constants__ = ['sample_rate', 'n_fft', 'win_length', 'hop_length', 'pad']
def __init__(self,
sample_rate: int,
n_fft: int = 400,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
pad: int = 0,
window_fn: Callable[..., Tensor] = torch.hann_window,
wkwargs: Optional[dict] = None) -> None:
super(SpectralCentroid, self).__init__()
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 2
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
self.pad = pad
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: Spectral Centroid of size `(..., time)`.
"""
return F.spectral_centroid(waveform, self.sample_rate, self.pad, self.window, self.n_fft, self.hop_length,
self.win_length)
class PitchShift(torch.nn.Module):
r"""Shift the pitch of a waveform by ``n_steps`` steps.
Args:
sample_rate (int): Sample rate of the waveform to be shifted.
n_steps (int): The (fractional) steps to shift the waveform by.
bins_per_octave (int, optional): The number of steps per octave (Default: ``12``).
n_fft (int, optional): Size of FFT, creates ``n_fft // 2 + 1`` bins (Default: ``512``).
win_length (int or None, optional): Window size. If None, then ``n_fft`` is used. (Default: ``None``).
hop_length (int or None, optional): Length of hop between STFT windows. If None, then ``win_length // 4``
is used (Default: ``None``).
window_fn (Callable[..., Tensor], optional): A function to create a window tensor that is
applied/multiplied to each frame/window. (Default: ``torch.hann_window``)
wkwargs (dict or None, optional): Arguments for the window function. (Default: ``None``)
Example
>>> waveform, sample_rate = torchaudio.load('test.wav', normalize=True)
>>> transform = transforms.PitchShift(sample_rate, 4)
>>> waveform_shift = transform(waveform) # (channel, time)
"""
__constants__ = ['sample_rate', 'n_steps', 'bins_per_octave', 'n_fft', 'win_length', 'hop_length']
def __init__(self,
sample_rate: int,
n_steps: int,
bins_per_octave: int = 12,
n_fft: int = 512,
win_length: Optional[int] = None,
hop_length: Optional[int] = None,
window_fn: Callable[..., Tensor] = torch.hann_window,
wkwargs: Optional[dict] = None) -> None:
super(PitchShift, self).__init__()
self.n_steps = n_steps
self.bins_per_octave = bins_per_octave
self.sample_rate = sample_rate
self.n_fft = n_fft
self.win_length = win_length if win_length is not None else n_fft
self.hop_length = hop_length if hop_length is not None else self.win_length // 4
window = window_fn(self.win_length) if wkwargs is None else window_fn(self.win_length, **wkwargs)
self.register_buffer('window', window)
def forward(self, waveform: Tensor) -> Tensor:
r"""
Args:
waveform (Tensor): Tensor of audio of dimension `(..., time)`.
Returns:
Tensor: The pitch-shifted audio of shape `(..., time)`.
"""
return F.pitch_shift(waveform, self.sample_rate, self.n_steps, self.bins_per_octave, self.n_fft,
self.win_length, self.hop_length, self.window)
class RNNTLoss(torch.nn.Module):
"""Compute the RNN Transducer loss from *Sequence Transduction with Recurrent Neural Networks*
[:footcite:`graves2012sequence`].
The RNN Transducer loss extends the CTC loss by defining a distribution over output
sequences of all lengths, and by jointly modelling both input-output and output-output
dependencies.
Args:
blank (int, optional): blank label (Default: ``-1``)
clamp (float, optional): clamp for gradients (Default: ``-1``)
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. (Default: ``'mean'``)
Example
>>> # Hypothetical values
>>> logits = torch.tensor([[[[0.1, 0.6, 0.1, 0.1, 0.1],
>>> [0.1, 0.1, 0.6, 0.1, 0.1],
>>> [0.1, 0.1, 0.2, 0.8, 0.1]],
>>> [[0.1, 0.6, 0.1, 0.1, 0.1],
>>> [0.1, 0.1, 0.2, 0.1, 0.1],
>>> [0.7, 0.1, 0.2, 0.1, 0.1]]]],
>>> dtype=torch.float32,
>>> requires_grad=True)
>>> targets = torch.tensor([[1, 2]], dtype=torch.int)
>>> logit_lengths = torch.tensor([2], dtype=torch.int)
>>> target_lengths = torch.tensor([2], dtype=torch.int)
>>> transform = transforms.RNNTLoss(blank=0)
>>> loss = transform(logits, targets, logit_lengths, target_lengths)
>>> loss.backward()
"""
def __init__(
self,
blank: int = -1,
clamp: float = -1.,
reduction: str = "mean",
):
super().__init__()
self.blank = blank
self.clamp = clamp
self.reduction = reduction
def forward(
self,
logits: Tensor,
targets: Tensor,
logit_lengths: Tensor,
target_lengths: Tensor,
):
"""
Args:
logits (Tensor): Tensor of dimension `(batch, max seq length, max target length + 1, class)`
containing output from joiner
targets (Tensor): Tensor of dimension `(batch, max target length)` containing zero-padded targets
logit_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of each sequence from encoder
target_lengths (Tensor): Tensor of dimension `(batch)` containing lengths of targets for each sequence
Returns:
Tensor: Loss with the reduction option applied. If ``reduction`` is ``'none'``, then size (batch),
otherwise scalar.
"""
return F.rnnt_loss(
logits,
targets,
logit_lengths,
target_lengths,
self.blank,
self.clamp,
self.reduction
)
def _get_mat_trace(input: torch.Tensor, dim1: int = -1, dim2: int = -2) -> torch.Tensor:
r"""Compute the trace of a Tensor along ``dim1`` and ``dim2`` dimensions.
Args:
input (torch.Tensor): Tensor of dimension `(..., channel, channel)`
dim1 (int, optional): the first dimension of the diagonal matrix
(Default: -1)
dim2 (int, optional): the second dimension of the diagonal matrix
(Default: -2)
Returns:
torch.Tensor: trace of the input Tensor
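Example
>>> # Illustrative sketch: the trace of a batch of 3x3 identity matrices is 3.
>>> x = torch.eye(3).expand(4, 3, 3)
>>> _get_mat_trace(x)  # tensor([3., 3., 3., 3.])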
"""
assert input.ndim >= 2, "The dimension of the tensor must be at least 2."
assert input.shape[dim1] == input.shape[dim2],\
"The size of ``dim1`` and ``dim2`` must be the same."
input = torch.diagonal(input, 0, dim1=dim1, dim2=dim2)
return input.sum(dim=-1)
class PSD(torch.nn.Module):
r"""Compute cross-channel power spectral density (PSD) matrix.
Args:
multi_mask (bool, optional): whether to use multi-channel Time-Frequency masks. (Default: ``False``)
normalize (bool, optional): whether to normalize the mask along the time dimension. (Default: ``True``)
eps (float, optional): a value added to the denominator in mask normalization. (Default: 1e-15)
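Example
>>> # Illustrative sketch; ``specgram`` is assumed to be a complex multi-channel STFT of shape
>>> # `(channel, freq, time)` and ``mask`` a Time-Frequency mask of shape `(freq, time)`.
>>> psd_transform = torchaudio.transforms.PSD(multi_mask=False, normalize=True)
>>> psd = psd_transform(specgram, mask)  # (freq, channel, channel)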
"""
def __init__(self, multi_mask: bool = False, normalize: bool = True, eps: float = 1e-15):
super().__init__()
self.multi_mask = multi_mask
self.normalize = normalize
self.eps = eps
def forward(self, specgram: torch.Tensor, mask: Optional[torch.Tensor] = None):
"""
Args:
specgram (torch.Tensor): multi-channel complex-valued STFT matrix.
Tensor of dimension `(..., channel, freq, time)`
mask (torch.Tensor or None, optional): Time-Frequency mask for normalization.
Tensor of dimension `(..., freq, time)` if multi_mask is ``False`` or
of dimension `(..., channel, freq, time)` if multi_mask is ``True``
Returns:
Tensor: PSD matrix of the input STFT matrix.
Tensor of dimension `(..., freq, channel, channel)`
"""
# outer product:
# (..., ch_1, freq, time) x (..., ch_2, freq, time) -> (..., freq, time, ch_1, ch_2)
psd = torch.einsum("...cft,...eft->...ftce", [specgram, specgram.conj()])
if mask is not None:
if self.multi_mask:
# Averaging mask along channel dimension
mask = mask.mean(dim=-3) # (..., freq, time)
# Normalize the mask along the time dimension:
if self.normalize:
mask = mask / (mask.sum(dim=-1, keepdim=True) + self.eps)
psd = psd * mask.unsqueeze(-1).unsqueeze(-1)
psd = psd.sum(dim=-3)
return psd
class MVDR(torch.nn.Module):
"""Minimum Variance Distortionless Response (MVDR) module that performs MVDR beamforming with Time-Frequency masks.
Based on https://github.com/espnet/espnet/blob/master/espnet2/enh/layers/beamformer.py
We provide three solutions of MVDR beamforming. One is based on *reference channel selection*
[:footcite:`souden2009optimal`] (``solution=ref_channel``).
.. math::
\\textbf{w}_{\\text{MVDR}}(f) =\
\\frac{{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bf{\\Phi}_{\\textbf{SS}}}}(f)}\
{\\text{Trace}({{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f) \\bf{\\Phi}_{\\textbf{SS}}}(f))}}\\bm{u}
where :math:`\\bf{\\Phi}_{\\textbf{SS}}` and :math:`\\bf{\\Phi}_{\\textbf{NN}}` are the covariance\
matrices of speech and noise, respectively. :math:`\\bf{u}` is a one-hot vector to determine the\
reference channel.
The other two solutions are based on the steering vector (``solution=stv_evd`` or ``solution=stv_power``).
.. math::
\\textbf{w}_{\\text{MVDR}}(f) =\
\\frac{{{\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bm{v}}(f)}}\
{{\\bm{v}^{\\mathsf{H}}}(f){\\bf{\\Phi}_{\\textbf{NN}}^{-1}}(f){\\bm{v}}(f)}
where :math:`\\bm{v}` is the acoustic transfer function or the steering vector.\
:math:`.^{\\mathsf{H}}` denotes the Hermitian Conjugate operation.
We apply either *eigenvalue decomposition*
[:footcite:`higuchi2016robust`] or the *power method* [:footcite:`mises1929praktische`] to get the
steering vector from the PSD matrix of speech.
After estimating the beamforming weight, the enhanced Short-time Fourier Transform (STFT) is obtained by
.. math::
\\hat{\\bf{S}} = {\\bf{w}^\\mathsf{H}}{\\bf{Y}}, {\\bf{w}} \\in \\mathbb{C}^{M \\times F}
where :math:`\\bf{Y}` and :math:`\\hat{\\bf{S}}` are the STFT of the multi-channel noisy speech and\
the single-channel enhanced speech, respectively.
For online streaming audio, we provide a *recursive method* [:footcite:`higuchi2017online`] to update the
PSD matrices of speech and noise, respectively.
Args:
ref_channel (int, optional): the reference channel for beamforming. (Default: ``0``)
solution (str, optional): the solution to get MVDR weight.
Options: [``ref_channel``, ``stv_evd``, ``stv_power``]. (Default: ``ref_channel``)
multi_mask (bool, optional): whether to use multi-channel Time-Frequency masks. (Default: ``False``)
diag_loading (bool, optional): whether to apply diagonal loading on the psd matrix of noise.
(Default: ``True``)
diag_eps (float, optional): the coefficient multiplied to the identity matrix for diagonal loading.
(Default: 1e-7)
online (bool, optional): whether to update the mvdr vector based on the previous psd matrices.
(Default: ``False``)
Note:
The MVDR Module requires the input STFT to be double precision (``torch.complex128`` or ``torch.cdouble``),
to improve the numerical stability. You can downgrade the precision to ``torch.float`` after generating the
enhanced waveform for ASR joint training.
Note:
If you use ``stv_evd`` solution, the gradient of the same input may not be identical if the
eigenvalues of the PSD matrix are not distinct (i.e. some eigenvalues are close or identical).
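Example
>>> # Illustrative sketch; ``specgram`` is assumed to be a multi-channel complex STFT of dtype
>>> # ``torch.cdouble`` and shape `(channel, freq, time)`; ``mask_s`` and ``mask_n`` are
>>> # Time-Frequency masks of shape `(freq, time)`.
>>> transform = torchaudio.transforms.MVDR(ref_channel=0, solution='ref_channel')
>>> stft_enhanced = transform(specgram, mask_s, mask_n)  # (freq, time)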
"""
def __init__(
self,
ref_channel: int = 0,
solution: str = "ref_channel",
multi_mask: bool = False,
diag_loading: bool = True,
diag_eps: float = 1e-7,
online: bool = False,
):
super().__init__()
assert solution in ["ref_channel", "stv_evd", "stv_power"],\
"Unknown solution provided. Must be one of [``ref_channel``, ``stv_evd``, ``stv_power``]."
self.ref_channel = ref_channel
self.solution = solution
self.multi_mask = multi_mask
self.diag_loading = diag_loading
self.diag_eps = diag_eps
self.online = online
self.psd = PSD(multi_mask)
psd_s: torch.Tensor = torch.zeros(1)
psd_n: torch.Tensor = torch.zeros(1)
mask_sum_s: torch.Tensor = torch.zeros(1)
mask_sum_n: torch.Tensor = torch.zeros(1)
self.register_buffer('psd_s', psd_s)
self.register_buffer('psd_n', psd_n)
self.register_buffer('mask_sum_s', mask_sum_s)
self.register_buffer('mask_sum_n', mask_sum_n)
def _get_updated_mvdr_vector(
self,
psd_s: torch.Tensor,
psd_n: torch.Tensor,
mask_s: torch.Tensor,
mask_n: torch.Tensor,
reference_vector: torch.Tensor,
solution: str = 'ref_channel',
diagonal_loading: bool = True,
diag_eps: float = 1e-7,
eps: float = 1e-8,
) -> torch.Tensor:
r"""Recursively update the MVDR beamforming vector.
Args:
psd_s (torch.Tensor): psd matrix of target speech
psd_n (torch.Tensor): psd matrix of noise
mask_s (torch.Tensor): T-F mask of target speech
mask_n (torch.Tensor): T-F mask of noise
reference_vector (torch.Tensor): one-hot reference channel matrix
solution (str, optional): the solution to estimate the beamforming weight
(Default: ``ref_channel``)
diagonal_loading (bool, optional): whether to apply diagonal loading to psd_n
(Default: ``True``)
diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading
(Default: 1e-7)
eps (float, optional): a value added to the denominator in mask normalization. (Default: 1e-8)
Returns:
Tensor: the mvdr beamforming weight matrix
"""
if self.multi_mask:
# Averaging mask along channel dimension
mask_s = mask_s.mean(dim=-3) # (..., freq, time)
mask_n = mask_n.mean(dim=-3) # (..., freq, time)
if self.psd_s.ndim == 1:
self.psd_s = psd_s
self.psd_n = psd_n
self.mask_sum_s = mask_s.sum(dim=-1)
self.mask_sum_n = mask_n.sum(dim=-1)
return self._get_mvdr_vector(psd_s, psd_n, reference_vector, solution, diagonal_loading, diag_eps, eps)
else:
psd_s = self._get_updated_psd_speech(psd_s, mask_s)
psd_n = self._get_updated_psd_noise(psd_n, mask_n)
self.psd_s = psd_s
self.psd_n = psd_n
self.mask_sum_s = self.mask_sum_s + mask_s.sum(dim=-1)
self.mask_sum_n = self.mask_sum_n + mask_n.sum(dim=-1)
return self._get_mvdr_vector(psd_s, psd_n, reference_vector, solution, diagonal_loading, diag_eps, eps)
def _get_updated_psd_speech(self, psd_s: torch.Tensor, mask_s: torch.Tensor) -> torch.Tensor:
r"""Update psd of speech recursively.
Args:
psd_s (torch.Tensor): psd matrix of target speech
mask_s (torch.Tensor): T-F mask of target speech
Returns:
torch.Tensor: the updated psd of speech
"""
numerator = self.mask_sum_s / (self.mask_sum_s + mask_s.sum(dim=-1))
denominator = 1 / (self.mask_sum_s + mask_s.sum(dim=-1))
psd_s = self.psd_s * numerator[..., None, None] + psd_s * denominator[..., None, None]
return psd_s
def _get_updated_psd_noise(self, psd_n: torch.Tensor, mask_n: torch.Tensor) -> torch.Tensor:
r"""Update psd of noise recursively.
Args:
psd_n (torch.Tensor): psd matrix of target noise
mask_n (torch.Tensor): T-F mask of target noise
Returns:
torch.Tensor: the updated psd of noise
"""
numerator = self.mask_sum_n / (self.mask_sum_n + mask_n.sum(dim=-1))
denominator = 1 / (self.mask_sum_n + mask_n.sum(dim=-1))
psd_n = self.psd_n * numerator[..., None, None] + psd_n * denominator[..., None, None]
return psd_n
def _get_mvdr_vector(
self,
psd_s: torch.Tensor,
psd_n: torch.Tensor,
reference_vector: torch.Tensor,
solution: str = 'ref_channel',
diagonal_loading: bool = True,
diag_eps: float = 1e-7,
eps: float = 1e-8,
) -> torch.Tensor:
r"""Compute beamforming vector by the reference channel selection method.
Args:
psd_s (torch.Tensor): psd matrix of target speech
psd_n (torch.Tensor): psd matrix of noise
reference_vector (torch.Tensor): one-hot reference channel matrix
solution (str, optional): the solution to estimate the beamforming weight
(Default: ``ref_channel``)
diagonal_loading (bool, optional): whether to apply diagonal loading to psd_n
(Default: ``True``)
diag_eps (float, optional): The coefficient multiplied to the identity matrix for diagonal loading
(Default: 1e-7)
eps (float, optional): a value added to the denominator in mask normalization. Default: 1e-8
Returns:
torch.Tensor: the mvdr beamforming weight matrix
"""
if diagonal_loading:
psd_n = self._tik_reg(psd_n, reg=diag_eps, eps=eps)
if solution == "ref_channel":
numerator = torch.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s
# ws: (..., C, C) / (...,) -> (..., C, C)
ws = numerator / (_get_mat_trace(numerator)[..., None, None] + eps)
# h: (..., F, C_1, C_2) x (..., C_2) -> (..., F, C_1)
beamform_vector = torch.einsum("...fec,...c->...fe", [ws, reference_vector])
else:
if solution == "stv_evd":
stv = self._get_steering_vector_evd(psd_s)
else:
stv = self._get_steering_vector_power(psd_s, psd_n, reference_vector)
# numerator = psd_n.inv() @ stv
numerator = torch.linalg.solve(psd_n, stv).squeeze(-1) # (..., freq, channel)
# denominator = stv^H @ psd_n.inv() @ stv
denominator = torch.einsum("...d,...d->...", [stv.conj().squeeze(-1), numerator])
# normalize the numerator
scale = stv.squeeze(-1)[..., self.ref_channel, None].conj()
beamform_vector = numerator * scale / (denominator.real.unsqueeze(-1) + eps)
return beamform_vector
def _get_steering_vector_evd(self, psd_s: torch.Tensor) -> torch.Tensor:
r"""Estimate the steering vector by eigenvalue decomposition.
Args:
psd_s (torch.tensor): covariance matrix of speech
Tensor of dimension `(..., freq, channel, channel)`
Returns:
torch.Tensor: the enhanced STFT
Tensor of dimension `(..., freq, channel, 1)`
"""
w, v = torch.linalg.eig(psd_s) # (..., freq, channel, channel)
_, indices = torch.max(w.abs(), dim=-1, keepdim=True)
indices = indices.unsqueeze(-1)
stv = v.gather(-1, indices.expand(psd_s.shape[:-1] + (1,))) # (..., freq, channel, 1)
return stv
def _get_steering_vector_power(
self,
psd_s: torch.Tensor,
psd_n: torch.Tensor,
reference_vector: torch.Tensor
) -> torch.Tensor:
r"""Estimate the steering vector by the power method.
Args:
psd_s (torch.tensor): covariance matrix of speech
Tensor of dimension `(..., freq, channel, channel)`
psd_n (torch.Tensor): covariance matrix of noise
Tensor of dimension `(..., freq, channel, channel)`
reference_vector (torch.Tensor): one-hot reference channel matrix
Returns:
torch.Tensor: the enhanced STFT
Tensor of dimension `(..., freq, channel, 1)`
"""
phi = torch.linalg.solve(psd_n, psd_s) # psd_n.inv() @ psd_s
stv = torch.einsum("...fec,...c->...fe", [phi, reference_vector])
stv = stv.unsqueeze(-1)
stv = torch.matmul(phi, stv)
stv = torch.matmul(psd_s, stv)
return stv
def _apply_beamforming_vector(
self,
specgram: torch.Tensor,
beamform_vector: torch.Tensor
) -> torch.Tensor:
r"""Apply the beamforming weight to the noisy STFT
Args:
specgram (torch.tensor): multi-channel noisy STFT
Tensor of dimension `(..., channel, freq, time)`
beamform_vector (torch.Tensor): beamforming weight matrix
Tensor of dimension `(..., freq, channel)`
Returns:
torch.Tensor: the enhanced STFT
Tensor of dimension `(..., freq, time)`
"""
# (..., channel) x (..., channel, freq, time) -> (..., freq, time)
specgram_enhanced = torch.einsum("...fc,...cft->...ft", [beamform_vector.conj(), specgram])
return specgram_enhanced
def _tik_reg(
self,
mat: torch.Tensor,
reg: float = 1e-7,
eps: float = 1e-8
) -> torch.Tensor:
"""Perform Tikhonov regularization (only modifying real part).
Args:
mat (torch.Tensor): input matrix (..., channel, channel)
reg (float, optional): regularization factor (Default: 1e-7)
eps (float, optional): a value added to avoid an all-zero correlation matrix (Default: 1e-8)
Returns:
torch.Tensor: regularized matrix (..., channel, channel)
"""
# Add eps
C = mat.size(-1)
eye = torch.eye(C, dtype=mat.dtype, device=mat.device)
with torch.no_grad():
epsilon = _get_mat_trace(mat).real[..., None, None] * reg
# in case that correlation_matrix is all-zero
epsilon = epsilon + eps
mat = mat + epsilon * eye[..., :, :]
return mat
def forward(
self,
specgram: torch.Tensor,
mask_s: torch.Tensor,
mask_n: Optional[torch.Tensor] = None
) -> torch.Tensor:
"""Perform MVDR beamforming.
Args:
specgram (torch.Tensor): the multi-channel STFT of the noisy speech.
Tensor of dimension `(..., channel, freq, time)`
mask_s (torch.Tensor): Time-Frequency mask of target speech.
Tensor of dimension `(..., freq, time)` if multi_mask is ``False``
or of dimension `(..., channel, freq, time)` if multi_mask is ``True``
mask_n (torch.Tensor or None, optional): Time-Frequency mask of noise.
Tensor of dimension `(..., freq, time)` if multi_mask is ``False``
or of dimension `(..., channel, freq, time)` if multi_mask is ``True``
(Default: None)
Returns:
torch.Tensor: The single-channel STFT of the enhanced speech.
Tensor of dimension `(..., freq, time)`
"""
if specgram.ndim < 3:
raise ValueError(
f"Expected at least 3D tensor (..., channel, freq, time). Found: {specgram.shape}"
)
if specgram.dtype != torch.cdouble:
raise ValueError(
f"The type of ``specgram`` tensor must be ``torch.cdouble``. Found: {specgram.dtype}"
)
if mask_n is None:
warnings.warn(
"``mask_n`` is not provided, use ``1 - mask_s`` as ``mask_n``."
)
mask_n = 1 - mask_s
shape = specgram.size()
# pack batch
specgram = specgram.reshape(-1, shape[-3], shape[-2], shape[-1])
if self.multi_mask:
mask_s = mask_s.reshape(-1, shape[-3], shape[-2], shape[-1])
mask_n = mask_n.reshape(-1, shape[-3], shape[-2], shape[-1])
else:
mask_s = mask_s.reshape(-1, shape[-2], shape[-1])
mask_n = mask_n.reshape(-1, shape[-2], shape[-1])
psd_s = self.psd(specgram, mask_s)  # (..., freq, channel, channel)
psd_n = self.psd(specgram, mask_n)  # (..., freq, channel, channel)
u = torch.zeros(
specgram.size()[:-2],
device=specgram.device,
dtype=torch.cdouble
) # (..., channel)
u[..., self.ref_channel].fill_(1)
if self.online:
w_mvdr = self._get_updated_mvdr_vector(
psd_s,
psd_n,
mask_s,
mask_n,
u,
self.solution,
self.diag_loading,
self.diag_eps
)
else:
w_mvdr = self._get_mvdr_vector(
psd_s,
psd_n,
u,
self.solution,
self.diag_loading,
self.diag_eps
)
specgram_enhanced = self._apply_beamforming_vector(specgram, w_mvdr)
# unpack batch
specgram_enhanced = specgram_enhanced.reshape(shape[:-3] + shape[-2:])
return specgram_enhanced
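# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source): it shows how the
# ``forward`` method above is typically driven -- a complex-valued multi-channel
# STFT plus time-frequency masks. The tensor shapes, the random masks, and the
# ``ref_channel`` constructor argument are assumptions made for illustration.
if __name__ == "__main__":
    import torch
    import torchaudio

    batch, channel, freq, time = 1, 4, 257, 100
    # Complex STFT of the noisy mixture; the dtype must be ``torch.cdouble``.
    specgram = torch.randn(batch, channel, freq, time, dtype=torch.cdouble)
    # Dummy speech/noise masks with values in [0, 1] (``multi_mask=False`` layout).
    mask_s = torch.rand(batch, freq, time)
    mask_n = torch.rand(batch, freq, time)

    mvdr = torchaudio.transforms.MVDR(ref_channel=0)
    enhanced = mvdr(specgram, mask_s, mask_n)
    print(enhanced.shape)  # expected: (1, 257, 100), complex-valued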
|
# To use this file, the dependency (https://github.com/vesis84/kaldi-io-for-python)
# needs to be installed. This is a light wrapper around kaldi_io that returns
# torch.Tensors.
from typing import Any, Callable, Iterable, Tuple
import torch
from torch import Tensor
from torchaudio._internal import module_utils as _mod_utils
if _mod_utils.is_module_available('kaldi_io', 'numpy'):
import numpy as np
import kaldi_io
__all__ = [
'read_vec_int_ark',
'read_vec_flt_scp',
'read_vec_flt_ark',
'read_mat_scp',
'read_mat_ark',
]
def _convert_method_output_to_tensor(file_or_fd: Any,
fn: Callable,
convert_contiguous: bool = False) -> Iterable[Tuple[str, Tensor]]:
r"""Takes a method invokes it. The output is converted to a tensor.
Args:
file_or_fd (str/FileDescriptor): File name or file descriptor
fn (Callable): Function that takes a file name/descriptor and returns an
Iterable of (key, numpy array) pairs.
convert_contiguous (bool, optional): Determines whether the array should be converted into a
contiguous layout. (Default: ``False``)
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is vec/mat
"""
for key, np_arr in fn(file_or_fd):
if convert_contiguous:
np_arr = np.ascontiguousarray(np_arr)
yield key, torch.from_numpy(np_arr)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_vec_int_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<int>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_int_ark(file) }
"""
# Requires convert_contiguous to be True because elements from an int32 vector are
# stored in tuples: (sizeof(int32), value) so strides are (5,) instead of (4,), which will throw an error
# in from_numpy as it expects strides to be a multiple of 4 (int32).
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_int_ark, convert_contiguous=True)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_vec_flt_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<float32/float64>) tuples, read according to Kaldi scp.
Args:
file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read scp to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_scp(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_scp)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_vec_flt_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,vector<float32/float64>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the vector read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_vec_flt_ark(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_vec_flt_ark)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_mat_scp(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,matrix<float32/float64>) tuples, read according to Kaldi scp.
Args:
file_or_fd (str/FileDescriptor): scp, gzipped scp, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
Example
>>> # read scp to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_scp(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_scp)
@_mod_utils.requires_module('kaldi_io', 'numpy')
def read_mat_ark(file_or_fd: Any) -> Iterable[Tuple[str, Tensor]]:
r"""Create generator of (key,matrix<float32/float64>) tuples, which reads from the ark file/stream.
Args:
file_or_fd (str/FileDescriptor): ark, gzipped ark, pipe or opened file descriptor
Returns:
Iterable[Tuple[str, Tensor]]: The string is the key and the tensor is the matrix read from file
Example
>>> # read ark to a 'dictionary'
>>> d = { u:d for u,d in torchaudio.kaldi_io.read_mat_ark(file) }
"""
return _convert_method_output_to_tensor(file_or_fd, kaldi_io.read_mat_ark)
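# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source): how the readers
# above are typically consumed. The path "feats.ark" is an assumption; any
# Kaldi ark file or pipe works, and the `kaldi_io` package must be installed.
if __name__ == "__main__":
    for key, mat in read_mat_ark("feats.ark"):
        # ``mat`` is a torch.Tensor of shape (num_frames, feat_dim)
        print(key, mat.shape)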
|
from torchaudio import _extension # noqa: F401
from torchaudio import (
compliance,
datasets,
functional,
models,
pipelines,
kaldi_io,
utils,
sox_effects,
transforms,
)
from torchaudio.backend import (
list_audio_backends,
get_audio_backend,
set_audio_backend,
)
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
__all__ = [
'compliance',
'datasets',
'functional',
'models',
'pipelines',
'kaldi_io',
'utils',
'sox_effects',
'transforms',
'list_audio_backends',
'get_audio_backend',
'set_audio_backend',
]
|
import os
import warnings
from pathlib import Path
import torch
from torchaudio._internal import module_utils as _mod_utils # noqa: F401
def _init_extension():
if not _mod_utils.is_module_available('torchaudio._torchaudio'):
warnings.warn('torchaudio C++ extension is not available.')
return
suffix = 'pyd' if os.name == 'nt' else 'so'
path = Path(__file__).parent / 'lib' / f'libtorchaudio.{suffix}'
# In case `torchaudio` is deployed with `pex` format, this file does not exist.
# In this case, we expect that `libtorchaudio` is available somewhere
# in the search path of the dynamic loading mechanism, so that when `_torchaudio`,
# which depends on `libtorchaudio`, is imported, the dynamic loader resolves it for us.
if path.exists():
torch.ops.load_library(path)
torch.classes.load_library(path)
# This import is for initializing the methods registered via PyBind11
from torchaudio import _torchaudio # noqa
_init_extension()
|
from torch.hub import load_state_dict_from_url, download_url_to_file
__all__ = [
"load_state_dict_from_url",
"download_url_to_file",
]
|
import warnings
import importlib.util
from typing import Optional
from functools import wraps
import torch
def is_module_available(*modules: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
return all(importlib.util.find_spec(m) is not None for m in modules)
def requires_module(*modules: str):
"""Decorate function to give error message if invoked without required optional modules.
This decorator is to give a better error message to users rather
than raising ``NameError: name 'module' is not defined`` at random places.
"""
missing = [m for m in modules if not is_module_available(m)]
if not missing:
# fall through. If all the modules are available, no need to decorate
def decorator(func):
return func
else:
req = f'module: {missing[0]}' if len(missing) == 1 else f'modules: {missing}'
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires {req}')
return wrapped
return decorator
def deprecated(direction: str, version: Optional[str] = None):
"""Decorator to add deprecation message
Args:
direction (str): Migration steps to be given to users.
version (str or None, optional): The version in which the object will be removed.
"""
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
message = (
f'{func.__module__}.{func.__name__} has been deprecated '
f'and will be removed from {"future" if version is None else version} release. '
f'{direction}')
warnings.warn(message, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return decorator
def is_kaldi_available():
return is_module_available('torchaudio._torchaudio') and torch.ops.torchaudio.is_kaldi_available()
def requires_kaldi():
if is_kaldi_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires kaldi')
return wrapped
return decorator
def _check_soundfile_importable():
if not is_module_available('soundfile'):
return False
try:
import soundfile # noqa: F401
return True
except Exception:
warnings.warn("Failed to import soundfile. 'soundfile' backend is not available.")
return False
_is_soundfile_importable = _check_soundfile_importable()
def is_soundfile_available():
return _is_soundfile_importable
def requires_soundfile():
if is_soundfile_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires soundfile')
return wrapped
return decorator
def is_sox_available():
return is_module_available('torchaudio._torchaudio') and torch.ops.torchaudio.is_sox_available()
def requires_sox():
if is_sox_available():
def decorator(func):
return func
else:
def decorator(func):
@wraps(func)
def wrapped(*args, **kwargs):
raise RuntimeError(f'{func.__module__}.{func.__name__} requires sox')
return wrapped
return decorator
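# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source): how the
# ``requires_module`` and ``deprecated`` decorators above are meant to be
# stacked. ``median_filter`` and the choice of ``scipy`` as the optional
# dependency are made-up examples.
if __name__ == "__main__":
    @requires_module("scipy")
    @deprecated("Please use some_new_filter instead.", version="0.11")
    def median_filter(values):
        import scipy.signal
        return scipy.signal.medfilt(values)

    try:
        print(median_filter([1.0, 5.0, 2.0]))
    except RuntimeError as err:
        # Raised by ``requires_module`` when scipy is not installed.
        print(err)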
|
import os
from typing import Tuple, Optional, Union
from pathlib import Path
import torchaudio
from torch.utils.data import Dataset
from torch import Tensor
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
FOLDER_IN_ARCHIVE = "SpeechCommands"
URL = "speech_commands_v0.02"
HASH_DIVIDER = "_nohash_"
EXCEPT_FOLDER = "_background_noise_"
_CHECKSUMS = {
"https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.01.tar.gz":
"3cd23799cb2bbdec517f1cc028f8d43c",
"https://storage.googleapis.com/download.tensorflow.org/data/speech_commands_v0.02.tar.gz":
"6b74f3901214cb2c2934e98196829835",
}
def _load_list(root, *filenames):
output = []
for filename in filenames:
filepath = os.path.join(root, filename)
with open(filepath) as fileobj:
output += [os.path.normpath(os.path.join(root, line.strip())) for line in fileobj]
return output
def load_speechcommands_item(filepath: str, path: str) -> Tuple[Tensor, int, str, str, int]:
relpath = os.path.relpath(filepath, path)
label, filename = os.path.split(relpath)
# Besides the officially supported split method for datasets defined by "validation_list.txt"
# and "testing_list.txt" over "speech_commands_v0.0x.tar.gz" archives, an alternative split
# method referred to in paragraph 2-3 of Section 7.1, references 13 and 14 of the original
# paper, and the checksums file from the tensorflow_datasets package [1] is also supported.
# Some filenames in those "speech_commands_test_set_v0.0x.tar.gz" archives have the form
# "xxx.wav.wav", so the file extension needs to be stripped twice.
# [1] https://github.com/tensorflow/datasets/blob/master/tensorflow_datasets/url_checksums/speech_commands.txt
speaker, _ = os.path.splitext(filename)
speaker, _ = os.path.splitext(speaker)
speaker_id, utterance_number = speaker.split(HASH_DIVIDER)
utterance_number = int(utterance_number)
# Load audio
waveform, sample_rate = torchaudio.load(filepath)
return waveform, sample_rate, label, speaker_id, utterance_number
class SPEECHCOMMANDS(Dataset):
"""Create a Dataset for Speech Commands.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from,
or the type of the dataset to download.
Allowed type values are ``"speech_commands_v0.01"`` and ``"speech_commands_v0.02"``
(default: ``"speech_commands_v0.02"``)
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"SpeechCommands"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
subset (str or None, optional):
Select a subset of the dataset [None, "training", "validation", "testing"]. None means
the whole dataset. "validation" and "testing" are defined in "validation_list.txt" and
"testing_list.txt", respectively, and "training" is the rest. Details for the files
"validation_list.txt" and "testing_list.txt" are explained in the README of the dataset
and in the introduction of Section 7 of the original paper and its reference 12. The
original paper can be found `here <https://arxiv.org/pdf/1804.03209.pdf>`_. (Default: ``None``)
"""
def __init__(self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
subset: Optional[str] = None,
) -> None:
assert subset is None or subset in ["training", "validation", "testing"], (
"When `subset` not None, it must take a value from "
+ "{'training', 'validation', 'testing'}."
)
if url in [
"speech_commands_v0.01",
"speech_commands_v0.02",
]:
base_url = "https://storage.googleapis.com/download.tensorflow.org/data/"
ext_archive = ".tar.gz"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
archive = os.path.join(root, basename)
basename = basename.rsplit(".", 2)[0]
folder_in_archive = os.path.join(folder_in_archive, basename)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive, self._path)
if subset == "validation":
self._walker = _load_list(self._path, "validation_list.txt")
elif subset == "testing":
self._walker = _load_list(self._path, "testing_list.txt")
elif subset == "training":
excludes = set(_load_list(self._path, "validation_list.txt", "testing_list.txt"))
walker = sorted(str(p) for p in Path(self._path).glob('*/*.wav'))
self._walker = [
w for w in walker
if HASH_DIVIDER in w
and EXCEPT_FOLDER not in w
and os.path.normpath(w) not in excludes
]
else:
walker = sorted(str(p) for p in Path(self._path).glob('*/*.wav'))
self._walker = [w for w in walker if HASH_DIVIDER in w and EXCEPT_FOLDER not in w]
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str, int):
``(waveform, sample_rate, label, speaker_id, utterance_number)``
"""
fileid = self._walker[n]
return load_speechcommands_item(fileid, self._path)
def __len__(self) -> int:
return len(self._walker)
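# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source). "./data" is an
# assumed root directory; with ``download=True`` the archive is fetched and
# extracted on the first run.
if __name__ == "__main__":
    dataset = SPEECHCOMMANDS("./data", download=True, subset="validation")
    waveform, sample_rate, label, speaker_id, utterance_number = dataset[0]
    print(label, speaker_id, sample_rate, waveform.shape)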
|
from pathlib import Path
from typing import Dict, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
from torchaudio.datasets.utils import (
download_url,
extract_archive,
validate_file,
)
_URL = "https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"
_CHECKSUM = "29e93debeb0e779986542229a81ff29b"
_SUPPORTED_SUBSETS = {"train", "test"}
class DR_VCTK(Dataset):
"""Create a dataset for Device Recorded VCTK (Small subset version).
Args:
root (str or Path): Root directory where the dataset's top level directory is found.
subset (str): The subset to use. Can be one of ``"train"`` and ``"test"``. (default: ``"train"``).
download (bool):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str): The URL to download the dataset from.
(default: ``"https://datashare.ed.ac.uk/bitstream/handle/10283/3038/DR-VCTK.zip"``)
"""
def __init__(
self,
root: Union[str, Path],
subset: str = "train",
*,
download: bool = False,
url: str = _URL,
) -> None:
if subset not in _SUPPORTED_SUBSETS:
raise RuntimeError(
f"The subset '{subset}' does not match any of the supported subsets: {_SUPPORTED_SUBSETS}"
)
root = Path(root).expanduser()
archive = root / "DR-VCTK.zip"
self._subset = subset
self._path = root / "DR-VCTK" / "DR-VCTK"
self._clean_audio_dir = self._path / f"clean_{self._subset}set_wav_16k"
self._noisy_audio_dir = self._path / f"device-recorded_{self._subset}set_wav_16k"
self._config_filepath = self._path / "configurations" / f"{self._subset}_ch_log.txt"
if not self._path.is_dir():
if not archive.is_file():
if not download:
raise RuntimeError("Dataset not found. Please use `download=True` to download it.")
download_url(url, root)
self._validate_checksum(archive)
extract_archive(archive, root)
self._config = self._load_config(self._config_filepath)
self._filename_list = sorted(self._config)
def _validate_checksum(self, archive):
with open(archive, "rb") as file_obj:
if not validate_file(file_obj, _CHECKSUM, "md5"):
raise RuntimeError(
f"The hash of {str(archive)} does not match. Delete the file manually and retry."
)
def _load_config(self, filepath: str) -> Dict[str, Tuple[str, int]]:
# Skip header
skip_rows = 2 if self._subset == "train" else 1
config = {}
with open(filepath) as f:
for i, line in enumerate(f):
if i < skip_rows or not line:
continue
filename, source, channel_id = line.strip().split("\t")
config[filename] = (source, int(channel_id))
return config
def _load_dr_vctk_item(self, filename: str) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
speaker_id, utterance_id = filename.split(".")[0].split("_")
source, channel_id = self._config[filename]
file_clean_audio = self._clean_audio_dir / filename
file_noisy_audio = self._noisy_audio_dir / filename
waveform_clean, sample_rate_clean = torchaudio.load(file_clean_audio)
waveform_noisy, sample_rate_noisy = torchaudio.load(file_noisy_audio)
return (
waveform_clean,
sample_rate_clean,
waveform_noisy,
sample_rate_noisy,
speaker_id,
utterance_id,
source,
channel_id,
)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Tensor, int, str, str, str, int]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Tensor, int, str, str, str, int):
``(waveform_clean, sample_rate_clean, waveform_noisy, sample_rate_noisy, speaker_id,\
utterance_id, source, channel_id)``
"""
filename = self._filename_list[n]
return self._load_dr_vctk_item(filename)
def __len__(self) -> int:
return len(self._filename_list)
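# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source). "./data" is an
# assumed root directory; with ``download=True`` the DR-VCTK archive is
# fetched, checksum-verified, and extracted first.
if __name__ == "__main__":
    dataset = DR_VCTK("./data", subset="test", download=True)
    (waveform_clean, sr_clean, waveform_noisy, sr_noisy,
     speaker_id, utterance_id, source, channel_id) = dataset[0]
    print(speaker_id, utterance_id, source, channel_id, waveform_clean.shape)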
|
import os
import re
from pathlib import Path
from typing import Iterable, Tuple, Union, List
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url
_CHECKSUMS = {
"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b":
"825f4ebd9183f2417df9f067a9cabe86",
"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.symbols":
"385e490aabc71b48e772118e3d02923e",
}
_PUNCTUATIONS = set([
"!EXCLAMATION-POINT",
"\"CLOSE-QUOTE",
"\"DOUBLE-QUOTE",
"\"END-OF-QUOTE",
"\"END-QUOTE",
"\"IN-QUOTES",
"\"QUOTE",
"\"UNQUOTE",
"#HASH-MARK",
"#POUND-SIGN",
"#SHARP-SIGN",
"%PERCENT",
"&ERSAND",
"'END-INNER-QUOTE",
"'END-QUOTE",
"'INNER-QUOTE",
"'QUOTE",
"'SINGLE-QUOTE",
"(BEGIN-PARENS",
"(IN-PARENTHESES",
"(LEFT-PAREN",
"(OPEN-PARENTHESES",
"(PAREN",
"(PARENS",
"(PARENTHESES",
")CLOSE-PAREN",
")CLOSE-PARENTHESES",
")END-PAREN",
")END-PARENS",
")END-PARENTHESES",
")END-THE-PAREN",
")PAREN",
")PARENS",
")RIGHT-PAREN",
")UN-PARENTHESES",
"+PLUS",
",COMMA",
"--DASH",
"-DASH",
"-HYPHEN",
"...ELLIPSIS",
".DECIMAL",
".DOT",
".FULL-STOP",
".PERIOD",
".POINT",
"/SLASH",
":COLON",
";SEMI-COLON",
";SEMI-COLON(1)",
"?QUESTION-MARK",
"{BRACE",
"{LEFT-BRACE",
"{OPEN-BRACE",
"}CLOSE-BRACE",
"}RIGHT-BRACE",
])
def _parse_dictionary(lines: Iterable[str], exclude_punctuations: bool) -> List[str]:
_alt_re = re.compile(r'\([0-9]+\)')
cmudict: List[Tuple[str, List[str]]] = list()
for line in lines:
if not line or line.startswith(';;;'): # ignore comments
continue
word, phones = line.strip().split(' ')
if word in _PUNCTUATIONS:
if exclude_punctuations:
continue
# !EXCLAMATION-POINT -> !
# --DASH -> --
# ...ELLIPSIS -> ...
if word.startswith("..."):
word = "..."
elif word.startswith("--"):
word = "--"
else:
word = word[0]
# if a word has multiple pronunciations, a number in parentheses is appended to it,
# for example, DATAPOINTS and DATAPOINTS(1);
# the regular expression `_alt_re` removes the '(1)' and changes DATAPOINTS(1) to DATAPOINTS
word = re.sub(_alt_re, '', word)
phones = phones.split(" ")
cmudict.append((word, phones))
return cmudict
class CMUDict(Dataset):
"""Create a Dataset for CMU Pronouncing Dictionary (CMUDict).
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
exclude_punctuations (bool, optional):
When enabled, exclude the pronunciation of punctuation marks, such as
`!EXCLAMATION-POINT` and `#HASH-MARK`.
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
url (str, optional):
The URL to download the dictionary from.
(default: ``"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b"``)
url_symbols (str, optional):
The URL to download the list of symbols from.
(default: ``"http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.symbols"``)
"""
def __init__(self,
root: Union[str, Path],
exclude_punctuations: bool = True,
*,
download: bool = False,
url: str = "http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b",
url_symbols: str = "http://svn.code.sf.net/p/cmusphinx/code/trunk/cmudict/cmudict-0.7b.symbols",
) -> None:
self.exclude_punctuations = exclude_punctuations
self._root_path = Path(root)
if not os.path.isdir(self._root_path):
raise RuntimeError(f'The root directory does not exist; {root}')
dict_file = self._root_path / os.path.basename(url)
symbol_file = self._root_path / os.path.basename(url_symbols)
if not os.path.exists(dict_file):
if not download:
raise RuntimeError(
'The dictionary file is not found in the following location. '
f'Set `download=True` to download it. {dict_file}')
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
if not os.path.exists(symbol_file):
if not download:
raise RuntimeError(
'The symbol file is not found in the following location. '
f'Set `download=True` to download it. {symbol_file}')
checksum = _CHECKSUMS.get(url_symbols, None)
download_url(url_symbols, root, hash_value=checksum, hash_type="md5")
with open(symbol_file, "r") as text:
self._symbols = [line.strip() for line in text.readlines()]
with open(dict_file, "r", encoding='latin-1') as text:
self._dictionary = _parse_dictionary(
text.readlines(), exclude_punctuations=self.exclude_punctuations)
def __getitem__(self, n: int) -> Tuple[str, List[str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded.
Returns:
(str, List[str]): The corresponding word and phonemes ``(word, [phonemes])``.
"""
return self._dictionary[n]
def __len__(self) -> int:
return len(self._dictionary)
@property
def symbols(self) -> List[str]:
"""list[str]: A list of phonemes symbols, such as `AA`, `AE`, `AH`.
"""
return self._symbols.copy()
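# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source). "./data" is an
# assumed directory that must already exist; ``download=True`` fetches the
# dictionary and symbol files into it.
if __name__ == "__main__":
    cmudict = CMUDict("./data", download=True)
    word, phonemes = cmudict[0]
    print(word, phonemes)       # a word and its list of ARPABET phonemes
    print(cmudict.symbols[:5])  # first few phoneme symbols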
|
import os
from pathlib import Path
from typing import Tuple, Optional, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
# The following lists prefixed with `filtered_` provide a filtered split
# that:
#
# a. Mitigates a known issue with GTZAN (duplication)
#
# b. Provides a standard split for testing against other
# methods (e.g. the one in jordipons/sklearn-audio-transfer-learning).
#
# Those are used when GTZAN is initialised with the `filtered` keyword.
# The split was taken from (github) jordipons/sklearn-audio-transfer-learning.
gtzan_genres = [
"blues",
"classical",
"country",
"disco",
"hiphop",
"jazz",
"metal",
"pop",
"reggae",
"rock",
]
filtered_test = [
"blues.00012",
"blues.00013",
"blues.00014",
"blues.00015",
"blues.00016",
"blues.00017",
"blues.00018",
"blues.00019",
"blues.00020",
"blues.00021",
"blues.00022",
"blues.00023",
"blues.00024",
"blues.00025",
"blues.00026",
"blues.00027",
"blues.00028",
"blues.00061",
"blues.00062",
"blues.00063",
"blues.00064",
"blues.00065",
"blues.00066",
"blues.00067",
"blues.00068",
"blues.00069",
"blues.00070",
"blues.00071",
"blues.00072",
"blues.00098",
"blues.00099",
"classical.00011",
"classical.00012",
"classical.00013",
"classical.00014",
"classical.00015",
"classical.00016",
"classical.00017",
"classical.00018",
"classical.00019",
"classical.00020",
"classical.00021",
"classical.00022",
"classical.00023",
"classical.00024",
"classical.00025",
"classical.00026",
"classical.00027",
"classical.00028",
"classical.00029",
"classical.00034",
"classical.00035",
"classical.00036",
"classical.00037",
"classical.00038",
"classical.00039",
"classical.00040",
"classical.00041",
"classical.00049",
"classical.00077",
"classical.00078",
"classical.00079",
"country.00030",
"country.00031",
"country.00032",
"country.00033",
"country.00034",
"country.00035",
"country.00036",
"country.00037",
"country.00038",
"country.00039",
"country.00040",
"country.00043",
"country.00044",
"country.00046",
"country.00047",
"country.00048",
"country.00050",
"country.00051",
"country.00053",
"country.00054",
"country.00055",
"country.00056",
"country.00057",
"country.00058",
"country.00059",
"country.00060",
"country.00061",
"country.00062",
"country.00063",
"country.00064",
"disco.00001",
"disco.00021",
"disco.00058",
"disco.00062",
"disco.00063",
"disco.00064",
"disco.00065",
"disco.00066",
"disco.00069",
"disco.00076",
"disco.00077",
"disco.00078",
"disco.00079",
"disco.00080",
"disco.00081",
"disco.00082",
"disco.00083",
"disco.00084",
"disco.00085",
"disco.00086",
"disco.00087",
"disco.00088",
"disco.00091",
"disco.00092",
"disco.00093",
"disco.00094",
"disco.00096",
"disco.00097",
"disco.00099",
"hiphop.00000",
"hiphop.00026",
"hiphop.00027",
"hiphop.00030",
"hiphop.00040",
"hiphop.00043",
"hiphop.00044",
"hiphop.00045",
"hiphop.00051",
"hiphop.00052",
"hiphop.00053",
"hiphop.00054",
"hiphop.00062",
"hiphop.00063",
"hiphop.00064",
"hiphop.00065",
"hiphop.00066",
"hiphop.00067",
"hiphop.00068",
"hiphop.00069",
"hiphop.00070",
"hiphop.00071",
"hiphop.00072",
"hiphop.00073",
"hiphop.00074",
"hiphop.00075",
"hiphop.00099",
"jazz.00073",
"jazz.00074",
"jazz.00075",
"jazz.00076",
"jazz.00077",
"jazz.00078",
"jazz.00079",
"jazz.00080",
"jazz.00081",
"jazz.00082",
"jazz.00083",
"jazz.00084",
"jazz.00085",
"jazz.00086",
"jazz.00087",
"jazz.00088",
"jazz.00089",
"jazz.00090",
"jazz.00091",
"jazz.00092",
"jazz.00093",
"jazz.00094",
"jazz.00095",
"jazz.00096",
"jazz.00097",
"jazz.00098",
"jazz.00099",
"metal.00012",
"metal.00013",
"metal.00014",
"metal.00015",
"metal.00022",
"metal.00023",
"metal.00025",
"metal.00026",
"metal.00027",
"metal.00028",
"metal.00029",
"metal.00030",
"metal.00031",
"metal.00032",
"metal.00033",
"metal.00038",
"metal.00039",
"metal.00067",
"metal.00070",
"metal.00073",
"metal.00074",
"metal.00075",
"metal.00078",
"metal.00083",
"metal.00085",
"metal.00087",
"metal.00088",
"pop.00000",
"pop.00001",
"pop.00013",
"pop.00014",
"pop.00043",
"pop.00063",
"pop.00064",
"pop.00065",
"pop.00066",
"pop.00069",
"pop.00070",
"pop.00071",
"pop.00072",
"pop.00073",
"pop.00074",
"pop.00075",
"pop.00076",
"pop.00077",
"pop.00078",
"pop.00079",
"pop.00082",
"pop.00088",
"pop.00089",
"pop.00090",
"pop.00091",
"pop.00092",
"pop.00093",
"pop.00094",
"pop.00095",
"pop.00096",
"reggae.00034",
"reggae.00035",
"reggae.00036",
"reggae.00037",
"reggae.00038",
"reggae.00039",
"reggae.00040",
"reggae.00046",
"reggae.00047",
"reggae.00048",
"reggae.00052",
"reggae.00053",
"reggae.00064",
"reggae.00065",
"reggae.00066",
"reggae.00067",
"reggae.00068",
"reggae.00071",
"reggae.00079",
"reggae.00082",
"reggae.00083",
"reggae.00084",
"reggae.00087",
"reggae.00088",
"reggae.00089",
"reggae.00090",
"rock.00010",
"rock.00011",
"rock.00012",
"rock.00013",
"rock.00014",
"rock.00015",
"rock.00027",
"rock.00028",
"rock.00029",
"rock.00030",
"rock.00031",
"rock.00032",
"rock.00033",
"rock.00034",
"rock.00035",
"rock.00036",
"rock.00037",
"rock.00039",
"rock.00040",
"rock.00041",
"rock.00042",
"rock.00043",
"rock.00044",
"rock.00045",
"rock.00046",
"rock.00047",
"rock.00048",
"rock.00086",
"rock.00087",
"rock.00088",
"rock.00089",
"rock.00090",
]
filtered_train = [
"blues.00029",
"blues.00030",
"blues.00031",
"blues.00032",
"blues.00033",
"blues.00034",
"blues.00035",
"blues.00036",
"blues.00037",
"blues.00038",
"blues.00039",
"blues.00040",
"blues.00041",
"blues.00042",
"blues.00043",
"blues.00044",
"blues.00045",
"blues.00046",
"blues.00047",
"blues.00048",
"blues.00049",
"blues.00073",
"blues.00074",
"blues.00075",
"blues.00076",
"blues.00077",
"blues.00078",
"blues.00079",
"blues.00080",
"blues.00081",
"blues.00082",
"blues.00083",
"blues.00084",
"blues.00085",
"blues.00086",
"blues.00087",
"blues.00088",
"blues.00089",
"blues.00090",
"blues.00091",
"blues.00092",
"blues.00093",
"blues.00094",
"blues.00095",
"blues.00096",
"blues.00097",
"classical.00030",
"classical.00031",
"classical.00032",
"classical.00033",
"classical.00043",
"classical.00044",
"classical.00045",
"classical.00046",
"classical.00047",
"classical.00048",
"classical.00050",
"classical.00051",
"classical.00052",
"classical.00053",
"classical.00054",
"classical.00055",
"classical.00056",
"classical.00057",
"classical.00058",
"classical.00059",
"classical.00060",
"classical.00061",
"classical.00062",
"classical.00063",
"classical.00064",
"classical.00065",
"classical.00066",
"classical.00067",
"classical.00080",
"classical.00081",
"classical.00082",
"classical.00083",
"classical.00084",
"classical.00085",
"classical.00086",
"classical.00087",
"classical.00088",
"classical.00089",
"classical.00090",
"classical.00091",
"classical.00092",
"classical.00093",
"classical.00094",
"classical.00095",
"classical.00096",
"classical.00097",
"classical.00098",
"classical.00099",
"country.00019",
"country.00020",
"country.00021",
"country.00022",
"country.00023",
"country.00024",
"country.00025",
"country.00026",
"country.00028",
"country.00029",
"country.00065",
"country.00066",
"country.00067",
"country.00068",
"country.00069",
"country.00070",
"country.00071",
"country.00072",
"country.00073",
"country.00074",
"country.00075",
"country.00076",
"country.00077",
"country.00078",
"country.00079",
"country.00080",
"country.00081",
"country.00082",
"country.00083",
"country.00084",
"country.00085",
"country.00086",
"country.00087",
"country.00088",
"country.00089",
"country.00090",
"country.00091",
"country.00092",
"country.00093",
"country.00094",
"country.00095",
"country.00096",
"country.00097",
"country.00098",
"country.00099",
"disco.00005",
"disco.00015",
"disco.00016",
"disco.00017",
"disco.00018",
"disco.00019",
"disco.00020",
"disco.00022",
"disco.00023",
"disco.00024",
"disco.00025",
"disco.00026",
"disco.00027",
"disco.00028",
"disco.00029",
"disco.00030",
"disco.00031",
"disco.00032",
"disco.00033",
"disco.00034",
"disco.00035",
"disco.00036",
"disco.00037",
"disco.00039",
"disco.00040",
"disco.00041",
"disco.00042",
"disco.00043",
"disco.00044",
"disco.00045",
"disco.00047",
"disco.00049",
"disco.00053",
"disco.00054",
"disco.00056",
"disco.00057",
"disco.00059",
"disco.00061",
"disco.00070",
"disco.00073",
"disco.00074",
"disco.00089",
"hiphop.00002",
"hiphop.00003",
"hiphop.00004",
"hiphop.00005",
"hiphop.00006",
"hiphop.00007",
"hiphop.00008",
"hiphop.00009",
"hiphop.00010",
"hiphop.00011",
"hiphop.00012",
"hiphop.00013",
"hiphop.00014",
"hiphop.00015",
"hiphop.00016",
"hiphop.00017",
"hiphop.00018",
"hiphop.00019",
"hiphop.00020",
"hiphop.00021",
"hiphop.00022",
"hiphop.00023",
"hiphop.00024",
"hiphop.00025",
"hiphop.00028",
"hiphop.00029",
"hiphop.00031",
"hiphop.00032",
"hiphop.00033",
"hiphop.00034",
"hiphop.00035",
"hiphop.00036",
"hiphop.00037",
"hiphop.00038",
"hiphop.00041",
"hiphop.00042",
"hiphop.00055",
"hiphop.00056",
"hiphop.00057",
"hiphop.00058",
"hiphop.00059",
"hiphop.00060",
"hiphop.00061",
"hiphop.00077",
"hiphop.00078",
"hiphop.00079",
"hiphop.00080",
"jazz.00000",
"jazz.00001",
"jazz.00011",
"jazz.00012",
"jazz.00013",
"jazz.00014",
"jazz.00015",
"jazz.00016",
"jazz.00017",
"jazz.00018",
"jazz.00019",
"jazz.00020",
"jazz.00021",
"jazz.00022",
"jazz.00023",
"jazz.00024",
"jazz.00041",
"jazz.00047",
"jazz.00048",
"jazz.00049",
"jazz.00050",
"jazz.00051",
"jazz.00052",
"jazz.00053",
"jazz.00054",
"jazz.00055",
"jazz.00056",
"jazz.00057",
"jazz.00058",
"jazz.00059",
"jazz.00060",
"jazz.00061",
"jazz.00062",
"jazz.00063",
"jazz.00064",
"jazz.00065",
"jazz.00066",
"jazz.00067",
"jazz.00068",
"jazz.00069",
"jazz.00070",
"jazz.00071",
"jazz.00072",
"metal.00002",
"metal.00003",
"metal.00005",
"metal.00021",
"metal.00024",
"metal.00035",
"metal.00046",
"metal.00047",
"metal.00048",
"metal.00049",
"metal.00050",
"metal.00051",
"metal.00052",
"metal.00053",
"metal.00054",
"metal.00055",
"metal.00056",
"metal.00057",
"metal.00059",
"metal.00060",
"metal.00061",
"metal.00062",
"metal.00063",
"metal.00064",
"metal.00065",
"metal.00066",
"metal.00069",
"metal.00071",
"metal.00072",
"metal.00079",
"metal.00080",
"metal.00084",
"metal.00086",
"metal.00089",
"metal.00090",
"metal.00091",
"metal.00092",
"metal.00093",
"metal.00094",
"metal.00095",
"metal.00096",
"metal.00097",
"metal.00098",
"metal.00099",
"pop.00002",
"pop.00003",
"pop.00004",
"pop.00005",
"pop.00006",
"pop.00007",
"pop.00008",
"pop.00009",
"pop.00011",
"pop.00012",
"pop.00016",
"pop.00017",
"pop.00018",
"pop.00019",
"pop.00020",
"pop.00023",
"pop.00024",
"pop.00025",
"pop.00026",
"pop.00027",
"pop.00028",
"pop.00029",
"pop.00031",
"pop.00032",
"pop.00033",
"pop.00034",
"pop.00035",
"pop.00036",
"pop.00038",
"pop.00039",
"pop.00040",
"pop.00041",
"pop.00042",
"pop.00044",
"pop.00046",
"pop.00049",
"pop.00050",
"pop.00080",
"pop.00097",
"pop.00098",
"pop.00099",
"reggae.00000",
"reggae.00001",
"reggae.00002",
"reggae.00004",
"reggae.00006",
"reggae.00009",
"reggae.00011",
"reggae.00012",
"reggae.00014",
"reggae.00015",
"reggae.00016",
"reggae.00017",
"reggae.00018",
"reggae.00019",
"reggae.00020",
"reggae.00021",
"reggae.00022",
"reggae.00023",
"reggae.00024",
"reggae.00025",
"reggae.00026",
"reggae.00027",
"reggae.00028",
"reggae.00029",
"reggae.00030",
"reggae.00031",
"reggae.00032",
"reggae.00042",
"reggae.00043",
"reggae.00044",
"reggae.00045",
"reggae.00049",
"reggae.00050",
"reggae.00051",
"reggae.00054",
"reggae.00055",
"reggae.00056",
"reggae.00057",
"reggae.00058",
"reggae.00059",
"reggae.00060",
"reggae.00063",
"reggae.00069",
"rock.00000",
"rock.00001",
"rock.00002",
"rock.00003",
"rock.00004",
"rock.00005",
"rock.00006",
"rock.00007",
"rock.00008",
"rock.00009",
"rock.00016",
"rock.00017",
"rock.00018",
"rock.00019",
"rock.00020",
"rock.00021",
"rock.00022",
"rock.00023",
"rock.00024",
"rock.00025",
"rock.00026",
"rock.00057",
"rock.00058",
"rock.00059",
"rock.00060",
"rock.00061",
"rock.00062",
"rock.00063",
"rock.00064",
"rock.00065",
"rock.00066",
"rock.00067",
"rock.00068",
"rock.00069",
"rock.00070",
"rock.00091",
"rock.00092",
"rock.00093",
"rock.00094",
"rock.00095",
"rock.00096",
"rock.00097",
"rock.00098",
"rock.00099",
]
filtered_valid = [
"blues.00000",
"blues.00001",
"blues.00002",
"blues.00003",
"blues.00004",
"blues.00005",
"blues.00006",
"blues.00007",
"blues.00008",
"blues.00009",
"blues.00010",
"blues.00011",
"blues.00050",
"blues.00051",
"blues.00052",
"blues.00053",
"blues.00054",
"blues.00055",
"blues.00056",
"blues.00057",
"blues.00058",
"blues.00059",
"blues.00060",
"classical.00000",
"classical.00001",
"classical.00002",
"classical.00003",
"classical.00004",
"classical.00005",
"classical.00006",
"classical.00007",
"classical.00008",
"classical.00009",
"classical.00010",
"classical.00068",
"classical.00069",
"classical.00070",
"classical.00071",
"classical.00072",
"classical.00073",
"classical.00074",
"classical.00075",
"classical.00076",
"country.00000",
"country.00001",
"country.00002",
"country.00003",
"country.00004",
"country.00005",
"country.00006",
"country.00007",
"country.00009",
"country.00010",
"country.00011",
"country.00012",
"country.00013",
"country.00014",
"country.00015",
"country.00016",
"country.00017",
"country.00018",
"country.00027",
"country.00041",
"country.00042",
"country.00045",
"country.00049",
"disco.00000",
"disco.00002",
"disco.00003",
"disco.00004",
"disco.00006",
"disco.00007",
"disco.00008",
"disco.00009",
"disco.00010",
"disco.00011",
"disco.00012",
"disco.00013",
"disco.00014",
"disco.00046",
"disco.00048",
"disco.00052",
"disco.00067",
"disco.00068",
"disco.00072",
"disco.00075",
"disco.00090",
"disco.00095",
"hiphop.00081",
"hiphop.00082",
"hiphop.00083",
"hiphop.00084",
"hiphop.00085",
"hiphop.00086",
"hiphop.00087",
"hiphop.00088",
"hiphop.00089",
"hiphop.00090",
"hiphop.00091",
"hiphop.00092",
"hiphop.00093",
"hiphop.00094",
"hiphop.00095",
"hiphop.00096",
"hiphop.00097",
"hiphop.00098",
"jazz.00002",
"jazz.00003",
"jazz.00004",
"jazz.00005",
"jazz.00006",
"jazz.00007",
"jazz.00008",
"jazz.00009",
"jazz.00010",
"jazz.00025",
"jazz.00026",
"jazz.00027",
"jazz.00028",
"jazz.00029",
"jazz.00030",
"jazz.00031",
"jazz.00032",
"metal.00000",
"metal.00001",
"metal.00006",
"metal.00007",
"metal.00008",
"metal.00009",
"metal.00010",
"metal.00011",
"metal.00016",
"metal.00017",
"metal.00018",
"metal.00019",
"metal.00020",
"metal.00036",
"metal.00037",
"metal.00068",
"metal.00076",
"metal.00077",
"metal.00081",
"metal.00082",
"pop.00010",
"pop.00053",
"pop.00055",
"pop.00058",
"pop.00059",
"pop.00060",
"pop.00061",
"pop.00062",
"pop.00081",
"pop.00083",
"pop.00084",
"pop.00085",
"pop.00086",
"reggae.00061",
"reggae.00062",
"reggae.00070",
"reggae.00072",
"reggae.00074",
"reggae.00076",
"reggae.00077",
"reggae.00078",
"reggae.00085",
"reggae.00092",
"reggae.00093",
"reggae.00094",
"reggae.00095",
"reggae.00096",
"reggae.00097",
"reggae.00098",
"reggae.00099",
"rock.00038",
"rock.00049",
"rock.00050",
"rock.00051",
"rock.00052",
"rock.00053",
"rock.00054",
"rock.00055",
"rock.00056",
"rock.00071",
"rock.00072",
"rock.00073",
"rock.00074",
"rock.00075",
"rock.00076",
"rock.00077",
"rock.00078",
"rock.00079",
"rock.00080",
"rock.00081",
"rock.00082",
"rock.00083",
"rock.00084",
"rock.00085",
]
URL = "http://opihi.cs.uvic.ca/sound/genres.tar.gz"
FOLDER_IN_ARCHIVE = "genres"
_CHECKSUMS = {
"http://opihi.cs.uvic.ca/sound/genres.tar.gz": "5b3d6dddb579ab49814ab86dba69e7c7"
}
def load_gtzan_item(fileid: str, path: str, ext_audio: str) -> Tuple[Tensor, str]:
"""
Loads a file from the dataset and returns the raw waveform
as a Torch Tensor, its sample rate as an integer, and its
genre as a string.
"""
# Filenames are of the form label.id, e.g. blues.00078
label, _ = fileid.split(".")
# Read wav
file_audio = os.path.join(path, label, fileid + ext_audio)
waveform, sample_rate = torchaudio.load(file_audio)
return waveform, sample_rate, label
class GTZAN(Dataset):
"""Create a Dataset for GTZAN.
Note:
Please see http://marsyas.info/downloads/datasets.html if you are planning to use
this dataset to publish results.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional): The URL to download the dataset from.
(default: ``"http://opihi.cs.uvic.ca/sound/genres.tar.gz"``)
folder_in_archive (str, optional): The top-level directory of the dataset.
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
subset (str or None, optional): Which subset of the dataset to use.
One of ``"training"``, ``"validation"``, ``"testing"`` or ``None``.
If ``None``, the entire dataset is used. (default: ``None``).
"""
_ext_audio = ".wav"
def __init__(
self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False,
subset: Optional[str] = None,
) -> None:
# super(GTZAN, self).__init__()
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
self.root = root
self.url = url
self.folder_in_archive = folder_in_archive
self.download = download
self.subset = subset
assert subset is None or subset in ["training", "validation", "testing"], (
"When `subset` not None, it must take a value from "
+ "{'training', 'validation', 'testing'}."
)
archive = os.path.basename(url)
archive = os.path.join(root, archive)
self._path = os.path.join(root, folder_in_archive)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive)
if not os.path.isdir(self._path):
raise RuntimeError(
"Dataset not found. Please use `download=True` to download it."
)
if self.subset is None:
# Check every subdirectory under dataset root
# which has the same name as the genres in
# GTZAN (e.g. `root_dir'/blues/, `root_dir'/rock, etc.)
# This lets users remove or move around song files,
# useful when e.g. they want to use only some of the files
# in a genre or want to label other files with a different
# genre.
self._walker = []
root = os.path.expanduser(self._path)
for directory in gtzan_genres:
fulldir = os.path.join(root, directory)
if not os.path.exists(fulldir):
continue
songs_in_genre = os.listdir(fulldir)
songs_in_genre.sort()
for fname in songs_in_genre:
name, ext = os.path.splitext(fname)
if ext.lower() == ".wav" and "." in name:
# Check whether the file is of the form
# `gtzan_genre`.`5 digit number`.wav
genre, num = name.split(".")
if genre in gtzan_genres and len(num) == 5 and num.isdigit():
self._walker.append(name)
else:
if self.subset == "training":
self._walker = filtered_train
elif self.subset == "validation":
self._walker = filtered_valid
elif self.subset == "testing":
self._walker = filtered_test
def __getitem__(self, n: int) -> Tuple[Tensor, int, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str): ``(waveform, sample_rate, label)``
"""
fileid = self._walker[n]
item = load_gtzan_item(fileid, self._path, self._ext_audio)
waveform, sample_rate, label = item
return waveform, sample_rate, label
def __len__(self) -> int:
return len(self._walker)
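# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source). "./data" is an
# assumed root directory; ``subset="training"`` selects the filtered split
# defined by the lists above.
if __name__ == "__main__":
    dataset = GTZAN("./data", download=True, subset="training")
    waveform, sample_rate, label = dataset[0]
    print(label, sample_rate, waveform.shape)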
|
import os
import csv
from pathlib import Path
from typing import Tuple, Union
import torchaudio
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import (
download_url,
extract_archive,
)
URL = "aew"
FOLDER_IN_ARCHIVE = "ARCTIC"
_CHECKSUMS = {
"http://festvox.org/cmu_arctic/packed/cmu_us_aew_arctic.tar.bz2":
"4382b116efcc8339c37e01253cb56295",
"http://festvox.org/cmu_arctic/packed/cmu_us_ahw_arctic.tar.bz2":
"b072d6e961e3f36a2473042d097d6da9",
"http://festvox.org/cmu_arctic/packed/cmu_us_aup_arctic.tar.bz2":
"5301c7aee8919d2abd632e2667adfa7f",
"http://festvox.org/cmu_arctic/packed/cmu_us_awb_arctic.tar.bz2":
"280fdff1e9857119d9a2c57b50e12db7",
"http://festvox.org/cmu_arctic/packed/cmu_us_axb_arctic.tar.bz2":
"5e21cb26c6529c533df1d02ccde5a186",
"http://festvox.org/cmu_arctic/packed/cmu_us_bdl_arctic.tar.bz2":
"b2c3e558f656af2e0a65da0ac0c3377a",
"http://festvox.org/cmu_arctic/packed/cmu_us_clb_arctic.tar.bz2":
"3957c503748e3ce17a3b73c1b9861fb0",
"http://festvox.org/cmu_arctic/packed/cmu_us_eey_arctic.tar.bz2":
"59708e932d27664f9eda3e8e6859969b",
"http://festvox.org/cmu_arctic/packed/cmu_us_fem_arctic.tar.bz2":
"dba4f992ff023347c07c304bf72f4c73",
"http://festvox.org/cmu_arctic/packed/cmu_us_gka_arctic.tar.bz2":
"24a876ea7335c1b0ff21460e1241340f",
"http://festvox.org/cmu_arctic/packed/cmu_us_jmk_arctic.tar.bz2":
"afb69d95f02350537e8a28df5ab6004b",
"http://festvox.org/cmu_arctic/packed/cmu_us_ksp_arctic.tar.bz2":
"4ce5b3b91a0a54b6b685b1b05aa0b3be",
"http://festvox.org/cmu_arctic/packed/cmu_us_ljm_arctic.tar.bz2":
"6f45a3b2c86a4ed0465b353be291f77d",
"http://festvox.org/cmu_arctic/packed/cmu_us_lnh_arctic.tar.bz2":
"c6a15abad5c14d27f4ee856502f0232f",
"http://festvox.org/cmu_arctic/packed/cmu_us_rms_arctic.tar.bz2":
"71072c983df1e590d9e9519e2a621f6e",
"http://festvox.org/cmu_arctic/packed/cmu_us_rxr_arctic.tar.bz2":
"3771ff03a2f5b5c3b53aa0a68b9ad0d5",
"http://festvox.org/cmu_arctic/packed/cmu_us_slp_arctic.tar.bz2":
"9cbf984a832ea01b5058ba9a96862850",
"http://festvox.org/cmu_arctic/packed/cmu_us_slt_arctic.tar.bz2":
"959eecb2cbbc4ac304c6b92269380c81",
}
def load_cmuarctic_item(line: str,
path: str,
folder_audio: str,
ext_audio: str) -> Tuple[Tensor, int, str, str]:
utterance_id, transcript = line[0].strip().split(" ", 2)[1:]
# Strip the leading double quote and the trailing double quote, space, and closing parenthesis from the transcript
transcript = transcript[1:-3]
file_audio = os.path.join(path, folder_audio, utterance_id + ext_audio)
# Load audio
waveform, sample_rate = torchaudio.load(file_audio)
return (
waveform,
sample_rate,
transcript,
utterance_id.split("_")[1]
)
class CMUARCTIC(Dataset):
"""Create a Dataset for CMU_ARCTIC.
Args:
root (str or Path): Path to the directory where the dataset is found or downloaded.
url (str, optional):
The URL to download the dataset from or the type of the dataset to download.
(default: ``"aew"``)
Allowed type values are ``"aew"``, ``"ahw"``, ``"aup"``, ``"awb"``, ``"axb"``, ``"bdl"``,
``"clb"``, ``"eey"``, ``"fem"``, ``"gka"``, ``"jmk"``, ``"ksp"``, ``"ljm"``, ``"lnh"``,
``"rms"``, ``"rxr"``, ``"slp"`` or ``"slt"``.
folder_in_archive (str, optional):
The top-level directory of the dataset. (default: ``"ARCTIC"``)
download (bool, optional):
Whether to download the dataset if it is not found at root path. (default: ``False``).
"""
_file_text = "txt.done.data"
_folder_text = "etc"
_ext_audio = ".wav"
_folder_audio = "wav"
def __init__(self,
root: Union[str, Path],
url: str = URL,
folder_in_archive: str = FOLDER_IN_ARCHIVE,
download: bool = False) -> None:
if url in [
"aew",
"ahw",
"aup",
"awb",
"axb",
"bdl",
"clb",
"eey",
"fem",
"gka",
"jmk",
"ksp",
"ljm",
"lnh",
"rms",
"rxr",
"slp",
"slt"
]:
url = "cmu_us_" + url + "_arctic"
ext_archive = ".tar.bz2"
base_url = "http://www.festvox.org/cmu_arctic/packed/"
url = os.path.join(base_url, url + ext_archive)
# Get string representation of 'root' in case Path object is passed
root = os.fspath(root)
basename = os.path.basename(url)
root = os.path.join(root, folder_in_archive)
if not os.path.isdir(root):
os.mkdir(root)
archive = os.path.join(root, basename)
basename = basename.split(".")[0]
self._path = os.path.join(root, basename)
if download:
if not os.path.isdir(self._path):
if not os.path.isfile(archive):
checksum = _CHECKSUMS.get(url, None)
download_url(url, root, hash_value=checksum, hash_type="md5")
extract_archive(archive)
self._text = os.path.join(self._path, self._folder_text, self._file_text)
with open(self._text, "r") as text:
walker = csv.reader(text, delimiter="\n")
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, str, str): ``(waveform, sample_rate, transcript, utterance_id)``
"""
line = self._walker[n]
return load_cmuarctic_item(line, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
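# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source). "./data" is an
# assumed root directory; ``url="slt"`` selects one of the speaker packages
# listed in the class docstring.
if __name__ == "__main__":
    dataset = CMUARCTIC("./data", url="slt", download=True)
    waveform, sample_rate, transcript, utterance_id = dataset[0]
    print(utterance_id, transcript, sample_rate, waveform.shape)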
|
import csv
import os
from pathlib import Path
from typing import List, Dict, Tuple, Union
from torch import Tensor
from torch.utils.data import Dataset
import torchaudio
def load_commonvoice_item(line: List[str],
header: List[str],
path: str,
folder_audio: str,
ext_audio: str) -> Tuple[Tensor, int, Dict[str, str]]:
# Each line has the following data:
# client_id, path, sentence, up_votes, down_votes, age, gender, accent
assert header[1] == "path"
fileid = line[1]
filename = os.path.join(path, folder_audio, fileid)
if not filename.endswith(ext_audio):
filename += ext_audio
waveform, sample_rate = torchaudio.load(filename)
dic = dict(zip(header, line))
return waveform, sample_rate, dic
class COMMONVOICE(Dataset):
"""Create a Dataset for CommonVoice.
Args:
root (str or Path): Path to the directory where the dataset is located.
(Where the ``tsv`` file is present.)
tsv (str, optional):
The name of the tsv file used to construct the metadata, such as
``"train.tsv"``, ``"test.tsv"``, ``"dev.tsv"``, ``"invalidated.tsv"``,
``"validated.tsv"`` and ``"other.tsv"``. (default: ``"train.tsv"``)
"""
_ext_txt = ".txt"
_ext_audio = ".mp3"
_folder_audio = "clips"
def __init__(self,
root: Union[str, Path],
tsv: str = "train.tsv") -> None:
# Get string representation of 'root' in case Path object is passed
self._path = os.fspath(root)
self._tsv = os.path.join(self._path, tsv)
with open(self._tsv, "r") as tsv_:
walker = csv.reader(tsv_, delimiter="\t")
self._header = next(walker)
self._walker = list(walker)
def __getitem__(self, n: int) -> Tuple[Tensor, int, Dict[str, str]]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
(Tensor, int, Dict[str, str]): ``(waveform, sample_rate, dictionary)``, where dictionary
is built from the TSV file with the following keys: ``client_id``, ``path``, ``sentence``,
``up_votes``, ``down_votes``, ``age``, ``gender`` and ``accent``.
"""
line = self._walker[n]
return load_commonvoice_item(line, self._header, self._path, self._folder_audio, self._ext_audio)
def __len__(self) -> int:
return len(self._walker)
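# ---------------------------------------------------------------------------
# Hypothetical usage sketch (not part of the library source). The path
# "./cv-corpus/en" is an assumption: it should point to a manually downloaded
# CommonVoice release that contains ``train.tsv`` and the ``clips`` folder.
if __name__ == "__main__":
    dataset = COMMONVOICE("./cv-corpus/en", tsv="train.tsv")
    waveform, sample_rate, metadata = dataset[0]
    print(metadata["sentence"], sample_rate, waveform.shape)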
|
from .commonvoice import COMMONVOICE
from .librispeech import LIBRISPEECH
from .speechcommands import SPEECHCOMMANDS
from .vctk import VCTK_092
from .dr_vctk import DR_VCTK
from .gtzan import GTZAN
from .yesno import YESNO
from .ljspeech import LJSPEECH
from .cmuarctic import CMUARCTIC
from .cmudict import CMUDict
from .librimix import LibriMix
from .libritts import LIBRITTS
from .tedlium import TEDLIUM
__all__ = [
"COMMONVOICE",
"LIBRISPEECH",
"SPEECHCOMMANDS",
"VCTK_092",
"DR_VCTK",
"YESNO",
"LJSPEECH",
"GTZAN",
"CMUARCTIC",
"CMUDict",
"LibriMix",
"LIBRITTS",
"TEDLIUM",
]
|