from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
from torchaudio_unittest.common_utils import TorchaudioTestCase, skipIfNoModule
if is_module_available("unidecode") and is_module_available("inflect"):
from pipeline_tacotron2.text.text_preprocessing import text_to_sequence
from pipeline_tacotron2.text.numbers import (
_remove_commas,
_expand_pounds,
_expand_dollars,
_expand_decimal_point,
_expand_ordinal,
_expand_number,
)
@skipIfNoModule("unidecode")
@skipIfNoModule("inflect")
class TestTextPreprocessor(TorchaudioTestCase):
@parameterized.expand(
[
["dr. Strange?", [15, 26, 14, 31, 26, 29, 11, 30, 31, 29, 12, 25, 18, 16, 10]],
["ML, is fun.", [24, 23, 6, 11, 20, 30, 11, 17, 32, 25, 7]],
["I love torchaudio!", [20, 11, 23, 26, 33, 16, 11, 31, 26, 29, 14, 19, 12, 32, 15, 20, 26, 2]],
# 'one thousand dollars, twenty cents'
["$1,000.20", [26, 25, 16, 11, 31, 19, 26, 32, 30, 12, 25, 15, 11, 15, 26, 23, 23,
12, 29, 30, 6, 11, 31, 34, 16, 25, 31, 36, 11, 14, 16, 25, 31, 30]],
]
)
def test_text_to_sequence(self, sent, seq):
assert text_to_sequence(sent) == seq
@parameterized.expand(
[
["He, she, and I have $1,000", "He, she, and I have $1000"],
]
)
def test_remove_commas(self, sent, truth):
assert _remove_commas(sent) == truth
@parameterized.expand(
[
["He, she, and I have £1000", "He, she, and I have 1000 pounds"],
]
)
def test_expand_pounds(self, sent, truth):
assert _expand_pounds(sent) == truth
@parameterized.expand(
[
["He, she, and I have $1000", "He, she, and I have 1000 dollars"],
["He, she, and I have $3000.01", "He, she, and I have 3000 dollars, 1 cent"],
["He has $500.20 and she has $1000.50.",
"He has 500 dollars, 20 cents and she has 1000 dollars, 50 cents."],
]
)
def test_expand_dollars(self, sent, truth):
assert _expand_dollars(sent) == truth
@parameterized.expand(
[
["1000.20", "1000 point 20"],
["1000.1", "1000 point 1"],
]
)
def test_expand_decimal_point(self, sent, truth):
assert _expand_decimal_point(sent) == truth
@parameterized.expand(
[
["21st centry", "twenty-first centry"],
["20th centry", "twentieth centry"],
["2nd place.", "second place."],
]
)
def test_expand_ordinal(self, sent, truth):
assert _expand_ordinal(sent) == truth
@parameterized.expand(
[
["100020 dollars.", "one hundred thousand twenty dollars."],
["1234567890!", "one billion, two hundred thirty-four million, "
"five hundred sixty-seven thousand, eight hundred ninety!"],
]
)
def test_expand_number(self, sent, truth):
assert _expand_number(sent) == truth
# ---------------------------------------------------------------------------
from torchaudio import sox_effects
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoSox,
get_wav_data,
get_sinusoid,
save_wav,
)
from .common import (
load_params,
)
@skipIfNoSox
class SmokeTest(TempDirMixin, TorchaudioTestCase):
"""Run smoke test on various effects
The purpose of this test suite is to verify that sox_effect functionalities do not exhibit
abnormal behaviors.
This test suite should be able to run without any additional tools (such as sox command),
however without such tools, the correctness of each function cannot be verified.
"""
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
"""`apply_effects_tensor` should not crash"""
effects = args['effects']
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
original = get_sinusoid(
frequency=800, sample_rate=input_sr,
n_channels=num_channels, dtype='float32')
_found, _sr = sox_effects.apply_effects_tensor(original, input_sr, effects)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = 'int32'
channels_first = True
effects = args['effects']
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path('input.wav')
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
_found, _sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_fileobj(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = 'int32'
channels_first = True
effects = args['effects']
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
input_path = self.get_temp_path('input.wav')
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
with open(input_path, 'rb') as fileobj:
_found, _sr = sox_effects.apply_effects_file(
fileobj, effects, normalize=False, channels_first=channels_first)
# ---------------------------------------------------------------------------
import io
import itertools
from pathlib import Path
import tarfile
from parameterized import parameterized
from torchaudio import sox_effects
from torchaudio._internal import module_utils as _mod_utils
from torchaudio_unittest.common_utils import (
TempDirMixin,
HttpServerMixin,
PytorchTestCase,
skipIfNoSox,
skipIfNoModule,
skipIfNoExec,
get_asset_path,
get_sinusoid,
get_wav_data,
save_wav,
load_wav,
sox_utils,
)
from .common import (
load_params,
name_func,
)
if _mod_utils.is_module_available("requests"):
import requests
@skipIfNoSox
class TestSoxEffects(PytorchTestCase):
def test_init(self):
"""Calling init_sox_effects multiple times does not crush"""
for _ in range(3):
sox_effects.init_sox_effects()
@skipIfNoSox
class TestSoxEffectsTensor(TempDirMixin, PytorchTestCase):
"""Test suite for `apply_effects_tensor` function"""
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2, 4, 8],
[True, False]
)), name_func=name_func)
def test_apply_no_effect(self, dtype, sample_rate, num_channels, channels_first):
"""`apply_effects_tensor` without effects should return identical data as input"""
original = get_wav_data(dtype, num_channels, channels_first=channels_first)
expected = original.clone()
found, output_sample_rate = sox_effects.apply_effects_tensor(
expected, sample_rate, [], channels_first)
assert output_sample_rate == sample_rate
# SoxEffect should not alter the input Tensor object
self.assertEqual(original, expected)
# SoxEffect should not return the same Tensor object
assert expected is not found
# Returned Tensor should equal the input Tensor
self.assertEqual(expected, found)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects(self, args):
"""`apply_effects_tensor` should return identical data as sox command"""
effects = args['effects']
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
output_sr = args.get("output_sample_rate")
input_path = self.get_temp_path('input.wav')
reference_path = self.get_temp_path('reference.wav')
original = get_sinusoid(
frequency=800, sample_rate=input_sr,
n_channels=num_channels, dtype='float32')
save_wav(input_path, original, input_sr)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_sample_rate=output_sr)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_tensor(original, input_sr, effects)
assert sr == expected_sr
self.assertEqual(expected, found)
@skipIfNoSox
class TestSoxEffectsFile(TempDirMixin, PytorchTestCase):
"""Test suite for `apply_effects_file` function"""
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2, 4, 8],
[False, True],
)), name_func=name_func)
def test_apply_no_effect(self, dtype, sample_rate, num_channels, channels_first):
"""`apply_effects_file` without effects should return identical data as input"""
path = self.get_temp_path('input.wav')
expected = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(path, expected, sample_rate, channels_first=channels_first)
found, output_sample_rate = sox_effects.apply_effects_file(
path, [], normalize=False, channels_first=channels_first)
assert output_sample_rate == sample_rate
self.assertEqual(expected, found)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_str(self, args):
"""`apply_effects_file` should return identical data as sox command"""
dtype = 'int32'
channels_first = True
effects = args['effects']
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
output_sr = args.get("output_sample_rate")
input_path = self.get_temp_path('input.wav')
reference_path = self.get_temp_path('reference.wav')
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_sample_rate=output_sr)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
def test_apply_effects_path(self):
"""`apply_effects_file` should return identical data as sox command when file path is given as a Path Object"""
dtype = 'int32'
channels_first = True
effects = [["hilbert"]]
num_channels = 2
input_sr = 8000
output_sr = 8000
input_path = self.get_temp_path('input.wav')
reference_path = self.get_temp_path('reference.wav')
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, input_sr, channels_first=channels_first)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_sample_rate=output_sr)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_file(
Path(input_path), effects, normalize=False, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@skipIfNoSox
class TestFileFormats(TempDirMixin, PytorchTestCase):
"""`apply_effects_file` gives the same result as sox on various file formats"""
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=lambda f, _, p: f'{f.__name__}_{"_".join(str(arg) for arg in p.args)}')
def test_wav(self, dtype, sample_rate, num_channels):
"""`apply_effects_file` works on various wav format"""
channels_first = True
effects = [['band', '300', '10']]
input_path = self.get_temp_path('input.wav')
reference_path = self.get_temp_path('reference.wav')
data = get_wav_data(dtype, num_channels, channels_first=channels_first)
save_wav(input_path, data, sample_rate, channels_first=channels_first)
sox_utils.run_sox_effect(input_path, reference_path, effects)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_file(
input_path, effects, normalize=False, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
)), name_func=lambda f, _, p: f'{f.__name__}_{"_".join(str(arg) for arg in p.args)}')
def test_mp3(self, sample_rate, num_channels):
"""`apply_effects_file` works on various mp3 format"""
channels_first = True
effects = [['band', '300', '10']]
input_path = self.get_temp_path('input.mp3')
reference_path = self.get_temp_path('reference.wav')
sox_utils.gen_audio_file(input_path, sample_rate, num_channels)
sox_utils.run_sox_effect(input_path, reference_path, effects)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_file(
input_path, effects, channels_first=channels_first)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected, atol=1e-4, rtol=1e-8)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
)), name_func=lambda f, _, p: f'{f.__name__}_{"_".join(str(arg) for arg in p.args)}')
def test_flac(self, sample_rate, num_channels):
"""`apply_effects_file` works on various flac format"""
channels_first = True
effects = [['band', '300', '10']]
input_path = self.get_temp_path('input.flac')
reference_path = self.get_temp_path('reference.wav')
sox_utils.gen_audio_file(input_path, sample_rate, num_channels)
sox_utils.run_sox_effect(input_path, reference_path, effects, output_bitdepth=32)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_file(
input_path, effects, channels_first=channels_first)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
)), name_func=lambda f, _, p: f'{f.__name__}_{"_".join(str(arg) for arg in p.args)}')
def test_vorbis(self, sample_rate, num_channels):
"""`apply_effects_file` works on various vorbis format"""
channels_first = True
effects = [['band', '300', '10']]
input_path = self.get_temp_path('input.vorbis')
reference_path = self.get_temp_path('reference.wav')
sox_utils.gen_audio_file(input_path, sample_rate, num_channels)
sox_utils.run_sox_effect(input_path, reference_path, effects, output_bitdepth=32)
expected, expected_sr = load_wav(reference_path)
found, sr = sox_effects.apply_effects_file(
input_path, effects, channels_first=channels_first)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@skipIfNoSox
class TestApplyEffectFileWithoutExtension(PytorchTestCase):
def test_mp3(self):
"""Providing format allows to read mp3 without extension
libsox does not check header for mp3
https://github.com/pytorch/audio/issues/1040
The file was generated with the following command
ffmpeg -f lavfi -i "sine=frequency=1000:duration=5" -ar 16000 -f mp3 test_noext
"""
effects = [['band', '300', '10']]
path = get_asset_path("mp3_without_ext")
_, sr = sox_effects.apply_effects_file(path, effects, format="mp3")
assert sr == 16000
@skipIfNoExec('sox')
@skipIfNoSox
class TestFileObject(TempDirMixin, PytorchTestCase):
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_fileobj(self, ext, compression):
"""Applying effects via file object works"""
sample_rate = 16000
channels_first = True
effects = [['band', '300', '10']]
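# A file-like object carries no file name to infer the format from, and mp3
# has no header that libsox can sniff, so the format must be passed explicitly
# for mp3 while the other formats are auto-detected.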
format_ = ext if ext in ['mp3'] else None
input_path = self.get_temp_path(f'input.{ext}')
reference_path = self.get_temp_path('reference.wav')
sox_utils.gen_audio_file(
input_path, sample_rate, num_channels=2, compression=compression)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_bitdepth=32)
expected, expected_sr = load_wav(reference_path)
with open(input_path, 'rb') as fileobj:
found, sr = sox_effects.apply_effects_file(
fileobj, effects, channels_first=channels_first, format=format_)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_bytesio(self, ext, compression):
"""Applying effects via BytesIO object works"""
sample_rate = 16000
channels_first = True
effects = [['band', '300', '10']]
format_ = ext if ext in ['mp3'] else None
input_path = self.get_temp_path(f'input.{ext}')
reference_path = self.get_temp_path('reference.wav')
sox_utils.gen_audio_file(
input_path, sample_rate, num_channels=2, compression=compression)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_bitdepth=32)
expected, expected_sr = load_wav(reference_path)
with open(input_path, 'rb') as file_:
fileobj = io.BytesIO(file_.read())
found, sr = sox_effects.apply_effects_file(
fileobj, effects, channels_first=channels_first, format=format_)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_tarfile(self, ext, compression):
"""Applying effects to compressed audio via file-like file works"""
sample_rate = 16000
channels_first = True
effects = [['band', '300', '10']]
format_ = ext if ext in ['mp3'] else None
audio_file = f'input.{ext}'
input_path = self.get_temp_path(audio_file)
reference_path = self.get_temp_path('reference.wav')
archive_path = self.get_temp_path('archive.tar.gz')
sox_utils.gen_audio_file(
input_path, sample_rate, num_channels=2, compression=compression)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_bitdepth=32)
expected, expected_sr = load_wav(reference_path)
with tarfile.TarFile(archive_path, 'w') as tarobj:
tarobj.add(input_path, arcname=audio_file)
with tarfile.TarFile(archive_path, 'r') as tarobj:
fileobj = tarobj.extractfile(audio_file)
found, sr = sox_effects.apply_effects_file(
fileobj, effects, channels_first=channels_first, format=format_)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
@skipIfNoSox
@skipIfNoExec('sox')
@skipIfNoModule("requests")
class TestFileObjectHttp(HttpServerMixin, PytorchTestCase):
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_requests(self, ext, compression):
sample_rate = 16000
channels_first = True
effects = [['band', '300', '10']]
format_ = ext if ext in ['mp3'] else None
audio_file = f'input.{ext}'
input_path = self.get_temp_path(audio_file)
reference_path = self.get_temp_path('reference.wav')
sox_utils.gen_audio_file(
input_path, sample_rate, num_channels=2, compression=compression)
sox_utils.run_sox_effect(
input_path, reference_path, effects, output_bitdepth=32)
expected, expected_sr = load_wav(reference_path)
url = self.get_url(audio_file)
with requests.get(url, stream=True) as resp:
found, sr = sox_effects.apply_effects_file(
resp.raw, effects, channels_first=channels_first, format=format_)
save_wav(self.get_temp_path('result.wav'), found, sr, channels_first=channels_first)
assert sr == expected_sr
self.assertEqual(found, expected)
# ---------------------------------------------------------------------------
import sys
import platform
from unittest import skipIf
from typing import List, Tuple
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import torch
import torchaudio
from torchaudio_unittest.common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoSox,
get_whitenoise,
save_wav,
)
class RandomPerturbationFile(torch.utils.data.Dataset):
"""Given flist, apply random speed perturbation"""
def __init__(self, flist: List[str], sample_rate: int):
super().__init__()
self.flist = flist
self.sample_rate = sample_rate
self.rng = None
def __getitem__(self, index):
speed = self.rng.uniform(0.5, 2.0)
effects = [
['gain', '-n', '-10'],
['speed', f'{speed:.5f}'], # duration of data is 0.5 ~ 2.0 seconds.
['rate', f'{self.sample_rate}'],
['pad', '0', '1.5'], # add 1.5 seconds silence at the end
['trim', '0', '2'], # get the first 2 seconds
]
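# `pad` followed by `trim` forces every item to exactly 2 seconds, so the
# default collate function can stack the batch into a fixed-shape tensor.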
data, _ = torchaudio.sox_effects.apply_effects_file(self.flist[index], effects)
return data
def __len__(self):
return len(self.flist)
class RandomPerturbationTensor(torch.utils.data.Dataset):
"""Apply speed purturbation to (synthetic) Tensor data"""
def __init__(self, signals: List[Tuple[torch.Tensor, int]], sample_rate: int):
super().__init__()
self.signals = signals
self.sample_rate = sample_rate
self.rng = None
def __getitem__(self, index):
speed = self.rng.uniform(0.5, 2.0)
effects = [
['gain', '-n', '-10'],
['speed', f'{speed:.5f}'], # duration of data is 0.5 ~ 2.0 seconds.
['rate', f'{self.sample_rate}'],
['pad', '0', '1.5'], # add 1.5 seconds silence at the end
['trim', '0', '2'], # get the first 2 seconds
]
tensor, sample_rate = self.signals[index]
data, _ = torchaudio.sox_effects.apply_effects_tensor(tensor, sample_rate, effects)
return data
def __len__(self):
return len(self.signals)
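# Seed each DataLoader worker's RNG with its worker id so that the random
# speed factors are reproducible yet differ across workers.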
def init_random_seed(worker_id):
dataset = torch.utils.data.get_worker_info().dataset
dataset.rng = np.random.RandomState(worker_id)
@skipIfNoSox
@skipIf(
platform.system() == 'Darwin' and
sys.version_info.major == 3 and
sys.version_info.minor in [6, 7],
'This test is known to get stuck for macOS with Python < 3.8. '
'See https://github.com/pytorch/pytorch/issues/46409'
)
class TestSoxEffectsDataset(TempDirMixin, PytorchTestCase):
"""Test `apply_effects_file` in multi-process dataloader setting"""
def _generate_dataset(self, num_samples=128):
flist = []
for i in range(num_samples):
sample_rate = np.random.choice([8000, 16000, 44100])
dtype = np.random.choice(['float32', 'int32', 'int16', 'uint8'])
data = get_whitenoise(n_channels=2, sample_rate=sample_rate, duration=1, dtype=dtype)
path = self.get_temp_path(f'{i:03d}_{dtype}_{sample_rate}.wav')
save_wav(path, data, sample_rate)
flist.append(path)
return flist
def test_apply_effects_file(self):
sample_rate = 12000
flist = self._generate_dataset()
dataset = RandomPerturbationFile(flist, sample_rate)
loader = torch.utils.data.DataLoader(
dataset, batch_size=32, num_workers=16,
worker_init_fn=init_random_seed,
)
for batch in loader:
assert batch.shape == (32, 2, 2 * sample_rate)
def _generate_signals(self, num_samples=128):
signals = []
for _ in range(num_samples):
sample_rate = np.random.choice([8000, 16000, 44100])
data = get_whitenoise(
n_channels=2, sample_rate=sample_rate, duration=1, dtype='float32')
signals.append((data, sample_rate))
return signals
def test_apply_effects_tensor(self):
sample_rate = 12000
signals = self._generate_signals()
dataset = RandomPerturbationTensor(signals, sample_rate)
loader = torch.utils.data.DataLoader(
dataset, batch_size=32, num_workers=16,
worker_init_fn=init_random_seed,
)
for batch in loader:
assert batch.shape == (32, 2, 2 * sample_rate)
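# `speed` is defined at module level (not inside the test class) so that
# ProcessPoolExecutor can pickle it when dispatching work to subprocesses.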
def speed(path):
wav, sample_rate = torchaudio.backend.sox_io_backend.load(path)
effects = [
['speed', '1.03756523535464655'],
['rate', f'{sample_rate}'],
]
return torchaudio.sox_effects.apply_effects_tensor(wav, sample_rate, effects)[0]
@skipIfNoSox
class TestProcessPoolExecutor(TempDirMixin, PytorchTestCase):
backend = "sox_io"
def setUp(self):
sample_rate = 16000
self.flist = []
for i in range(10):
path = self.get_temp_path(f'{i}.wav')
data = get_whitenoise(n_channels=1, sample_rate=sample_rate, duration=1, dtype='float')
save_wav(path, data, sample_rate)
self.flist.append(path)
def test_executor(self):
"""Test that apply_effects_tensor with speed + rate does not crush
https://github.com/pytorch/audio/issues/1021
"""
executor = ProcessPoolExecutor(1)
futures = [executor.submit(speed, path) for path in self.flist]
for future in futures:
future.result()
# ---------------------------------------------------------------------------
from typing import List
import torch
from torchaudio import sox_effects
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoSox,
get_sinusoid,
save_wav,
torch_script,
)
from .common import (
load_params,
)
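# The explicit `effects: List[List[str]]` class annotations below are required
# for TorchScript to infer the attribute type when these modules are scripted.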
class SoxEffectTensorTransform(torch.nn.Module):
effects: List[List[str]]
def __init__(self, effects: List[List[str]], sample_rate: int, channels_first: bool):
super().__init__()
self.effects = effects
self.sample_rate = sample_rate
self.channels_first = channels_first
def forward(self, tensor: torch.Tensor):
return sox_effects.apply_effects_tensor(
tensor, self.sample_rate, self.effects, self.channels_first)
class SoxEffectFileTransform(torch.nn.Module):
effects: List[List[str]]
channels_first: bool
def __init__(self, effects: List[List[str]], channels_first: bool):
super().__init__()
self.effects = effects
self.channels_first = channels_first
def forward(self, path: str):
return sox_effects.apply_effects_file(path, self.effects, self.channels_first)
@skipIfNoSox
class TestTorchScript(TempDirMixin, TorchaudioTestCase):
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_tensor(self, args):
effects = args['effects']
channels_first = True
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
trans = SoxEffectTensorTransform(effects, input_sr, channels_first)
trans = torch_script(trans)
wav = get_sinusoid(
frequency=800, sample_rate=input_sr,
n_channels=num_channels, dtype='float32', channels_first=channels_first)
found, sr_found = trans(wav)
expected, sr_expected = sox_effects.apply_effects_tensor(
wav, input_sr, effects, channels_first)
assert sr_found == sr_expected
self.assertEqual(expected, found)
@parameterized.expand(
load_params("sox_effect_test_args.jsonl"),
name_func=lambda f, i, p: f'{f.__name__}_{i}_{p.args[0]["effects"][0][0]}',
)
def test_apply_effects_file(self, args):
effects = args['effects']
channels_first = True
num_channels = args.get("num_channels", 2)
input_sr = args.get("input_sample_rate", 8000)
trans = SoxEffectFileTransform(effects, channels_first)
trans = torch_script(trans)
path = self.get_temp_path('input.wav')
wav = get_sinusoid(
frequency=800, sample_rate=input_sr,
n_channels=num_channels, dtype='float32', channels_first=channels_first)
save_wav(path, wav, sample_rate=input_sr, channels_first=channels_first)
found, sr_found = trans(path)
expected, sr_expected = sox_effects.apply_effects_file(path, effects, channels_first)
assert sr_found == sr_expected
self.assertEqual(expected, found)
# ---------------------------------------------------------------------------
import json
from parameterized import param
from torchaudio_unittest.common_utils import get_asset_path
def name_func(func, _, params):
if isinstance(params.args[0], str):
args = "_".join([str(arg) for arg in params.args])
else:
args = "_".join([str(arg) for arg in params.args[0]])
return f'{func.__name__}_{args}'
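# load_params reads one JSON object per line from the given asset file,
# expands any "<ASSET_DIR>" placeholder in effect arguments to the local asset
# directory, and wraps each entry for parameterized.expand. A line looks
# roughly like this (hypothetical example):
#   {"effects": [["band", "300", "10"]], "input_sample_rate": 8000}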
def load_params(*paths):
params = []
with open(get_asset_path(*paths), 'r') as file:
for line in file:
data = json.loads(line)
for effect in data['effects']:
for i, arg in enumerate(effect):
if arg.startswith("<ASSET_DIR>"):
effect[i] = arg.replace("<ASSET_DIR>", get_asset_path())
params.append(param(data))
return params
# ---------------------------------------------------------------------------
import os
from pathlib import Path
from torchaudio.datasets import vctk
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
# Used to generate a unique transcript for each dummy audio file
_TRANSCRIPT = [
'Please call Stella',
'Ask her to bring these things',
'with her from the store',
'Six spoons of fresh snow peas, five thick slabs of blue cheese, and maybe a snack for her brother Bob',
'We also need a small plastic snake and a big toy frog for the kids',
'She can scoop these things into three red bags, and we will go meet her Wednesday at the train station',
'When the sunlight strikes raindrops in the air, they act as a prism and form a rainbow',
'The rainbow is a division of white light into many beautiful colors',
'These take the shape of a long round arch, with its path high above, and its two ends '
'apparently beyond the horizon',
'There is, according to legend, a boiling pot of gold at one end'
]
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = []
dataset_dir = os.path.join(root_dir, 'VCTK-Corpus-0.92')
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 48000
seed = 0
for speaker in range(225, 230):
speaker_id = 'p' + str(speaker)
audio_dir = os.path.join(dataset_dir, 'wav48_silence_trimmed', speaker_id)
os.makedirs(audio_dir, exist_ok=True)
file_dir = os.path.join(dataset_dir, 'txt', speaker_id)
os.makedirs(file_dir, exist_ok=True)
for utterance_id in range(1, 11):
filename = f'{speaker_id}_{utterance_id:03d}_mic2'
audio_file_path = os.path.join(audio_dir, filename + '.wav')
data = get_whitenoise(
sample_rate=sample_rate,
duration=0.01,
n_channels=1,
dtype='float32',
seed=seed
)
save_wav(audio_file_path, data, sample_rate)
txt_file_path = os.path.join(file_dir, filename[:-5] + '.txt')
transcript = _TRANSCRIPT[utterance_id - 1]
with open(txt_file_path, 'w') as f:
f.write(transcript)
sample = (
normalize_wav(data),
sample_rate,
transcript,
speaker_id,
utterance_id
)
mocked_samples.append(sample)
seed += 1
return mocked_samples
class TestVCTK(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_vctk(self, dataset):
num_samples = 0
for i, (data, sample_rate, transcript, speaker_id, utterance_id) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert int(utterance_id) == self.samples[i][4]
num_samples += 1
assert num_samples == len(self.samples)
def test_vctk_str(self):
dataset = vctk.VCTK_092(self.root_dir, audio_ext=".wav")
self._test_vctk(dataset)
def test_vctk_path(self):
dataset = vctk.VCTK_092(Path(self.root_dir), audio_ext=".wav")
self._test_vctk(dataset)
# ---------------------------------------------------------------------------
import os
from pathlib import Path
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
normalize_wav,
save_wav,
)
from torchaudio.datasets import speechcommands
_LABELS = [
"bed",
"bird",
"cat",
"dog",
"down",
"eight",
"five",
"follow",
"forward",
"four",
"go",
"happy",
"house",
"learn",
"left",
"marvin",
"nine",
"no",
"off",
"on",
"one",
"right",
"seven",
"sheila",
"six",
"stop",
"three",
"tree",
"two",
"up",
"visual",
"wow",
"yes",
"zero",
]
def get_mock_dataset(dataset_dir):
"""
dataset_dir: directory to the mocked dataset
"""
mocked_samples = []
mocked_train_samples = []
mocked_valid_samples = []
mocked_test_samples = []
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz sample rate
seed = 0
valid_file = os.path.join(dataset_dir, "validation_list.txt")
test_file = os.path.join(dataset_dir, "testing_list.txt")
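# The real dataset ships validation_list.txt / testing_list.txt enumerating
# the files of each split. Recreate them while generating the mock audio:
# speakers 0-1 go to training, 2-3 to validation, and 4-5 to testing.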
with open(valid_file, "w") as valid, open(test_file, "w") as test:
for label in _LABELS:
path = os.path.join(dataset_dir, label)
os.makedirs(path, exist_ok=True)
for j in range(6):
# generate hash ID for speaker
speaker = "{:08x}".format(j)
for utterance in range(3):
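# speechcommands.HASH_DIVIDER separates the speaker hash from the utterance
# number in SpeechCommands file names (in the real dataset it is "_nohash_").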
filename = f"{speaker}{speechcommands.HASH_DIVIDER}{utterance}.wav"
file_path = os.path.join(path, filename)
seed += 1
data = get_whitenoise(
sample_rate=sample_rate,
duration=0.01,
n_channels=1,
dtype="int16",
seed=seed,
)
save_wav(file_path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
label,
speaker,
utterance,
)
mocked_samples.append(sample)
if j < 2:
mocked_train_samples.append(sample)
elif j < 4:
valid.write(f'{label}/{filename}\n')
mocked_valid_samples.append(sample)
elif j < 6:
test.write(f'{label}/{filename}\n')
mocked_test_samples.append(sample)
return mocked_samples, mocked_train_samples, mocked_valid_samples, mocked_test_samples
class TestSpeechCommands(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
train_samples = []
valid_samples = []
test_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
dataset_dir = os.path.join(
cls.root_dir, speechcommands.FOLDER_IN_ARCHIVE, speechcommands.URL
)
cls.samples, cls.train_samples, cls.valid_samples, cls.test_samples = get_mock_dataset(dataset_dir)
def _testSpeechCommands(self, dataset, data_samples):
num_samples = 0
for i, (data, sample_rate, label, speaker_id, utterance_number) in enumerate(
dataset
):
self.assertEqual(data, data_samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == data_samples[i][1]
assert label == data_samples[i][2]
assert speaker_id == data_samples[i][3]
assert utterance_number == data_samples[i][4]
num_samples += 1
assert num_samples == len(data_samples)
def testSpeechCommands_str(self):
dataset = speechcommands.SPEECHCOMMANDS(self.root_dir)
self._testSpeechCommands(dataset, self.samples)
def testSpeechCommands_path(self):
dataset = speechcommands.SPEECHCOMMANDS(Path(self.root_dir))
self._testSpeechCommands(dataset, self.samples)
def testSpeechCommandsSubsetTrain(self):
dataset = speechcommands.SPEECHCOMMANDS(self.root_dir, subset="training")
self._testSpeechCommands(dataset, self.train_samples)
def testSpeechCommandsSubsetValid(self):
dataset = speechcommands.SPEECHCOMMANDS(self.root_dir, subset="validation")
self._testSpeechCommands(dataset, self.valid_samples)
def testSpeechCommandsSubsetTest(self):
dataset = speechcommands.SPEECHCOMMANDS(self.root_dir, subset="testing")
self._testSpeechCommands(dataset, self.test_samples)
def testSpeechCommandsSum(self):
dataset_all = speechcommands.SPEECHCOMMANDS(self.root_dir)
dataset_train = speechcommands.SPEECHCOMMANDS(self.root_dir, subset="training")
dataset_valid = speechcommands.SPEECHCOMMANDS(self.root_dir, subset="validation")
dataset_test = speechcommands.SPEECHCOMMANDS(self.root_dir, subset="testing")
assert len(dataset_train) + len(dataset_valid) + len(dataset_test) == len(dataset_all)
# ---------------------------------------------------------------------------
import os
from pathlib import Path
from torchaudio.datasets import cmuarctic
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data = []
sample_rate = 16000
transcript = "This is a test transcript."
base_dir = os.path.join(root_dir, "ARCTIC", "cmu_us_aew_arctic")
txt_dir = os.path.join(base_dir, "etc")
os.makedirs(txt_dir, exist_ok=True)
txt_file = os.path.join(txt_dir, "txt.done.data")
audio_dir = os.path.join(base_dir, "wav")
os.makedirs(audio_dir, exist_ok=True)
seed = 42
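# txt.done.data lists one utterance per line in the form:
#   ( arctic_a0000 "This is a test transcript." )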
with open(txt_file, "w") as txt:
for c in ["a", "b"]:
for i in range(5):
utterance_id = f"arctic_{c}{i:04d}"
path = os.path.join(audio_dir, f"{utterance_id}.wav")
data = get_whitenoise(
sample_rate=sample_rate,
duration=3,
n_channels=1,
dtype="int16",
seed=seed,
)
save_wav(path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
transcript,
utterance_id.split("_")[1],
)
mocked_data.append(sample)
txt.write(f'( {utterance_id} "{transcript}" )\n')
seed += 1
return mocked_data
class TestCMUARCTIC(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_cmuarctic(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, utterance_id) in enumerate(dataset):
expected_sample = self.samples[i]
assert sample_rate == expected_sample[1]
assert transcript == expected_sample[2]
assert utterance_id == expected_sample[3]
self.assertEqual(expected_sample[0], waveform, atol=5e-5, rtol=1e-8)
n_ite += 1
assert n_ite == len(self.samples)
def test_cmuarctic_str(self):
dataset = cmuarctic.CMUARCTIC(self.root_dir)
self._test_cmuarctic(dataset)
def test_cmuarctic_path(self):
dataset = cmuarctic.CMUARCTIC(Path(self.root_dir))
self._test_cmuarctic(dataset)
# ---------------------------------------------------------------------------
import csv
import os
from pathlib import Path
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
normalize_wav,
save_wav,
)
from torchaudio.datasets import ljspeech
_TRANSCRIPTS = [
"Test transcript 1",
"Test transcript 2",
"Test transcript 3",
"In 1465 Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,"
]
_NORMALIZED_TRANSCRIPT = [
"Test transcript one",
"Test transcript two",
"Test transcript three",
"In fourteen sixty-five Sweynheim and Pannartz began printing in the monastery of Subiaco near Rome,"
]
def get_mock_dataset(root_dir):
"""
root_dir: path to the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, "LJSpeech-1.1")
archive_dir = os.path.join(base_dir, "wavs")
os.makedirs(archive_dir, exist_ok=True)
metadata_path = os.path.join(base_dir, "metadata.csv")
sample_rate = 22050
with open(metadata_path, mode="w", newline='') as metadata_file:
metadata_writer = csv.writer(
metadata_file, delimiter="|", quoting=csv.QUOTE_NONE
)
for i, (transcript, normalized_transcript) in enumerate(
zip(_TRANSCRIPTS, _NORMALIZED_TRANSCRIPT)
):
fileid = f'LJ001-{i:04d}'
metadata_writer.writerow([fileid, transcript, normalized_transcript])
filename = fileid + ".wav"
path = os.path.join(archive_dir, filename)
data = get_whitenoise(
sample_rate=sample_rate, duration=1, n_channels=1, dtype="int16", seed=i
)
save_wav(path, data, sample_rate)
mocked_data.append(normalize_wav(data))
return mocked_data, _TRANSCRIPTS, _NORMALIZED_TRANSCRIPT
class TestLJSpeech(TempDirMixin, TorchaudioTestCase):
backend = "default"
root_dir = None
data, _transcripts, _normalized_transcript = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._transcripts, cls._normalized_transcript = get_mock_dataset(cls.root_dir)
def _test_ljspeech(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, transcript, normalized_transcript) in enumerate(
dataset
):
expected_transcript = self._transcripts[i]
expected_normalized_transcript = self._normalized_transcript[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 22050  # the mock data is generated at 22050 Hz
assert transcript == expected_transcript
assert normalized_transcript == expected_normalized_transcript
n_ite += 1
assert n_ite == len(self.data)
def test_ljspeech_str(self):
dataset = ljspeech.LJSPEECH(self.root_dir)
self._test_ljspeech(dataset)
def test_ljspeech_path(self):
dataset = ljspeech.LJSPEECH(Path(self.root_dir))
self._test_ljspeech(dataset)
# ---------------------------------------------------------------------------
import os
from pathlib import Path
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
from torchaudio.datasets.libritts import LIBRITTS
_UTTERANCE_IDS = [
[19, 198, '000000', '000000'],
[26, 495, '000004', '000000'],
]
_ORIGINAL_TEXT = 'this is the original text.'
_NORMALIZED_TEXT = 'this is the normalized text.'
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data = []
base_dir = os.path.join(root_dir, 'LibriTTS', 'train-clean-100')
for i, utterance_id in enumerate(_UTTERANCE_IDS):
filename = f'{"_".join(str(u) for u in utterance_id)}.wav'
file_dir = os.path.join(base_dir, str(utterance_id[0]), str(utterance_id[1]))
os.makedirs(file_dir, exist_ok=True)
path = os.path.join(file_dir, filename)
data = get_whitenoise(sample_rate=24000, duration=2, n_channels=1, dtype='int16', seed=i)
save_wav(path, data, 24000)
mocked_data.append(normalize_wav(data))
original_text_filename = f'{"_".join(str(u) for u in utterance_id)}.original.txt'
path_original = os.path.join(file_dir, original_text_filename)
with open(path_original, 'w') as file_:
file_.write(_ORIGINAL_TEXT)
normalized_text_filename = f'{"_".join(str(u) for u in utterance_id)}.normalized.txt'
path_normalized = os.path.join(file_dir, normalized_text_filename)
with open(path_normalized, 'w') as file_:
file_.write(_NORMALIZED_TEXT)
return mocked_data, _UTTERANCE_IDS, _ORIGINAL_TEXT, _NORMALIZED_TEXT
class TestLibriTTS(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
data = []
_utterance_ids, _original_text, _normalized_text = [], [], []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data, cls._utterance_ids, cls._original_text, cls._normalized_text = get_mock_dataset(cls.root_dir)
def _test_libritts(self, dataset):
n_ites = 0
for i, (waveform,
sample_rate,
original_text,
normalized_text,
speaker_id,
chapter_id,
utterance_id) in enumerate(dataset):
expected_ids = self._utterance_ids[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 24000
assert speaker_id == expected_ids[0]
assert chapter_id == expected_ids[1]
assert original_text == self._original_text
assert normalized_text == self._normalized_text
assert utterance_id == "_".join(str(u) for u in expected_ids)
n_ites += 1
assert n_ites == len(self._utterance_ids)
def test_libritts_str(self):
dataset = LIBRITTS(self.root_dir)
self._test_libritts(dataset)
def test_libritts_path(self):
dataset = LIBRITTS(Path(self.root_dir))
self._test_libritts(dataset)
# ---------------------------------------------------------------------------
import csv
import os
from pathlib import Path
from typing import Dict, List, Tuple
from torch import Tensor
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
from torchaudio.datasets import COMMONVOICE
_ORIGINAL_EXT_AUDIO = COMMONVOICE._ext_audio
_SAMPLE_RATE = 48000
_HEADERS = [u"client_ids", u"path", u"sentence", u"up_votes", u"down_votes", u"age", u"gender", u"accent"]
_EN_TRAIN_CSV_CONTENTS = [
["9d16c5d980247861130e0480e2719f448be73d86a496c36d01a477cbdecd8cfd1399403d7a77bf458d211a70711b2da0845c",
"common_voice_en_18885784.wav",
"He was accorded a State funeral, and was buried in Drayton and Toowoomba Cemetery.", "2", "0", "", "",
""],
["c82eb9291328620f06025a1f8112b909099e447e485e99236cb87df008650250e79fea5ca772061fb6a370830847b9c44d20",
"common_voice_en_556542.wav", "Once more into the breach", "2", "0", "thirties", "male", "us"],
["f74d880c5ad4c5917f314a604d3fc4805159d255796fb9f8defca35333ecc002bdf53dc463503c12674ea840b21b4a507b7c",
"common_voice_en_18607573.wav",
"Caddy, show Miss Clare and Miss Summerson their rooms.", "2", "0", "twenties", "male", "canada"],
]
_FR_TRAIN_CSV_CONTENTS = [
[
"a2e8e1e1cc74d08c92a53d7b9ff84e077eb90410edd85b8882f16fd037cecfcb6a19413c6c63ce6458cfea9579878fa91cef"
"18343441c601cae0597a4b0d3144",
"89e67e7682b36786a0b4b4022c4d42090c86edd96c78c12d30088e62522b8fe466ea4912e6a1055dfb91b296a0743e0a2bbe"
"16cebac98ee5349e3e8262cb9329",
"Or sur ce point nous n’avons aucune réponse de votre part.", "2", "0", "twenties", "male", "france"],
[
"a2e8e1e1cc74d08c92a53d7b9ff84e077eb90410edd85b8882f16fd037cecfcb6a19413c6c63ce6458cfea9579878fa91cef18"
"343441c601cae0597a4b0d3144",
"87d71819a26179e93acfee149d0b21b7bf5e926e367d80b2b3792d45f46e04853a514945783ff764c1fc237b4eb0ee2b0a7a7"
"cbd395acbdfcfa9d76a6e199bbd",
"Monsieur de La Verpillière, laissez parler le ministre", "2", "0", "twenties", "male", "france"],
]
def get_mock_dataset(root_dir, train_csv_contents, ext_audio) -> List[Tuple[Tensor, int, Dict[str, str]]]:
"""
prepares mocked dataset
"""
mocked_data = []
# Note: the extension is changed to wav for the sake of the test.
# Note: the first entry is missing values for `age`, `gender` and `accent`, as in the original data.
# The tsv file name does not indicate a different subset; the whole dataset is tested here.
tsv_filename = os.path.join(root_dir, "train.tsv")
audio_base_path = os.path.join(root_dir, "clips")
os.makedirs(audio_base_path, exist_ok=True)
with open(tsv_filename, "w", newline='') as tsv:
writer = csv.writer(tsv, delimiter='\t')
writer.writerow(_HEADERS)
for i, content in enumerate(train_csv_contents):
content[2] = str(content[2].encode("utf-8"))
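# The sentence is replaced by the str() of its UTF-8 bytes; the same
# transformed value goes into both the tsv and the expected dictionary, so the
# comparison in the test remains consistent.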
writer.writerow(content)
if not content[1].endswith(ext_audio):
audio_path = os.path.join(audio_base_path, content[1] + ext_audio)
else:
audio_path = os.path.join(audio_base_path, content[1])
data = get_whitenoise(sample_rate=_SAMPLE_RATE, duration=1, n_channels=1, seed=i, dtype='float32')
save_wav(audio_path, data, _SAMPLE_RATE)
# Append data entry
mocked_data.append((normalize_wav(data), _SAMPLE_RATE, dict(zip(_HEADERS, content))))
return mocked_data
def get_mock_dataset_en(root_dir, ext_audio) -> List[Tuple[Tensor, int, Dict[str, str]]]:
"""
prepares english mocked dataset
"""
return get_mock_dataset(root_dir, _EN_TRAIN_CSV_CONTENTS, ext_audio)
def get_mock_dataset_fr(root_dir, ext_audio) -> List[Tuple[Tensor, int, Dict[str, str]]]:
"""
prepares french mocked dataset
"""
return get_mock_dataset(root_dir, _FR_TRAIN_CSV_CONTENTS, ext_audio)
class BaseTestCommonVoice(TempDirMixin):
root_dir = None
data = None
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.root_dir = cls.get_base_temp_dir()
COMMONVOICE._ext_audio = ".wav"
@classmethod
def tearDownClass(cls):
super().tearDownClass()
COMMONVOICE._ext_audio = _ORIGINAL_EXT_AUDIO
def _test_commonvoice(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, dictionary) in enumerate(dataset):
expected_dictionary = self.data[i][2]
expected_data = self.data[i][0]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == _SAMPLE_RATE
assert dictionary == expected_dictionary
n_ite += 1
assert n_ite == len(self.data)
class TestCommonVoiceEN(BaseTestCommonVoice, TorchaudioTestCase):
backend = 'default'
root_dir = None
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.data = get_mock_dataset_en(cls.root_dir, COMMONVOICE._ext_audio)
def test_commonvoice_str(self):
dataset = COMMONVOICE(self.root_dir)
self._test_commonvoice(dataset)
def test_commonvoice_path(self):
dataset = COMMONVOICE(Path(self.root_dir))
self._test_commonvoice(dataset)
class TestCommonVoiceFR(BaseTestCommonVoice, TorchaudioTestCase):
backend = 'default'
root_dir = None
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.data = get_mock_dataset_fr(cls.root_dir, COMMONVOICE._ext_audio)
def test_commonvoice_str(self):
dataset = COMMONVOICE(self.root_dir)
self._test_commonvoice(dataset)
# ---------------------------------------------------------------------------
import os
from pathlib import Path
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
from torchaudio.datasets import librispeech
# Used to generate a unique transcript for each dummy audio file
_NUMBERS = [
'ZERO',
'ONE',
'TWO',
'THREE',
'FOUR',
'FIVE',
'SIX',
'SEVEN',
'EIGHT',
'NINE'
]
def get_mock_dataset(root_dir):
"""
root_dir: directory to the mocked dataset
"""
mocked_data = []
dataset_dir = os.path.join(
root_dir, librispeech.FOLDER_IN_ARCHIVE, librispeech.URL
)
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for speaker_id in range(5):
speaker_path = os.path.join(dataset_dir, str(speaker_id))
os.makedirs(speaker_path, exist_ok=True)
for chapter_id in range(3):
chapter_path = os.path.join(speaker_path, str(chapter_id))
os.makedirs(chapter_path, exist_ok=True)
trans_content = []
for utterance_id in range(10):
filename = f'{speaker_id}-{chapter_id}-{utterance_id:04d}.wav'
path = os.path.join(chapter_path, filename)
transcript = ' '.join(
[_NUMBERS[x] for x in [speaker_id, chapter_id, utterance_id]]
)
trans_content.append(
f'{speaker_id}-{chapter_id}-{utterance_id:04d} {transcript}'
)
data = get_whitenoise(
sample_rate=sample_rate,
duration=0.01,
n_channels=1,
dtype='float32',
seed=seed
)
save_wav(path, data, sample_rate)
sample = (
normalize_wav(data),
sample_rate,
transcript,
speaker_id,
chapter_id,
utterance_id
)
mocked_data.append(sample)
seed += 1
trans_filename = f'{speaker_id}-{chapter_id}.trans.txt'
trans_path = os.path.join(chapter_path, trans_filename)
with open(trans_path, 'w') as f:
f.write('\n'.join(trans_content))
return mocked_data
class TestLibriSpeech(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
@classmethod
def tearDownClass(cls):
# Restore the default '.flac' extension in case a test failed before resetting it
librispeech.LIBRISPEECH._ext_audio = '.flac'
def _test_librispeech(self, dataset):
num_samples = 0
for i, (
data, sample_rate, transcript, speaker_id, chapter_id, utterance_id
) in enumerate(dataset):
self.assertEqual(data, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert transcript == self.samples[i][2]
assert speaker_id == self.samples[i][3]
assert chapter_id == self.samples[i][4]
assert utterance_id == self.samples[i][5]
num_samples += 1
assert num_samples == len(self.samples)
librispeech.LIBRISPEECH._ext_audio = '.flac'
def test_librispeech_str(self):
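# The mock audio is saved as wav, so point the dataset at .wav files instead
# of the default .flac extension.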
librispeech.LIBRISPEECH._ext_audio = '.wav'
dataset = librispeech.LIBRISPEECH(self.root_dir)
self._test_librispeech(dataset)
def test_librispeech_path(self):
librispeech.LIBRISPEECH._ext_audio = '.wav'
dataset = librispeech.LIBRISPEECH(Path(self.root_dir))
self._test_librispeech(dataset)
# ---------------------------------------------------------------------------
import os
from pathlib import Path
from torchaudio.datasets import yesno
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
def get_mock_data(root_dir, labels):
"""
root_dir: path
labels: list of labels
"""
mocked_data = []
base_dir = os.path.join(root_dir, 'waves_yesno')
os.makedirs(base_dir, exist_ok=True)
for i, label in enumerate(labels):
filename = f'{"_".join(str(l) for l in label)}.wav'
path = os.path.join(base_dir, filename)
data = get_whitenoise(sample_rate=8000, duration=6, n_channels=1, dtype='int16', seed=i)
save_wav(path, data, 8000)
mocked_data.append(normalize_wav(data))
return mocked_data
class TestYesNo(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
data = []
labels = [
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1],
]
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.data = get_mock_data(cls.root_dir, cls.labels)
def _test_yesno(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
expected_label = self.labels[i]
expected_data = self.data[i]
self.assertEqual(expected_data, waveform, atol=5e-5, rtol=1e-8)
assert sample_rate == 8000
assert label == expected_label
n_ite += 1
assert n_ite == len(self.data)
def test_yesno_str(self):
dataset = yesno.YESNO(self.root_dir)
self._test_yesno(dataset)
def test_yesno_path(self):
dataset = yesno.YESNO(Path(self.root_dir))
self._test_yesno(dataset)
# ---------------------------------------------------------------------------
from pathlib import Path
import pytest
from torchaudio.datasets import dr_vctk
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
)
_SUBSETS = ["train", "test"]
_CONDITIONS = ["clean", "device-recorded"]
_SOURCES = ["DR-VCTK_Office1_ClosedWindow", "DR-VCTK_Office1_OpenedWindow"]
_SPEAKER_IDS = range(226, 230)
_CHANNEL_IDS = range(1, 6)
def get_mock_dataset(root_dir):
"""
root_dir: root directory of the mocked data
"""
mocked_samples = {}
dataset_dir = Path(root_dir) / "DR-VCTK" / "DR-VCTK"
dataset_dir.mkdir(parents=True, exist_ok=True)
config_dir = dataset_dir / "configurations"
config_dir.mkdir(parents=True, exist_ok=True)
sample_rate = 16000
seed = 0
for subset in _SUBSETS:
mocked_samples[subset] = []
for condition in _CONDITIONS:
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_dir.mkdir(parents=True, exist_ok=True)
config_filepath = config_dir / f"{subset}_ch_log.txt"
with open(config_filepath, "w") as f:
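# For the train subset an extra leading blank line is written before the
# header, apparently mirroring the layout of the real train channel log.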
if subset == "train":
f.write("\n")
f.write("File Name\tMain Source\tChannel Idx\n")
for speaker_id in _SPEAKER_IDS:
utterance_id = 1
for source in _SOURCES:
for channel_id in _CHANNEL_IDS:
filename = f"p{speaker_id}_{utterance_id:03d}.wav"
f.write(f"{filename}\t{source}\t{channel_id}\n")
data = {}
for condition in _CONDITIONS:
data[condition] = get_whitenoise(
sample_rate=sample_rate,
duration=0.01,
n_channels=1,
dtype='float32',
seed=seed
)
audio_dir = dataset_dir / f"{condition}_{subset}set_wav_16k"
audio_file_path = audio_dir / filename
save_wav(audio_file_path, data[condition], sample_rate)
seed += 1
sample = (
data[_CONDITIONS[0]],
sample_rate,
data[_CONDITIONS[1]],
sample_rate,
"p" + str(speaker_id),
f"{utterance_id:03d}",
source,
channel_id,
)
mocked_samples[subset].append(sample)
utterance_id += 1
return mocked_samples
class TestDRVCTK(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.samples = get_mock_dataset(cls.root_dir)
def _test_dr_vctk(self, dataset, subset):
num_samples = 0
for i, (
waveform_clean,
sample_rate_clean,
waveform_dr,
sample_rate_dr,
speaker_id,
utterance_id,
source,
channel_id,
) in enumerate(dataset):
self.assertEqual(waveform_clean, self.samples[subset][i][0], atol=5e-5, rtol=1e-8)
assert sample_rate_clean == self.samples[subset][i][1]
self.assertEqual(waveform_dr, self.samples[subset][i][2], atol=5e-5, rtol=1e-8)
assert sample_rate_dr == self.samples[subset][i][3]
assert speaker_id == self.samples[subset][i][4]
assert utterance_id == self.samples[subset][i][5]
assert source == self.samples[subset][i][6]
assert channel_id == self.samples[subset][i][7]
num_samples += 1
assert num_samples == len(self.samples[subset])
def test_dr_vctk_train_str(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_str(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(self.root_dir, subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_train_path(self):
subset = "train"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_test_path(self):
subset = "test"
dataset = dr_vctk.DR_VCTK(Path(self.root_dir), subset=subset)
self._test_dr_vctk(dataset, subset)
def test_dr_vctk_invalid_subset(self):
subset = "invalid"
with pytest.raises(RuntimeError, match=f"The subset '{subset}' does not match any of the supported subsets"):
dr_vctk.DR_VCTK(self.root_dir, subset=subset)
# ---------------------------------------------------------------------------
import os
import platform
from pathlib import Path
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
skipIfNoSox
)
from torchaudio.datasets import tedlium
# Used to generate a unique utterance for each dummy audio file
_UTTERANCES = [
"AaronHuey_2010X 1 AaronHuey_2010X 0.0 2.0 <o,f0,female> script1\n",
"AaronHuey_2010X 1 AaronHuey_2010X 2.0 4.0 <o,f0,female> script2\n",
"AaronHuey_2010X 1 AaronHuey_2010X 4.0 6.0 <o,f0,female> script3\n",
"AaronHuey_2010X 1 AaronHuey_2010X 6.0 8.0 <o,f0,female> script4\n",
"AaronHuey_2010X 1 AaronHuey_2010X 8.0 10.0 <o,f0,female> script5\n",
]
_PHONEME = [
"a AH",
"a(2) EY",
"aachen AA K AH N",
"aad AE D",
"aaden EY D AH N",
"aadmi AE D M IY",
"aae EY EY",
]
def get_mock_dataset(dataset_dir):
"""
dataset_dir: directory of the mocked dataset
"""
mocked_samples = {}
os.makedirs(dataset_dir, exist_ok=True)
sample_rate = 16000 # 16kHz
seed = 0
for release in ["release1", "release2", "release3"]:
data = get_whitenoise(sample_rate=sample_rate, duration=10.00, n_channels=1, dtype="float32", seed=seed)
if release in ["release1", "release2"]:
release_dir = os.path.join(
dataset_dir,
tedlium._RELEASE_CONFIGS[release]["folder_in_archive"],
tedlium._RELEASE_CONFIGS[release]["subset"],
)
else:
release_dir = os.path.join(
dataset_dir,
tedlium._RELEASE_CONFIGS[release]["folder_in_archive"],
tedlium._RELEASE_CONFIGS[release]["data_path"],
)
os.makedirs(release_dir, exist_ok=True)
os.makedirs(os.path.join(release_dir, "stm"), exist_ok=True) # Subfolder for transcripts
os.makedirs(os.path.join(release_dir, "sph"), exist_ok=True) # Subfolder for audio files
filename = f"{release}.sph"
path = os.path.join(os.path.join(release_dir, "sph"), filename)
save_wav(path, data, sample_rate)
trans_filename = f"{release}.stm"
trans_path = os.path.join(os.path.join(release_dir, "stm"), trans_filename)
with open(trans_path, "w") as f:
f.write("".join(_UTTERANCES))
dict_filename = f"{release}.dic"
dict_path = os.path.join(release_dir, dict_filename)
with open(dict_path, "w") as f:
f.write("\n".join(_PHONEME))
# Create a samples list to compare with
mocked_samples[release] = []
for utterance in _UTTERANCES:
talk_id, _, speaker_id, start_time, end_time, identifier, transcript = utterance.split(" ", 6)
start_time = int(float(start_time)) * sample_rate
end_time = int(float(end_time)) * sample_rate
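# STM start/end times are in seconds; convert them to sample offsets so the
# expected segment can be sliced out of the 10-second mock waveform.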
sample = (
data[:, start_time:end_time],
sample_rate,
transcript,
talk_id,
speaker_id,
identifier,
)
mocked_samples[release].append(sample)
seed += 1
return mocked_samples
class Tedlium(TempDirMixin):
root_dir = None
samples = {}
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
cls.root_dir = dataset_dir = os.path.join(cls.root_dir, "tedlium")
cls.samples = get_mock_dataset(dataset_dir)
def _test_tedlium(self, dataset, release):
num_samples = 0
for i, (data, sample_rate, transcript, talk_id, speaker_id, identifier) in enumerate(dataset):
self.assertEqual(data, self.samples[release][i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[release][i][1]
assert transcript == self.samples[release][i][2]
assert talk_id == self.samples[release][i][3]
assert speaker_id == self.samples[release][i][4]
assert identifier == self.samples[release][i][5]
num_samples += 1
assert num_samples == len(self.samples[release])
dataset._dict_path = os.path.join(dataset._path, f"{release}.dic")
phoneme_dict = dataset.phoneme_dict
        phonemes = [f"{key} {' '.join(value)}" for key, value in phoneme_dict.items()]
        assert phonemes == _PHONEME
def test_tedlium_release1_str(self):
release = "release1"
dataset = tedlium.TEDLIUM(self.root_dir, release=release)
self._test_tedlium(dataset, release)
def test_tedlium_release1_path(self):
release = "release1"
dataset = tedlium.TEDLIUM(Path(self.root_dir), release=release)
self._test_tedlium(dataset, release)
def test_tedlium_release2(self):
release = "release2"
dataset = tedlium.TEDLIUM(self.root_dir, release=release)
self._test_tedlium(dataset, release)
def test_tedlium_release3(self):
release = "release3"
dataset = tedlium.TEDLIUM(self.root_dir, release=release)
self._test_tedlium(dataset, release)
class TestTedliumSoundfile(Tedlium, TorchaudioTestCase):
backend = "soundfile"
if platform.system() != "Windows":
@skipIfNoSox
class TestTedliumSoxIO(Tedlium, TorchaudioTestCase):
backend = "sox_io"
|
import os
from pathlib import Path
from torchaudio.datasets import CMUDict
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
)
def get_mock_dataset(root_dir, return_punc=False):
"""
    root_dir: directory of the mocked dataset
"""
header = [
";;; # CMUdict -- Major Version: 0.07",
";;; ",
";;; # $HeadURL$",
]
puncs = [
"!EXCLAMATION-POINT EH2 K S K L AH0 M EY1 SH AH0 N P OY2 N T",
"\"CLOSE-QUOTE K L OW1 Z K W OW1 T",
"#HASH-MARK HH AE1 M AA2 R K",
"%PERCENT P ER0 S EH1 N T",
"&ERSAND AE1 M P ER0 S AE2 N D",
"'END-INNER-QUOTE EH1 N D IH1 N ER0 K W OW1 T",
"(BEGIN-PARENS B IH0 G IH1 N P ER0 EH1 N Z",
")CLOSE-PAREN K L OW1 Z P ER0 EH1 N",
"+PLUS P L UH1 S",
",COMMA K AA1 M AH0",
"--DASH D AE1 SH",
"!EXCLAMATION-POINT EH2 K S K L AH0 M EY1 SH AH0 N P OY2 N T",
"/SLASH S L AE1 SH",
":COLON K OW1 L AH0 N",
";SEMI-COLON S EH1 M IY0 K OW1 L AH0 N",
"?QUESTION-MARK K W EH1 S CH AH0 N M AA1 R K",
"{BRACE B R EY1 S",
"}CLOSE-BRACE K L OW1 Z B R EY1 S",
"...ELLIPSIS IH2 L IH1 P S IH0 S",
]
punc_outputs = [
"!",
"\"",
"#",
"%",
"&",
"'",
"(",
")",
"+",
",",
"--",
"!",
"/",
":",
";",
"?",
"{",
"}",
"...",
]
words = [
"3-D TH R IY1 D IY2",
"'BOUT B AW1 T",
"'CAUSE K AH0 Z",
"'TWAS T W AH1 Z",
"A AH0",
"B B IY1",
"C S IY1",
"D D IY1",
"E IY1",
"F EH1 F",
"G JH IY1",
"H EY1 CH",
"I AY1",
"J JH EY1",
"K K EY1",
"L EH1 L",
"M EH1 M",
"N EH1 N",
"O OW1",
"P P IY1",
"Q K Y UW1",
"R AA1 R",
"S EH1 S",
"T T IY1",
"U Y UW1",
"V V IY1",
"X EH1 K S",
"Y W AY1",
"Z Z IY1",
]
mocked_symbols = [
"AA1",
"AA2",
"AE1",
"AE2",
"AH0",
"AH1",
"AY1",
"B",
"CH",
"D",
"EH1",
"EH2",
"ER0",
"EY1",
"F",
"G",
"HH",
"IH0",
"IH1",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"OW1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH1",
"UW0",
"UW1",
"V",
"W",
"Y",
"Z",
]
dict_file = os.path.join(root_dir, "cmudict-0.7b")
symbol_file = os.path.join(root_dir, "cmudict-0.7b.symbols")
with open(dict_file, "w") as fileobj:
for section in [header, puncs, words]:
for line in section:
fileobj.write(line)
fileobj.write("\n")
with open(symbol_file, "w") as txt:
txt.write("\n".join(mocked_symbols))
mocked_data = []
if return_punc:
for i, ent in enumerate(puncs):
            _, phones = ent.split(" ", 1)
            mocked_data.append((punc_outputs[i], phones.split()))
for ent in words:
        word, phones = ent.split(" ", 1)
        mocked_data.append((word, phones.split()))
return mocked_data
class TestCMUDict(TempDirMixin, TorchaudioTestCase):
root_dir = None
root_punc_dir = None
samples = []
punc_samples = []
@classmethod
def setUpClass(cls):
cls.root_dir = os.path.join(cls.get_base_temp_dir(), "normal")
os.mkdir(cls.root_dir)
cls.samples = get_mock_dataset(cls.root_dir)
cls.root_punc_dir = os.path.join(cls.get_base_temp_dir(), "punc")
os.mkdir(cls.root_punc_dir)
cls.punc_samples = get_mock_dataset(cls.root_punc_dir, return_punc=True)
def _test_cmudict(self, dataset):
"""Test if the dataset is reading the mocked data correctly."""
n_item = 0
for i, (word, phones) in enumerate(dataset):
expected_word, expected_phones = self.samples[i]
assert word == expected_word
assert phones == expected_phones
n_item += 1
assert n_item == len(self.samples)
def _test_punc_cmudict(self, dataset):
"""Test if the dataset is reading the mocked data with punctuations correctly."""
n_item = 0
for i, (word, phones) in enumerate(dataset):
expected_word, expected_phones = self.punc_samples[i]
assert word == expected_word
assert phones == expected_phones
n_item += 1
assert n_item == len(self.punc_samples)
    def test_cmudict_path_with_punctuation(self):
dataset = CMUDict(Path(self.root_punc_dir), exclude_punctuations=False)
self._test_punc_cmudict(dataset)
    def test_cmudict_str_with_punctuation(self):
dataset = CMUDict(self.root_punc_dir, exclude_punctuations=False)
self._test_punc_cmudict(dataset)
    def test_cmudict_path(self):
dataset = CMUDict(Path(self.root_punc_dir), exclude_punctuations=True)
self._test_cmudict(dataset)
    def test_cmudict_str(self):
dataset = CMUDict(self.root_punc_dir, exclude_punctuations=True)
self._test_cmudict(dataset)
|
import os
from pathlib import Path
from torchaudio.datasets import gtzan
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
get_whitenoise,
save_wav,
normalize_wav,
)
def get_mock_dataset(root_dir):
"""
    root_dir: directory of the mocked dataset
"""
mocked_samples = []
mocked_training = []
mocked_validation = []
mocked_testing = []
sample_rate = 22050
seed = 0
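    # Generate 100 clips per genre; a clip is additionally collected into the
    # train/valid/test expectations when its name appears in the corresponding
    # gtzan.filtered_* split list.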
for genre in gtzan.gtzan_genres:
base_dir = os.path.join(root_dir, 'genres', genre)
os.makedirs(base_dir, exist_ok=True)
for i in range(100):
filename = f'{genre}.{i:05d}'
path = os.path.join(base_dir, f'{filename}.wav')
data = get_whitenoise(sample_rate=sample_rate, duration=0.01, n_channels=1, dtype='int16', seed=seed)
save_wav(path, data, sample_rate)
sample = (normalize_wav(data), sample_rate, genre)
mocked_samples.append(sample)
if filename in gtzan.filtered_test:
mocked_testing.append(sample)
if filename in gtzan.filtered_train:
mocked_training.append(sample)
if filename in gtzan.filtered_valid:
mocked_validation.append(sample)
seed += 1
return (mocked_samples, mocked_training, mocked_validation, mocked_testing)
class TestGTZAN(TempDirMixin, TorchaudioTestCase):
backend = 'default'
root_dir = None
samples = []
training = []
validation = []
testing = []
@classmethod
def setUpClass(cls):
cls.root_dir = cls.get_base_temp_dir()
mocked_data = get_mock_dataset(cls.root_dir)
cls.samples = mocked_data[0]
cls.training = mocked_data[1]
cls.validation = mocked_data[2]
cls.testing = mocked_data[3]
def test_no_subset(self):
dataset = gtzan.GTZAN(self.root_dir)
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.samples[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.samples[i][1]
assert label == self.samples[i][2]
n_ite += 1
assert n_ite == len(self.samples)
def _test_training(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.training[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.training[i][1]
assert label == self.training[i][2]
n_ite += 1
assert n_ite == len(self.training)
def _test_validation(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.validation[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.validation[i][1]
assert label == self.validation[i][2]
n_ite += 1
assert n_ite == len(self.validation)
def _test_testing(self, dataset):
n_ite = 0
for i, (waveform, sample_rate, label) in enumerate(dataset):
self.assertEqual(waveform, self.testing[i][0], atol=5e-5, rtol=1e-8)
assert sample_rate == self.testing[i][1]
assert label == self.testing[i][2]
n_ite += 1
assert n_ite == len(self.testing)
def test_training_str(self):
train_dataset = gtzan.GTZAN(self.root_dir, subset='training')
self._test_training(train_dataset)
def test_validation_str(self):
val_dataset = gtzan.GTZAN(self.root_dir, subset='validation')
self._test_validation(val_dataset)
def test_testing_str(self):
test_dataset = gtzan.GTZAN(self.root_dir, subset='testing')
self._test_testing(test_dataset)
def test_training_path(self):
root_dir = Path(self.root_dir)
train_dataset = gtzan.GTZAN(root_dir, subset='training')
self._test_training(train_dataset)
def test_validation_path(self):
root_dir = Path(self.root_dir)
val_dataset = gtzan.GTZAN(root_dir, subset='validation')
self._test_validation(val_dataset)
def test_testing_path(self):
root_dir = Path(self.root_dir)
test_dataset = gtzan.GTZAN(root_dir, subset='testing')
self._test_testing(test_dataset)
|
from torchaudio.utils import sox_utils
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoSox,
)
@skipIfNoSox
class TestSoxUtils(PytorchTestCase):
"""Smoke tests for sox_util module"""
def test_set_seed(self):
"""`set_seed` does not crush"""
sox_utils.set_seed(0)
def test_set_verbosity(self):
"""`set_verbosity` does not crush"""
for val in range(6, 0, -1):
sox_utils.set_verbosity(val)
def test_set_buffer_size(self):
"""`set_buffer_size` does not crush"""
sox_utils.set_buffer_size(131072)
# back to default
sox_utils.set_buffer_size(8192)
def test_set_use_threads(self):
"""`set_use_threads` does not crush"""
sox_utils.set_use_threads(True)
# back to default
sox_utils.set_use_threads(False)
def test_list_effects(self):
"""`list_effects` returns the list of available effects"""
effects = sox_utils.list_effects()
# We cannot infer what effects are available, so only check some of them.
assert 'highpass' in effects
assert 'phaser' in effects
assert 'gain' in effects
def test_list_read_formats(self):
"""`list_read_formats` returns the list of supported formats"""
formats = sox_utils.list_read_formats()
assert 'wav' in formats
def test_list_write_formats(self):
"""`list_write_formats` returns the list of supported formats"""
formats = sox_utils.list_write_formats()
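        # sox is not expected to support encoding opus, hence the negative assertion.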
assert 'opus' not in formats
|
import torchaudio
from torchaudio_unittest import common_utils
class BackendSwitchMixin:
"""Test set/get_audio_backend works"""
backend = None
backend_module = None
def test_switch(self):
torchaudio.set_audio_backend(self.backend)
if self.backend is None:
assert torchaudio.get_audio_backend() is None
else:
assert torchaudio.get_audio_backend() == self.backend
assert torchaudio.load == self.backend_module.load
assert torchaudio.save == self.backend_module.save
assert torchaudio.info == self.backend_module.info
class TestBackendSwitch_NoBackend(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = None
backend_module = torchaudio.backend.no_backend
@common_utils.skipIfNoSox
class TestBackendSwitch_SoXIO(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = 'sox_io'
backend_module = torchaudio.backend.sox_io_backend
@common_utils.skipIfNoModule('soundfile')
class TestBackendSwitch_soundfile(BackendSwitchMixin, common_utils.TorchaudioTestCase):
backend = 'soundfile'
backend_module = torchaudio.backend.soundfile_backend
|
from torchaudio_unittest.common_utils import sox_utils
def get_encoding(ext, dtype):
exts = {
'mp3',
'flac',
'vorbis',
}
encodings = {
'float32': 'PCM_F',
'int32': 'PCM_S',
'int16': 'PCM_S',
'uint8': 'PCM_U',
}
return ext.upper() if ext in exts else encodings[dtype]
def get_bits_per_sample(ext, dtype):
bits_per_samples = {
'flac': 24,
'mp3': 0,
'vorbis': 0,
}
return bits_per_samples.get(ext, sox_utils.get_bit_depth(dtype))
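# Illustrative expected values, derived from the tables above (assuming
# sox_utils.get_bit_depth("int16") == 16):
#   get_encoding("flac", "int16")        -> "FLAC"
#   get_encoding("wav", "int16")         -> "PCM_S"
#   get_bits_per_sample("mp3", "int16")  -> 0   (lossy formats report no bit depth)
#   get_bits_per_sample("wav", "int16")  -> 16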
|
from unittest.mock import patch
import warnings
import tarfile
import torch
from torchaudio.backend import soundfile_backend
from torchaudio._internal import module_utils as _mod_utils
from torchaudio_unittest.common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoModule,
get_wav_data,
save_wav,
nested_params,
)
from torchaudio_unittest.backend.common import (
get_bits_per_sample,
get_encoding,
)
from .common import skipIfFormatNotSupported, parameterize
if _mod_utils.is_module_available("soundfile"):
import soundfile
@skipIfNoModule("soundfile")
class TestInfo(TempDirMixin, PytorchTestCase):
@parameterize(
["float32", "int32", "int16", "uint8"], [8000, 16000], [1, 2],
)
def test_wav(self, dtype, sample_rate, num_channels):
"""`soundfile_backend.info` can check wav file correctly"""
duration = 1
path = self.get_temp_path("data.wav")
data = get_wav_data(
dtype, num_channels, normalize=False, num_frames=duration * sample_rate
)
save_wav(path, data, sample_rate)
info = soundfile_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == get_bits_per_sample("wav", dtype)
assert info.encoding == get_encoding("wav", dtype)
@parameterize([8000, 16000], [1, 2])
@skipIfFormatNotSupported("FLAC")
def test_flac(self, sample_rate, num_channels):
"""`soundfile_backend.info` can check flac file correctly"""
duration = 1
num_frames = sample_rate * duration
data = torch.randn(num_frames, num_channels).numpy()
path = self.get_temp_path("data.flac")
soundfile.write(path, data, sample_rate)
info = soundfile_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == num_frames
assert info.num_channels == num_channels
assert info.bits_per_sample == 16
assert info.encoding == "FLAC"
@parameterize([8000, 16000], [1, 2])
@skipIfFormatNotSupported("OGG")
def test_ogg(self, sample_rate, num_channels):
"""`soundfile_backend.info` can check ogg file correctly"""
duration = 1
num_frames = sample_rate * duration
data = torch.randn(num_frames, num_channels).numpy()
path = self.get_temp_path("data.ogg")
soundfile.write(path, data, sample_rate)
info = soundfile_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 0
assert info.encoding == "VORBIS"
@nested_params(
[8000, 16000],
[1, 2],
[
('PCM_24', 24),
('PCM_32', 32)
],
)
@skipIfFormatNotSupported("NIST")
def test_sphere(self, sample_rate, num_channels, subtype_and_bit_depth):
"""`soundfile_backend.info` can check sph file correctly"""
duration = 1
num_frames = sample_rate * duration
data = torch.randn(num_frames, num_channels).numpy()
path = self.get_temp_path("data.nist")
subtype, bits_per_sample = subtype_and_bit_depth
soundfile.write(path, data, sample_rate, subtype=subtype)
info = soundfile_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == bits_per_sample
assert info.encoding == "PCM_S"
def test_unknown_subtype_warning(self):
"""soundfile_backend.info issues a warning when the subtype is unknown
        This happens when SoundFile gains support for a new subtype; in that case
        the _SUBTYPE_TO_BITS_PER_SAMPLE dict should be updated.
"""
def _mock_info_func(_):
class MockSoundFileInfo:
samplerate = 8000
frames = 356
channels = 2
subtype = 'UNSEEN_SUBTYPE'
format = 'UNKNOWN'
return MockSoundFileInfo()
with patch("soundfile.info", _mock_info_func):
with warnings.catch_warnings(record=True) as w:
info = soundfile_backend.info("foo")
assert len(w) == 1
assert "UNSEEN_SUBTYPE subtype is unknown to TorchAudio" in str(w[-1].message)
assert info.bits_per_sample == 0
@skipIfNoModule("soundfile")
class TestFileObject(TempDirMixin, PytorchTestCase):
def _test_fileobj(self, ext, subtype, bits_per_sample):
"""Query audio via file-like object works"""
duration = 2
sample_rate = 16000
num_channels = 2
num_frames = sample_rate * duration
path = self.get_temp_path(f'test.{ext}')
data = torch.randn(num_frames, num_channels).numpy()
soundfile.write(path, data, sample_rate, subtype=subtype)
with open(path, 'rb') as fileobj:
info = soundfile_backend.info(fileobj)
assert info.sample_rate == sample_rate
assert info.num_frames == num_frames
assert info.num_channels == num_channels
assert info.bits_per_sample == bits_per_sample
assert info.encoding == "FLAC" if ext == 'flac' else "PCM_S"
def test_fileobj_wav(self):
"""Loading audio via file-like object works"""
self._test_fileobj('wav', 'PCM_16', 16)
@skipIfFormatNotSupported("FLAC")
def test_fileobj_flac(self):
"""Loading audio via file-like object works"""
self._test_fileobj('flac', 'PCM_16', 16)
def _test_tarobj(self, ext, subtype, bits_per_sample):
"""Query compressed audio via file-like object works"""
duration = 2
sample_rate = 16000
num_channels = 2
num_frames = sample_rate * duration
audio_file = f'test.{ext}'
audio_path = self.get_temp_path(audio_file)
archive_path = self.get_temp_path('archive.tar.gz')
data = torch.randn(num_frames, num_channels).numpy()
soundfile.write(audio_path, data, sample_rate, subtype=subtype)
with tarfile.TarFile(archive_path, 'w') as tarobj:
tarobj.add(audio_path, arcname=audio_file)
with tarfile.TarFile(archive_path, 'r') as tarobj:
fileobj = tarobj.extractfile(audio_file)
info = soundfile_backend.info(fileobj)
assert info.sample_rate == sample_rate
assert info.num_frames == num_frames
assert info.num_channels == num_channels
assert info.bits_per_sample == bits_per_sample
assert info.encoding == "FLAC" if ext == 'flac' else "PCM_S"
def test_tarobj_wav(self):
"""Query compressed audio via file-like object works"""
self._test_tarobj('wav', 'PCM_16', 16)
@skipIfFormatNotSupported("FLAC")
def test_tarobj_flac(self):
"""Query compressed audio via file-like object works"""
self._test_tarobj('flac', 'PCM_16', 16)
|
import os
import tarfile
from unittest.mock import patch
import torch
from torchaudio._internal import module_utils as _mod_utils
from torchaudio.backend import soundfile_backend
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoModule,
get_wav_data,
normalize_wav,
load_wav,
save_wav,
)
from .common import (
parameterize,
dtype2subtype,
skipIfFormatNotSupported,
)
if _mod_utils.is_module_available("soundfile"):
import soundfile
def _get_mock_path(
ext: str, dtype: str, sample_rate: int, num_channels: int, num_frames: int,
):
return f"{dtype}_{sample_rate}_{num_channels}_{num_frames}.{ext}"
def _get_mock_params(path: str):
filename, ext = path.split(".")
parts = filename.split("_")
return {
"ext": ext,
"dtype": parts[0],
"sample_rate": int(parts[1]),
"num_channels": int(parts[2]),
"num_frames": int(parts[3]),
}
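# The two helpers above round-trip, e.g.:
#   _get_mock_path("wav", "int16", 8000, 2, 16000)      -> "int16_8000_2_16000.wav"
#   _get_mock_params("int16_8000_2_16000.wav")["dtype"] -> "int16"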
class SoundFileMock:
def __init__(self, path, mode):
assert mode == "r"
self.path = path
self._params = _get_mock_params(path)
self._start = None
@property
def samplerate(self):
return self._params["sample_rate"]
@property
def format(self):
if self._params["ext"] == "wav":
return "WAV"
if self._params["ext"] == "flac":
return "FLAC"
if self._params["ext"] == "ogg":
return "OGG"
if self._params["ext"] in ["sph", "nis", "nist"]:
return "NIST"
@property
def subtype(self):
if self._params["ext"] == "ogg":
return "VORBIS"
return dtype2subtype(self._params["dtype"])
def _prepare_read(self, start, stop, frames):
assert stop is None
self._start = start
return frames
def read(self, frames, dtype, always_2d):
assert always_2d
data = get_wav_data(
dtype,
self._params["num_channels"],
normalize=False,
num_frames=self._params["num_frames"],
channels_first=False,
).numpy()
return data[self._start:self._start + frames]
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
class MockedLoadTest(PytorchTestCase):
def assert_dtype(
self, ext, dtype, sample_rate, num_channels, normalize, channels_first
):
"""When format is WAV or NIST, normalize=False will return the native dtype Tensor, otherwise float32"""
num_frames = 3 * sample_rate
path = _get_mock_path(ext, dtype, sample_rate, num_channels, num_frames)
expected_dtype = (
torch.float32
if normalize or ext not in ["wav", "nist"]
else getattr(torch, dtype)
)
with patch("soundfile.SoundFile", SoundFileMock):
found, sr = soundfile_backend.load(
path, normalize=normalize, channels_first=channels_first
)
assert found.dtype == expected_dtype
assert sample_rate == sr
@parameterize(
["uint8", "int16", "int32", "float32", "float64"],
[8000, 16000],
[1, 2],
[True, False],
[True, False],
)
def test_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""Returns native dtype when normalize=False else float32"""
self.assert_dtype(
"wav", dtype, sample_rate, num_channels, normalize, channels_first
)
@parameterize(
["int8", "int16", "int32"], [8000, 16000], [1, 2], [True, False], [True, False],
)
def test_sphere(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""Returns float32 always"""
self.assert_dtype(
"sph", dtype, sample_rate, num_channels, normalize, channels_first
)
@parameterize([8000, 16000], [1, 2], [True, False], [True, False])
def test_ogg(self, sample_rate, num_channels, normalize, channels_first):
"""Returns float32 always"""
self.assert_dtype(
"ogg", "int16", sample_rate, num_channels, normalize, channels_first
)
@parameterize([8000, 16000], [1, 2], [True, False], [True, False])
def test_flac(self, sample_rate, num_channels, normalize, channels_first):
"""`soundfile_backend.load` can load ogg format."""
self.assert_dtype(
"flac", "int16", sample_rate, num_channels, normalize, channels_first
)
class LoadTestBase(TempDirMixin, PytorchTestCase):
def assert_wav(
self,
dtype,
sample_rate,
num_channels,
normalize,
channels_first=True,
duration=1,
):
"""`soundfile_backend.load` can load wav format correctly.
        Wav data loaded with the soundfile backend should match the data loaded with scipy.
"""
path = self.get_temp_path("reference.wav")
num_frames = duration * sample_rate
data = get_wav_data(
dtype,
num_channels,
normalize=normalize,
num_frames=num_frames,
channels_first=channels_first,
)
save_wav(path, data, sample_rate, channels_first=channels_first)
expected = load_wav(path, normalize=normalize, channels_first=channels_first)[0]
data, sr = soundfile_backend.load(
path, normalize=normalize, channels_first=channels_first
)
assert sr == sample_rate
self.assertEqual(data, expected)
def assert_sphere(
self, dtype, sample_rate, num_channels, channels_first=True, duration=1,
):
"""`soundfile_backend.load` can load SPHERE format correctly."""
path = self.get_temp_path("reference.sph")
num_frames = duration * sample_rate
raw = get_wav_data(
dtype,
num_channels,
num_frames=num_frames,
normalize=False,
channels_first=False,
)
soundfile.write(
path, raw, sample_rate, subtype=dtype2subtype(dtype), format="NIST"
)
expected = normalize_wav(raw.t() if channels_first else raw)
data, sr = soundfile_backend.load(path, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected, atol=1e-4, rtol=1e-8)
def assert_flac(
self, dtype, sample_rate, num_channels, channels_first=True, duration=1,
):
"""`soundfile_backend.load` can load FLAC format correctly."""
path = self.get_temp_path("reference.flac")
num_frames = duration * sample_rate
raw = get_wav_data(
dtype,
num_channels,
num_frames=num_frames,
normalize=False,
channels_first=False,
)
soundfile.write(path, raw, sample_rate)
expected = normalize_wav(raw.t() if channels_first else raw)
data, sr = soundfile_backend.load(path, channels_first=channels_first)
assert sr == sample_rate
self.assertEqual(data, expected, atol=1e-4, rtol=1e-8)
@skipIfNoModule("soundfile")
class TestLoad(LoadTestBase):
"""Test the correctness of `soundfile_backend.load` for various formats"""
@parameterize(
["float32", "int32", "int16"],
[8000, 16000],
[1, 2],
[False, True],
[False, True],
)
def test_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""`soundfile_backend.load` can load wav format correctly."""
self.assert_wav(dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(
["int16"], [16000], [2], [False],
)
def test_wav_large(self, dtype, sample_rate, num_channels, normalize):
"""`soundfile_backend.load` can load large wav file correctly."""
two_hours = 2 * 60 * 60
self.assert_wav(dtype, sample_rate, num_channels, normalize, duration=two_hours)
@parameterize(["float32", "int32", "int16"], [4, 8, 16, 32], [False, True])
def test_multiple_channels(self, dtype, num_channels, channels_first):
"""`soundfile_backend.load` can load wav file with more than 2 channels."""
sample_rate = 8000
normalize = False
self.assert_wav(dtype, sample_rate, num_channels, normalize, channels_first)
@parameterize(["int32", "int16"], [8000, 16000], [1, 2], [False, True])
@skipIfFormatNotSupported("NIST")
def test_sphere(self, dtype, sample_rate, num_channels, channels_first):
"""`soundfile_backend.load` can load sphere format correctly."""
self.assert_sphere(dtype, sample_rate, num_channels, channels_first)
@parameterize(["int32", "int16"], [8000, 16000], [1, 2], [False, True])
@skipIfFormatNotSupported("FLAC")
def test_flac(self, dtype, sample_rate, num_channels, channels_first):
"""`soundfile_backend.load` can load flac format correctly."""
self.assert_flac(dtype, sample_rate, num_channels, channels_first)
@skipIfNoModule("soundfile")
class TestLoadFormat(TempDirMixin, PytorchTestCase):
"""Given `format` parameter, `so.load` can load files without extension"""
original = None
path = None
def _make_file(self, format_):
sample_rate = 8000
path_with_ext = self.get_temp_path(f'test.{format_}')
data = get_wav_data('float32', num_channels=2).numpy().T
soundfile.write(path_with_ext, data, sample_rate)
expected = soundfile.read(path_with_ext, dtype='float32')[0].T
path = os.path.splitext(path_with_ext)[0]
os.rename(path_with_ext, path)
return path, expected
def _test_format(self, format_):
"""Providing format allows to read file without extension"""
path, expected = self._make_file(format_)
found, _ = soundfile_backend.load(path)
self.assertEqual(found, expected)
@parameterized.expand([
('WAV', ), ('wav', ),
])
def test_wav(self, format_):
self._test_format(format_)
@parameterized.expand([
('FLAC', ), ('flac',),
])
@skipIfFormatNotSupported("FLAC")
def test_flac(self, format_):
self._test_format(format_)
@skipIfNoModule("soundfile")
class TestFileObject(TempDirMixin, PytorchTestCase):
def _test_fileobj(self, ext):
"""Loading audio via file-like object works"""
sample_rate = 16000
path = self.get_temp_path(f'test.{ext}')
data = get_wav_data('float32', num_channels=2).numpy().T
soundfile.write(path, data, sample_rate)
expected = soundfile.read(path, dtype='float32')[0].T
with open(path, 'rb') as fileobj:
found, sr = soundfile_backend.load(fileobj)
assert sr == sample_rate
self.assertEqual(expected, found)
def test_fileobj_wav(self):
"""Loading audio via file-like object works"""
self._test_fileobj('wav')
@skipIfFormatNotSupported("FLAC")
def test_fileobj_flac(self):
"""Loading audio via file-like object works"""
self._test_fileobj('flac')
def _test_tarfile(self, ext):
"""Loading audio via file-like object works"""
sample_rate = 16000
audio_file = f'test.{ext}'
audio_path = self.get_temp_path(audio_file)
archive_path = self.get_temp_path('archive.tar.gz')
data = get_wav_data('float32', num_channels=2).numpy().T
soundfile.write(audio_path, data, sample_rate)
expected = soundfile.read(audio_path, dtype='float32')[0].T
with tarfile.TarFile(archive_path, 'w') as tarobj:
tarobj.add(audio_path, arcname=audio_file)
with tarfile.TarFile(archive_path, 'r') as tarobj:
fileobj = tarobj.extractfile(audio_file)
found, sr = soundfile_backend.load(fileobj)
assert sr == sample_rate
self.assertEqual(expected, found)
def test_tarfile_wav(self):
"""Loading audio via file-like object works"""
self._test_tarfile('wav')
@skipIfFormatNotSupported("FLAC")
def test_tarfile_flac(self):
"""Loading audio via file-like object works"""
self._test_tarfile('flac')
|
import io
from unittest.mock import patch
from torchaudio._internal import module_utils as _mod_utils
from torchaudio.backend import soundfile_backend
from torchaudio_unittest.common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoModule,
get_wav_data,
load_wav,
nested_params,
)
from .common import (
fetch_wav_subtype,
parameterize,
skipIfFormatNotSupported,
)
if _mod_utils.is_module_available("soundfile"):
import soundfile
class MockedSaveTest(PytorchTestCase):
@nested_params(
["float32", "int32", "int16", "uint8"],
[8000, 16000],
[1, 2],
[False, True],
[
(None, None),
('PCM_U', None),
('PCM_U', 8),
('PCM_S', None),
('PCM_S', 16),
('PCM_S', 32),
('PCM_F', None),
('PCM_F', 32),
('PCM_F', 64),
('ULAW', None),
('ULAW', 8),
('ALAW', None),
('ALAW', 8),
],
)
@patch("soundfile.write")
def test_wav(self, dtype, sample_rate, num_channels, channels_first,
enc_params, mocked_write):
"""soundfile_backend.save passes correct subtype to soundfile.write when WAV"""
filepath = "foo.wav"
input_tensor = get_wav_data(
dtype,
num_channels,
num_frames=3 * sample_rate,
normalize=dtype == "float32",
channels_first=channels_first,
).t()
encoding, bits_per_sample = enc_params
soundfile_backend.save(
filepath, input_tensor, sample_rate, channels_first=channels_first,
encoding=encoding, bits_per_sample=bits_per_sample
)
        # on Python 3.8+, `call_args.kwargs` is more descriptive
args = mocked_write.call_args[1]
assert args["file"] == filepath
assert args["samplerate"] == sample_rate
assert args["subtype"] == fetch_wav_subtype(
dtype, encoding, bits_per_sample)
assert args["format"] is None
self.assertEqual(
args["data"], input_tensor.t() if channels_first else input_tensor
)
@patch("soundfile.write")
def assert_non_wav(
self, fmt, dtype, sample_rate, num_channels, channels_first, mocked_write,
encoding=None, bits_per_sample=None,
):
"""soundfile_backend.save passes correct subtype and format to soundfile.write when SPHERE"""
filepath = f"foo.{fmt}"
input_tensor = get_wav_data(
dtype,
num_channels,
num_frames=3 * sample_rate,
normalize=False,
channels_first=channels_first,
).t()
expected_data = input_tensor.t() if channels_first else input_tensor
soundfile_backend.save(
filepath, input_tensor, sample_rate, channels_first,
encoding=encoding, bits_per_sample=bits_per_sample,
)
        # on Python 3.8+, `call_args.kwargs` is more descriptive
args = mocked_write.call_args[1]
assert args["file"] == filepath
assert args["samplerate"] == sample_rate
if fmt in ["sph", "nist", "nis"]:
assert args["format"] == "NIST"
else:
assert args["format"] is None
self.assertEqual(args["data"], expected_data)
@nested_params(
["sph", "nist", "nis"],
["int32", "int16"],
[8000, 16000],
[1, 2],
[False, True],
[
('PCM_S', 8),
('PCM_S', 16),
('PCM_S', 24),
('PCM_S', 32),
('ULAW', 8),
('ALAW', 8),
('ALAW', 16),
('ALAW', 24),
('ALAW', 32),
],
)
def test_sph(self, fmt, dtype, sample_rate, num_channels, channels_first, enc_params):
"""soundfile_backend.save passes default format and subtype (None-s) to
soundfile.write when not WAV"""
encoding, bits_per_sample = enc_params
self.assert_non_wav(fmt, dtype, sample_rate, num_channels,
channels_first, encoding=encoding,
bits_per_sample=bits_per_sample)
@parameterize(
["int32", "int16"], [8000, 16000], [1, 2], [False, True],
[8, 16, 24],
)
def test_flac(self, dtype, sample_rate, num_channels,
channels_first, bits_per_sample):
"""soundfile_backend.save passes default format and subtype (None-s) to
soundfile.write when not WAV"""
self.assert_non_wav("flac", dtype, sample_rate, num_channels,
channels_first, bits_per_sample=bits_per_sample)
@parameterize(
["int32", "int16"], [8000, 16000], [1, 2], [False, True],
)
def test_ogg(self, dtype, sample_rate, num_channels, channels_first):
"""soundfile_backend.save passes default format and subtype (None-s) to
soundfile.write when not WAV"""
self.assert_non_wav("ogg", dtype, sample_rate, num_channels, channels_first)
@skipIfNoModule("soundfile")
class SaveTestBase(TempDirMixin, PytorchTestCase):
def assert_wav(self, dtype, sample_rate, num_channels, num_frames):
"""`soundfile_backend.save` can save wav format."""
path = self.get_temp_path("data.wav")
expected = get_wav_data(
dtype, num_channels, num_frames=num_frames, normalize=False
)
soundfile_backend.save(path, expected, sample_rate)
found, sr = load_wav(path, normalize=False)
assert sample_rate == sr
self.assertEqual(found, expected)
def _assert_non_wav(self, fmt, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save non-wav format.
        Due to precision mismatch and the lack of an alternative way to decode the
        resulting files without using soundfile, only the metadata are validated.
"""
num_frames = sample_rate * 3
path = self.get_temp_path(f"data.{fmt}")
expected = get_wav_data(
dtype, num_channels, num_frames=num_frames, normalize=False
)
soundfile_backend.save(path, expected, sample_rate)
sinfo = soundfile.info(path)
assert sinfo.format == fmt.upper()
assert sinfo.frames == num_frames
assert sinfo.channels == num_channels
assert sinfo.samplerate == sample_rate
def assert_flac(self, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save flac format."""
self._assert_non_wav("flac", dtype, sample_rate, num_channels)
def assert_sphere(self, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save sph format."""
self._assert_non_wav("nist", dtype, sample_rate, num_channels)
def assert_ogg(self, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save ogg format.
As we cannot inspect the OGG format (it's lossy), we only check the metadata.
"""
self._assert_non_wav("ogg", dtype, sample_rate, num_channels)
@skipIfNoModule("soundfile")
class TestSave(SaveTestBase):
@parameterize(
["float32", "int32", "int16"], [8000, 16000], [1, 2],
)
def test_wav(self, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save wav format."""
self.assert_wav(dtype, sample_rate, num_channels, num_frames=None)
@parameterize(
["float32", "int32", "int16"], [4, 8, 16, 32],
)
def test_multiple_channels(self, dtype, num_channels):
"""`soundfile_backend.save` can save wav with more than 2 channels."""
sample_rate = 8000
self.assert_wav(dtype, sample_rate, num_channels, num_frames=None)
@parameterize(
["int32", "int16"], [8000, 16000], [1, 2],
)
@skipIfFormatNotSupported("NIST")
def test_sphere(self, dtype, sample_rate, num_channels):
"""`soundfile_backend.save` can save sph format."""
self.assert_sphere(dtype, sample_rate, num_channels)
@parameterize(
[8000, 16000], [1, 2],
)
@skipIfFormatNotSupported("FLAC")
def test_flac(self, sample_rate, num_channels):
"""`soundfile_backend.save` can save flac format."""
self.assert_flac("float32", sample_rate, num_channels)
@parameterize(
[8000, 16000], [1, 2],
)
@skipIfFormatNotSupported("OGG")
def test_ogg(self, sample_rate, num_channels):
"""`soundfile_backend.save` can save ogg/vorbis format."""
self.assert_ogg("float32", sample_rate, num_channels)
@skipIfNoModule("soundfile")
class TestSaveParams(TempDirMixin, PytorchTestCase):
"""Test the correctness of optional parameters of `soundfile_backend.save`"""
@parameterize([True, False])
def test_channels_first(self, channels_first):
"""channels_first swaps axes"""
path = self.get_temp_path("data.wav")
data = get_wav_data("int32", 2, channels_first=channels_first)
soundfile_backend.save(path, data, 8000, channels_first=channels_first)
found = load_wav(path)[0]
expected = data if channels_first else data.transpose(1, 0)
self.assertEqual(found, expected, atol=1e-4, rtol=1e-8)
@skipIfNoModule("soundfile")
class TestFileObject(TempDirMixin, PytorchTestCase):
def _test_fileobj(self, ext):
"""Saving audio to file-like object works"""
sample_rate = 16000
path = self.get_temp_path(f'test.{ext}')
subtype = 'FLOAT' if ext == 'wav' else None
data = get_wav_data('float32', num_channels=2)
soundfile.write(path, data.numpy().T, sample_rate, subtype=subtype)
expected = soundfile.read(path, dtype='float32')[0]
fileobj = io.BytesIO()
soundfile_backend.save(fileobj, data, sample_rate, format=ext)
fileobj.seek(0)
found, sr = soundfile.read(fileobj, dtype='float32')
assert sr == sample_rate
self.assertEqual(expected, found, atol=1e-4, rtol=1e-8)
def test_fileobj_wav(self):
"""Saving audio via file-like object works"""
self._test_fileobj('wav')
@skipIfFormatNotSupported("FLAC")
def test_fileobj_flac(self):
"""Saving audio via file-like object works"""
self._test_fileobj('flac')
@skipIfFormatNotSupported("NIST")
def test_fileobj_nist(self):
"""Saving audio via file-like object works"""
self._test_fileobj('NIST')
@skipIfFormatNotSupported("OGG")
def test_fileobj_ogg(self):
"""Saving audio via file-like object works"""
self._test_fileobj('OGG')
|
import itertools
from unittest import skipIf
from parameterized import parameterized
from torchaudio._internal.module_utils import is_module_available
def name_func(func, _, params):
return f'{func.__name__}_{"_".join(str(arg) for arg in params.args)}'
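# e.g. a test method `test_wav` run with args ("int16", 8000) is reported as "test_wav_int16_8000".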
def dtype2subtype(dtype):
return {
"float64": "DOUBLE",
"float32": "FLOAT",
"int32": "PCM_32",
"int16": "PCM_16",
"uint8": "PCM_U8",
"int8": "PCM_S8",
}[dtype]
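# Maps a torch dtype name to the corresponding libsndfile subtype,
# e.g. dtype2subtype("int16") -> "PCM_16"; an unknown dtype raises KeyError.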
def skipIfFormatNotSupported(fmt):
fmts = []
if is_module_available("soundfile"):
import soundfile
fmts = soundfile.available_formats()
return skipIf(fmt not in fmts, f'"{fmt}" is not supported by soundfile')
return skipIf(True, '"soundfile" not available.')
def parameterize(*params):
return parameterized.expand(list(itertools.product(*params)), name_func=name_func)
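# parameterize takes the Cartesian product of its argument lists; e.g.
# parameterize(["int16"], [8000, 16000]) generates the cases ("int16", 8000)
# and ("int16", 16000), named via name_func above.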
def fetch_wav_subtype(dtype, encoding, bits_per_sample):
subtype = {
(None, None): dtype2subtype(dtype),
(None, 8): "PCM_U8",
('PCM_U', None): "PCM_U8",
('PCM_U', 8): "PCM_U8",
('PCM_S', None): "PCM_32",
('PCM_S', 16): "PCM_16",
('PCM_S', 32): "PCM_32",
('PCM_F', None): "FLOAT",
('PCM_F', 32): "FLOAT",
('PCM_F', 64): "DOUBLE",
('ULAW', None): "ULAW",
('ULAW', 8): "ULAW",
('ALAW', None): "ALAW",
('ALAW', 8): "ALAW",
}.get((encoding, bits_per_sample))
if subtype:
return subtype
raise ValueError(
f"wav does not support ({encoding}, {bits_per_sample}).")
|
import io
import itertools
import unittest
from torchaudio.utils import sox_utils
from torchaudio.backend import sox_io_backend
from torchaudio._internal.module_utils import is_sox_available
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoSox,
get_wav_data,
)
from .common import name_func
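# Whether the linked libsox can handle MP3 depends on how it was built (it may
# lack an MP3 encoder/decoder), so probe both the read and write format lists.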
skipIfNoMP3 = unittest.skipIf(
not is_sox_available() or
'mp3' not in sox_utils.list_read_formats() or
'mp3' not in sox_utils.list_write_formats(),
'"sox_io" backend does not support MP3')
@skipIfNoSox
class SmokeTest(TempDirMixin, TorchaudioTestCase):
"""Run smoke test on various audio format
The purpose of this test suite is to verify that sox_io_backend functionalities do not exhibit
abnormal behaviors.
This test suite should be able to run without any additional tools (such as sox command),
however without such tools, the correctness of each function cannot be verified.
"""
def run_smoke_test(self, ext, sample_rate, num_channels, *, compression=None, dtype='float32'):
duration = 1
num_frames = sample_rate * duration
path = self.get_temp_path(f'test.{ext}')
original = get_wav_data(dtype, num_channels, normalize=False, num_frames=num_frames)
# 1. run save
sox_io_backend.save(path, original, sample_rate, compression=compression)
# 2. run info
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_channels == num_channels
# 3. run load
loaded, sr = sox_io_backend.load(path, normalize=False)
assert sr == sample_rate
assert loaded.shape[0] == num_channels
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_wav(self, dtype, sample_rate, num_channels):
"""Run smoke test on wav format"""
self.run_smoke_test('wav', sample_rate, num_channels, dtype=dtype)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[-4.2, -0.2, 0, 0.2, 96, 128, 160, 192, 224, 256, 320],
)))
@skipIfNoMP3
def test_mp3(self, sample_rate, num_channels, bit_rate):
"""Run smoke test on mp3 format"""
self.run_smoke_test('mp3', sample_rate, num_channels, compression=bit_rate)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[-1, 0, 1, 2, 3, 3.6, 5, 10],
)))
def test_vorbis(self, sample_rate, num_channels, quality_level):
"""Run smoke test on vorbis format"""
self.run_smoke_test('vorbis', sample_rate, num_channels, compression=quality_level)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_flac(self, sample_rate, num_channels, compression_level):
"""Run smoke test on flac format"""
self.run_smoke_test('flac', sample_rate, num_channels, compression=compression_level)
@skipIfNoSox
class SmokeTestFileObj(TorchaudioTestCase):
"""Run smoke test on various audio format
The purpose of this test suite is to verify that sox_io_backend functionalities do not exhibit
abnormal behaviors.
This test suite should be able to run without any additional tools (such as sox command),
however without such tools, the correctness of each function cannot be verified.
"""
def run_smoke_test(self, ext, sample_rate, num_channels, *, compression=None, dtype='float32'):
duration = 1
num_frames = sample_rate * duration
original = get_wav_data(dtype, num_channels, normalize=False, num_frames=num_frames)
fileobj = io.BytesIO()
# 1. run save
sox_io_backend.save(fileobj, original, sample_rate, compression=compression, format=ext)
# 2. run info
fileobj.seek(0)
info = sox_io_backend.info(fileobj, format=ext)
assert info.sample_rate == sample_rate
assert info.num_channels == num_channels
# 3. run load
fileobj.seek(0)
loaded, sr = sox_io_backend.load(fileobj, normalize=False, format=ext)
assert sr == sample_rate
assert loaded.shape[0] == num_channels
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_wav(self, dtype, sample_rate, num_channels):
"""Run smoke test on wav format"""
self.run_smoke_test('wav', sample_rate, num_channels, dtype=dtype)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[-4.2, -0.2, 0, 0.2, 96, 128, 160, 192, 224, 256, 320],
)))
@skipIfNoMP3
def test_mp3(self, sample_rate, num_channels, bit_rate):
"""Run smoke test on mp3 format"""
self.run_smoke_test('mp3', sample_rate, num_channels, compression=bit_rate)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[-1, 0, 1, 2, 3, 3.6, 5, 10],
)))
def test_vorbis(self, sample_rate, num_channels, quality_level):
"""Run smoke test on vorbis format"""
self.run_smoke_test('vorbis', sample_rate, num_channels, compression=quality_level)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_flac(self, sample_rate, num_channels, compression_level):
"""Run smoke test on flac format"""
self.run_smoke_test('flac', sample_rate, num_channels, compression=compression_level)
|
from contextlib import contextmanager
import io
import os
import itertools
import tarfile
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from torchaudio.utils.sox_utils import get_buffer_size, set_buffer_size
from torchaudio._internal import module_utils as _mod_utils
from torchaudio_unittest.backend.common import (
get_bits_per_sample,
get_encoding,
)
from torchaudio_unittest.common_utils import (
TempDirMixin,
HttpServerMixin,
PytorchTestCase,
skipIfNoExec,
skipIfNoModule,
skipIfNoSox,
get_asset_path,
get_wav_data,
save_wav,
sox_utils,
)
from .common import (
name_func,
)
if _mod_utils.is_module_available("requests"):
import requests
@skipIfNoExec('sox')
@skipIfNoSox
class TestInfo(TempDirMixin, PytorchTestCase):
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_wav(self, dtype, sample_rate, num_channels):
"""`sox_io_backend.info` can check wav file correctly"""
duration = 1
path = self.get_temp_path('data.wav')
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)
save_wav(path, data, sample_rate)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)
assert info.encoding == get_encoding('wav', dtype)
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[4, 8, 16, 32],
)), name_func=name_func)
def test_wav_multiple_channels(self, dtype, sample_rate, num_channels):
"""`sox_io_backend.info` can check wav file with channels more than 2 correctly"""
duration = 1
path = self.get_temp_path('data.wav')
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)
save_wav(path, data, sample_rate)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)
assert info.encoding == get_encoding('wav', dtype)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[96, 128, 160, 192, 224, 256, 320],
)), name_func=name_func)
def test_mp3(self, sample_rate, num_channels, bit_rate):
"""`sox_io_backend.info` can check mp3 file correctly"""
duration = 1
path = self.get_temp_path('data.mp3')
sox_utils.gen_audio_file(
path, sample_rate, num_channels,
compression=bit_rate, duration=duration,
)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
# mp3 does not preserve the number of samples
# assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 0 # bit_per_sample is irrelevant for compressed formats
assert info.encoding == "MP3"
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_flac(self, sample_rate, num_channels, compression_level):
"""`sox_io_backend.info` can check flac file correctly"""
duration = 1
path = self.get_temp_path('data.flac')
sox_utils.gen_audio_file(
path, sample_rate, num_channels,
compression=compression_level, duration=duration,
)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 24 # FLAC standard
assert info.encoding == "FLAC"
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[-1, 0, 1, 2, 3, 3.6, 5, 10],
)), name_func=name_func)
def test_vorbis(self, sample_rate, num_channels, quality_level):
"""`sox_io_backend.info` can check vorbis file correctly"""
duration = 1
path = self.get_temp_path('data.vorbis')
sox_utils.gen_audio_file(
path, sample_rate, num_channels,
compression=quality_level, duration=duration,
)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 0 # bit_per_sample is irrelevant for compressed formats
assert info.encoding == "VORBIS"
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[16, 32],
)), name_func=name_func)
def test_sphere(self, sample_rate, num_channels, bits_per_sample):
"""`sox_io_backend.info` can check sph file correctly"""
duration = 1
path = self.get_temp_path('data.sph')
sox_utils.gen_audio_file(
path, sample_rate, num_channels, duration=duration,
bit_depth=bits_per_sample)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == bits_per_sample
assert info.encoding == "PCM_S"
@parameterized.expand(list(itertools.product(
['int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_amb(self, dtype, sample_rate, num_channels):
"""`sox_io_backend.info` can check amb file correctly"""
duration = 1
path = self.get_temp_path('data.amb')
bits_per_sample = sox_utils.get_bit_depth(dtype)
sox_utils.gen_audio_file(
path, sample_rate, num_channels,
bit_depth=bits_per_sample, duration=duration)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == bits_per_sample
assert info.encoding == get_encoding("amb", dtype)
def test_amr_nb(self):
"""`sox_io_backend.info` can check amr-nb file correctly"""
duration = 1
num_channels = 1
sample_rate = 8000
path = self.get_temp_path('data.amr-nb')
sox_utils.gen_audio_file(
path, sample_rate=sample_rate, num_channels=num_channels, bit_depth=16,
duration=duration)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 0
assert info.encoding == "AMR_NB"
def test_ulaw(self):
"""`sox_io_backend.info` can check ulaw file correctly"""
duration = 1
num_channels = 1
sample_rate = 8000
path = self.get_temp_path('data.wav')
sox_utils.gen_audio_file(
path, sample_rate=sample_rate, num_channels=num_channels,
bit_depth=8, encoding='u-law',
duration=duration)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 8
assert info.encoding == "ULAW"
def test_alaw(self):
"""`sox_io_backend.info` can check alaw file correctly"""
duration = 1
num_channels = 1
sample_rate = 8000
path = self.get_temp_path('data.wav')
sox_utils.gen_audio_file(
path, sample_rate=sample_rate, num_channels=num_channels,
bit_depth=8, encoding='a-law',
duration=duration)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 8
assert info.encoding == "ALAW"
def test_gsm(self):
"""`sox_io_backend.info` can check gsm file correctly"""
duration = 1
num_channels = 1
sample_rate = 8000
path = self.get_temp_path('data.gsm')
sox_utils.gen_audio_file(
path, sample_rate=sample_rate, num_channels=num_channels,
duration=duration)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_channels == num_channels
assert info.bits_per_sample == 0
assert info.encoding == "GSM"
def test_htk(self):
"""`sox_io_backend.info` can check HTK file correctly"""
duration = 1
num_channels = 1
sample_rate = 8000
path = self.get_temp_path('data.htk')
sox_utils.gen_audio_file(
path, sample_rate=sample_rate, num_channels=num_channels,
bit_depth=16, duration=duration)
info = sox_io_backend.info(path)
assert info.sample_rate == sample_rate
assert info.num_frames == sample_rate * duration
assert info.num_channels == num_channels
assert info.bits_per_sample == 16
assert info.encoding == "PCM_S"
@skipIfNoSox
class TestInfoOpus(PytorchTestCase):
@parameterized.expand(list(itertools.product(
['96k'],
[1, 2],
[0, 5, 10],
)), name_func=name_func)
def test_opus(self, bitrate, num_channels, compression_level):
"""`sox_io_backend.info` can check opus file correcty"""
path = get_asset_path('io', f'{bitrate}_{compression_level}_{num_channels}ch.opus')
info = sox_io_backend.info(path)
assert info.sample_rate == 48000
assert info.num_frames == 32768
assert info.num_channels == num_channels
assert info.bits_per_sample == 0 # bit_per_sample is irrelevant for compressed formats
assert info.encoding == "OPUS"
@skipIfNoSox
class TestLoadWithoutExtension(PytorchTestCase):
def test_mp3(self):
"""Providing `format` allows to read mp3 without extension
libsox does not check header for mp3
https://github.com/pytorch/audio/issues/1040
The file was generated with the following command
ffmpeg -f lavfi -i "sine=frequency=1000:duration=5" -ar 16000 -f mp3 test_noext
"""
path = get_asset_path("mp3_without_ext")
sinfo = sox_io_backend.info(path, format="mp3")
assert sinfo.sample_rate == 16000
assert sinfo.num_frames == 81216
assert sinfo.num_channels == 1
assert sinfo.bits_per_sample == 0 # bit_per_sample is irrelevant for compressed formats
assert sinfo.encoding == "MP3"
class FileObjTestBase(TempDirMixin):
def _gen_file(self, ext, dtype, sample_rate, num_channels, num_frames, *, comments=None):
path = self.get_temp_path(f'test.{ext}')
bit_depth = sox_utils.get_bit_depth(dtype)
duration = num_frames / sample_rate
comment_file = self._gen_comment_file(comments) if comments else None
sox_utils.gen_audio_file(
path, sample_rate, num_channels=num_channels,
encoding=sox_utils.get_encoding(dtype),
bit_depth=bit_depth,
duration=duration,
comment_file=comment_file,
)
return path
def _gen_comment_file(self, comments):
comment_path = self.get_temp_path("comment.txt")
with open(comment_path, "w") as file_:
file_.writelines(comments)
return comment_path
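    # The comment file is handed to sox_utils.gen_audio_file, which presumably embeds
    # its contents as metadata (e.g. vorbis comments); tests below use this to inflate
    # a file's header beyond the default buffer size.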
@skipIfNoSox
@skipIfNoExec('sox')
class TestFileObject(FileObjTestBase, PytorchTestCase):
def _query_fileobj(self, ext, dtype, sample_rate, num_channels, num_frames, *, comments=None):
path = self._gen_file(ext, dtype, sample_rate, num_channels, num_frames, comments=comments)
format_ = ext if ext in ['mp3'] else None
with open(path, 'rb') as fileobj:
return sox_io_backend.info(fileobj, format_)
def _query_bytesio(self, ext, dtype, sample_rate, num_channels, num_frames):
path = self._gen_file(ext, dtype, sample_rate, num_channels, num_frames)
format_ = ext if ext in ['mp3'] else None
with open(path, 'rb') as file_:
fileobj = io.BytesIO(file_.read())
return sox_io_backend.info(fileobj, format_)
def _query_tarfile(self, ext, dtype, sample_rate, num_channels, num_frames):
audio_path = self._gen_file(ext, dtype, sample_rate, num_channels, num_frames)
audio_file = os.path.basename(audio_path)
archive_path = self.get_temp_path('archive.tar.gz')
with tarfile.TarFile(archive_path, 'w') as tarobj:
tarobj.add(audio_path, arcname=audio_file)
format_ = ext if ext in ['mp3'] else None
with tarfile.TarFile(archive_path, 'r') as tarobj:
fileobj = tarobj.extractfile(audio_file)
return sox_io_backend.info(fileobj, format_)
@contextmanager
def _set_buffer_size(self, buffer_size):
try:
original_buffer_size = get_buffer_size()
set_buffer_size(buffer_size)
yield
finally:
set_buffer_size(original_buffer_size)
@parameterized.expand([
('wav', "float32"),
('wav', "int32"),
('wav', "int16"),
('wav', "uint8"),
('mp3', "float32"),
('flac', "float32"),
('vorbis', "float32"),
('amb', "int16"),
])
def test_fileobj(self, ext, dtype):
"""Querying audio via file object works"""
sample_rate = 16000
num_frames = 3 * sample_rate
num_channels = 2
sinfo = self._query_fileobj(ext, dtype, sample_rate, num_channels, num_frames)
bits_per_sample = get_bits_per_sample(ext, dtype)
num_frames = 0 if ext in ['mp3', 'vorbis'] else num_frames
assert sinfo.sample_rate == sample_rate
assert sinfo.num_channels == num_channels
assert sinfo.num_frames == num_frames
assert sinfo.bits_per_sample == bits_per_sample
assert sinfo.encoding == get_encoding(ext, dtype)
@parameterized.expand([
('vorbis', "float32"),
])
def test_fileobj_large_header(self, ext, dtype):
"""
For audio file with header size exceeding default buffer size:
- Querying audio via file object without enlarging buffer size fails.
- Querying audio via file object after enlarging buffer size succeeds.
"""
sample_rate = 16000
num_frames = 3 * sample_rate
num_channels = 2
comments = "metadata=" + " ".join(["value" for _ in range(1000)])
with self.assertRaisesRegex(RuntimeError, "^Error loading audio file:"):
sinfo = self._query_fileobj(ext, dtype, sample_rate, num_channels, num_frames, comments=comments)
with self._set_buffer_size(16384):
sinfo = self._query_fileobj(ext, dtype, sample_rate, num_channels, num_frames, comments=comments)
bits_per_sample = get_bits_per_sample(ext, dtype)
num_frames = 0 if ext in ['mp3', 'vorbis'] else num_frames
assert sinfo.sample_rate == sample_rate
assert sinfo.num_channels == num_channels
assert sinfo.num_frames == num_frames
assert sinfo.bits_per_sample == bits_per_sample
assert sinfo.encoding == get_encoding(ext, dtype)
@parameterized.expand([
('wav', "float32"),
('wav', "int32"),
('wav', "int16"),
('wav', "uint8"),
('mp3', "float32"),
('flac', "float32"),
('vorbis', "float32"),
('amb', "int16"),
])
def test_bytesio(self, ext, dtype):
"""Querying audio via ByteIO object works for small data"""
sample_rate = 16000
num_frames = 3 * sample_rate
num_channels = 2
sinfo = self._query_bytesio(ext, dtype, sample_rate, num_channels, num_frames)
bits_per_sample = get_bits_per_sample(ext, dtype)
num_frames = 0 if ext in ['mp3', 'vorbis'] else num_frames
assert sinfo.sample_rate == sample_rate
assert sinfo.num_channels == num_channels
assert sinfo.num_frames == num_frames
assert sinfo.bits_per_sample == bits_per_sample
assert sinfo.encoding == get_encoding(ext, dtype)
@parameterized.expand([
('wav', "float32"),
('wav', "int32"),
('wav', "int16"),
('wav', "uint8"),
('mp3', "float32"),
('flac', "float32"),
('vorbis', "float32"),
('amb', "int16"),
])
def test_bytesio_tiny(self, ext, dtype):
"""Querying audio via ByteIO object works for small data"""
sample_rate = 8000
num_frames = 4
num_channels = 2
sinfo = self._query_bytesio(ext, dtype, sample_rate, num_channels, num_frames)
bits_per_sample = get_bits_per_sample(ext, dtype)
num_frames = 0 if ext in ['mp3', 'vorbis'] else num_frames
assert sinfo.sample_rate == sample_rate
assert sinfo.num_channels == num_channels
assert sinfo.num_frames == num_frames
assert sinfo.bits_per_sample == bits_per_sample
assert sinfo.encoding == get_encoding(ext, dtype)
@parameterized.expand([
('wav', "float32"),
('wav', "int32"),
('wav', "int16"),
('wav', "uint8"),
('mp3', "float32"),
('flac', "float32"),
('vorbis', "float32"),
('amb', "int16"),
])
def test_tarfile(self, ext, dtype):
"""Querying compressed audio via file-like object works"""
sample_rate = 16000
num_frames = 3 * sample_rate
num_channels = 2
sinfo = self._query_tarfile(ext, dtype, sample_rate, num_channels, num_frames)
bits_per_sample = get_bits_per_sample(ext, dtype)
num_frames = 0 if ext in ['mp3', 'vorbis'] else num_frames
assert sinfo.sample_rate == sample_rate
assert sinfo.num_channels == num_channels
assert sinfo.num_frames == num_frames
assert sinfo.bits_per_sample == bits_per_sample
assert sinfo.encoding == get_encoding(ext, dtype)
@skipIfNoSox
@skipIfNoExec('sox')
@skipIfNoModule("requests")
class TestFileObjectHttp(HttpServerMixin, FileObjTestBase, PytorchTestCase):
def _query_http(self, ext, dtype, sample_rate, num_channels, num_frames):
audio_path = self._gen_file(ext, dtype, sample_rate, num_channels, num_frames)
audio_file = os.path.basename(audio_path)
url = self.get_url(audio_file)
format_ = ext if ext in ['mp3'] else None
with requests.get(url, stream=True) as resp:
return sox_io_backend.info(resp.raw, format=format_)
@parameterized.expand([
('wav', "float32"),
('wav', "int32"),
('wav', "int16"),
('wav', "uint8"),
('mp3', "float32"),
('flac', "float32"),
('vorbis', "float32"),
('amb', "int16"),
])
def test_requests(self, ext, dtype):
"""Querying compressed audio via requests works"""
sample_rate = 16000
num_frames = 3 * sample_rate
num_channels = 2
sinfo = self._query_http(ext, dtype, sample_rate, num_channels, num_frames)
bits_per_sample = get_bits_per_sample(ext, dtype)
num_frames = 0 if ext in ['mp3', 'vorbis'] else num_frames
assert sinfo.sample_rate == sample_rate
assert sinfo.num_channels == num_channels
assert sinfo.num_frames == num_frames
assert sinfo.bits_per_sample == bits_per_sample
assert sinfo.encoding == get_encoding(ext, dtype)
@skipIfNoSox
class TestInfoNoSuchFile(PytorchTestCase):
def test_info_fail(self):
"""
When attempting to get info on a non-existing file, the error message must contain the file path.
"""
path = "non_existing_audio.wav"
with self.assertRaisesRegex(RuntimeError, "^Error loading audio file: failed to open file {0}$".format(path)):
sox_io_backend.info(path)
|
import io
import itertools
import tarfile
from parameterized import parameterized
from torchaudio.backend import sox_io_backend
from torchaudio._internal import module_utils as _mod_utils
from torchaudio_unittest.common_utils import (
TempDirMixin,
HttpServerMixin,
PytorchTestCase,
skipIfNoExec,
skipIfNoModule,
skipIfNoSox,
get_asset_path,
get_wav_data,
load_wav,
save_wav,
sox_utils,
)
from .common import (
name_func,
)
if _mod_utils.is_module_available("requests"):
import requests
class LoadTestBase(TempDirMixin, PytorchTestCase):
def assert_format(
self,
format: str,
sample_rate: float,
num_channels: int,
compression: float = None,
bit_depth: int = None,
duration: float = 1,
normalize: bool = True,
encoding: str = None,
atol: float = 4e-05,
rtol: float = 1.3e-06,
):
"""`sox_io_backend.load` can load given format correctly.
file encodings introduce delay and boundary effects so
we create a reference wav file from the original file format
x
|
| 1. Generate given format with Sox
|
v 2. Convert to wav with Sox
given format ----------------------> wav
| |
| 3. Load with torchaudio | 4. Load with scipy
| |
v v
tensor ----------> x <----------- tensor
5. Compare
Underlying assumptions are:
i. Converting the given format to wav with Sox preserves the data.
ii. Loading a wav file with scipy is correct.
By combining i & ii, steps 2 and 4 allow us to load the reference data
of the given format without using torchaudio.
"""
path = self.get_temp_path(f'1.original.{format}')
ref_path = self.get_temp_path('2.reference.wav')
# 1. Generate the given format with sox
sox_utils.gen_audio_file(
path, sample_rate, num_channels, encoding=encoding,
compression=compression, bit_depth=bit_depth, duration=duration,
)
# 2. Convert to wav with sox
wav_bit_depth = 32 if bit_depth == 24 else None # for 24-bit wav
sox_utils.convert_audio_file(path, ref_path, bit_depth=wav_bit_depth)
# 3. Load the given format with torchaudio
data, sr = sox_io_backend.load(path, normalize=normalize)
# 4. Load wav with scipy
data_ref = load_wav(ref_path, normalize=normalize)[0]
# 5. Compare
assert sr == sample_rate
self.assertEqual(data, data_ref, atol=atol, rtol=rtol)
def assert_wav(self, dtype, sample_rate, num_channels, normalize, duration):
"""`sox_io_backend.load` can load wav format correctly.
Wav data loaded with sox_io backend should match those with scipy
"""
path = self.get_temp_path('reference.wav')
data = get_wav_data(dtype, num_channels, normalize=normalize, num_frames=duration * sample_rate)
save_wav(path, data, sample_rate)
expected = load_wav(path, normalize=normalize)[0]
data, sr = sox_io_backend.load(path, normalize=normalize)
assert sr == sample_rate
self.assertEqual(data, expected)
@skipIfNoExec('sox')
@skipIfNoSox
class TestLoad(LoadTestBase):
"""Test the correctness of `sox_io_backend.load` for various formats"""
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
[False, True],
)), name_func=name_func)
def test_wav(self, dtype, sample_rate, num_channels, normalize):
"""`sox_io_backend.load` can load wav format correctly."""
self.assert_wav(dtype, sample_rate, num_channels, normalize, duration=1)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[False, True],
)), name_func=name_func)
def test_24bit_wav(self, sample_rate, num_channels, normalize):
"""`sox_io_backend.load` can load 24bit wav format correctly. Corectly casts it to ``int32`` tensor dtype."""
self.assert_format("wav", sample_rate, num_channels, bit_depth=24, normalize=normalize, duration=1)
@parameterized.expand(list(itertools.product(
['int16'],
[16000],
[2],
[False],
)), name_func=name_func)
def test_wav_large(self, dtype, sample_rate, num_channels, normalize):
"""`sox_io_backend.load` can load large wav file correctly."""
two_hours = 2 * 60 * 60
self.assert_wav(dtype, sample_rate, num_channels, normalize, two_hours)
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[4, 8, 16, 32],
)), name_func=name_func)
def test_multiple_channels(self, dtype, num_channels):
"""`sox_io_backend.load` can load wav file with more than 2 channels."""
sample_rate = 8000
normalize = False
self.assert_wav(dtype, sample_rate, num_channels, normalize, duration=1)
@parameterized.expand(list(itertools.product(
[8000, 16000, 44100],
[1, 2],
[96, 128, 160, 192, 224, 256, 320],
)), name_func=name_func)
def test_mp3(self, sample_rate, num_channels, bit_rate):
"""`sox_io_backend.load` can load mp3 format correctly."""
self.assert_format("mp3", sample_rate, num_channels, compression=bit_rate, duration=1, atol=5e-05)
@parameterized.expand(list(itertools.product(
[16000],
[2],
[128],
)), name_func=name_func)
def test_mp3_large(self, sample_rate, num_channels, bit_rate):
"""`sox_io_backend.load` can load large mp3 file correctly."""
two_hours = 2 * 60 * 60
self.assert_format("mp3", sample_rate, num_channels, compression=bit_rate, duration=two_hours, atol=5e-05)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_flac(self, sample_rate, num_channels, compression_level):
"""`sox_io_backend.load` can load flac format correctly."""
self.assert_format("flac", sample_rate, num_channels, compression=compression_level, bit_depth=16, duration=1)
@parameterized.expand(list(itertools.product(
[16000],
[2],
[0],
)), name_func=name_func)
def test_flac_large(self, sample_rate, num_channels, compression_level):
"""`sox_io_backend.load` can load large flac file correctly."""
two_hours = 2 * 60 * 60
self.assert_format(
"flac", sample_rate, num_channels, compression=compression_level, bit_depth=16, duration=two_hours)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
[-1, 0, 1, 2, 3, 3.6, 5, 10],
)), name_func=name_func)
def test_vorbis(self, sample_rate, num_channels, quality_level):
"""`sox_io_backend.load` can load vorbis format correctly."""
self.assert_format("vorbis", sample_rate, num_channels, compression=quality_level, bit_depth=16, duration=1)
@parameterized.expand(list(itertools.product(
[16000],
[2],
[10],
)), name_func=name_func)
def test_vorbis_large(self, sample_rate, num_channels, quality_level):
"""`sox_io_backend.load` can load large vorbis file correctly."""
two_hours = 2 * 60 * 60
self.assert_format(
"vorbis", sample_rate, num_channels, compression=quality_level, bit_depth=16, duration=two_hours)
@parameterized.expand(list(itertools.product(
['96k'],
[1, 2],
[0, 5, 10],
)), name_func=name_func)
def test_opus(self, bitrate, num_channels, compression_level):
"""`sox_io_backend.load` can load opus file correctly."""
opus_path = get_asset_path('io', f'{bitrate}_{compression_level}_{num_channels}ch.opus')
wav_path = self.get_temp_path(f'{bitrate}_{compression_level}_{num_channels}ch.opus.wav')
sox_utils.convert_audio_file(opus_path, wav_path)
expected, sample_rate = load_wav(wav_path)
found, sr = sox_io_backend.load(opus_path)
assert sample_rate == sr
self.assertEqual(expected, found)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_sphere(self, sample_rate, num_channels):
"""`sox_io_backend.load` can load sph format correctly."""
self.assert_format("sph", sample_rate, num_channels, bit_depth=32, duration=1)
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16'],
[8000, 16000],
[1, 2],
[False, True],
)), name_func=name_func)
def test_amb(self, dtype, sample_rate, num_channels, normalize):
"""`sox_io_backend.load` can load amb format correctly."""
bit_depth = sox_utils.get_bit_depth(dtype)
encoding = sox_utils.get_encoding(dtype)
self.assert_format(
"amb", sample_rate, num_channels, bit_depth=bit_depth, duration=1, encoding=encoding, normalize=normalize)
def test_amr_nb(self):
"""`sox_io_backend.load` can load amr_nb format correctly."""
self.assert_format("amr-nb", sample_rate=8000, num_channels=1, bit_depth=32, duration=1)
@skipIfNoExec('sox')
@skipIfNoSox
class TestLoadParams(TempDirMixin, PytorchTestCase):
"""Test the correctness of frame parameters of `sox_io_backend.load`"""
original = None
path = None
def setUp(self):
super().setUp()
sample_rate = 8000
self.original = get_wav_data('float32', num_channels=2)
self.path = self.get_temp_path('test.wav')
save_wav(self.path, self.original, sample_rate)
@parameterized.expand(list(itertools.product(
[0, 1, 10, 100, 1000],
[-1, 1, 10, 100, 1000],
)), name_func=name_func)
def test_frame(self, frame_offset, num_frames):
"""num_frames and frame_offset correctly specify the region of data"""
found, _ = sox_io_backend.load(self.path, frame_offset, num_frames)
frame_end = None if num_frames == -1 else frame_offset + num_frames
self.assertEqual(found, self.original[:, frame_offset:frame_end])
@parameterized.expand([(True, ), (False, )], name_func=name_func)
def test_channels_first(self, channels_first):
"""channels_first swaps axes"""
found, _ = sox_io_backend.load(self.path, channels_first=channels_first)
expected = self.original if channels_first else self.original.transpose(1, 0)
self.assertEqual(found, expected)
@skipIfNoSox
class TestLoadWithoutExtension(PytorchTestCase):
def test_mp3(self):
"""Providing format allows to read mp3 without extension
libsox does not check header for mp3
https://github.com/pytorch/audio/issues/1040
The file was generated with the following command
ffmpeg -f lavfi -i "sine=frequency=1000:duration=5" -ar 16000 -f mp3 test_noext
"""
path = get_asset_path("mp3_without_ext")
_, sr = sox_io_backend.load(path, format="mp3")
assert sr == 16000
class CloggedFileObj:
"""File-like wrapper whose ``read`` returns at most two bytes per call,
regardless of how many bytes were requested."""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buffer = b''
def read(self, n):
if not self.buffer:
self.buffer += self.fileobj.read(n)
ret = self.buffer[:2]
self.buffer = self.buffer[2:]
return ret
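# A minimal sketch of the behavior, assuming an in-memory buffer:
#
#     clogged = CloggedFileObj(io.BytesIO(b'abcdef'))
#     clogged.read(1024)  # -> b'ab'
#     clogged.read(1024)  # -> b'cd'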
@skipIfNoSox
@skipIfNoExec('sox')
class TestFileObject(TempDirMixin, PytorchTestCase):
"""
In this test suite, the result of file-like object input is compared against file path input,
because the `load` function is rigorously tested for file path inputs to match libsox's result.
"""
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_fileobj(self, ext, compression):
"""Loading audio via file object returns the same result as via file path."""
sample_rate = 16000
format_ = ext if ext in ['mp3'] else None
path = self.get_temp_path(f'test.{ext}')
sox_utils.gen_audio_file(
path, sample_rate, num_channels=2,
compression=compression)
expected, _ = sox_io_backend.load(path)
with open(path, 'rb') as fileobj:
found, sr = sox_io_backend.load(fileobj, format=format_)
assert sr == sample_rate
self.assertEqual(expected, found)
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_bytesio(self, ext, compression):
"""Loading audio via BytesIO object returns the same result as via file path."""
sample_rate = 16000
format_ = ext if ext in ['mp3'] else None
path = self.get_temp_path(f'test.{ext}')
sox_utils.gen_audio_file(
path, sample_rate, num_channels=2,
compression=compression)
expected, _ = sox_io_backend.load(path)
with open(path, 'rb') as file_:
fileobj = io.BytesIO(file_.read())
found, sr = sox_io_backend.load(fileobj, format=format_)
assert sr == sample_rate
self.assertEqual(expected, found)
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_bytesio_clogged(self, ext, compression):
"""Loading audio via clogged file object returns the same result as via file path.
This test case validates the case where fileobject returns shorter bytes than requeted.
"""
sample_rate = 16000
format_ = ext if ext in ['mp3'] else None
path = self.get_temp_path(f'test.{ext}')
sox_utils.gen_audio_file(
path, sample_rate, num_channels=2,
compression=compression)
expected, _ = sox_io_backend.load(path)
with open(path, 'rb') as file_:
fileobj = CloggedFileObj(io.BytesIO(file_.read()))
found, sr = sox_io_backend.load(fileobj, format=format_)
assert sr == sample_rate
self.assertEqual(expected, found)
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_bytesio_tiny(self, ext, compression):
"""Loading very small audio via file object returns the same result as via file path.
"""
sample_rate = 16000
format_ = ext if ext in ['mp3'] else None
path = self.get_temp_path(f'test.{ext}')
sox_utils.gen_audio_file(
path, sample_rate, num_channels=2,
compression=compression, duration=1 / 1600)
expected, _ = sox_io_backend.load(path)
with open(path, 'rb') as file_:
fileobj = io.BytesIO(file_.read())
found, sr = sox_io_backend.load(fileobj, format=format_)
assert sr == sample_rate
self.assertEqual(expected, found)
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_tarfile(self, ext, compression):
"""Loading compressed audio via file-like object returns the same result as via file path."""
sample_rate = 16000
format_ = ext if ext in ['mp3'] else None
audio_file = f'test.{ext}'
audio_path = self.get_temp_path(audio_file)
archive_path = self.get_temp_path('archive.tar.gz')
sox_utils.gen_audio_file(
audio_path, sample_rate, num_channels=2,
compression=compression)
expected, _ = sox_io_backend.load(audio_path)
with tarfile.TarFile(archive_path, 'w') as tarobj:
tarobj.add(audio_path, arcname=audio_file)
with tarfile.TarFile(archive_path, 'r') as tarobj:
fileobj = tarobj.extractfile(audio_file)
found, sr = sox_io_backend.load(fileobj, format=format_)
assert sr == sample_rate
self.assertEqual(expected, found)
@skipIfNoSox
@skipIfNoExec('sox')
@skipIfNoModule("requests")
class TestFileObjectHttp(HttpServerMixin, PytorchTestCase):
@parameterized.expand([
('wav', None),
('mp3', 128),
('mp3', 320),
('flac', 0),
('flac', 5),
('flac', 8),
('vorbis', -1),
('vorbis', 10),
('amb', None),
])
def test_requests(self, ext, compression):
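"""Loading audio via requests (HTTP) returns the same result as via file path."""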
sample_rate = 16000
format_ = ext if ext in ['mp3'] else None
audio_file = f'test.{ext}'
audio_path = self.get_temp_path(audio_file)
sox_utils.gen_audio_file(
audio_path, sample_rate, num_channels=2, compression=compression)
expected, _ = sox_io_backend.load(audio_path)
url = self.get_url(audio_file)
with requests.get(url, stream=True) as resp:
found, sr = sox_io_backend.load(resp.raw, format=format_)
assert sr == sample_rate
self.assertEqual(expected, found)
@parameterized.expand(list(itertools.product(
[0, 1, 10, 100, 1000],
[-1, 1, 10, 100, 1000],
)), name_func=name_func)
def test_frame(self, frame_offset, num_frames):
"""num_frames and frame_offset correctly specify the region of data"""
sample_rate = 8000
audio_file = 'test.wav'
audio_path = self.get_temp_path(audio_file)
original = get_wav_data('float32', num_channels=2)
save_wav(audio_path, original, sample_rate)
frame_end = None if num_frames == -1 else frame_offset + num_frames
expected = original[:, frame_offset:frame_end]
url = self.get_url(audio_file)
with requests.get(url, stream=True) as resp:
found, sr = sox_io_backend.load(resp.raw, frame_offset, num_frames)
assert sr == sample_rate
self.assertEqual(expected, found)
@skipIfNoSox
class TestLoadNoSuchFile(PytorchTestCase):
def test_load_fail(self):
"""
When attempting to load a non-existing file, the error message must contain the file path.
"""
path = "non_existing_audio.wav"
with self.assertRaisesRegex(RuntimeError, "^Error loading audio file: failed to open file {0}$".format(path)):
sox_io_backend.load(path)
|
import itertools
from torchaudio.backend import sox_io_backend
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
PytorchTestCase,
skipIfNoExec,
skipIfNoSox,
get_wav_data,
)
from .common import (
name_func,
get_enc_params,
)
@skipIfNoExec('sox')
@skipIfNoSox
class TestRoundTripIO(TempDirMixin, PytorchTestCase):
"""save/load round trip should not degrade data for lossless formats"""
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_wav(self, dtype, sample_rate, num_channels):
"""save/load round trip should not degrade data for wav formats"""
original = get_wav_data(dtype, num_channels, normalize=False)
enc, bps = get_enc_params(dtype)
data = original
for i in range(10):
path = self.get_temp_path(f'{i}.wav')
sox_io_backend.save(path, data, sample_rate, encoding=enc, bits_per_sample=bps)
data, sr = sox_io_backend.load(path, normalize=False)
assert sr == sample_rate
self.assertEqual(original, data)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_flac(self, sample_rate, num_channels, compression_level):
"""save/load round trip should not degrade data for flac formats"""
original = get_wav_data('float32', num_channels)
data = original
for i in range(10):
path = self.get_temp_path(f'{i}.flac')
sox_io_backend.save(path, data, sample_rate, compression=compression_level)
data, sr = sox_io_backend.load(path)
assert sr == sample_rate
self.assertEqual(original, data)
|
import io
import os
import unittest
import torch
from torchaudio.backend import sox_io_backend
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
PytorchTestCase,
skipIfNoExec,
skipIfNoSox,
get_wav_data,
load_wav,
save_wav,
sox_utils,
nested_params,
)
from .common import (
name_func,
get_enc_params,
)
def _get_sox_encoding(encoding):
encodings = {
'PCM_F': 'floating-point',
'PCM_S': 'signed-integer',
'PCM_U': 'unsigned-integer',
'ULAW': 'u-law',
'ALAW': 'a-law',
}
return encodings.get(encoding)
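# For example, _get_sox_encoding('PCM_F') returns 'floating-point';
# encodings not in the table (including None) map to None.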
class SaveTestBase(TempDirMixin, TorchaudioTestCase):
def assert_save_consistency(
self,
format: str,
*,
compression: float = None,
encoding: str = None,
bits_per_sample: int = None,
sample_rate: float = 8000,
num_channels: int = 2,
num_frames: int = 3 * 8000,
src_dtype: str = 'int32',
test_mode: str = "path",
):
"""`save` function produces file that is comparable with `sox` command
To compare that the file produced by `save` function agains the file produced by
the equivalent `sox` command, we need to load both files.
But there are many formats that cannot be opened with common Python modules (like
SciPy).
So we use `sox` command to prepare the original data and convert the saved files
into a format that SciPy can read (PCM wav).
The following diagram illustrates this process. The difference is 2.1. and 3.1.
This assumes that
- loading data with SciPy preserves the data well.
- converting the resulting files into WAV format with `sox` preserve the data well.
x
| 1. Generate source wav file with SciPy
|
v
-------------- wav ----------------
| |
| 2.1. load with scipy | 3.1. Convert to the target
| then save it into the target | format depth with sox
| format with torchaudio |
v v
target format target format
| |
| 2.2. Convert to wav with sox | 3.2. Convert to wav with sox
| |
v v
wav wav
| |
| 2.3. load with scipy | 3.3. load with scipy
| |
v v
tensor -------> compare <--------- tensor
"""
cmp_encoding = 'floating-point'
cmp_bit_depth = 32
src_path = self.get_temp_path('1.source.wav')
tgt_path = self.get_temp_path(f'2.1.torchaudio.{format}')
tst_path = self.get_temp_path('2.2.result.wav')
sox_path = self.get_temp_path(f'3.1.sox.{format}')
ref_path = self.get_temp_path('3.2.ref.wav')
# 1. Generate original wav
data = get_wav_data(src_dtype, num_channels, normalize=False, num_frames=num_frames)
save_wav(src_path, data, sample_rate)
# 2.1. Convert the original wav to target format with torchaudio
data = load_wav(src_path, normalize=False)[0]
if test_mode == "path":
sox_io_backend.save(
tgt_path, data, sample_rate,
compression=compression, encoding=encoding, bits_per_sample=bits_per_sample)
elif test_mode == "fileobj":
with open(tgt_path, 'bw') as file_:
sox_io_backend.save(
file_, data, sample_rate,
format=format, compression=compression,
encoding=encoding, bits_per_sample=bits_per_sample)
elif test_mode == "bytesio":
file_ = io.BytesIO()
sox_io_backend.save(
file_, data, sample_rate,
format=format, compression=compression,
encoding=encoding, bits_per_sample=bits_per_sample)
file_.seek(0)
with open(tgt_path, 'bw') as f:
f.write(file_.read())
else:
raise ValueError(f"Unexpected test mode: {test_mode}")
# 2.2. Convert the target format to wav with sox
sox_utils.convert_audio_file(
tgt_path, tst_path, encoding=cmp_encoding, bit_depth=cmp_bit_depth)
# 2.3. Load with SciPy
found = load_wav(tst_path, normalize=False)[0]
# 3.1. Convert the original wav to target format with sox
sox_encoding = _get_sox_encoding(encoding)
sox_utils.convert_audio_file(
src_path, sox_path,
compression=compression, encoding=sox_encoding, bit_depth=bits_per_sample)
# 3.2. Convert the target format to wav with sox
sox_utils.convert_audio_file(
sox_path, ref_path, encoding=cmp_encoding, bit_depth=cmp_bit_depth)
# 3.3. Load with SciPy
expected = load_wav(ref_path, normalize=False)[0]
self.assertEqual(found, expected)
@skipIfNoExec('sox')
@skipIfNoSox
class SaveTest(SaveTestBase):
@nested_params(
["path", "fileobj", "bytesio"],
[
('PCM_U', 8),
('PCM_S', 16),
('PCM_S', 32),
('PCM_F', 32),
('PCM_F', 64),
('ULAW', 8),
('ALAW', 8),
],
)
def test_save_wav(self, test_mode, enc_params):
encoding, bits_per_sample = enc_params
self.assert_save_consistency(
"wav", encoding=encoding, bits_per_sample=bits_per_sample, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
[
('float32', ),
('int32', ),
('int16', ),
('uint8', ),
],
)
def test_save_wav_dtype(self, test_mode, params):
dtype, = params
self.assert_save_consistency(
"wav", src_dtype=dtype, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
[
None,
-4.2,
-0.2,
0,
0.2,
96,
128,
160,
192,
224,
256,
320,
],
)
def test_save_mp3(self, test_mode, bit_rate):
if test_mode in ["fileobj", "bytesio"]:
if bit_rate is not None and bit_rate < 1:
raise unittest.SkipTest(
"mp3 format with variable bit rate is known to "
"not yield the exact same result as sox command.")
self.assert_save_consistency(
"mp3", compression=bit_rate, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
[8, 16, 24],
[
None,
0,
1,
2,
3,
4,
5,
6,
7,
8,
],
)
def test_save_flac(self, test_mode, bits_per_sample, compression_level):
self.assert_save_consistency(
"flac", compression=compression_level,
bits_per_sample=bits_per_sample, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
)
def test_save_htk(self, test_mode):
self.assert_save_consistency("htk", test_mode=test_mode, num_channels=1)
@nested_params(
["path", "fileobj", "bytesio"],
[
None,
-1,
0,
1,
2,
3,
3.6,
5,
10,
],
)
def test_save_vorbis(self, test_mode, quality_level):
self.assert_save_consistency(
"vorbis", compression=quality_level, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
[
('PCM_S', 8, ),
('PCM_S', 16, ),
('PCM_S', 24, ),
('PCM_S', 32, ),
('ULAW', 8),
('ALAW', 8),
('ALAW', 16),
('ALAW', 24),
('ALAW', 32),
],
)
def test_save_sphere(self, test_mode, enc_params):
encoding, bits_per_sample = enc_params
self.assert_save_consistency(
"sph", encoding=encoding, bits_per_sample=bits_per_sample, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
[
('PCM_U', 8, ),
('PCM_S', 16, ),
('PCM_S', 24, ),
('PCM_S', 32, ),
('PCM_F', 32, ),
('PCM_F', 64, ),
('ULAW', 8, ),
('ALAW', 8, ),
],
)
def test_save_amb(self, test_mode, enc_params):
encoding, bits_per_sample = enc_params
self.assert_save_consistency(
"amb", encoding=encoding, bits_per_sample=bits_per_sample, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
[
None,
0,
1,
2,
3,
4,
5,
6,
7,
],
)
def test_save_amr_nb(self, test_mode, bit_rate):
self.assert_save_consistency(
"amr-nb", compression=bit_rate, num_channels=1, test_mode=test_mode)
@nested_params(
["path", "fileobj", "bytesio"],
)
def test_save_gsm(self, test_mode):
self.assert_save_consistency(
"gsm", num_channels=1, test_mode=test_mode)
with self.assertRaises(
RuntimeError, msg="gsm format only supports single channel audio."):
self.assert_save_consistency(
"gsm", num_channels=2, test_mode=test_mode)
with self.assertRaises(
RuntimeError, msg="gsm format only supports a sampling rate of 8kHz."):
self.assert_save_consistency(
"gsm", sample_rate=16000, test_mode=test_mode)
@parameterized.expand([
("wav", "PCM_S", 16),
("mp3", ),
("flac", ),
("vorbis", ),
("sph", "PCM_S", 16),
("amr-nb", ),
("amb", "PCM_S", 16),
], name_func=name_func)
def test_save_large(self, format, encoding=None, bits_per_sample=None):
"""`sox_io_backend.save` can save large files."""
sample_rate = 8000
one_hour = 60 * 60 * sample_rate
self.assert_save_consistency(
format, num_channels=1, sample_rate=8000, num_frames=one_hour,
encoding=encoding, bits_per_sample=bits_per_sample)
@parameterized.expand([
(32, ),
(64, ),
(128, ),
(256, ),
], name_func=name_func)
def test_save_multi_channels(self, num_channels):
"""`sox_io_backend.save` can save audio with many channels"""
self.assert_save_consistency(
"wav", encoding="PCM_S", bits_per_sample=16,
num_channels=num_channels)
@skipIfNoExec('sox')
@skipIfNoSox
class TestSaveParams(TempDirMixin, PytorchTestCase):
"""Test the correctness of optional parameters of `sox_io_backend.save`"""
@parameterized.expand([(True, ), (False, )], name_func=name_func)
def test_save_channels_first(self, channels_first):
"""channels_first swaps axes"""
path = self.get_temp_path('data.wav')
data = get_wav_data(
'int16', 2, channels_first=channels_first, normalize=False)
sox_io_backend.save(
path, data, 8000, channels_first=channels_first)
found = load_wav(path, normalize=False)[0]
expected = data if channels_first else data.transpose(1, 0)
self.assertEqual(found, expected)
@parameterized.expand([
'float32', 'int32', 'int16', 'uint8'
], name_func=name_func)
def test_save_noncontiguous(self, dtype):
"""Noncontiguous tensors are saved correctly"""
path = self.get_temp_path('data.wav')
enc, bps = get_enc_params(dtype)
expected = get_wav_data(dtype, 4, normalize=False)[::2, ::2]
assert not expected.is_contiguous()
sox_io_backend.save(
path, expected, 8000, encoding=enc, bits_per_sample=bps)
found = load_wav(path, normalize=False)[0]
self.assertEqual(found, expected)
@parameterized.expand([
'float32', 'int32', 'int16', 'uint8',
])
def test_save_tensor_preserve(self, dtype):
"""save function should not alter Tensor"""
path = self.get_temp_path('data.wav')
expected = get_wav_data(dtype, 4, normalize=False)[::2, ::2]
data = expected.clone()
sox_io_backend.save(path, data, 8000)
self.assertEqual(data, expected)
@skipIfNoSox
class TestSaveNonExistingDirectory(PytorchTestCase):
def test_save_fail(self):
"""
When attempting to save into a non-existing directory, the error message must contain the file path.
"""
path = os.path.join("non_existing_directory", "foo.wav")
with self.assertRaisesRegex(RuntimeError, "^Error saving audio file: failed to open file {0}$".format(path)):
sox_io_backend.save(path, torch.zeros(1, 1), 8000)
|
import itertools
from typing import Optional
import torch
import torchaudio
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TempDirMixin,
TorchaudioTestCase,
skipIfNoExec,
skipIfNoSox,
get_wav_data,
save_wav,
load_wav,
sox_utils,
torch_script,
)
from .common import (
name_func,
get_enc_params,
)
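# Plain wrappers with explicit type annotations around the torchaudio API,
# so that `torch_script` (torch.jit.script) can compile them in the tests below.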
def py_info_func(filepath: str) -> torchaudio.backend.sox_io_backend.AudioMetaData:
return torchaudio.info(filepath)
def py_load_func(filepath: str, normalize: bool, channels_first: bool):
return torchaudio.load(
filepath, normalize=normalize, channels_first=channels_first)
def py_save_func(
filepath: str,
tensor: torch.Tensor,
sample_rate: int,
channels_first: bool = True,
compression: Optional[float] = None,
encoding: Optional[str] = None,
bits_per_sample: Optional[int] = None,
):
torchaudio.save(
filepath, tensor, sample_rate, channels_first,
compression, None, encoding, bits_per_sample)
@skipIfNoExec('sox')
@skipIfNoSox
class SoxIO(TempDirMixin, TorchaudioTestCase):
"""TorchScript-ability Test suite for `sox_io_backend`"""
backend = 'sox_io'
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_info_wav(self, dtype, sample_rate, num_channels):
"""`sox_io_backend.info` is torchscript-able and returns the same result"""
audio_path = self.get_temp_path(f'{dtype}_{sample_rate}_{num_channels}.wav')
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=1 * sample_rate)
save_wav(audio_path, data, sample_rate)
ts_info_func = torch_script(py_info_func)
py_info = py_info_func(audio_path)
ts_info = ts_info_func(audio_path)
assert py_info.sample_rate == ts_info.sample_rate
assert py_info.num_frames == ts_info.num_frames
assert py_info.num_channels == ts_info.num_channels
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
[False, True],
[False, True],
)), name_func=name_func)
def test_load_wav(self, dtype, sample_rate, num_channels, normalize, channels_first):
"""`sox_io_backend.load` is torchscript-able and returns the same result"""
audio_path = self.get_temp_path(f'test_load_{dtype}_{sample_rate}_{num_channels}_{normalize}.wav')
data = get_wav_data(dtype, num_channels, normalize=False, num_frames=1 * sample_rate)
save_wav(audio_path, data, sample_rate)
ts_load_func = torch_script(py_load_func)
py_data, py_sr = py_load_func(
audio_path, normalize=normalize, channels_first=channels_first)
ts_data, ts_sr = ts_load_func(
audio_path, normalize=normalize, channels_first=channels_first)
self.assertEqual(py_sr, ts_sr)
self.assertEqual(py_data, ts_data)
@parameterized.expand(list(itertools.product(
['float32', 'int32', 'int16', 'uint8'],
[8000, 16000],
[1, 2],
)), name_func=name_func)
def test_save_wav(self, dtype, sample_rate, num_channels):
ts_save_func = torch_script(py_save_func)
expected = get_wav_data(dtype, num_channels, normalize=False)
py_path = self.get_temp_path(f'test_save_py_{dtype}_{sample_rate}_{num_channels}.wav')
ts_path = self.get_temp_path(f'test_save_ts_{dtype}_{sample_rate}_{num_channels}.wav')
enc, bps = get_enc_params(dtype)
py_save_func(py_path, expected, sample_rate, True, None, enc, bps)
ts_save_func(ts_path, expected, sample_rate, True, None, enc, bps)
py_data, py_sr = load_wav(py_path, normalize=False)
ts_data, ts_sr = load_wav(ts_path, normalize=False)
self.assertEqual(sample_rate, py_sr)
self.assertEqual(sample_rate, ts_sr)
self.assertEqual(expected, py_data)
self.assertEqual(expected, ts_data)
@parameterized.expand(list(itertools.product(
[8000, 16000],
[1, 2],
list(range(9)),
)), name_func=name_func)
def test_save_flac(self, sample_rate, num_channels, compression_level):
ts_save_func = torch_script(py_save_func)
expected = get_wav_data('float32', num_channels)
py_path = self.get_temp_path(f'test_save_py_{sample_rate}_{num_channels}_{compression_level}.flac')
ts_path = self.get_temp_path(f'test_save_ts_{sample_rate}_{num_channels}_{compression_level}.flac')
py_save_func(py_path, expected, sample_rate, True, compression_level, None, None)
ts_save_func(ts_path, expected, sample_rate, True, compression_level, None, None)
# Convert to 32-bit because the flac file has 24-bit depth, which scipy cannot handle.
py_path_wav = f'{py_path}.wav'
ts_path_wav = f'{ts_path}.wav'
sox_utils.convert_audio_file(py_path, py_path_wav, bit_depth=32)
sox_utils.convert_audio_file(ts_path, ts_path_wav, bit_depth=32)
py_data, py_sr = load_wav(py_path_wav, normalize=True)
ts_data, ts_sr = load_wav(ts_path_wav, normalize=True)
self.assertEqual(sample_rate, py_sr)
self.assertEqual(sample_rate, ts_sr)
self.assertEqual(expected, py_data)
self.assertEqual(expected, ts_data)
|
def name_func(func, _, params):
return f'{func.__name__}_{"_".join(str(arg) for arg in params.args)}'
def get_enc_params(dtype):
if dtype == 'float32':
return 'PCM_F', 32
if dtype == 'int32':
return 'PCM_S', 32
if dtype == 'int16':
return 'PCM_S', 16
if dtype == 'uint8':
return 'PCM_U', 8
raise ValueError(f'Unexpected dtype: {dtype}')
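# For example, get_enc_params('int16') returns ('PCM_S', 16), and name_func
# renders a parameterized case such as test_wav('float32', 8000, 2) as
# 'test_wav_float32_8000_2'.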
|
import itertools
from collections import namedtuple
import torch
from parameterized import parameterized
from torchaudio.models import ConvTasNet, DeepSpeech, Wav2Letter, WaveRNN
from torchaudio.models.wavernn import MelResNet, UpsampleNetwork
from torchaudio_unittest import common_utils
from torchaudio_unittest.common_utils import torch_script
class TestWav2Letter(common_utils.TorchaudioTestCase):
def test_waveform(self):
batch_size = 2
num_features = 1
num_classes = 40
input_length = 320
model = Wav2Letter(num_classes=num_classes, num_features=num_features)
x = torch.rand(batch_size, num_features, input_length)
out = model(x)
assert out.size() == (batch_size, num_classes, 2)
def test_mfcc(self):
batch_size = 2
num_features = 13
num_classes = 40
input_length = 2
model = Wav2Letter(num_classes=num_classes, input_type="mfcc", num_features=num_features)
x = torch.rand(batch_size, num_features, input_length)
out = model(x)
assert out.size() == (batch_size, num_classes, 2)
class TestMelResNet(common_utils.TorchaudioTestCase):
def test_waveform(self):
"""Validate the output dimensions of a MelResNet block.
"""
n_batch = 2
n_time = 200
n_freq = 100
n_output = 128
n_res_block = 10
n_hidden = 128
kernel_size = 5
model = MelResNet(n_res_block, n_freq, n_hidden, n_output, kernel_size)
x = torch.rand(n_batch, n_freq, n_time)
out = model(x)
assert out.size() == (n_batch, n_output, n_time - kernel_size + 1)
class TestUpsampleNetwork(common_utils.TorchaudioTestCase):
def test_waveform(self):
"""Validate the output dimensions of a UpsampleNetwork block.
"""
upsample_scales = [5, 5, 8]
n_batch = 2
n_time = 200
n_freq = 100
n_output = 256
n_res_block = 10
n_hidden = 128
kernel_size = 5
total_scale = 1
for upsample_scale in upsample_scales:
total_scale *= upsample_scale
model = UpsampleNetwork(upsample_scales,
n_res_block,
n_freq,
n_hidden,
n_output,
kernel_size)
x = torch.rand(n_batch, n_freq, n_time)
out1, out2 = model(x)
assert out1.size() == (n_batch, n_freq, total_scale * (n_time - kernel_size + 1))
assert out2.size() == (n_batch, n_output, total_scale * (n_time - kernel_size + 1))
class TestWaveRNN(common_utils.TorchaudioTestCase):
def test_waveform(self):
"""Validate the output dimensions of a WaveRNN model.
"""
upsample_scales = [5, 5, 8]
n_rnn = 512
n_fc = 512
n_classes = 512
hop_length = 200
n_batch = 2
n_time = 200
n_freq = 100
n_output = 256
n_res_block = 10
n_hidden = 128
kernel_size = 5
model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)
x = torch.rand(n_batch, 1, hop_length * (n_time - kernel_size + 1))
mels = torch.rand(n_batch, 1, n_freq, n_time)
out = model(x, mels)
assert out.size() == (n_batch, 1, hop_length * (n_time - kernel_size + 1), n_classes)
def test_infer_waveform(self):
"""Validate the output dimensions of a WaveRNN model's infer method.
"""
upsample_scales = [5, 5, 8]
n_rnn = 128
n_fc = 128
n_classes = 128
hop_length = 200
n_batch = 2
n_time = 50
n_freq = 25
n_output = 64
n_res_block = 2
n_hidden = 32
kernel_size = 5
model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)
x = torch.rand(n_batch, n_freq, n_time)
lengths = torch.tensor([n_time, n_time // 2])
out, waveform_lengths = model.infer(x, lengths)
assert out.size() == (n_batch, 1, hop_length * n_time)
assert waveform_lengths[0] == hop_length * n_time
assert waveform_lengths[1] == hop_length * n_time // 2
def test_torchscript_infer(self):
"""Scripted model outputs the same as eager mode"""
upsample_scales = [5, 5, 8]
n_rnn = 128
n_fc = 128
n_classes = 128
hop_length = 200
n_batch = 2
n_time = 50
n_freq = 25
n_output = 64
n_res_block = 2
n_hidden = 32
kernel_size = 5
model = WaveRNN(upsample_scales, n_classes, hop_length, n_res_block,
n_rnn, n_fc, kernel_size, n_freq, n_hidden, n_output)
model.eval()
x = torch.rand(n_batch, n_freq, n_time)
torch.random.manual_seed(0)
out_eager = model.infer(x)
torch.random.manual_seed(0)
out_script = torch_script(model).infer(x)
self.assertEqual(out_eager, out_script)
_ConvTasNetParams = namedtuple(
'_ConvTasNetParams',
[
'enc_num_feats',
'enc_kernel_size',
'msk_num_feats',
'msk_num_hidden_feats',
'msk_kernel_size',
'msk_num_layers',
'msk_num_stacks',
]
)
class TestConvTasNet(common_utils.TorchaudioTestCase):
@parameterized.expand(list(itertools.product(
[2, 3],
[
_ConvTasNetParams(128, 40, 128, 256, 3, 7, 2),
_ConvTasNetParams(256, 40, 128, 256, 3, 7, 2),
_ConvTasNetParams(512, 40, 128, 256, 3, 7, 2),
_ConvTasNetParams(512, 40, 128, 256, 3, 7, 2),
_ConvTasNetParams(512, 40, 128, 512, 3, 7, 2),
_ConvTasNetParams(512, 40, 128, 512, 3, 7, 2),
_ConvTasNetParams(512, 40, 256, 256, 3, 7, 2),
_ConvTasNetParams(512, 40, 256, 512, 3, 7, 2),
_ConvTasNetParams(512, 40, 256, 512, 3, 7, 2),
_ConvTasNetParams(512, 40, 128, 512, 3, 6, 4),
_ConvTasNetParams(512, 40, 128, 512, 3, 4, 6),
_ConvTasNetParams(512, 40, 128, 512, 3, 8, 3),
_ConvTasNetParams(512, 32, 128, 512, 3, 8, 3),
_ConvTasNetParams(512, 16, 128, 512, 3, 8, 3),
],
)))
def test_paper_configuration(self, num_sources, model_params):
"""ConvTasNet model works on the valid configurations in the paper"""
batch_size = 32
num_frames = 8000
model = ConvTasNet(
num_sources=num_sources,
enc_kernel_size=model_params.enc_kernel_size,
enc_num_feats=model_params.enc_num_feats,
msk_kernel_size=model_params.msk_kernel_size,
msk_num_feats=model_params.msk_num_feats,
msk_num_hidden_feats=model_params.msk_num_hidden_feats,
msk_num_layers=model_params.msk_num_layers,
msk_num_stacks=model_params.msk_num_stacks,
)
tensor = torch.rand(batch_size, 1, num_frames)
output = model(tensor)
assert output.shape == (batch_size, num_sources, num_frames)
class TestDeepSpeech(common_utils.TorchaudioTestCase):
def test_deepspeech(self):
n_batch = 2
n_feature = 1
n_channel = 1
n_class = 40
n_time = 320
model = DeepSpeech(n_feature=n_feature, n_class=n_class)
x = torch.rand(n_batch, n_channel, n_time, n_feature)
out = model(x)
assert out.size() == (n_batch, n_time, n_class)
|
import os
import torch
import torch.nn.functional as F
from typing import Tuple
from torchaudio.models.wav2vec2 import (
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
)
from torchaudio_unittest.common_utils import (
TorchaudioTestCase,
skipIfNoQengine,
skipIfNoCuda,
torch_script,
)
from parameterized import parameterized
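# Parse the major/minor torch version, e.g. "1.10.0+cu113" -> (1, 10).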
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
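# PyTorch 1.10 moved quantization utilities to `torch.ao.quantization`;
# fall back to the old namespace on older versions.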
if TORCH_VERSION >= (1, 10):
import torch.ao.quantization as tq
else:
import torch.quantization as tq
def _name_func(testcase_func, i, param):
return f"{testcase_func.__name__}_{i}_{param[0][0].__name__}"
factory_funcs = parameterized.expand([
(wav2vec2_base, ),
(wav2vec2_large, ),
(wav2vec2_large_lv60k, ),
(hubert_base, ),
(hubert_large, ),
(hubert_xlarge, ),
], name_func=_name_func)
class TestWav2Vec2Model(TorchaudioTestCase):
def _smoke_test(self, model, device, dtype):
model = model.to(device=device, dtype=dtype)
model = model.eval()
torch.manual_seed(0)
batch_size, num_frames = 3, 1024
waveforms = torch.randn(
batch_size, num_frames, device=device, dtype=dtype)
lengths = torch.randint(
low=0, high=num_frames, size=[batch_size, ], device=device)
model(waveforms, lengths)
@parameterized.expand([(torch.float32, ), (torch.float64, )])
def test_cpu_smoke_test(self, dtype):
model = wav2vec2_base()
self._smoke_test(model, torch.device('cpu'), dtype)
model = wav2vec2_base(aux_num_out=32)
self._smoke_test(model, torch.device('cpu'), dtype)
@parameterized.expand([(torch.float32, ), (torch.float64, )])
@skipIfNoCuda
def test_cuda_smoke_test(self, dtype):
model = wav2vec2_base()
self._smoke_test(model, torch.device('cuda'), dtype)
model = wav2vec2_base(aux_num_out=32)
self._smoke_test(model, torch.device('cuda'), dtype)
def _feature_extractor_test(self, model):
batch_size, num_frames = 3, 1024
model.eval()
num_layers = len(model.encoder.transformer.layers)
torch.manual_seed(0)
waveforms = torch.randn(batch_size, num_frames)
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
# Not providing num_layers returns all the intermediate features from
# transformer layers
all_features, lengths_ = model.extract_features(waveforms, lengths, num_layers=None)
assert len(all_features) == num_layers
for features in all_features:
assert features.ndim == 3
assert features.shape[0] == batch_size
assert lengths_.shape == torch.Size([batch_size])
# Limiting the number of layers to `l`.
for l in range(1, num_layers + 1):
features, lengths_ = model.extract_features(waveforms, lengths, num_layers=l)
assert len(features) == l
for i in range(l):
self.assertEqual(all_features[i], features[i])
assert lengths_.shape == torch.Size([batch_size])
@factory_funcs
def test_extract_feature(self, factory_func):
"""`extract_features` method does not fail"""
self._feature_extractor_test(factory_func(aux_num_out=32))
def _test_batch_consistency(self, model):
model.eval()
batch_size, max_frames = 5, 5 * 1024
torch.manual_seed(0)
waveforms = torch.randn(batch_size, max_frames)
input_lengths = torch.tensor([i * 3200 for i in range(1, 6)])
# Batch process with lengths
batch_logits, output_lengths = model(waveforms, input_lengths)
for i in range(batch_size):
# Per-sample process without feeding length
single_logit, _ = model(waveforms[i:i + 1, :input_lengths[i]], None)
batch_logit = batch_logits[i:i + 1, :output_lengths[i]]
# Convert to probability so that it is easier to interpret the diff
single_prob = F.softmax(single_logit, dim=2)
batch_prob = F.softmax(batch_logit, dim=2)
# We allow max atol=0.005 -> 0.5%
self.assertEqual(single_prob, batch_prob, atol=0.005, rtol=0)
@factory_funcs
def test_pretrain_batch_consistency(self, factory_func):
"""Results from single process and batched process should be reasonably close
"""
self._test_batch_consistency(factory_func())
@factory_funcs
def test_finetune_batch_consistency(self, factory_func):
"""Results from single process and batched process should be reasonably close
"""
self._test_batch_consistency(factory_func(aux_num_out=32))
def _test_zero_length(self, model):
model.eval()
torch.manual_seed(0)
batch_size = 3
waveforms = torch.randn(batch_size, 1024)
input_lengths = torch.zeros(batch_size)
_, output_lengths = model(waveforms, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
_, output_lengths = model.extract_features(waveforms, input_lengths)
self.assertEqual(torch.zeros_like(output_lengths), output_lengths)
@factory_funcs
def test_pretrain_zero_length(self, factory_func):
"""Passing zero length should not fail"""
self._test_zero_length(factory_func())
@factory_funcs
def test_finetune_zero_length(self, factory_func):
"""Passing zero length should not fail"""
self._test_zero_length(factory_func(aux_num_out=32))
def _test_torchscript(self, model):
model.eval()
batch_size, num_frames = 3, 1024
torch.manual_seed(0)
waveforms = torch.randn(batch_size, num_frames)
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
ref_out, ref_len = model(waveforms, lengths)
scripted = torch_script(model)
hyp_out, hyp_len = scripted(waveforms, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
@factory_funcs
def test_pretrain_torchscript(self, factory_func):
"""Wav2Vec2Model should be scriptable"""
if factory_func is hubert_xlarge and os.name == 'nt' and os.environ.get('CI') == 'true':
self.skipTest(
'hubert_xlarge is known to fail on Windows CI. '
'See https://github.com/pytorch/pytorch/issues/65776')
self._test_torchscript(factory_func())
@factory_funcs
def test_finetune_torchscript(self, factory_func):
"""Wav2Vec2Model should be scriptable"""
if factory_func is hubert_xlarge and os.name == 'nt' and os.environ.get('CI') == 'true':
self.skipTest(
'hubert_xlarge is known to fail on Windows CI. '
'See https://github.com/pytorch/pytorch/issues/65776')
self._test_torchscript(factory_func(aux_num_out=32))
def _test_quantize_smoke_test(self, model):
model.eval()
batch_size, num_frames = 3, 1024
# Remove the weight normalization forward hook
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
quantized = tq.quantize_dynamic(
model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
# A lazy way to check that Modules are different
assert str(quantized) != str(model), "Dynamic quantization did not modify the module."
torch.manual_seed(0)
waveforms = torch.randn(batch_size, num_frames)
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
_, _ = quantized(waveforms, lengths)
@factory_funcs
@skipIfNoQengine
def test_quantize(self, factory_func):
"""Wav2Vec2Model should support basic quantization"""
self._test_quantize_smoke_test(factory_func(aux_num_out=32))
def _test_quantize_torchscript(self, model):
model.eval()
batch_size, num_frames = 3, 1024
# Remove the weight normalization forward hook
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
quantized = tq.quantize_dynamic(
model, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
# A lazy way to check that Modules are different
assert str(quantized) != str(model), "Dynamic quantization did not modify the module."
torch.manual_seed(0)
waveforms = torch.randn(batch_size, num_frames)
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
ref_out, ref_len = quantized(waveforms, lengths)
# Script
scripted = torch_script(quantized)
hyp_out, hyp_len = scripted(waveforms, lengths)
self.assertEqual(hyp_out, ref_out)
self.assertEqual(hyp_len, ref_len)
@factory_funcs
@skipIfNoQengine
def test_quantize_torchscript(self, factory_func):
"""Quantized Wav2Vec2Model should be scriptable"""
self._test_quantize_torchscript(factory_func(aux_num_out=32))
|
import json
import torch
from torchaudio.models.wav2vec2 import (
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
)
from torchaudio.models.wav2vec2.utils import import_huggingface_model
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths):
with open(f'{get_asset_path("wav2vec2", "huggingface", *paths)}.json', 'r') as file_:
return json.load(file_)
def _name_func(testcase_func, i, param):
return f"{testcase_func.__name__}_{i}_{param[0][1].__name__}"
# Pretrained
HF_BASE = _load_config('facebook', 'wav2vec2-base')
HF_LARGE = _load_config('facebook', 'wav2vec2-large')
HF_LARGE_LV60 = _load_config('facebook', 'wav2vec2-large-lv60')
HF_LARGE_XLSR_53 = _load_config('facebook', 'wav2vec2-large-xlsr-53')
HF_BASE_10K_VOXPOPULI = _load_config('facebook', 'wav2vec2-base-10k-voxpopuli')
# Finetuned
HF_BASE_960H = _load_config('facebook', 'wav2vec2-base-960h')
HF_LARGE_960H = _load_config('facebook', 'wav2vec2-large-960h')
HF_LARGE_LV60_960H = _load_config('facebook', 'wav2vec2-large-960h-lv60')
HF_LARGE_LV60_SELF_960H = _load_config('facebook', 'wav2vec2-large-960h-lv60-self')
HF_LARGE_XLSR_DE = _load_config('facebook', 'wav2vec2-large-xlsr-53-german')
# Config and corresponding factory functions
PRETRAIN_CONFIGS = parameterized.expand([
(HF_BASE, wav2vec2_base),
(HF_LARGE, wav2vec2_large),
(HF_LARGE_LV60, wav2vec2_large_lv60k),
(HF_LARGE_XLSR_53, wav2vec2_large_lv60k),
(HF_BASE_10K_VOXPOPULI, wav2vec2_base),
], name_func=_name_func)
FINETUNE_CONFIGS = parameterized.expand([
(HF_BASE_960H, wav2vec2_base),
(HF_LARGE_960H, wav2vec2_large),
(HF_LARGE_LV60_960H, wav2vec2_large_lv60k),
(HF_LARGE_LV60_SELF_960H, wav2vec2_large_lv60k),
(HF_LARGE_XLSR_DE, wav2vec2_large_lv60k),
], name_func=_name_func)
@skipIfNoModule('transformers')
class TestHFIntegration(TorchaudioTestCase):
"""Test the process of importing the models from Hugging Face Transformers
Test methods in this test suite check the following things
1. Models loaded with Hugging Face Transformers cane be imported.
2. The same model can be recreated without Hugging Face Transformers.
"""
def _get_model(self, config):
# Helper function to avoid importing transformers on module scope.
# Normally, we use `is_module_available` helper function to check if
# the library is available, and import it on module scope if available.
# However, somehow, once "transformers" is imported, `is_module_available`
# starts to fail. Therefore, we defer importing "transformers" until
# the actual tests are started.
from transformers.models.wav2vec2 import (
Wav2Vec2Config,
Wav2Vec2Model,
Wav2Vec2ForCTC,
)
if config['architectures'] == ['Wav2Vec2Model']:
return Wav2Vec2Model(Wav2Vec2Config(**config))
if config['architectures'] == ['Wav2Vec2ForCTC']:
return Wav2Vec2ForCTC(Wav2Vec2Config(**config))
raise ValueError(f'Unexpected arch: {config["architectures"]}')
def _test_import_pretrain(self, original, imported, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref = original.feature_extractor(x).transpose(1, 2)
hyp, _ = imported.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config['conv_dim'][-1])
ref = original.feature_projection(x)[0]
hyp = imported.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config['hidden_size'])
ref = original.encoder.pos_conv_embed(x)
hyp = imported.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for original_, imported_ in zip(original.encoder.layers, imported.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref, = original_(x, attention_mask=mask, output_attentions=False)
hyp = imported_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
ref = original.encoder(x).last_hidden_state
hyp = imported.encoder.transformer(x)
self.assertEqual(ref, hyp)
def _test_import_finetune(self, original, imported, config):
# Aux
x = torch.randn(3, 10, config["hidden_size"])
ref = original.lm_head(x)
hyp = imported.aux(x)
self.assertEqual(ref, hyp)
# The whole model without mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
ref = original(x).logits
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# The whole model with mask
batch_size, num_frames = 3, 1024
x = torch.randn(batch_size, num_frames)
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
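# Boolean attention mask: True marks valid frames (position < length) per sample.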
mask = torch.arange(num_frames).expand(batch_size, num_frames) < lengths[:, None]
ref = original(x, attention_mask=mask).logits
hyp, output_lengths = imported(x, lengths)
for i, l in enumerate(output_lengths):
self.assertEqual(ref[i, :l, ...], hyp[i, :l, ...])
@PRETRAIN_CONFIGS
def test_import_pretrain(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original, imported, config)
@FINETUNE_CONFIGS
def test_import_finetune(self, config, _):
"""wav2vec2 models from HF transformers can be imported and yields the same results"""
original = self._get_model(config).eval()
imported = import_huggingface_model(original).eval()
self._test_import_pretrain(original.wav2vec2, imported, config)
self._test_import_finetune(original, imported, config)
def _test_recreate(self, imported, reloaded, config):
torch.manual_seed(0)
# FeatureExtractor
x = torch.randn(3, 1024)
ref, _ = imported.feature_extractor(x, None)
hyp, _ = reloaded.feature_extractor(x, None)
self.assertEqual(ref, hyp)
# Feature projection
x = torch.randn(3, 10, config['conv_dim'][-1])
ref = imported.encoder.feature_projection(x)
hyp = reloaded.encoder.feature_projection(x)
self.assertEqual(ref, hyp)
# Convolutional Positional Encoder
x = torch.randn(3, 256, config['hidden_size'])
ref = imported.encoder.transformer.pos_conv_embed(x)
hyp = reloaded.encoder.transformer.pos_conv_embed(x)
self.assertEqual(ref, hyp)
# Encoder Transformer Layer
for imported_, reloaded_ in zip(imported.encoder.transformer.layers, reloaded.encoder.transformer.layers):
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported_(x, mask)
hyp = reloaded_(x, mask)
self.assertEqual(ref, hyp)
# The whole Encoder Transformer
# TODO: Add mask pattern. Expected mask shapes and values are different.
b, l, e = 16, 3, config["hidden_size"]
x = torch.randn(b, l, e)
mask = torch.randn(b, 1, l, l)
ref = imported.encoder.transformer(x)
hyp = reloaded.encoder.transformer(x)
self.assertEqual(ref, hyp)
# Aux
if imported.aux is not None:
x = torch.randn(3, 10, config["hidden_size"])
ref = imported.aux(x)
hyp = reloaded.aux(x)
self.assertEqual(ref, hyp)
# The whole model
x = torch.randn(3, 1024)
ref, _ = imported(x)
hyp, _ = reloaded(x)
self.assertEqual(ref, hyp)
@PRETRAIN_CONFIGS
def test_recreate_pretrain(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func()
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
@FINETUNE_CONFIGS
def test_recreate_finetune(self, config, factory_func):
"""Imported models can be recreated via a factory function without Hugging Face transformers."""
imported = import_huggingface_model(self._get_model(config)).eval()
reloaded = factory_func(aux_num_out=imported.aux.out_features)
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
self._test_recreate(imported, reloaded, config)
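# For reference, a minimal usage sketch of the import path exercised above
# (a sketch, not part of the test suite; assumes the `transformers` package
# is installed and uses an illustrative model id):
#
#     from transformers import Wav2Vec2ForCTC
#     from torchaudio.models.wav2vec2.utils import import_huggingface_model
#
#     original = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
#     imported = import_huggingface_model(original.eval()).eval()
#     emissions, lengths = imported(waveform, waveform_lengths)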
|
import json
import torch
from torchaudio.models.wav2vec2 import (
wav2vec2_base,
wav2vec2_large,
wav2vec2_large_lv60k,
hubert_base,
hubert_large,
hubert_xlarge,
)
from torchaudio.models.wav2vec2.utils import (
import_fairseq_model,
)
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
get_asset_path,
skipIfNoModule,
TorchaudioTestCase,
)
def _load_config(*paths):
with open(f'{get_asset_path("wav2vec2", "fairseq", *paths)}.json', 'r') as file_:
return json.load(file_)
def _name_func(testcase_func, i, param):
return f'{testcase_func.__name__}_{i}_{param[0][1].__name__}'
# Pretraining models
WAV2VEC2_BASE = _load_config('wav2vec_small')
WAV2VEC2_LARGE = _load_config('libri960_big')
WAV2VEC2_LARGE_LV60K = _load_config('wav2vec_vox_new')
WAV2VEC2_XLSR_53_56K = _load_config('xlsr_53_56k')
HUBERT_BASE = _load_config('hubert_base_ls960')
HUBERT_LARGE_LL60K = _load_config('hubert_large_ll60k')
HUBERT_XLARGE_LL60K = _load_config('hubert_xtralarge_ll60k')
# Finetuning models
WAV2VEC2_BASE_960H = _load_config('wav2vec_small_960h')
WAV2VEC2_LARGE_960H = _load_config('wav2vec_large_960h')
WAV2VEC2_LARGE_LV60K_960H = _load_config('wav2vec_large_lv60k_960h')
WAV2VEC2_LARGE_LV60K_SELF_960H = _load_config('wav2vec_large_lv60k_self_960h')
HUBERT_LARGE = _load_config('hubert_large_ll60k_finetune_ls960')
HUBERT_XLARGE = _load_config('hubert_xtralarge_ll60k_finetune_ls960')
# Config and corresponding factory functions
WAV2VEC2_PRETRAINING_CONFIGS = parameterized.expand([
(WAV2VEC2_BASE, wav2vec2_base),
(WAV2VEC2_LARGE, wav2vec2_large),
(WAV2VEC2_LARGE_LV60K, wav2vec2_large_lv60k),
(WAV2VEC2_XLSR_53_56K, wav2vec2_large_lv60k),
], name_func=_name_func)
HUBERT_PRETRAINING_CONFIGS = parameterized.expand([
(HUBERT_BASE, hubert_base),
(HUBERT_LARGE_LL60K, hubert_large),
(HUBERT_XLARGE_LL60K, hubert_xlarge),
], name_func=_name_func)
ALL_PRETRAINING_CONFIGS = parameterized.expand([
(WAV2VEC2_BASE, wav2vec2_base),
(WAV2VEC2_LARGE, wav2vec2_large),
(WAV2VEC2_LARGE_LV60K, wav2vec2_large_lv60k),
(WAV2VEC2_XLSR_53_56K, wav2vec2_large_lv60k),
(HUBERT_BASE, hubert_base),
(HUBERT_LARGE_LL60K, hubert_large),
(HUBERT_XLARGE_LL60K, hubert_xlarge),
], name_func=_name_func)
FINETUNING_CONFIGS = parameterized.expand([
(WAV2VEC2_BASE_960H, wav2vec2_base),
(WAV2VEC2_LARGE_960H, wav2vec2_large),
(WAV2VEC2_LARGE_LV60K_960H, wav2vec2_large_lv60k),
(WAV2VEC2_LARGE_LV60K_SELF_960H, wav2vec2_large_lv60k),
(HUBERT_LARGE, hubert_large),
(HUBERT_XLARGE, hubert_xlarge),
], name_func=_name_func)
@skipIfNoModule('fairseq')
class TestFairseqIntegration(TorchaudioTestCase):
"""Test the process of importing the models from fairseq.
Test methods in this test suite check the following things:
1. Models loaded with fairseq can be imported.
2. The same model can be recreated without fairseq.
"""
def _get_model(self, config, num_out=None):
import copy
from omegaconf import OmegaConf
from fairseq.models.wav2vec.wav2vec2 import (
Wav2Vec2Config,
Wav2Vec2Model,
)
from fairseq.models.wav2vec.wav2vec2_asr import (
Wav2VecEncoder,
Wav2Vec2CtcConfig,
)
from fairseq.models.hubert.hubert_asr import (
HubertCtcConfig,
HubertEncoder,
)
from fairseq.models.hubert.hubert import (
HubertModel,
HubertConfig,
)
from fairseq.tasks.hubert_pretraining import HubertPretrainingConfig
if config['_name'] == 'wav2vec_ctc':
config = copy.deepcopy(config)
config['w2v_args'] = OmegaConf.create(config['w2v_args'])
return Wav2VecEncoder(Wav2Vec2CtcConfig(**config), num_out)
if config['_name'] == 'wav2vec2':
return Wav2Vec2Model(Wav2Vec2Config(**config))
if config['_name'] == 'hubert_ctc':
config = copy.deepcopy(config)
config['w2v_args'] = OmegaConf.create(config['w2v_args'])
ctc_cfg = HubertCtcConfig(**config)
return HubertEncoder(ctc_cfg, tgt_dict=range(num_out))
if config['_name'] == 'hubert':
dicts = [list(range(i)) for i in config['num_classes']]
return HubertModel(
HubertConfig(**config['model']),
HubertPretrainingConfig(**config['task']),
dicts,
)
raise ValueError(f'Unexpected configuration: {config["_name"]}')
@WAV2VEC2_PRETRAINING_CONFIGS
def test_import_wav2vec2_pretraining_model(self, config, _):
"""Wav2vec2 pretraining models from fairseq can be imported and yield the same results"""
batch_size, num_frames = 3, 1024
torch.manual_seed(0)
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()
x = torch.randn(batch_size, num_frames)
hyp, _ = imported.extract_features(x)
refs = original.extract_features(x, padding_mask=torch.zeros_like(x), layer=-1)
for i, (ref, _) in enumerate(refs['layer_results']):
self.assertEqual(hyp[i], ref.transpose(0, 1))
@HUBERT_PRETRAINING_CONFIGS
def test_import_hubert_pretraining_model(self, config, factory_func):
"""HuBERT pretraining models from fairseq can be imported and yields the same results"""
batch_size, num_frames = 3, 1024
torch.manual_seed(0)
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()
x = torch.randn(batch_size, num_frames)
mask = torch.zeros_like(x)
hyp, _ = imported.extract_features(x)
# check the last layer
ref, _ = original.extract_features(x, padding_mask=mask, output_layer=len(original.encoder.layers))
atol = 3.0e-05 if factory_func is hubert_xlarge else 1.0e-5
self.assertEqual(hyp[-1], ref, atol=atol, rtol=1.3e-6)
# check the first layer
ref, _ = original.extract_features(x, padding_mask=mask, output_layer=1)
self.assertEqual(hyp[0], ref)
@ALL_PRETRAINING_CONFIGS
def test_recreate_pretraining_model(self, config, factory_func):
"""Imported pretraining models can be recreated via a factory function without fairseq."""
batch_size, num_frames = 3, 1024
original = self._get_model(config).eval()
imported = import_fairseq_model(original).eval()
reloaded = factory_func()
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
x = torch.randn(batch_size, num_frames)
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
# Without mask
ref, _ = imported(x)
hyp, _ = reloaded(x)
self.assertEqual(ref, hyp)
# With mask
ref, ref_lengths = imported(x, lengths)
hyp, hyp_lengths = reloaded(x, lengths)
self.assertEqual(ref, hyp)
self.assertEqual(ref_lengths, hyp_lengths)
@FINETUNING_CONFIGS
def test_import_finetuning_model(self, config, _):
"""Fintuned wav2vec2 models from fairseq can be imported and yields the same results"""
num_out = 28
batch_size, num_frames = 3, 1024
original = self._get_model(config, num_out).eval()
imported = import_fairseq_model(original).eval()
# Without mask
x = torch.randn(batch_size, num_frames)
ref = original(x, torch.zeros_like(x))['encoder_out'].transpose(0, 1)
hyp, _ = imported(x)
self.assertEqual(ref, hyp)
# With mask
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
mask = torch.arange(num_frames).expand(batch_size, num_frames) >= lengths[:, None]
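# Note: fairseq's padding_mask uses the opposite convention from HF's
# attention_mask: True marks *padded* frames, hence `>=` above.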
ref = original(x, mask)['encoder_out'].transpose(0, 1)
hyp, output_lengths = imported(x, lengths)
for i, l in enumerate(output_lengths):
self.assertEqual(ref[i, :l, ...], hyp[i, :l, ...])
@FINETUNING_CONFIGS
def test_recreate_finetuning_model(self, config, factory_func):
"""Imported finetuning models can be recreated via a factory function without fairseq."""
num_out = 28
batch_size, num_frames = 3, 1024
original = self._get_model(config, num_out).eval()
imported = import_fairseq_model(original).eval()
reloaded = factory_func(aux_num_out=num_out)
reloaded.load_state_dict(imported.state_dict())
reloaded.eval()
# Without mask
torch.manual_seed(0)
x = torch.randn(batch_size, num_frames)
ref, _ = imported(x)
hyp, _ = reloaded(x)
self.assertEqual(ref, hyp)
# With mask
lengths = torch.randint(low=0, high=num_frames, size=[batch_size, ])
ref, ref_lengths = imported(x, lengths)
hyp, hyp_lengths = reloaded(x, lengths)
self.assertEqual(ref, hyp)
self.assertEqual(ref_lengths, hyp_lengths)
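# For reference, a minimal sketch of the fairseq import path exercised above
# (a sketch, not part of the test suite; assumes fairseq is installed and
# "wav2vec_small.pt" is an illustrative checkpoint path):
#
#     import fairseq
#     from torchaudio.models.wav2vec2.utils import import_fairseq_model
#
#     models, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
#         ["wav2vec_small.pt"])
#     imported = import_fairseq_model(models[0].eval())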
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .model_test_impl import (
Tacotron2EncoderTests,
Tacotron2DecoderTests,
Tacotron2Tests,
)
class TestTacotron2EncoderFloat32CPU(Tacotron2EncoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2DecoderFloat32CPU(Tacotron2DecoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class TestTacotron2Float32CPU(Tacotron2Tests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
|
from typing import Tuple
import torch
from torch import Tensor
from torchaudio.models import Tacotron2
from torchaudio.models.tacotron2 import _Encoder, _Decoder
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
class Tacotron2InferenceWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, text: Tensor, text_lengths: Tensor) -> Tuple[Tensor, Tensor, Tensor]:
return self.model.infer(text, text_lengths)
class Tacotron2DecoderInferenceWrapper(torch.nn.Module):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, memory: Tensor, memory_lengths: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
return self.model.infer(memory, memory_lengths)
class TorchscriptConsistencyMixin(TestBaseMixin):
r"""Mixin to provide easy access assert torchscript consistency"""
def _assert_torchscript_consistency(self, model, tensors):
ts_func = torch_script(model)
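# Seed identically before the eager and scripted runs so that any stochastic
# ops (e.g. dropout) produce the same values in both.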
torch.random.manual_seed(40)
output = model(*tensors)
torch.random.manual_seed(40)
ts_output = ts_func(*tensors)
self.assertEqual(ts_output, output)
class Tacotron2EncoderTests(TorchscriptConsistencyMixin):
def test_tacotron2_torchscript_consistency(self):
r"""Validate the torchscript consistency of a Encoder."""
n_batch, n_seq, encoder_embedding_dim = 16, 64, 512
model = _Encoder(encoder_embedding_dim=encoder_embedding_dim,
encoder_n_convolution=3,
encoder_kernel_size=5).to(self.device).eval()
x = torch.rand(
n_batch, encoder_embedding_dim, n_seq, device=self.device, dtype=self.dtype
)
input_lengths = (
torch.ones(n_batch, device=self.device, dtype=torch.int32) * n_seq
)
self._assert_torchscript_consistency(model, (x, input_lengths))
def test_encoder_output_shape(self):
r"""Feed tensors with specific shape to Tacotron2 Decoder and validate
that it outputs with a tensor with expected shape.
"""
n_batch, n_seq, encoder_embedding_dim = 16, 64, 512
model = _Encoder(encoder_embedding_dim=encoder_embedding_dim,
encoder_n_convolution=3,
encoder_kernel_size=5).to(self.device).eval()
x = torch.rand(
n_batch, encoder_embedding_dim, n_seq, device=self.device, dtype=self.dtype
)
input_lengths = (
torch.ones(n_batch, device=self.device, dtype=torch.int32) * n_seq
)
out = model(x, input_lengths)
assert out.size() == (n_batch, n_seq, encoder_embedding_dim)
def _get_decoder_model(n_mels=80, encoder_embedding_dim=512,
decoder_max_step=2000, gate_threshold=0.5):
model = _Decoder(
n_mels=n_mels,
n_frames_per_step=1,
encoder_embedding_dim=encoder_embedding_dim,
decoder_rnn_dim=1024,
decoder_max_step=decoder_max_step,
decoder_dropout=0.1,
decoder_early_stopping=True,
attention_rnn_dim=1024,
attention_hidden_dim=128,
attention_location_n_filter=32,
attention_location_kernel_size=31,
attention_dropout=0.1,
prenet_dim=256,
gate_threshold=gate_threshold,
)
return model
class Tacotron2DecoderTests(TorchscriptConsistencyMixin):
def test_decoder_torchscript_consistency(self):
r"""Validate the torchscript consistency of a Decoder."""
n_batch = 16
n_mels = 80
n_seq = 200
encoder_embedding_dim = 256
n_time_steps = 150
model = _get_decoder_model(n_mels=n_mels, encoder_embedding_dim=encoder_embedding_dim)
model = model.to(self.device).eval()
memory = torch.rand(
n_batch, n_seq, encoder_embedding_dim, dtype=self.dtype, device=self.device
)
decoder_inputs = torch.rand(
n_batch, n_mels, n_time_steps, dtype=self.dtype, device=self.device
)
memory_lengths = torch.ones(n_batch, dtype=torch.int32, device=self.device)
self._assert_torchscript_consistency(
model, (memory, decoder_inputs, memory_lengths)
)
def test_decoder_output_shape(self):
r"""Feed tensors with specific shape to Tacotron2 Decoder and validate
that it outputs with a tensor with expected shape.
"""
n_batch = 16
n_mels = 80
n_seq = 200
encoder_embedding_dim = 256
n_time_steps = 150
model = _get_decoder_model(n_mels=n_mels, encoder_embedding_dim=encoder_embedding_dim)
model = model.to(self.device).eval()
memory = torch.rand(
n_batch, n_seq, encoder_embedding_dim, dtype=self.dtype, device=self.device
)
decoder_inputs = torch.rand(
n_batch, n_mels, n_time_steps, dtype=self.dtype, device=self.device
)
memory_lengths = torch.ones(n_batch, dtype=torch.int32, device=self.device)
mel_specgram, gate_outputs, alignments = model(
memory, decoder_inputs, memory_lengths
)
assert mel_specgram.size() == (n_batch, n_mels, n_time_steps)
assert gate_outputs.size() == (n_batch, n_time_steps)
assert alignments.size() == (n_batch, n_time_steps, n_seq)
def test_decoder_inference_torchscript_consistency(self):
r"""Validate the torchscript consistency of a Decoder."""
n_batch = 16
n_mels = 80
n_seq = 200
encoder_embedding_dim = 256
decoder_max_step = 300 # make inference more efficient
gate_threshold = 0.505 # make inference more efficient
model = _get_decoder_model(
n_mels=n_mels,
encoder_embedding_dim=encoder_embedding_dim,
decoder_max_step=decoder_max_step,
gate_threshold=gate_threshold,
)
model = model.to(self.device).eval()
memory = torch.rand(
n_batch, n_seq, encoder_embedding_dim, dtype=self.dtype, device=self.device
)
memory_lengths = torch.ones(n_batch, dtype=torch.int32, device=self.device)
model_wrapper = Tacotron2DecoderInferenceWrapper(model)
self._assert_torchscript_consistency(model_wrapper, (memory, memory_lengths))
def test_decoder_inference_output_shape(self):
r"""Validate the torchscript consistency of a Decoder."""
n_batch = 16
n_mels = 80
n_seq = 200
encoder_embedding_dim = 256
decoder_max_step = 300 # make inference more efficient
gate_threshold = 0.505 # if set to 0.5, the model will only run one step
model = _get_decoder_model(
n_mels=n_mels,
encoder_embedding_dim=encoder_embedding_dim,
decoder_max_step=decoder_max_step,
gate_threshold=gate_threshold,
)
model = model.to(self.device).eval()
memory = torch.rand(
n_batch, n_seq, encoder_embedding_dim, dtype=self.dtype, device=self.device
)
memory_lengths = torch.ones(n_batch, dtype=torch.int32, device=self.device)
mel_specgram, mel_specgram_lengths, gate_outputs, alignments = model.infer(
memory, memory_lengths
)
assert len(mel_specgram.size()) == 3
assert mel_specgram.size()[:-1] == (n_batch, n_mels, )
assert mel_specgram.size()[2] == mel_specgram_lengths.max().item()
assert len(mel_specgram_lengths.size()) == 1
assert mel_specgram_lengths.size()[0] == n_batch
assert mel_specgram_lengths.max().item() <= model.decoder_max_step
assert len(gate_outputs.size()) == 2
assert gate_outputs.size()[0] == n_batch
assert gate_outputs.size()[1] == mel_specgram_lengths.max().item()
assert len(alignments.size()) == 2
assert alignments.size()[0] == n_seq
assert alignments.size()[1] == mel_specgram_lengths.max().item() * n_batch
def _get_tacotron2_model(n_mels, decoder_max_step=2000, gate_threshold=0.5):
return Tacotron2(
mask_padding=False,
n_mels=n_mels,
n_symbol=148,
n_frames_per_step=1,
symbol_embedding_dim=512,
encoder_embedding_dim=512,
encoder_n_convolution=3,
encoder_kernel_size=5,
decoder_rnn_dim=1024,
decoder_max_step=decoder_max_step,
decoder_dropout=0.1,
decoder_early_stopping=True,
attention_rnn_dim=1024,
attention_hidden_dim=128,
attention_location_n_filter=32,
attention_location_kernel_size=31,
attention_dropout=0.1,
prenet_dim=256,
postnet_n_convolution=5,
postnet_kernel_size=5,
postnet_embedding_dim=512,
gate_threshold=gate_threshold,
)
class Tacotron2Tests(TorchscriptConsistencyMixin):
def _get_inputs(
self, n_mels: int, n_batch: int, max_mel_specgram_length: int, max_text_length: int
):
text = torch.randint(
0, 148, (n_batch, max_text_length), dtype=torch.int32, device=self.device
)
text_lengths = max_text_length * torch.ones(
(n_batch,), dtype=torch.int32, device=self.device
)
mel_specgram = torch.rand(
n_batch,
n_mels,
max_mel_specgram_length,
dtype=self.dtype,
device=self.device,
)
mel_specgram_lengths = max_mel_specgram_length * torch.ones(
(n_batch,), dtype=torch.int32, device=self.device
)
return text, text_lengths, mel_specgram, mel_specgram_lengths
def test_tacotron2_torchscript_consistency(self):
r"""Validate the torchscript consistency of a Tacotron2."""
n_batch = 16
n_mels = 80
max_mel_specgram_length = 300
max_text_length = 100
model = _get_tacotron2_model(n_mels).to(self.device).eval()
inputs = self._get_inputs(
n_mels, n_batch, max_mel_specgram_length, max_text_length
)
self._assert_torchscript_consistency(model, inputs)
def test_tacotron2_output_shape(self):
r"""Feed tensors with specific shape to Tacotron2 and validate
that it outputs with a tensor with expected shape.
"""
n_batch = 16
n_mels = 80
max_mel_specgram_length = 300
max_text_length = 100
model = _get_tacotron2_model(n_mels).to(self.device).eval()
inputs = self._get_inputs(
n_mels, n_batch, max_mel_specgram_length, max_text_length
)
mel_out, mel_out_postnet, gate_outputs, alignments = model(*inputs)
assert mel_out.size() == (n_batch, n_mels, max_mel_specgram_length)
assert mel_out_postnet.size() == (n_batch, n_mels, max_mel_specgram_length)
assert gate_outputs.size() == (n_batch, max_mel_specgram_length)
assert alignments.size() == (n_batch, max_mel_specgram_length, max_text_length)
def test_tacotron2_backward(self):
r"""Make sure calling the backward function on Tacotron2's outputs does
not error out. Following:
https://github.com/pytorch/vision/blob/23b8760374a5aaed53c6e5fc83a7e83dbe3b85df/test/test_models.py#L255
"""
n_batch = 16
n_mels = 80
max_mel_specgram_length = 300
max_text_length = 100
model = _get_tacotron2_model(n_mels).to(self.device)
inputs = self._get_inputs(
n_mels, n_batch, max_mel_specgram_length, max_text_length
)
mel_out, mel_out_postnet, gate_outputs, _ = model(*inputs)
mel_out.sum().backward(retain_graph=True)
mel_out_postnet.sum().backward(retain_graph=True)
gate_outputs.sum().backward()
def _get_inference_inputs(self, n_batch: int, max_text_length: int):
text = torch.randint(
0, 148, (n_batch, max_text_length), dtype=torch.int32, device=self.device
)
text_lengths = max_text_length * torch.ones(
(n_batch,), dtype=torch.int32, device=self.device
)
return text, text_lengths
def test_tacotron2_inference_torchscript_consistency(self):
r"""Validate the torchscript consistency of Tacotron2 inference function."""
n_batch = 16
n_mels = 40
max_text_length = 100
decoder_max_step = 200 # make inference more efficient
gate_threshold = 0.51 # if set to 0.5, the model will only run one step
model = _get_tacotron2_model(
n_mels, decoder_max_step=decoder_max_step, gate_threshold=gate_threshold
).to(self.device).eval()
inputs = self._get_inference_inputs(n_batch, max_text_length)
model_wrapper = Tacotron2InferenceWrapper(model)
self._assert_torchscript_consistency(model_wrapper, inputs)
def test_tacotron2_inference_output_shape(self):
r"""Feed tensors with specific shape to Tacotron2 inference function and validate
that it outputs with a tensor with expected shape.
"""
n_batch = 16
n_mels = 40
max_text_length = 100
decoder_max_step = 200 # make inference more efficient
gate_threshold = 0.51 # if set to 0.5, the model will only run one step
model = _get_tacotron2_model(
n_mels, decoder_max_step=decoder_max_step, gate_threshold=gate_threshold
).to(self.device).eval()
inputs = self._get_inference_inputs(n_batch, max_text_length)
mel_out, mel_specgram_lengths, alignments = model.infer(*inputs)
# There is no guarantee on exactly what max_mel_specgram_length should be
# We only know that it should be smaller than model.decoder.decoder_max_step
assert len(mel_out.size()) == 3
assert mel_out.size()[:2] == (n_batch, n_mels, )
assert mel_out.size()[2] == mel_specgram_lengths.max().item()
assert len(mel_specgram_lengths.size()) == 1
assert mel_specgram_lengths.size()[0] == n_batch
assert mel_specgram_lengths.max().item() <= model.decoder.decoder_max_step
assert len(alignments.size()) == 3
assert alignments.size()[0] == n_batch
assert alignments.size()[1] == mel_specgram_lengths.max().item()
assert alignments.size()[2] == max_text_length
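# For reference, a minimal Tacotron2 inference sketch mirroring the tests above
# (a sketch, not part of the test suite; shapes are illustrative):
#
#     model = _get_tacotron2_model(n_mels=80).eval()
#     text = torch.randint(0, 148, (1, 50), dtype=torch.int32)
#     text_lengths = torch.tensor([50], dtype=torch.int32)
#     with torch.no_grad():
#         mel, mel_lengths, alignments = model.infer(text, text_lengths)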
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from .model_test_impl import (
Tacotron2EncoderTests,
Tacotron2DecoderTests,
Tacotron2Tests,
)
@skipIfNoCuda
class TestTacotron2EncoderFloat32CUDA(Tacotron2EncoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class TestTacotron2DecoderFloat32CUDA(Tacotron2DecoderTests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class TestTacotron2Float32CUDA(Tacotron2Tests, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.common_utils import TestBaseMixin, torch_script
from torchaudio.prototype import Emformer
class EmformerTestImpl(TestBaseMixin):
def _gen_model(self, input_dim, right_context_length):
emformer = Emformer(
input_dim,
8,
256,
3,
segment_length=4,
left_context_length=30,
right_context_length=right_context_length,
max_memory_size=1,
).to(device=self.device, dtype=self.dtype)
return emformer
def _gen_inputs(self, input_dim, batch_size, num_frames, right_context_length):
input = torch.rand(batch_size, num_frames, input_dim).to(
device=self.device, dtype=self.dtype
)
lengths = torch.randint(1, num_frames - right_context_length, (batch_size,)).to(
device=self.device, dtype=self.dtype
)
return input, lengths
def test_torchscript_consistency_forward(self):
r"""Verify that scripting Emformer does not change the behavior of method `forward`."""
input_dim = 128
batch_size = 10
num_frames = 400
right_context_length = 1
emformer = self._gen_model(input_dim, right_context_length)
input, lengths = self._gen_inputs(
input_dim, batch_size, num_frames, right_context_length
)
scripted = torch_script(emformer)
ref_out, ref_len = emformer(input, lengths)
scripted_out, scripted_len = scripted(input, lengths)
self.assertEqual(ref_out, scripted_out)
self.assertEqual(ref_len, scripted_len)
def test_torchscript_consistency_infer(self):
r"""Verify that scripting Emformer does not change the behavior of method `infer`."""
input_dim = 128
batch_size = 10
num_frames = 400
right_context_length = 1
emformer = self._gen_model(input_dim, right_context_length).eval()
scripted = torch_script(emformer).eval()
ref_state, scripted_state = None, None
for _ in range(3):
input, lengths = self._gen_inputs(input_dim, batch_size, num_frames, 0)
ref_out, ref_len, ref_state = emformer.infer(input, lengths, ref_state)
scripted_out, scripted_len, scripted_state = scripted.infer(
input, lengths, scripted_state
)
self.assertEqual(ref_out, scripted_out)
self.assertEqual(ref_len, scripted_len)
self.assertEqual(ref_state, scripted_state)
def test_output_shape_forward(self):
r"""Check that method `forward` produces correctly-shaped outputs."""
input_dim = 128
batch_size = 10
num_frames = 123
right_context_length = 9
emformer = self._gen_model(input_dim, right_context_length)
input, lengths = self._gen_inputs(
input_dim, batch_size, num_frames, right_context_length
)
output, output_lengths = emformer(input, lengths)
self.assertEqual(
(batch_size, num_frames - right_context_length, input_dim), output.shape
)
self.assertEqual((batch_size,), output_lengths.shape)
def test_output_shape_infer(self):
r"""Check that method `infer` produces correctly-shaped outputs."""
input_dim = 256
batch_size = 5
num_frames = 200
right_context_length = 2
emformer = self._gen_model(input_dim, right_context_length).eval()
state = None
for _ in range(3):
input, lengths = self._gen_inputs(input_dim, batch_size, num_frames, 0)
output, output_lengths, state = emformer.infer(input, lengths, state)
self.assertEqual(
(batch_size, num_frames - right_context_length, input_dim), output.shape
)
self.assertEqual((batch_size,), output_lengths.shape)
def test_output_lengths_forward(self):
r"""Check that method `forward` returns input `lengths` unmodified."""
input_dim = 88
batch_size = 13
num_frames = 123
right_context_length = 2
emformer = self._gen_model(input_dim, right_context_length)
input, lengths = self._gen_inputs(
input_dim, batch_size, num_frames, right_context_length
)
_, output_lengths = emformer(input, lengths)
self.assertEqual(lengths, output_lengths)
def test_output_lengths_infer(self):
r"""Check that method `infer` returns input `lengths` with right context length subtracted."""
input_dim = 88
batch_size = 13
num_frames = 123
right_context_length = 2
emformer = self._gen_model(input_dim, right_context_length).eval()
input, lengths = self._gen_inputs(input_dim, batch_size, num_frames, 0)
_, output_lengths, _ = emformer.infer(input, lengths)
self.assertEqual(
torch.clamp(lengths - right_context_length, min=0), output_lengths
)
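# For reference, a minimal streaming-inference sketch for Emformer mirroring
# the tests above (a sketch, not part of the test suite; `chunks` is a
# hypothetical iterable of (input, lengths) pairs):
#
#     emformer = Emformer(128, 8, 256, 3, segment_length=4,
#                         right_context_length=1).eval()
#     state = None
#     for chunk, chunk_lengths in chunks:
#         output, output_lengths, state = emformer.infer(chunk, chunk_lengths, state)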
|
import torch
from torchaudio_unittest.prototype.emformer_test_impl import EmformerTestImpl
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
@skipIfNoCuda
class EmformerFloat32GPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cuda")
@skipIfNoCuda
class EmformerFloat64GPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cuda")
|
import torch
from torchaudio_unittest.prototype.emformer_test_impl import EmformerTestImpl
from torchaudio_unittest.common_utils import PytorchTestCase
class EmformerFloat32CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float32
device = torch.device("cpu")
class EmformerFloat64CPUTest(EmformerTestImpl, PytorchTestCase):
dtype = torch.float64
device = torch.device("cpu")
|
import torch
import torchaudio.transforms as T
from parameterized import parameterized, param
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_whitenoise,
get_spectrogram,
nested_params,
)
from torchaudio_unittest.common_utils.psd_utils import psd_numpy
def _get_ratio(mat):
return (mat.sum() / mat.numel()).item()
class TransformsTestBase(TestBaseMixin):
def test_InverseMelScale(self):
"""Gauge the quality of InverseMelScale transform.
As InverseMelScale is currently implemented with
random initialization + iterative optimization,
it is not practically possible to assert the difference between
the estimated spectrogram and the original spectrogram as a whole.
The estimated spectrogram has a very large local discrepancy.
Thus in this test we gauge what percentage of elements are below
certain tolerances.
At the moment, the quality of the estimated spectrogram is not good.
If the implementation changes in a way that makes the quality even worse,
this test will fail.
"""
n_fft = 400
power = 1
n_mels = 64
sample_rate = 8000
n_stft = n_fft // 2 + 1
# Generate reference spectrogram and input mel-scaled spectrogram
expected = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=1, n_channels=2),
n_fft=n_fft, power=power).to(self.device, self.dtype)
input = T.MelScale(
n_mels=n_mels, sample_rate=sample_rate, n_stft=n_stft
).to(self.device, self.dtype)(expected)
# Run transform
transform = T.InverseMelScale(
n_stft, n_mels=n_mels, sample_rate=sample_rate).to(self.device, self.dtype)
torch.random.manual_seed(0)
result = transform(input)
# Compare
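# A tiny epsilon guards against division by zero where the reference
# spectrogram is exactly 0.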
epsilon = 1e-60
relative_diff = torch.abs((result - expected) / (expected + epsilon))
for tol in [1e-1, 1e-3, 1e-5, 1e-10]:
print(
f"Ratio of relative diff smaller than {tol:e} is "
f"{_get_ratio(relative_diff < tol)}")
assert _get_ratio(relative_diff < 1e-1) > 0.2
assert _get_ratio(relative_diff < 1e-3) > 5e-3
assert _get_ratio(relative_diff < 1e-5) > 1e-5
@nested_params(
["sinc_interpolation", "kaiser_window"],
[16000, 44100],
)
def test_resample_identity(self, resampling_method, sample_rate):
"""When sampling rate is not changed, the transform returns an identical Tensor"""
waveform = get_whitenoise(sample_rate=sample_rate, duration=1)
resampler = T.Resample(sample_rate, sample_rate, resampling_method)
resampled = resampler(waveform)
self.assertEqual(waveform, resampled)
@nested_params(
["sinc_interpolation", "kaiser_window"],
[None, torch.float64],
)
def test_resample_cache_dtype(self, resampling_method, dtype):
"""Providing dtype changes the kernel cache dtype"""
transform = T.Resample(16000, 44100, resampling_method, dtype=dtype)
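# When dtype is None, the cached kernel is expected to default to float32.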
assert transform.kernel.dtype == (dtype if dtype is not None else torch.float32)
@parameterized.expand([
param(n_fft=300, center=True, onesided=True),
param(n_fft=400, center=True, onesided=False),
param(n_fft=300, center=True, onesided=False),
param(n_fft=400, hop_length=10),
param(n_fft=800, win_length=400, hop_length=20),
param(n_fft=800, win_length=400, hop_length=20, normalized=True),
param(),
param(n_fft=400, pad=32),
# These tests do not work - cause runtime error
# See https://github.com/pytorch/pytorch/issues/62323
# param(n_fft=400, center=False, onesided=True),
# param(n_fft=400, center=False, onesided=False),
])
def test_roundtrip_spectrogram(self, **args):
"""Test the spectrogram + inverse spectrogram results in approximate identity."""
waveform = get_whitenoise(sample_rate=8000, duration=0.5, dtype=self.dtype)
s = T.Spectrogram(**args, power=None)
inv_s = T.InverseSpectrogram(**args)
transformed = s.forward(waveform)
restored = inv_s.forward(transformed, length=waveform.shape[-1])
self.assertEqual(waveform, restored, atol=1e-6, rtol=1e-6)
@parameterized.expand([
param(0.5, 1, True, False),
param(0.5, 1, None, False),
param(1, 4, True, True),
param(1, 6, None, True),
])
def test_psd(self, duration, channel, mask, multi_mask):
"""Providing dtype changes the kernel cache dtype"""
transform = T.PSD(multi_mask)
waveform = get_whitenoise(sample_rate=8000, duration=duration, n_channels=channel)
spectrogram = get_spectrogram(waveform, n_fft=400) # (channel, freq, time)
spectrogram = spectrogram.to(torch.cdouble)
if mask is not None:
if multi_mask:
mask = torch.rand(spectrogram.shape[-3:])
else:
mask = torch.rand(spectrogram.shape[-2:])
psd_np = psd_numpy(spectrogram.detach().numpy(), mask.detach().numpy(), multi_mask)
else:
psd_np = psd_numpy(spectrogram.detach().numpy(), mask, multi_mask)
psd = transform(spectrogram, mask)
self.assertEqual(psd, psd_np, atol=1e-5, rtol=1e-5)
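# For reference, a minimal T.PSD usage sketch mirroring the test above
# (a sketch, not part of the test suite; shapes are illustrative):
#
#     specgram = torch.randn(4, 201, 100, dtype=torch.cdouble)  # (channel, freq, time)
#     mask = torch.rand(201, 100)                               # (freq, time)
#     psd_matrix = T.PSD()(specgram, mask)                      # (freq, channel, channel)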
|
from typing import List
import unittest
from parameterized import parameterized
import torch
from torch.autograd import gradcheck, gradgradcheck
import torchaudio.transforms as T
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_whitenoise,
get_spectrogram,
nested_params,
rnnt_utils,
)
class _DeterministicWrapper(torch.nn.Module):
"""Helper transform wrapper to make the given transform deterministic"""
def __init__(self, transform, seed=0):
super().__init__()
self.seed = seed
self.transform = transform
def forward(self, input: torch.Tensor):
torch.random.manual_seed(self.seed)
return self.transform(input)
class AutogradTestMixin(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
*,
nondet_tol: float = 0.0,
):
transform = transform.to(dtype=torch.float64, device=self.device)
# gradcheck and gradgradcheck only pass if the input tensors are of dtype `torch.double` or
# `torch.cdouble`, when the default eps and tolerance values are used.
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(
dtype=torch.cdouble if i.is_complex() else torch.double,
device=self.device)
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_, nondet_tol=nondet_tol)
@parameterized.expand([
({'pad': 0, 'normalized': False, 'power': None, 'return_complex': True}, ),
({'pad': 3, 'normalized': False, 'power': None, 'return_complex': True}, ),
({'pad': 0, 'normalized': True, 'power': None, 'return_complex': True}, ),
({'pad': 3, 'normalized': True, 'power': None, 'return_complex': True}, ),
({'pad': 0, 'normalized': False, 'power': None}, ),
({'pad': 3, 'normalized': False, 'power': None}, ),
({'pad': 0, 'normalized': True, 'power': None}, ),
({'pad': 3, 'normalized': True, 'power': None}, ),
({'pad': 0, 'normalized': False, 'power': 1.0}, ),
({'pad': 3, 'normalized': False, 'power': 1.0}, ),
({'pad': 0, 'normalized': True, 'power': 1.0}, ),
({'pad': 3, 'normalized': True, 'power': 1.0}, ),
({'pad': 0, 'normalized': False, 'power': 2.0}, ),
({'pad': 3, 'normalized': False, 'power': 2.0}, ),
({'pad': 0, 'normalized': True, 'power': 2.0}, ),
({'pad': 3, 'normalized': True, 'power': 2.0}, ),
])
def test_spectrogram(self, kwargs):
# replication_pad1d_backward_cuda is not deterministic and
# gives very small (~2.7756e-17) difference.
#
# See https://github.com/pytorch/pytorch/issues/54093
transform = T.Spectrogram(**kwargs)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_inverse_spectrogram(self):
# create a realistic input:
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
length = waveform.shape[-1]
spectrogram = get_spectrogram(waveform, n_fft=400)
# test
inv_transform = T.InverseSpectrogram(n_fft=400)
self.assert_grad(inv_transform, [spectrogram, length])
def test_melspectrogram(self):
# replication_pad1d_backward_cuda is not deterministic and
# gives very small (~2.7756e-17) difference.
#
# See https://github.com/pytorch/pytorch/issues/54093
sample_rate = 8000
transform = T.MelSpectrogram(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
@nested_params(
[0, 0.99],
[False, True],
)
def test_griffinlim(self, momentum, rand_init):
n_fft = 400
power = 1
n_iter = 3
spec = get_spectrogram(
get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2),
n_fft=n_fft, power=power)
transform = _DeterministicWrapper(
T.GriffinLim(n_fft=n_fft, n_iter=n_iter, momentum=momentum, rand_init=rand_init, power=power))
self.assert_grad(transform, [spec])
@parameterized.expand([(False, ), (True, )])
def test_mfcc(self, log_mels):
sample_rate = 8000
transform = T.MFCC(sample_rate=sample_rate, log_mels=log_mels)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform])
@parameterized.expand([(False, ), (True, )])
def test_lfcc(self, log_lf):
sample_rate = 8000
transform = T.LFCC(sample_rate=sample_rate, log_lf=log_lf)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform])
def test_compute_deltas(self):
transform = T.ComputeDeltas()
spec = torch.rand(10, 20)
self.assert_grad(transform, [spec])
@parameterized.expand([(8000, 8000), (8000, 4000), (4000, 8000)])
def test_resample(self, orig_freq, new_freq):
transform = T.Resample(orig_freq=orig_freq, new_freq=new_freq)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform])
@parameterized.expand([("linear", ), ("exponential", ), ("logarithmic", ), ("quarter_sine", ), ("half_sine", )])
def test_fade(self, fade_shape):
transform = T.Fade(fade_shape=fade_shape)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
@parameterized.expand([(T.TimeMasking,), (T.FrequencyMasking,)])
def test_masking(self, masking_transform):
sample_rate = 8000
n_fft = 400
spectrogram = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2),
n_fft=n_fft, power=1)
deterministic_transform = _DeterministicWrapper(masking_transform(400))
self.assert_grad(deterministic_transform, [spectrogram])
@parameterized.expand([(T.TimeMasking,), (T.FrequencyMasking,)])
def test_masking_iid(self, masking_transform):
sample_rate = 8000
n_fft = 400
specs = [get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2, seed=i),
n_fft=n_fft, power=1)
for i in range(3)
]
batch = torch.stack(specs)
assert batch.ndim == 4
deterministic_transform = _DeterministicWrapper(masking_transform(400, True))
self.assert_grad(deterministic_transform, [batch])
def test_spectral_centroid(self):
sample_rate = 8000
transform = T.SpectralCentroid(sample_rate=sample_rate)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform], nondet_tol=1e-10)
def test_amplitude_to_db(self):
sample_rate = 8000
transform = T.AmplitudeToDB()
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform])
def test_melscale(self):
sample_rate = 8000
n_fft = 400
n_mels = n_fft // 2 + 1
transform = T.MelScale(sample_rate=sample_rate, n_mels=n_mels)
spec = get_spectrogram(
get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2),
n_fft=n_fft, power=1)
self.assert_grad(transform, [spec])
@parameterized.expand([(1.5, "amplitude"), (2, "power"), (10, "db")])
def test_vol(self, gain, gain_type):
sample_rate = 8000
transform = T.Vol(gain=gain, gain_type=gain_type)
waveform = get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=2)
self.assert_grad(transform, [waveform])
@parameterized.expand([
({'cmn_window': 100, 'min_cmn_window': 50, 'center': False, 'norm_vars': False}, ),
({'cmn_window': 100, 'min_cmn_window': 50, 'center': True, 'norm_vars': False}, ),
({'cmn_window': 100, 'min_cmn_window': 50, 'center': False, 'norm_vars': True}, ),
({'cmn_window': 100, 'min_cmn_window': 50, 'center': True, 'norm_vars': True}, ),
])
def test_sliding_window_cmn(self, kwargs):
n_fft = 10
power = 1
spec = get_spectrogram(
get_whitenoise(sample_rate=200, duration=0.05, n_channels=2),
n_fft=n_fft, power=power)
spec_reshaped = spec.transpose(-1, -2)
transform = T.SlidingWindowCmn(**kwargs)
self.assert_grad(transform, [spec_reshaped])
@unittest.expectedFailure
def test_timestretch_zeros_fail(self):
"""Test that ``T.TimeStretch`` fails gradcheck at 0
This is because ``F.phase_vocoder`` converts data from cartesian to polar coordinates,
which performs ``atan2(imag, real)``, and the gradient is not defined at 0.
"""
n_fft = 16
transform = T.TimeStretch(n_freq=n_fft // 2 + 1, fixed_rate=0.99)
waveform = torch.zeros(2, 40)
spectrogram = get_spectrogram(waveform, n_fft=n_fft, power=None)
self.assert_grad(transform, [spectrogram])
@nested_params([0.7, 0.8, 0.9, 1.0, 1.3])
def test_timestretch_non_zero(self, rate):
"""Verify that ``T.TimeStretch`` does not fail if it's not close to 0
``T.TimeStrech`` is not differentiable around 0, so this test checks the differentiability
for cases where input is not zero.
As tested above, when spectrogram contains values close to zero, the gradients are unstable
and gradcheck fails.
In this test, we generate spectrogram from random signal, then we push the points around
zero away from the origin.
This process does not reflect the real use-case, and it is not practical for users, but
this helps us understand to what degree the function is differentiable and when not.
"""
n_fft = 16
transform = T.TimeStretch(n_freq=n_fft // 2 + 1, fixed_rate=rate)
waveform = get_whitenoise(sample_rate=40, duration=1, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=n_fft, power=None)
# 1e-3 is too small (on CPU)
epsilon = 1e-2
too_close = spectrogram.abs() < epsilon
spectrogram[too_close] = epsilon * spectrogram[too_close] / spectrogram[too_close].abs()
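# The rescaling above snaps every entry with |value| < epsilon to magnitude
# exactly epsilon while preserving its phase, keeping the spectrogram away
# from the non-differentiable origin of atan2.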
self.assert_grad(transform, [spectrogram])
def test_psd(self):
transform = T.PSD()
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=400)
self.assert_grad(transform, [spectrogram])
@parameterized.expand([
[True],
[False],
])
def test_psd_with_mask(self, multi_mask):
transform = T.PSD(multi_mask=multi_mask)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=400)
if multi_mask:
mask = torch.rand(spectrogram.shape[-3:])
else:
mask = torch.rand(spectrogram.shape[-2:])
self.assert_grad(transform, [spectrogram, mask])
@parameterized.expand([
"ref_channel",
# stv_power test time too long, comment for now
# "stv_power",
# stv_evd will fail since the eigenvalues are not distinct
# "stv_evd",
])
def test_mvdr(self, solution):
transform = T.MVDR(solution=solution)
waveform = get_whitenoise(sample_rate=8000, duration=0.05, n_channels=2)
spectrogram = get_spectrogram(waveform, n_fft=400)
mask_s = torch.rand(spectrogram.shape[-2:])
mask_n = torch.rand(spectrogram.shape[-2:])
self.assert_grad(transform, [spectrogram, mask_s, mask_n])
class AutogradTestFloat32(TestBaseMixin):
def assert_grad(
self,
transform: torch.nn.Module,
inputs: List[torch.Tensor],
):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=torch.float32, device=self.device)
inputs_.append(i)
# gradcheck with float32 requires higher atol and epsilon
assert gradcheck(transform, inputs_, eps=1e-3, atol=1e-3, nondet_tol=0.)
@parameterized.expand([
(rnnt_utils.get_B1_T10_U3_D4_data, ),
(rnnt_utils.get_B2_T4_U3_D3_data, ),
(rnnt_utils.get_B1_T2_U3_D5_data, ),
])
def test_rnnt_loss(self, data_func):
def get_data(data_func, device):
data = data_func()
if isinstance(data, tuple):
data = data[0]
return data
data = get_data(data_func, self.device)
inputs = (
data["logits"].to(torch.float32),
data["targets"],
data["logit_lengths"],
data["target_lengths"],
)
loss = T.RNNTLoss(blank=data["blank"])
self.assert_grad(loss, inputs)
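# For reference, the core gradcheck pattern these helpers build on
# (a sketch, not part of the test suite; T.Vol is chosen arbitrarily):
#
#     transform = T.Vol(2.0)
#     x = torch.randn(2, 100, dtype=torch.float64, requires_grad=True)
#     assert gradcheck(transform, (x,))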
|
import warnings
import torch
import torchaudio.transforms as T
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
skipIfNoSox,
skipIfNoExec,
TempDirMixin,
TorchaudioTestCase,
get_asset_path,
sox_utils,
load_wav,
save_wav,
get_whitenoise,
)
@skipIfNoSox
@skipIfNoExec('sox')
class TestFunctionalFiltering(TempDirMixin, TorchaudioTestCase):
def run_sox_effect(self, input_file, effect):
output_file = self.get_temp_path('expected.wav')
sox_utils.run_sox_effect(input_file, output_file, [str(e) for e in effect])
return load_wav(output_file)
def assert_sox_effect(self, result, input_path, effects, atol=1e-04, rtol=1e-5):
expected, _ = self.run_sox_effect(input_path, effects)
self.assertEqual(result, expected, atol=atol, rtol=rtol)
def get_whitenoise(self, sample_rate=8000):
noise = get_whitenoise(
sample_rate=sample_rate, duration=3, scale_factor=0.9,
)
path = self.get_temp_path("whitenoise.wav")
save_wav(path, noise, sample_rate)
return noise, path
@parameterized.expand([
('q', 'quarter_sine'),
('h', 'half_sine'),
('t', 'linear'),
])
def test_fade(self, fade_shape_sox, fade_shape):
fade_in_len, fade_out_len = 44100, 44100
data, path = self.get_whitenoise(sample_rate=44100)
result = T.Fade(fade_in_len, fade_out_len, fade_shape)(data)
self.assert_sox_effect(result, path, ['fade', fade_shape_sox, '1', '0', '1'])
@parameterized.expand([
('amplitude', 1.1),
('db', 2),
('power', 2),
])
def test_vol(self, gain_type, gain):
data, path = self.get_whitenoise()
result = T.Vol(gain, gain_type)(data)
self.assert_sox_effect(result, path, ['vol', f'{gain}', gain_type])
@parameterized.expand(['vad-go-stereo-44100.wav', 'vad-go-mono-32000.wav'])
def test_vad(self, filename):
path = get_asset_path(filename)
data, sample_rate = load_wav(path)
result = T.Vad(sample_rate)(data)
self.assert_sox_effect(result, path, ['vad'])
def test_vad_warning(self):
"""vad should throw a warning if input dimension is greater than 2"""
sample_rate = 41100
data = torch.rand(5, 5, sample_rate)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
T.Vad(sample_rate)(data)
assert len(w) == 1
data = torch.rand(5, sample_rate)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
T.Vad(sample_rate)(data)
assert len(w) == 0
data = torch.rand(sample_rate)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
T.Vad(sample_rate)(data)
assert len(w) == 0
|
import math
import torch
import torchaudio
import torchaudio.transforms as transforms
import torchaudio.functional as F
from torchaudio_unittest import common_utils
class Tester(common_utils.TorchaudioTestCase):
backend = 'default'
# create a sinewave signal for testing
sample_rate = 16000
freq = 440
volume = .3
waveform = (torch.cos(2 * math.pi * torch.arange(0, 4 * sample_rate).float() * freq / sample_rate))
waveform.unsqueeze_(0) # (1, 64000)
waveform = (waveform * volume * 2**31).long()
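# Scaling by 2**31 and casting to long simulates 32-bit integer PCM samples;
# `scale` below maps such samples back to floats in [-1., 1.].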
def scale(self, waveform, factor=2.0**31):
# scales a waveform by a factor
if not waveform.is_floating_point():
waveform = waveform.to(torch.get_default_dtype())
return waveform / factor
def test_mu_law_companding(self):
quantization_channels = 256
waveform = self.waveform.clone()
if not waveform.is_floating_point():
waveform = waveform.to(torch.get_default_dtype())
waveform /= torch.abs(waveform).max()
self.assertTrue(waveform.min() >= -1. and waveform.max() <= 1.)
waveform_mu = transforms.MuLawEncoding(quantization_channels)(waveform)
self.assertTrue(waveform_mu.min() >= 0. and waveform_mu.max() <= quantization_channels)
waveform_exp = transforms.MuLawDecoding(quantization_channels)(waveform_mu)
self.assertTrue(waveform_exp.min() >= -1. and waveform_exp.max() <= 1.)
def test_AmplitudeToDB(self):
filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
waveform = common_utils.load_wav(filepath)[0]
mag_to_db_transform = transforms.AmplitudeToDB('magnitude', 80.)
power_to_db_transform = transforms.AmplitudeToDB('power', 80.)
mag_to_db_torch = mag_to_db_transform(torch.abs(waveform))
power_to_db_torch = power_to_db_transform(torch.pow(waveform, 2))
self.assertEqual(mag_to_db_torch, power_to_db_torch)
def test_melscale_load_save(self):
specgram = torch.ones(1, 201, 100)
melscale_transform = transforms.MelScale()
melscale_transform(specgram)
melscale_transform_copy = transforms.MelScale()
melscale_transform_copy.load_state_dict(melscale_transform.state_dict())
fb = melscale_transform.fb
fb_copy = melscale_transform_copy.fb
self.assertEqual(fb_copy.size(), (201, 128))
self.assertEqual(fb, fb_copy)
def test_melspectrogram_load_save(self):
waveform = self.waveform.float()
mel_spectrogram_transform = transforms.MelSpectrogram()
mel_spectrogram_transform(waveform)
mel_spectrogram_transform_copy = transforms.MelSpectrogram()
mel_spectrogram_transform_copy.load_state_dict(mel_spectrogram_transform.state_dict())
window = mel_spectrogram_transform.spectrogram.window
window_copy = mel_spectrogram_transform_copy.spectrogram.window
fb = mel_spectrogram_transform.mel_scale.fb
fb_copy = mel_spectrogram_transform_copy.mel_scale.fb
self.assertEqual(window, window_copy)
# the default for n_fft = 400 and n_mels = 128
self.assertEqual(fb_copy.size(), (201, 128))
self.assertEqual(fb, fb_copy)
def test_mel2(self):
top_db = 80.
s2db = transforms.AmplitudeToDB('power', top_db)
waveform = self.waveform.clone() # (1, 16000)
waveform_scaled = self.scale(waveform) # (1, 16000)
mel_transform = transforms.MelSpectrogram()
# check defaults
spectrogram_torch = s2db(mel_transform(waveform_scaled)) # (1, 128, 321)
self.assertTrue(spectrogram_torch.dim() == 3)
self.assertTrue(spectrogram_torch.ge(spectrogram_torch.max() - top_db).all())
self.assertEqual(spectrogram_torch.size(1), mel_transform.n_mels)
# check correctness of filterbank conversion matrix
self.assertTrue(mel_transform.mel_scale.fb.sum(1).le(1.).all())
self.assertTrue(mel_transform.mel_scale.fb.sum(1).ge(0.).all())
# check options
kwargs = {'window_fn': torch.hamming_window, 'pad': 10, 'win_length': 500,
'hop_length': 125, 'n_fft': 800, 'n_mels': 50}
mel_transform2 = transforms.MelSpectrogram(**kwargs)
spectrogram2_torch = s2db(mel_transform2(waveform_scaled)) # (1, 50, 513)
self.assertTrue(spectrogram2_torch.dim() == 3)
self.assertTrue(spectrogram2_torch.ge(spectrogram2_torch.max() - top_db).all())
self.assertEqual(spectrogram2_torch.size(1), mel_transform2.n_mels)
self.assertTrue(mel_transform2.mel_scale.fb.sum(1).le(1.).all())
self.assertTrue(mel_transform2.mel_scale.fb.sum(1).ge(0.).all())
# check on multi-channel audio
filepath = common_utils.get_asset_path('steam-train-whistle-daniel_simon.wav')
x_stereo = common_utils.load_wav(filepath)[0] # (2, 278756), 44100
spectrogram_stereo = s2db(mel_transform(x_stereo)) # (2, 128, 1394)
self.assertTrue(spectrogram_stereo.dim() == 3)
self.assertTrue(spectrogram_stereo.size(0) == 2)
self.assertTrue(spectrogram_stereo.ge(spectrogram_stereo.max() - top_db).all())
self.assertEqual(spectrogram_stereo.size(1), mel_transform.n_mels)
# check filterbank matrix creation
fb_matrix_transform = transforms.MelScale(
n_mels=100, sample_rate=16000, f_min=0., f_max=None, n_stft=400)
self.assertTrue(fb_matrix_transform.fb.sum(1).le(1.).all())
self.assertTrue(fb_matrix_transform.fb.sum(1).ge(0.).all())
self.assertEqual(fb_matrix_transform.fb.size(), (400, 100))
def test_mfcc_defaults(self):
"""Check the default configuration of the MFCC transform.
"""
sample_rate = 16000
audio = common_utils.get_whitenoise(sample_rate=sample_rate)
n_mfcc = 40
mfcc_transform = torchaudio.transforms.MFCC(sample_rate=sample_rate,
n_mfcc=n_mfcc,
norm='ortho')
torch_mfcc = mfcc_transform(audio) # (1, 40, 81)
self.assertEqual(torch_mfcc.dim(), 3)
self.assertEqual(torch_mfcc.shape[1], n_mfcc)
self.assertEqual(torch_mfcc.shape[2], 81)
def test_mfcc_kwargs_passthrough(self):
"""Check kwargs get correctly passed to the MelSpectrogram transform.
"""
sample_rate = 16000
audio = common_utils.get_whitenoise(sample_rate=sample_rate)
n_mfcc = 40
melkwargs = {'win_length': 200}
mfcc_transform = torchaudio.transforms.MFCC(sample_rate=sample_rate,
n_mfcc=n_mfcc,
norm='ortho',
melkwargs=melkwargs)
torch_mfcc = mfcc_transform(audio) # (1, 40, 161)
self.assertEqual(torch_mfcc.shape[2], 161)
def test_mfcc_norms(self):
"""Check if MFCC-DCT norms work correctly.
"""
sample_rate = 16000
audio = common_utils.get_whitenoise(sample_rate=sample_rate)
n_mfcc = 40
n_mels = 128
mfcc_transform = torchaudio.transforms.MFCC(sample_rate=sample_rate,
n_mfcc=n_mfcc,
norm='ortho')
# check norms work correctly
mfcc_transform_norm_none = torchaudio.transforms.MFCC(sample_rate=sample_rate,
n_mfcc=n_mfcc,
norm=None)
torch_mfcc_norm_none = mfcc_transform_norm_none(audio) # (1, 40, 81)
norm_check = mfcc_transform(audio)
norm_check[:, 0, :] *= math.sqrt(n_mels) * 2
norm_check[:, 1:, :] *= math.sqrt(n_mels / 2) * 2
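# An ortho-normalized DCT-II differs from the unnormalized one by a factor of
# 2 * sqrt(n_mels) on the 0th coefficient and 2 * sqrt(n_mels / 2) on the
# rest, so rescaling the ortho output should reproduce the norm=None output.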
self.assertEqual(torch_mfcc_norm_none, norm_check)
def test_lfcc_defaults(self):
"""Check default settings for LFCC transform.
"""
sample_rate = 16000
audio = common_utils.get_whitenoise(sample_rate=sample_rate)
n_lfcc = 40
n_filter = 128
lfcc_transform = torchaudio.transforms.LFCC(sample_rate=sample_rate,
n_filter=n_filter,
n_lfcc=n_lfcc,
norm='ortho')
torch_lfcc = lfcc_transform(audio) # (1, 40, 81)
self.assertEqual(torch_lfcc.dim(), 3)
self.assertEqual(torch_lfcc.shape[1], n_lfcc)
self.assertEqual(torch_lfcc.shape[2], 81)
def test_lfcc_arg_passthrough(self):
"""Check if kwargs get correctly passed to the underlying Spectrogram transform.
"""
sample_rate = 16000
audio = common_utils.get_whitenoise(sample_rate=sample_rate)
n_lfcc = 40
n_filter = 128
speckwargs = {'win_length': 200}
lfcc_transform = torchaudio.transforms.LFCC(sample_rate=sample_rate,
n_filter=n_filter,
n_lfcc=n_lfcc,
norm='ortho',
speckwargs=speckwargs)
torch_lfcc = lfcc_transform(audio) # (1, 40, 161)
self.assertEqual(torch_lfcc.shape[2], 161)
def test_lfcc_norms(self):
"""Check if LFCC-DCT norm works correctly.
"""
sample_rate = 16000
audio = common_utils.get_whitenoise(sample_rate=sample_rate)
n_lfcc = 40
n_filter = 128
lfcc_transform = torchaudio.transforms.LFCC(sample_rate=sample_rate,
n_filter=n_filter,
n_lfcc=n_lfcc,
norm='ortho')
lfcc_transform_norm_none = torchaudio.transforms.LFCC(sample_rate=sample_rate,
n_filter=n_filter,
n_lfcc=n_lfcc,
norm=None)
torch_lfcc_norm_none = lfcc_transform_norm_none(audio) # (1, 40, 161)
norm_check = lfcc_transform(audio) # (1, 40, 161)
norm_check[:, 0, :] *= math.sqrt(n_filter) * 2
norm_check[:, 1:, :] *= math.sqrt(n_filter / 2) * 2
self.assertEqual(torch_lfcc_norm_none, norm_check)
def test_resample_size(self):
input_path = common_utils.get_asset_path('sinewave.wav')
waveform, sample_rate = common_utils.load_wav(input_path)
upsample_rate = sample_rate * 2
downsample_rate = sample_rate // 2
invalid_resampling_method = 'foo'
with self.assertRaises(ValueError):
torchaudio.transforms.Resample(sample_rate, upsample_rate,
resampling_method=invalid_resampling_method)
upsample_resample = torchaudio.transforms.Resample(
sample_rate, upsample_rate, resampling_method='sinc_interpolation')
up_sampled = upsample_resample(waveform)
# we expect the upsampled signal to have twice as many samples
self.assertTrue(up_sampled.size(-1) == waveform.size(-1) * 2)
downsample_resample = torchaudio.transforms.Resample(
sample_rate, downsample_rate, resampling_method='sinc_interpolation')
down_sampled = downsample_resample(waveform)
# we expect the downsampled signal to have half as many samples
self.assertTrue(down_sampled.size(-1) == waveform.size(-1) // 2)
def test_compute_deltas(self):
channel = 13
n_mfcc = channel * 3
time = 1021
win_length = 2 * 7 + 1
specgram = torch.randn(channel, n_mfcc, time)
transform = transforms.ComputeDeltas(win_length=win_length)
computed = transform(specgram)
self.assertTrue(computed.shape == specgram.shape, (computed.shape, specgram.shape))
def test_compute_deltas_transform_same_as_functional(self, atol=1e-6, rtol=1e-8):
channel = 13
n_mfcc = channel * 3
time = 1021
win_length = 2 * 7 + 1
specgram = torch.randn(channel, n_mfcc, time)
transform = transforms.ComputeDeltas(win_length=win_length)
computed_transform = transform(specgram)
computed_functional = F.compute_deltas(specgram, win_length=win_length)
self.assertEqual(computed_functional, computed_transform, atol=atol, rtol=rtol)
def test_compute_deltas_twochannel(self):
specgram = torch.tensor([1., 2., 3., 4.]).repeat(1, 2, 1)
expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5],
[0.5, 1.0, 1.0, 0.5]]])
transform = transforms.ComputeDeltas(win_length=3)
computed = transform(specgram)
assert computed.shape == expected.shape, (computed.shape, expected.shape)
self.assertEqual(computed, expected, atol=1e-6, rtol=1e-8)
class SmokeTest(common_utils.TorchaudioTestCase):
def test_spectrogram(self):
specgram = transforms.Spectrogram(center=False, pad_mode="reflect", onesided=False)
self.assertEqual(specgram.center, False)
self.assertEqual(specgram.pad_mode, "reflect")
self.assertEqual(specgram.onesided, False)
def test_melspectrogram(self):
melspecgram = transforms.MelSpectrogram(center=True, pad_mode="reflect", onesided=False)
specgram = melspecgram.spectrogram
self.assertEqual(specgram.center, True)
self.assertEqual(specgram.pad_mode, "reflect")
self.assertEqual(specgram.onesided, False)
|
import torch
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoCuda,
)
from . transforms_test_impl import TransformsTestBase
@skipIfNoCuda
class TransformsCUDAFloat32Test(TransformsTestBase, PytorchTestCase):
device = 'cuda'
dtype = torch.float32
@skipIfNoCuda
class TransformsCUDAFloat64Test(TransformsTestBase, PytorchTestCase):
device = 'cuda'
dtype = torch.float64
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from . transforms_test_impl import TransformsTestBase
class TransformsCPUFloat32Test(TransformsTestBase, PytorchTestCase):
device = 'cpu'
dtype = torch.float32
class TransformsCPUFloat64Test(TransformsTestBase, PytorchTestCase):
device = 'cpu'
dtype = torch.float64
|
from torchaudio_unittest.common_utils import (
PytorchTestCase,
skipIfNoCuda,
)
from .autograd_test_impl import AutogradTestMixin, AutogradTestFloat32
@skipIfNoCuda
class AutogradCUDATest(AutogradTestMixin, PytorchTestCase):
device = 'cuda'
@skipIfNoCuda
class AutogradRNNTCUDATest(AutogradTestFloat32, PytorchTestCase):
device = 'cuda'
|
"""Test suites for jit-ability and its numerical compatibility"""
import torch
import torchaudio.transforms as T
from parameterized import parameterized
from torchaudio_unittest import common_utils
from torchaudio_unittest.common_utils import (
skipIfRocm,
TestBaseMixin,
torch_script,
)
class Transforms(TestBaseMixin):
"""Implements test for Transforms that are performed for different devices"""
def _assert_consistency(self, transform, tensor, *args):
tensor = tensor.to(device=self.device, dtype=self.dtype)
transform = transform.to(device=self.device, dtype=self.dtype)
ts_transform = torch_script(transform)
output = transform(tensor, *args)
ts_output = ts_transform(tensor, *args)
self.assertEqual(ts_output, output)
def _assert_consistency_complex(self, transform, tensor, *args):
assert tensor.is_complex()
tensor = tensor.to(device=self.device, dtype=self.complex_dtype)
transform = transform.to(device=self.device, dtype=self.dtype)
ts_transform = torch_script(transform)
output = transform(tensor, *args)
ts_output = ts_transform(tensor, *args)
self.assertEqual(ts_output, output)
def test_Spectrogram(self):
tensor = torch.rand((1, 1000))
self._assert_consistency(T.Spectrogram(), tensor)
def test_Spectrogram_return_complex(self):
tensor = torch.rand((1, 1000))
self._assert_consistency(T.Spectrogram(power=None, return_complex=True), tensor)
def test_InverseSpectrogram(self):
tensor = common_utils.get_whitenoise(sample_rate=8000)
spectrogram = common_utils.get_spectrogram(tensor, n_fft=400, hop_length=100)
self._assert_consistency_complex(T.InverseSpectrogram(n_fft=400, hop_length=100), spectrogram)
@skipIfRocm
def test_GriffinLim(self):
tensor = torch.rand((1, 201, 6))
self._assert_consistency(T.GriffinLim(length=1000, rand_init=False), tensor)
def test_AmplitudeToDB(self):
spec = torch.rand((6, 201))
self._assert_consistency(T.AmplitudeToDB(), spec)
def test_MelScale(self):
spec_f = torch.rand((1, 201, 6))
self._assert_consistency(T.MelScale(n_stft=201), spec_f)
def test_MelSpectrogram(self):
tensor = torch.rand((1, 1000))
self._assert_consistency(T.MelSpectrogram(), tensor)
def test_MFCC(self):
tensor = torch.rand((1, 1000))
self._assert_consistency(T.MFCC(), tensor)
def test_LFCC(self):
tensor = torch.rand((1, 1000))
self._assert_consistency(T.LFCC(), tensor)
def test_Resample(self):
sr1, sr2 = 16000, 8000
tensor = common_utils.get_whitenoise(sample_rate=sr1)
self._assert_consistency(T.Resample(sr1, sr2), tensor)
def test_MuLawEncoding(self):
tensor = common_utils.get_whitenoise()
self._assert_consistency(T.MuLawEncoding(), tensor)
def test_MuLawDecoding(self):
tensor = torch.rand((1, 10))
self._assert_consistency(T.MuLawDecoding(), tensor)
def test_Fade(self):
waveform = common_utils.get_whitenoise()
fade_in_len = 3000
fade_out_len = 3000
self._assert_consistency(T.Fade(fade_in_len, fade_out_len), waveform)
def test_FrequencyMasking(self):
tensor = torch.rand((10, 2, 50, 10, 2))
self._assert_consistency(T.FrequencyMasking(freq_mask_param=60, iid_masks=False), tensor)
def test_TimeMasking(self):
tensor = torch.rand((10, 2, 50, 10, 2))
self._assert_consistency(T.TimeMasking(time_mask_param=30, iid_masks=False), tensor)
def test_Vol(self):
waveform = common_utils.get_whitenoise()
self._assert_consistency(T.Vol(1.1), waveform)
def test_SlidingWindowCmn(self):
tensor = torch.rand((1000, 10))
self._assert_consistency(T.SlidingWindowCmn(), tensor)
def test_Vad(self):
filepath = common_utils.get_asset_path("vad-go-mono-32000.wav")
waveform, sample_rate = common_utils.load_wav(filepath)
self._assert_consistency(T.Vad(sample_rate=sample_rate), waveform)
def test_SpectralCentroid(self):
sample_rate = 44100
waveform = common_utils.get_whitenoise(sample_rate=sample_rate)
self._assert_consistency(T.SpectralCentroid(sample_rate=sample_rate), waveform)
def test_TimeStretch(self):
n_fft = 1025
n_freq = n_fft // 2 + 1
hop_length = 512
fixed_rate = 1.3
batch = 10
num_channels = 2
waveform = common_utils.get_whitenoise(sample_rate=8000, n_channels=batch * num_channels)
tensor = common_utils.get_spectrogram(waveform, n_fft=n_fft)
tensor = tensor.reshape(batch, num_channels, n_freq, -1)
self._assert_consistency_complex(
T.TimeStretch(n_freq=n_freq, hop_length=hop_length, fixed_rate=fixed_rate),
tensor,
)
def test_PitchShift(self):
sample_rate = 8000
n_steps = 4
waveform = common_utils.get_whitenoise(sample_rate=sample_rate)
self._assert_consistency(
T.PitchShift(sample_rate=sample_rate, n_steps=n_steps),
waveform
)
def test_PSD(self):
tensor = common_utils.get_whitenoise(sample_rate=8000, n_channels=4)
spectrogram = common_utils.get_spectrogram(tensor, n_fft=400, hop_length=100)
spectrogram = spectrogram.to(self.device)
self._assert_consistency_complex(T.PSD(), spectrogram)
def test_PSD_with_mask(self):
tensor = common_utils.get_whitenoise(sample_rate=8000, n_channels=4)
spectrogram = common_utils.get_spectrogram(tensor, n_fft=400, hop_length=100)
spectrogram = spectrogram.to(self.device)
mask = torch.rand(spectrogram.shape[-2:], device=self.device)
self._assert_consistency_complex(T.PSD(), spectrogram, mask)
class TransformsFloat32Only(TestBaseMixin):
def test_rnnt_loss(self):
logits = torch.tensor([[[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.1, 0.1],
[0.7, 0.1, 0.2, 0.1, 0.1]]]])
tensor = logits.to(device=self.device, dtype=torch.float32)
targets = torch.tensor([[1, 2]], device=tensor.device, dtype=torch.int32)
logit_lengths = torch.tensor([2], device=tensor.device, dtype=torch.int32)
target_lengths = torch.tensor([2], device=tensor.device, dtype=torch.int32)
self._assert_consistency(T.RNNTLoss(), logits, targets, logit_lengths, target_lengths)
class TransformsFloat64Only(TestBaseMixin):
@parameterized.expand([
["ref_channel", True],
["stv_evd", True],
["stv_power", True],
["ref_channel", False],
["stv_evd", False],
["stv_power", False],
])
def test_MVDR(self, solution, online):
tensor = common_utils.get_whitenoise(sample_rate=8000, n_channels=4)
spectrogram = common_utils.get_spectrogram(tensor, n_fft=400, hop_length=100)
spectrogram = spectrogram.to(device=self.device, dtype=torch.cdouble)
mask_s = torch.rand(spectrogram.shape[-2:], device=self.device)
mask_n = torch.rand(spectrogram.shape[-2:], device=self.device)
self._assert_consistency_complex(
T.MVDR(solution=solution, online=online),
spectrogram, mask_s, mask_n
)
|
from torchaudio_unittest.common_utils import PytorchTestCase
from .autograd_test_impl import AutogradTestMixin, AutogradTestFloat32
class AutogradCPUTest(AutogradTestMixin, PytorchTestCase):
device = 'cpu'
class AutogradRNNTCPUTest(AutogradTestFloat32, PytorchTestCase):
device = 'cpu'
|
"""Test numerical consistency among single input and batched input."""
import torch
from parameterized import parameterized
from torchaudio import transforms as T
from torchaudio_unittest import common_utils
class TestTransforms(common_utils.TorchaudioTestCase):
"""Test suite for classes defined in `transforms` module"""
backend = 'default'
def assert_batch_consistency(
self, transform, batch, *args, atol=1e-8, rtol=1e-5, seed=42,
**kwargs):
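"""Check that a transform yields the same result whether it is applied to
the whole batch at once or to each item separately (with identical RNG
seeding), and that both code paths leave their inputs in the same state."""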
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([
transform(items_input[i], *args, **kwargs) for i in range(n)
])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = transform(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
def test_batch_AmplitudeToDB(self):
spec = torch.rand((3, 2, 6, 201))
transform = T.AmplitudeToDB()
self.assert_batch_consistency(transform, spec)
def test_batch_Resample(self):
waveform = torch.randn(3, 2, 2786)
transform = T.Resample()
self.assert_batch_consistency(transform, waveform)
def test_batch_MelScale(self):
specgram = torch.randn(3, 2, 201, 256)
transform = T.MelScale()
self.assert_batch_consistency(transform, specgram)
def test_batch_InverseMelScale(self):
n_mels = 32
n_stft = 5
mel_spec = torch.randn(3, 2, n_mels, 32) ** 2
transform = T.InverseMelScale(n_stft, n_mels)
# InverseMelScale runs SGD on randomly initialized values, so the results
# are not exactly reproducible. For this reason, the tolerance is very relaxed here.
self.assert_batch_consistency(transform, mel_spec, atol=1.0, rtol=1e-5)
def test_batch_compute_deltas(self):
specgram = torch.randn(3, 2, 31, 2786)
transform = T.ComputeDeltas()
self.assert_batch_consistency(transform, specgram)
def test_batch_mulaw(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
# Single then transform then batch
expected = [T.MuLawEncoding()(waveform[i]) for i in range(3)]
expected = torch.stack(expected)
# Batch then transform
computed = T.MuLawEncoding()(waveform)
# shape = (3, 2, 8000)
self.assertEqual(computed, expected)
# Single then transform then batch
expected_decoded = [T.MuLawDecoding()(expected[i]) for i in range(3)]
expected_decoded = torch.stack(expected_decoded)
# Batch then transform
computed_decoded = T.MuLawDecoding()(computed)
# shape = (3, 2, 8000)
self.assertEqual(computed_decoded, expected_decoded)
def test_batch_spectrogram(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.Spectrogram()
self.assert_batch_consistency(transform, waveform)
def test_batch_inverse_spectrogram(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
specgram = common_utils.get_spectrogram(waveform, n_fft=400)
specgram = specgram.reshape(3, 2, specgram.shape[-2], specgram.shape[-1])
transform = T.InverseSpectrogram(n_fft=400)
self.assert_batch_consistency(transform, specgram)
def test_batch_melspectrogram(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.MelSpectrogram()
self.assert_batch_consistency(transform, waveform)
def test_batch_mfcc(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.MFCC()
self.assert_batch_consistency(transform, waveform, atol=1e-4, rtol=1e-5)
def test_batch_lfcc(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.LFCC()
self.assert_batch_consistency(transform, waveform, atol=1e-4, rtol=1e-5)
def test_batch_TimeStretch(self):
rate = 2
num_freq = 1025
batch = 3
tensor = common_utils.get_whitenoise(sample_rate=8000, n_channels=batch)
spec = common_utils.get_spectrogram(tensor, n_fft=num_freq)
transform = T.TimeStretch(
fixed_rate=rate,
n_freq=num_freq // 2 + 1,
hop_length=512
)
self.assert_batch_consistency(transform, spec, atol=1e-5, rtol=1e-5)
def test_batch_Fade(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
fade_in_len = 3000
fade_out_len = 3000
transform = T.Fade(fade_in_len, fade_out_len)
self.assert_batch_consistency(transform, waveform)
def test_batch_Vol(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.Vol(gain=1.1)
self.assert_batch_consistency(transform, waveform)
def test_batch_spectral_centroid(self):
sample_rate = 44100
waveform = common_utils.get_whitenoise(sample_rate=sample_rate, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.SpectralCentroid(sample_rate)
self.assert_batch_consistency(transform, waveform)
def test_batch_pitch_shift(self):
sample_rate = 8000
n_steps = -2
waveform = common_utils.get_whitenoise(sample_rate=sample_rate, duration=0.05, n_channels=6)
waveform = waveform.reshape(3, 2, -1)
transform = T.PitchShift(sample_rate, n_steps, n_fft=400)
self.assert_batch_consistency(transform, waveform)
def test_batch_PSD(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
specgram = common_utils.get_spectrogram(waveform, n_fft=400)
specgram = specgram.reshape(3, 2, specgram.shape[-2], specgram.shape[-1])
transform = T.PSD()
self.assert_batch_consistency(transform, specgram)
def test_batch_PSD_with_mask(self):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.to(torch.double)
specgram = common_utils.get_spectrogram(waveform, n_fft=400)
specgram = specgram.reshape(3, 2, specgram.shape[-2], specgram.shape[-1])
mask = torch.rand((3, specgram.shape[-2], specgram.shape[-1]))
transform = T.PSD()
# Single then transform then batch
expected = [transform(specgram[i], mask[i]) for i in range(3)]
expected = torch.stack(expected)
# Batch then transform
computed = transform(specgram, mask)
self.assertEqual(computed, expected)
@parameterized.expand([
[True],
[False],
])
def test_MVDR(self, multi_mask):
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=1, n_channels=6)
waveform = waveform.to(torch.double)
specgram = common_utils.get_spectrogram(waveform, n_fft=400)
specgram = specgram.reshape(3, 2, specgram.shape[-2], specgram.shape[-1])
if multi_mask:
mask_s = torch.rand((3, 2, specgram.shape[-2], specgram.shape[-1]))
mask_n = torch.rand((3, 2, specgram.shape[-2], specgram.shape[-1]))
else:
mask_s = torch.rand((3, specgram.shape[-2], specgram.shape[-1]))
mask_n = torch.rand((3, specgram.shape[-2], specgram.shape[-1]))
transform = T.MVDR(multi_mask=multi_mask)
# Single then transform then batch
expected = [transform(specgram[i], mask_s[i], mask_n[i]) for i in range(3)]
expected = torch.stack(expected)
# Batch then transform
computed = transform(specgram, mask_s, mask_n)
self.assertEqual(computed, expected)
|
import unittest
import torch
import torchaudio.transforms as T
from torchaudio._internal.module_utils import is_module_available
from parameterized import param, parameterized
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_whitenoise,
get_sinusoid,
get_spectrogram,
nested_params,
)
LIBROSA_AVAILABLE = is_module_available('librosa')
if LIBROSA_AVAILABLE:
import librosa
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class TransformsTestBase(TestBaseMixin):
@parameterized.expand([
param(n_fft=400, hop_length=200, power=2.0),
param(n_fft=600, hop_length=100, power=2.0),
param(n_fft=400, hop_length=200, power=3.0),
param(n_fft=200, hop_length=50, power=2.0),
])
def test_Spectrogram(self, n_fft, hop_length, power):
sample_rate = 16000
waveform = get_whitenoise(
sample_rate=sample_rate, n_channels=1,
).to(self.device, self.dtype)
expected = librosa.core.spectrum._spectrogram(
y=waveform[0].cpu().numpy(),
n_fft=n_fft, hop_length=hop_length, power=power)[0]
result = T.Spectrogram(
n_fft=n_fft, hop_length=hop_length, power=power,
).to(self.device, self.dtype)(waveform)[0]
self.assertEqual(result, torch.from_numpy(expected), atol=1e-5, rtol=1e-5)
def test_Spectrogram_complex(self):
n_fft = 400
hop_length = 200
sample_rate = 16000
waveform = get_whitenoise(
sample_rate=sample_rate, n_channels=1,
).to(self.device, self.dtype)
expected = librosa.core.spectrum._spectrogram(
y=waveform[0].cpu().numpy(),
n_fft=n_fft, hop_length=hop_length, power=1)[0]
result = T.Spectrogram(
n_fft=n_fft, hop_length=hop_length, power=None, return_complex=True,
).to(self.device, self.dtype)(waveform)[0]
self.assertEqual(result.abs(), torch.from_numpy(expected), atol=1e-5, rtol=1e-5)
@nested_params(
[
param(n_fft=400, hop_length=200, n_mels=64),
param(n_fft=600, hop_length=100, n_mels=128),
param(n_fft=200, hop_length=50, n_mels=32),
],
[param(norm=norm) for norm in [None, 'slaney']],
[param(mel_scale=mel_scale) for mel_scale in ['htk', 'slaney']],
)
def test_MelSpectrogram(self, n_fft, hop_length, n_mels, norm, mel_scale):
sample_rate = 16000
waveform = get_sinusoid(
sample_rate=sample_rate, n_channels=1,
).to(self.device, self.dtype)
expected = librosa.feature.melspectrogram(
y=waveform[0].cpu().numpy(),
sr=sample_rate, n_fft=n_fft,
hop_length=hop_length, n_mels=n_mels, norm=norm,
htk=mel_scale == "htk")
result = T.MelSpectrogram(
sample_rate=sample_rate, window_fn=torch.hann_window,
hop_length=hop_length, n_mels=n_mels,
n_fft=n_fft, norm=norm, mel_scale=mel_scale,
).to(self.device, self.dtype)(waveform)[0]
self.assertEqual(result, torch.from_numpy(expected), atol=5e-4, rtol=1e-5)
def test_magnitude_to_db(self):
spectrogram = get_spectrogram(
get_whitenoise(), n_fft=400, power=2).to(self.device, self.dtype)
result = T.AmplitudeToDB('magnitude', 80.).to(self.device, self.dtype)(spectrogram)[0]
expected = librosa.core.spectrum.amplitude_to_db(spectrogram[0].cpu().numpy())
self.assertEqual(result, torch.from_numpy(expected))
def test_power_to_db(self):
spectrogram = get_spectrogram(
get_whitenoise(), n_fft=400, power=2).to(self.device, self.dtype)
result = T.AmplitudeToDB('power', 80.).to(self.device, self.dtype)(spectrogram)[0]
expected = librosa.core.spectrum.power_to_db(spectrogram[0].cpu().numpy())
self.assertEqual(result, torch.from_numpy(expected))
@nested_params([
param(n_fft=400, hop_length=200, n_mels=64, n_mfcc=40),
param(n_fft=600, hop_length=100, n_mels=128, n_mfcc=20),
param(n_fft=200, hop_length=50, n_mels=32, n_mfcc=25),
])
def test_mfcc(self, n_fft, hop_length, n_mels, n_mfcc):
sample_rate = 16000
waveform = get_whitenoise(
sample_rate=sample_rate, n_channels=1).to(self.device, self.dtype)
result = T.MFCC(
sample_rate=sample_rate, n_mfcc=n_mfcc, norm='ortho',
melkwargs={'hop_length': hop_length, 'n_fft': n_fft, 'n_mels': n_mels},
).to(self.device, self.dtype)(waveform)[0]
melspec = librosa.feature.melspectrogram(
y=waveform[0].cpu().numpy(), sr=sample_rate, n_fft=n_fft,
win_length=n_fft, hop_length=hop_length,
n_mels=n_mels, htk=True, norm=None)
expected = librosa.feature.mfcc(
S=librosa.core.spectrum.power_to_db(melspec),
n_mfcc=n_mfcc, dct_type=2, norm='ortho')
self.assertEqual(result, torch.from_numpy(expected), atol=5e-4, rtol=1e-5)
@parameterized.expand([
param(n_fft=400, hop_length=200),
param(n_fft=600, hop_length=100),
param(n_fft=200, hop_length=50),
])
def test_spectral_centroid(self, n_fft, hop_length):
sample_rate = 16000
waveform = get_whitenoise(
sample_rate=sample_rate, n_channels=1).to(self.device, self.dtype)
result = T.SpectralCentroid(
sample_rate=sample_rate, n_fft=n_fft, hop_length=hop_length,
).to(self.device, self.dtype)(waveform)
expected = librosa.feature.spectral_centroid(
y=waveform[0].cpu().numpy(), sr=sample_rate, n_fft=n_fft, hop_length=hop_length)
self.assertEqual(result, torch.from_numpy(expected), atol=5e-4, rtol=1e-5)
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from .torchscript_consistency_impl import Transforms, TransformsFloat32Only, TransformsFloat64Only
@skipIfNoCuda
class TestTransformsFloat32(Transforms, TransformsFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@skipIfNoCuda
class TestTransformsFloat64(Transforms, TransformsFloat64Only, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .torchscript_consistency_impl import Transforms, TransformsFloat32Only, TransformsFloat64Only
class TestTransformsFloat32(Transforms, TransformsFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestTransformsFloat64(Transforms, TransformsFloat64Only, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
import torch
from torchaudio_unittest import common_utils
from .kaldi_compatibility_impl import Kaldi
@common_utils.skipIfNoCuda
class TestKaldiFloat32(Kaldi, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@common_utils.skipIfNoCuda
class TestKaldiFloat64(Kaldi, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
"""Test suites for checking numerical compatibility against Kaldi"""
import torchaudio.compliance.kaldi
from parameterized import parameterized
from torchaudio_unittest.common_utils import (
TestBaseMixin,
TempDirMixin,
load_params,
skipIfNoExec,
get_asset_path,
load_wav,
)
from torchaudio_unittest.common_utils.kaldi_utils import (
convert_args,
run_kaldi,
)
class Kaldi(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
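"""Compare torchaudio output against the reference produced by the Kaldi
binary, after casting the reference to the test dtype/device."""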
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@parameterized.expand(load_params('kaldi_test_fbank_args.jsonl'))
@skipIfNoExec('compute-fbank-feats')
def test_fbank(self, kwargs):
"""fbank should be numerically compatible with compute-fbank-feats"""
wave_file = get_asset_path('kaldi_file.wav')
waveform = load_wav(wave_file, normalize=False)[0].to(dtype=self.dtype, device=self.device)
result = torchaudio.compliance.kaldi.fbank(waveform, **kwargs)
command = ['compute-fbank-feats'] + convert_args(**kwargs) + ['scp:-', 'ark:-']
kaldi_result = run_kaldi(command, 'scp', wave_file)
self.assert_equal(result, expected=kaldi_result, rtol=1e-4, atol=1e-8)
@parameterized.expand(load_params('kaldi_test_spectrogram_args.jsonl'))
@skipIfNoExec('compute-spectrogram-feats')
def test_spectrogram(self, kwargs):
"""spectrogram should be numerically compatible with compute-spectrogram-feats"""
wave_file = get_asset_path('kaldi_file.wav')
waveform = load_wav(wave_file, normalize=False)[0].to(dtype=self.dtype, device=self.device)
result = torchaudio.compliance.kaldi.spectrogram(waveform, **kwargs)
command = ['compute-spectrogram-feats'] + convert_args(**kwargs) + ['scp:-', 'ark:-']
kaldi_result = run_kaldi(command, 'scp', wave_file)
self.assert_equal(result, expected=kaldi_result, rtol=1e-4, atol=1e-8)
@parameterized.expand(load_params('kaldi_test_mfcc_args.jsonl'))
@skipIfNoExec('compute-mfcc-feats')
def test_mfcc(self, kwargs):
"""mfcc should be numerically compatible with compute-mfcc-feats"""
wave_file = get_asset_path('kaldi_file.wav')
waveform = load_wav(wave_file, normalize=False)[0].to(dtype=self.dtype, device=self.device)
result = torchaudio.compliance.kaldi.mfcc(waveform, **kwargs)
command = ['compute-mfcc-feats'] + convert_args(**kwargs) + ['scp:-', 'ark:-']
kaldi_result = run_kaldi(command, 'scp', wave_file)
self.assert_equal(result, expected=kaldi_result, rtol=1e-4, atol=1e-8)
|
import torch
from torchaudio_unittest import common_utils
from .kaldi_compatibility_impl import Kaldi
class TestKaldiFloat32(Kaldi, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat64(Kaldi, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .librosa_compatibility_test_impl import TransformsTestBase
@skipIfNoCuda
class TestTransforms(TransformsTestBase, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .librosa_compatibility_test_impl import TransformsTestBase
class TestTransforms(TransformsTestBase, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
"""Generate opus file for testing load functions"""
import argparse
import subprocess
import scipy.io.wavfile
import torch
def _parse_args():
parser = argparse.ArgumentParser(
description='Generate opus files for test'
)
parser.add_argument('--num-channels', required=True, type=int)
parser.add_argument('--compression-level', required=True, type=int, choices=list(range(11)))
parser.add_argument('--bitrate', default='96k')
return parser.parse_args()
def convert_to_opus(
src_path, dst_path,
*, bitrate, compression_level):
"""Convert audio file with `ffmpeg` command."""
command = ['ffmpeg', '-y', '-i', src_path, '-c:a', 'libopus', '-b:a', bitrate]
if compression_level is not None:
command += ['-compression_level', str(compression_level)]
command += [dst_path]
print(' '.join(command))
subprocess.run(command, check=True)
def _generate(num_channels, compression_level, bitrate):
org_path = 'original.wav'
ops_path = f'{bitrate}_{compression_level}_{num_channels}ch.opus'
# Note: ffmpeg forces sample rate 48k Hz for opus https://stackoverflow.com/a/39186779
# 1. generate original wav
data = torch.linspace(-32768, 32767, 32768, dtype=torch.int16).repeat([num_channels, 1]).t()
scipy.io.wavfile.write(org_path, 48000, data.numpy())
# 2. convert to opus
convert_to_opus(org_path, ops_path, bitrate=bitrate, compression_level=compression_level)
def _main():
args = _parse_args()
_generate(args.num_channels, args.compression_level, args.bitrate)
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
"""Generate the conf JSON from fairseq pretrained weight file, that is consumed by unit tests
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/wav2vec
2. Download the dict from https://dl.fbaipublicfiles.com/fairseq/wav2vec/dict.ltr.txt
and put it in the same directory as parameter files.
3. Run this script and save the resulting JSON configuration in assets directory.
Example:
```
# Pretrained
python generate_wav2vec2_model_config.py \
--model-file wav2vec_small.pt \
> wav2vec_small.json
python generate_wav2vec2_model_config.py \
--model-file libri960_big.pt \
> libri960_big.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec_vox_new.pt \
> wav2vec_vox_new.json
# Fine-tuned
python generate_wav2vec2_model_config.py \
--model-file wav2vec_small_960h.pt \
> wav2vec_small_960h.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec_big_960h.pt \
> wav2vec_large_960h.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec2_vox_960h_new.pt \
> wav2vec_large_lv60_960h.json
python generate_wav2vec2_model_config.py \
--model-file wav2vec_vox_960h_pl.pt \
> wav2vec_large_lv60_self_960h.json
```
"""
import os
import json
import argparse
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--model-file',
required=True,
help=(
'A pt file from '
'https://github.com/pytorch/fairseq/tree/main/examples/wav2vec'
)
)
parser.add_argument(
'--dict-dir',
help=(
'Directory where `dict.ltr.txt` file is found. '
'Default: the directory of the given model.'
)
)
args = parser.parse_args()
if args.dict_dir is None:
args.dict_dir = os.path.dirname(args.model_file)
return args
def _to_json(conf):
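# OmegaConf containers are not directly JSON-serializable; round-trip
# through YAML to obtain plain Python dicts/lists.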
import yaml
from omegaconf import OmegaConf
return yaml.safe_load(OmegaConf.to_yaml(conf))
def _load(model_file, dict_dir):
import fairseq
overrides = {'data': dict_dir}
_, args, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[model_file], arg_overrides=overrides
)
return _to_json(args['model'])
def _main():
args = _parse_args()
conf = _load(args.model_file, args.dict_dir)
if conf['_name'] == 'wav2vec_ctc':
del conf['data']
del conf['w2v_args']['task']['data']
conf['w2v_args'] = {
key: conf['w2v_args'][key] for key in ['model', 'task']
}
print(json.dumps(conf, indent=4, sort_keys=True))
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
"""Generate the conf JSONs from fairseq pretrained weight file, consumed by unit tests
Note:
The current configuration files were generated on fairseq e47a4c84
Usage:
1. Download pretrained parameters from https://github.com/pytorch/fairseq/tree/main/examples/hubert
2. Run this script and save the resulting JSON configuration in assets directory.
Example:
```
python generate_hubert_model_config.py \
--model-file hubert_base_ls960.pt \
> hubert_base_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k.pt \
> hubert_large_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_large_ll60k_finetune_ls960.pt \
> hubert_large_ll60k_finetune_ls960.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k.pt \
> hubert_xlarge_ll60k.json
python generate_hubert_model_config.py \
--model-file hubert_xlarge_ll60k_finetune_ls960.pt \
> hubert_xlarge_ll60k_finetune_ls960.json
```
"""
import json
import argparse
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'--model-file',
required=True,
help=(
'A pt file from '
'https://github.com/pytorch/fairseq/tree/main/examples/hubert'
)
)
return parser.parse_args()
def _load(model_file):
import fairseq
from omegaconf import OmegaConf
models, cfg, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([model_file])
model = models[0]
cfg = OmegaConf.to_container(cfg)
return model, cfg
def _main():
args = _parse_args()
model, cfg = _load(args.model_file)
if model.__class__.__name__ == 'HubertModel':
cfg['task']['data'] = '/foo/bar'
cfg['task']['label_dir'] = None
conf = {
'_name': 'hubert',
'model': cfg['model'],
'task': cfg['task'],
'num_classes': model.num_classes,
}
elif model.__class__.__name__ == 'HubertCtc':
conf = cfg['model']
del conf['w2v_path']
keep = ['_name', 'task', 'model']
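# Prune the wrapped wav2vec config down to the keys listed in `keep`.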
for key in list(k for k in conf['w2v_args'] if k not in keep):
del conf['w2v_args'][key]
conf['data'] = '/foo/bar/'
conf['w2v_args']['task']['data'] = '/foo/bar'
conf['w2v_args']['task']['labels'] = []
conf['w2v_args']['task']['label_dir'] = '/foo/bar'
print(json.dumps(conf, indent=4, sort_keys=True))
if __name__ == '__main__':
_main()
|
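"""Generate Wav2Vec2 model config JSONs from HuggingFace Transformers pretrained weights"""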
import os
import json
from transformers import Wav2Vec2Model
_THIS_DIR = os.path.dirname(os.path.abspath(__file__))
def _main():
keys = [
# pretrained
"facebook/wav2vec2-base",
"facebook/wav2vec2-large",
"facebook/wav2vec2-large-lv60",
"facebook/wav2vec2-base-10k-voxpopuli",
"facebook/wav2vec2-large-xlsr-53",
# finetuned
"facebook/wav2vec2-base-960h",
"facebook/wav2vec2-large-960h",
"facebook/wav2vec2-large-960h-lv60",
"facebook/wav2vec2-large-960h-lv60-self",
"facebook/wav2vec2-large-xlsr-53-german",
]
for key in keys:
path = os.path.join(_THIS_DIR, f'{key}.json')
print('Generating ', path)
cfg = Wav2Vec2Model.from_pretrained(key).config
cfg = json.loads(cfg.to_json_string())
del cfg['_name_or_path']
with open(path, 'w') as file_:
file_.write(json.dumps(cfg, indent=4, sort_keys=True))
file_.write('\n')
if __name__ == '__main__':
_main()
|
from typing import Callable, Tuple
from functools import partial
import torch
from parameterized import parameterized
from torch import Tensor
import torchaudio.functional as F
from torch.autograd import gradcheck, gradgradcheck
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_whitenoise,
rnnt_utils,
)
class Autograd(TestBaseMixin):
def assert_grad(
self,
transform: Callable[..., Tensor],
inputs: Tuple[torch.Tensor],
*,
enable_all_grad: bool = True,
):
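"""Move tensor inputs to the target dtype/device (optionally enabling
gradients on all of them), then verify first- and second-order gradients
numerically with `gradcheck` / `gradgradcheck`."""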
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=self.dtype, device=self.device)
if enable_all_grad:
i.requires_grad = True
inputs_.append(i)
assert gradcheck(transform, inputs_)
assert gradgradcheck(transform, inputs_)
def test_lfilter_x(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
x.requires_grad = True
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_a(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
a.requires_grad = True
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_b(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
b.requires_grad = True
self.assert_grad(F.lfilter, (x, a, b), enable_all_grad=False)
def test_lfilter_all_inputs(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
self.assert_grad(F.lfilter, (x, a, b))
def test_lfilter_filterbanks(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=3)
a = torch.tensor([[0.7, 0.2, 0.6],
[0.8, 0.2, 0.9]])
b = torch.tensor([[0.4, 0.2, 0.9],
[0.7, 0.2, 0.6]])
self.assert_grad(partial(F.lfilter, batching=False), (x, a, b))
def test_lfilter_batching(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([[0.7, 0.2, 0.6],
[0.8, 0.2, 0.9]])
b = torch.tensor([[0.4, 0.2, 0.9],
[0.7, 0.2, 0.6]])
self.assert_grad(F.lfilter, (x, a, b))
def test_filtfilt_a(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
a.requires_grad = True
self.assert_grad(F.filtfilt, (x, a, b), enable_all_grad=False)
def test_filtfilt_b(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
b.requires_grad = True
self.assert_grad(F.filtfilt, (x, a, b), enable_all_grad=False)
def test_filtfilt_all_inputs(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
self.assert_grad(F.filtfilt, (x, a, b))
def test_filtfilt_batching(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=2)
a = torch.tensor([[0.7, 0.2, 0.6],
[0.8, 0.2, 0.9]])
b = torch.tensor([[0.4, 0.2, 0.9],
[0.7, 0.2, 0.6]])
self.assert_grad(F.filtfilt, (x, a, b))
def test_biquad(self):
torch.random.manual_seed(2434)
x = get_whitenoise(sample_rate=22050, duration=0.01, n_channels=1)
a = torch.tensor([0.7, 0.2, 0.6])
b = torch.tensor([0.4, 0.2, 0.9])
self.assert_grad(F.biquad, (x, b[0], b[1], b[2], a[0], a[1], a[2]))
@parameterized.expand([
(800, 0.7, True),
(800, 0.7, False),
])
def test_band_biquad(self, central_freq, Q, noise):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.band_biquad, (x, sr, central_freq, Q, noise))
@parameterized.expand([
(800, 0.7, 10),
(800, 0.7, -10),
])
def test_bass_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
gain = torch.tensor(gain)
self.assert_grad(F.bass_biquad, (x, sr, gain, central_freq, Q))
@parameterized.expand([
(3000, 0.7, 10),
(3000, 0.7, -10),
])
def test_treble_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
gain = torch.tensor(gain)
self.assert_grad(F.treble_biquad, (x, sr, gain, central_freq, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_allpass_biquad(self, central_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.allpass_biquad, (x, sr, central_freq, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_lowpass_biquad(self, cutoff_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
cutoff_freq = torch.tensor(cutoff_freq)
Q = torch.tensor(Q)
self.assert_grad(F.lowpass_biquad, (x, sr, cutoff_freq, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_highpass_biquad(self, cutoff_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
cutoff_freq = torch.tensor(cutoff_freq)
Q = torch.tensor(Q)
self.assert_grad(F.highpass_biquad, (x, sr, cutoff_freq, Q))
@parameterized.expand([
(800, 0.7, True),
(800, 0.7, False),
])
def test_bandpass_biquad(self, central_freq, Q, const_skirt_gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.bandpass_biquad, (x, sr, central_freq, Q, const_skirt_gain))
@parameterized.expand([
(800, 0.7, 10),
(800, 0.7, -10),
])
def test_equalizer_biquad(self, central_freq, Q, gain):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
gain = torch.tensor(gain)
self.assert_grad(F.equalizer_biquad, (x, sr, central_freq, gain, Q))
@parameterized.expand([
(800, 0.7, ),
])
def test_bandreject_biquad(self, central_freq, Q):
torch.random.manual_seed(2434)
sr = 22050
x = get_whitenoise(sample_rate=sr, duration=0.01, n_channels=1)
central_freq = torch.tensor(central_freq)
Q = torch.tensor(Q)
self.assert_grad(F.bandreject_biquad, (x, sr, central_freq, Q))
class AutogradFloat32(TestBaseMixin):
def assert_grad(
self,
transform: Callable[..., Tensor],
inputs: Tuple[torch.Tensor],
*,
enable_all_grad: bool = True,
):
inputs_ = []
for i in inputs:
if torch.is_tensor(i):
i = i.to(dtype=self.dtype, device=self.device)
if enable_all_grad:
i.requires_grad = True
inputs_.append(i)
# gradcheck with float32 requires higher atol and epsilon
assert gradcheck(transform, inputs_, eps=1e-3, atol=1e-3, nondet_tol=0.)
@parameterized.expand([
(rnnt_utils.get_B1_T10_U3_D4_data, ),
(rnnt_utils.get_B2_T4_U3_D3_data, ),
(rnnt_utils.get_B1_T2_U3_D5_data, ),
])
def test_rnnt_loss(self, data_func):
def get_data(data_func, device):
data = data_func()
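# Some data factories return a (data, ...) tuple; keep only the first
# element, which is the data dict.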
if isinstance(data, tuple):
data = data[0]
return data
data = get_data(data_func, self.device)
inputs = (
data["logits"].to(torch.float32), # logits
data["targets"], # targets
data["logit_lengths"], # logit_lengths
data["target_lengths"], # target_lengths
data["blank"], # blank
-1, # clamp
)
self.assert_grad(F.rnnt_loss, inputs, enable_all_grad=False)
|
import torch
import unittest
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .functional_impl import Functional
@skipIfNoCuda
class TestFunctionalFloat32(Functional, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
@skipIfNoCuda
class TestLFilterFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
import torchaudio.functional as F
from torchaudio_unittest.common_utils import (
skipIfNoSox,
skipIfNoExec,
TempDirMixin,
TorchaudioTestCase,
get_asset_path,
sox_utils,
load_wav,
save_wav,
get_whitenoise,
)
@skipIfNoSox
@skipIfNoExec('sox')
class TestFunctionalFiltering(TempDirMixin, TorchaudioTestCase):
def run_sox_effect(self, input_file, effect):
output_file = self.get_temp_path('expected.wav')
sox_utils.run_sox_effect(input_file, output_file, [str(e) for e in effect])
return load_wav(output_file)
def assert_sox_effect(self, result, input_path, effects, atol=1e-04, rtol=1e-5):
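"""Run the given sox effect chain on the input file and check that
`result` matches the sox output within tolerance."""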
expected, _ = self.run_sox_effect(input_path, effects)
self.assertEqual(result, expected, atol=atol, rtol=rtol)
def get_whitenoise(self, sample_rate=8000):
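"""Generate white noise, save it as a wav file in the temp dir, and return
both the tensor and the file path, so sox can process the same input."""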
noise = get_whitenoise(
sample_rate=sample_rate, duration=3, scale_factor=0.9,
)
path = self.get_temp_path("whitenoise.wav")
save_wav(path, noise, sample_rate)
return noise, path
def test_gain(self):
path = get_asset_path('steam-train-whistle-daniel_simon.wav')
data, _ = load_wav(path)
result = F.gain(data, 3)
self.assert_sox_effect(result, path, ['gain', 3])
def test_dither(self):
path = get_asset_path('steam-train-whistle-daniel_simon.wav')
data, _ = load_wav(path)
result = F.dither(data)
self.assert_sox_effect(result, path, ['dither'])
def test_dither_noise(self):
path = get_asset_path('steam-train-whistle-daniel_simon.wav')
data, _ = load_wav(path)
result = F.dither(data, noise_shaping=True)
self.assert_sox_effect(result, path, ['dither', '-s'], atol=1.5e-4)
def test_lowpass(self):
cutoff_freq = 3000
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.lowpass_biquad(data, sample_rate, cutoff_freq)
self.assert_sox_effect(result, path, ['lowpass', cutoff_freq], atol=1.5e-4)
def test_highpass(self):
cutoff_freq = 2000
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.highpass_biquad(data, sample_rate, cutoff_freq)
self.assert_sox_effect(result, path, ['highpass', cutoff_freq], atol=1.5e-4)
def test_allpass(self):
central_freq = 1000
q = 0.707
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.allpass_biquad(data, sample_rate, central_freq, q)
self.assert_sox_effect(result, path, ['allpass', central_freq, f'{q}q'])
def test_bandpass_with_csg(self):
central_freq = 1000
q = 0.707
const_skirt_gain = True
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bandpass_biquad(data, sample_rate, central_freq, q, const_skirt_gain)
self.assert_sox_effect(result, path, ['bandpass', '-c', central_freq, f'{q}q'])
def test_bandpass_without_csg(self):
central_freq = 1000
q = 0.707
const_skirt_gain = False
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bandpass_biquad(data, sample_rate, central_freq, q, const_skirt_gain)
self.assert_sox_effect(result, path, ['bandpass', central_freq, f'{q}q'])
def test_bandreject(self):
central_freq = 1000
q = 0.707
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bandreject_biquad(data, sample_rate, central_freq, q)
self.assert_sox_effect(result, path, ['bandreject', central_freq, f'{q}q'])
def test_band_with_noise(self):
central_freq = 1000
q = 0.707
noise = True
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.band_biquad(data, sample_rate, central_freq, q, noise)
self.assert_sox_effect(result, path, ['band', '-n', central_freq, f'{q}q'])
def test_band_without_noise(self):
central_freq = 1000
q = 0.707
noise = False
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.band_biquad(data, sample_rate, central_freq, q, noise)
self.assert_sox_effect(result, path, ['band', central_freq, f'{q}q'])
def test_treble(self):
central_freq = 1000
q = 0.707
gain = 40
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.treble_biquad(data, sample_rate, gain, central_freq, q)
self.assert_sox_effect(result, path, ['treble', gain, central_freq, f'{q}q'])
def test_bass(self):
central_freq = 1000
q = 0.707
gain = 40
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.bass_biquad(data, sample_rate, gain, central_freq, q)
self.assert_sox_effect(result, path, ['bass', gain, central_freq, f'{q}q'], atol=1.5e-4)
def test_deemph(self):
sample_rate = 44100
data, path = self.get_whitenoise(sample_rate)
result = F.deemph_biquad(data, sample_rate)
self.assert_sox_effect(result, path, ['deemph'])
def test_riaa(self):
sample_rate = 44100
data, path = self.get_whitenoise(sample_rate)
result = F.riaa_biquad(data, sample_rate)
self.assert_sox_effect(result, path, ['riaa'])
def test_contrast(self):
enhancement_amount = 80.
data, path = self.get_whitenoise()
result = F.contrast(data, enhancement_amount)
self.assert_sox_effect(result, path, ['contrast', enhancement_amount])
def test_dcshift_with_limiter(self):
shift = 0.5
limiter_gain = 0.05
data, path = self.get_whitenoise()
result = F.dcshift(data, shift, limiter_gain)
self.assert_sox_effect(result, path, ['dcshift', shift, limiter_gain])
def test_dcshift_without_limiter(self):
shift = 0.6
data, path = self.get_whitenoise()
result = F.dcshift(data, shift)
self.assert_sox_effect(result, path, ['dcshift', shift])
def test_overdrive(self):
gain = 30
colour = 40
data, path = self.get_whitenoise()
result = F.overdrive(data, gain, colour)
self.assert_sox_effect(result, path, ['overdrive', gain, colour])
def test_phaser_sine(self):
gain_in = 0.5
gain_out = 0.8
delay_ms = 2.0
decay = 0.4
speed = 0.5
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.phaser(data, sample_rate, gain_in, gain_out, delay_ms, decay, speed, sinusoidal=True)
self.assert_sox_effect(result, path, ['phaser', gain_in, gain_out, delay_ms, decay, speed, '-s'])
def test_phaser_triangle(self):
gain_in = 0.5
gain_out = 0.8
delay_ms = 2.0
decay = 0.4
speed = 0.5
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.phaser(data, sample_rate, gain_in, gain_out, delay_ms, decay, speed, sinusoidal=False)
self.assert_sox_effect(result, path, ['phaser', gain_in, gain_out, delay_ms, decay, speed, '-t'])
def test_flanger_triangle_linear(self):
delay = 0.6
depth = 0.87
regen = 3.0
width = 0.9
speed = 0.5
phase = 30
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='triangular', interpolation='linear')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'triangle', phase, 'linear'])
def test_flanger_triangle_quad(self):
delay = 0.8
depth = 0.88
regen = 3.0
width = 0.4
speed = 0.5
phase = 40
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='triangular', interpolation='quadratic')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'triangle', phase, 'quadratic'])
def test_flanger_sine_linear(self):
delay = 0.8
depth = 0.88
regen = 3.0
width = 0.23
speed = 1.3
phase = 60
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='sinusoidal', interpolation='linear')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'sine', phase, 'linear'])
def test_flanger_sine_quad(self):
delay = 0.9
depth = 0.9
regen = 4.0
width = 0.23
speed = 1.3
phase = 25
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.flanger(
data, sample_rate, delay, depth, regen, width, speed, phase,
modulation='sinusoidal', interpolation='quadratic')
self.assert_sox_effect(
result, path, ['flanger', delay, depth, regen, width, speed, 'sine', phase, 'quadratic'])
def test_equalizer(self):
center_freq = 300
q = 0.707
gain = 1
sample_rate = 8000
data, path = self.get_whitenoise(sample_rate)
result = F.equalizer_biquad(data, sample_rate, center_freq, gain, q)
self.assert_sox_effect(result, path, ['equalizer', center_freq, q, gain])
def test_perf_biquad_filtering(self):
b0 = 0.4
b1 = 0.2
b2 = 0.9
a0 = 0.7
a1 = 0.2
a2 = 0.6
data, path = self.get_whitenoise()
result = F.lfilter(data, torch.tensor([a0, a1, a2]), torch.tensor([b0, b1, b2]))
self.assert_sox_effect(result, path, ['biquad', b0, b1, b2, a0, a1, a2])
|
import torch
import torchaudio.functional as F
import unittest
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, TorchaudioTestCase, skipIfNoSox
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
@unittest.expectedFailure
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform,
sample_rate,
format,
True,
compression
)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
import torch
from .autograd_impl import Autograd, AutogradFloat32
from torchaudio_unittest import common_utils
@common_utils.skipIfNoCuda
class TestAutogradLfilterCUDA(Autograd, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
@common_utils.skipIfNoCuda
class TestAutogradRNNTCUDA(AutogradFloat32, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
|
"""Test suites for jit-ability and its numerical compatibility"""
import unittest
import torch
import torchaudio.functional as F
from torchaudio_unittest import common_utils
from torchaudio_unittest.common_utils import (
TempDirMixin,
TestBaseMixin,
skipIfRocm,
torch_script,
)
class Functional(TempDirMixin, TestBaseMixin):
"""Implements test for `functional` module that are performed for different devices"""
def _assert_consistency(self, func, tensor, shape_only=False):
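# TorchScript-compile `func` and compare eager vs scripted outputs.
# The RNG is re-seeded before each call so that random ops (e.g. dither)
# match; `shape_only` compares only shapes, for ops whose values are
# nondeterministic.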
tensor = tensor.to(device=self.device, dtype=self.dtype)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(tensor)
torch.random.manual_seed(40)
ts_output = ts_func(tensor)
if shape_only:
ts_output = ts_output.shape
output = output.shape
self.assertEqual(ts_output, output)
def _assert_consistency_complex(self, func, tensor):
assert tensor.is_complex()
tensor = tensor.to(device=self.device, dtype=self.complex_dtype)
ts_func = torch_script(func)
torch.random.manual_seed(40)
output = func(tensor)
torch.random.manual_seed(40)
ts_output = ts_func(tensor)
self.assertEqual(ts_output, output)
def test_spectrogram(self):
def func(tensor):
n_fft = 400
ws = 400
hop = 200
pad = 0
window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
power = None
normalize = False
return F.spectrogram(tensor, pad, window, n_fft, hop, ws, power, normalize)
tensor = common_utils.get_whitenoise()
self._assert_consistency(func, tensor)
def test_inverse_spectrogram(self):
def func(tensor):
length = 400
n_fft = 400
hop = 200
ws = 400
pad = 0
window = torch.hann_window(ws, device=tensor.device, dtype=torch.float64)
normalize = False
return F.inverse_spectrogram(tensor, length, pad, window, n_fft, hop, ws, normalize)
waveform = common_utils.get_whitenoise(sample_rate=8000, duration=0.05)
tensor = common_utils.get_spectrogram(waveform, n_fft=400, hop_length=200)
self._assert_consistency_complex(func, tensor)
@skipIfRocm
def test_griffinlim(self):
def func(tensor):
n_fft = 400
ws = 400
hop = 200
window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
power = 2.
momentum = 0.99
n_iter = 32
length = 1000
rand_int = False
return F.griffinlim(tensor, window, n_fft, hop, ws, power, n_iter, momentum, length, rand_int)
tensor = torch.rand((1, 201, 6))
self._assert_consistency(func, tensor)
def test_compute_deltas(self):
def func(tensor):
win_length = 2 * 7 + 1
return F.compute_deltas(tensor, win_length=win_length)
channel = 13
n_mfcc = channel * 3
time = 1021
tensor = torch.randn(channel, n_mfcc, time)
self._assert_consistency(func, tensor)
def test_detect_pitch_frequency(self):
waveform = common_utils.get_sinusoid(sample_rate=44100)
def func(tensor):
sample_rate = 44100
return F.detect_pitch_frequency(tensor, sample_rate)
self._assert_consistency(func, waveform)
def test_melscale_fbanks(self):
if self.device != torch.device('cpu'):
raise unittest.SkipTest('No need to perform test on device other than CPU')
def func(_):
n_stft = 100
f_min = 0.0
f_max = 20.0
n_mels = 10
sample_rate = 16000
norm = "slaney"
return F.melscale_fbanks(n_stft, f_min, f_max, n_mels, sample_rate, norm)
dummy = torch.zeros(1, 1)
self._assert_consistency(func, dummy)
def test_linear_fbanks(self):
if self.device != torch.device('cpu'):
raise unittest.SkipTest('No need to perform test on device other than CPU')
def func(_):
n_stft = 100
f_min = 0.0
f_max = 20.0
n_filter = 10
sample_rate = 16000
return F.linear_fbanks(n_stft, f_min, f_max, n_filter, sample_rate)
dummy = torch.zeros(1, 1)
self._assert_consistency(func, dummy)
def test_amplitude_to_DB(self):
def func(tensor):
multiplier = 10.0
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
return F.amplitude_to_DB(tensor, multiplier, amin, db_multiplier, top_db)
tensor = torch.rand((6, 201))
self._assert_consistency(func, tensor)
def test_DB_to_amplitude(self):
def func(tensor):
ref = 1.
power = 1.
return F.DB_to_amplitude(tensor, ref, power)
tensor = torch.rand((1, 100))
self._assert_consistency(func, tensor)
def test_create_dct(self):
if self.device != torch.device('cpu'):
raise unittest.SkipTest('No need to perform test on device other than CPU')
def func(_):
n_mfcc = 40
n_mels = 128
norm = "ortho"
return F.create_dct(n_mfcc, n_mels, norm)
dummy = torch.zeros(1, 1)
self._assert_consistency(func, dummy)
def test_mu_law_encoding(self):
def func(tensor):
qc = 256
return F.mu_law_encoding(tensor, qc)
waveform = common_utils.get_whitenoise()
self._assert_consistency(func, waveform)
def test_mu_law_decoding(self):
def func(tensor):
qc = 256
return F.mu_law_decoding(tensor, qc)
tensor = torch.rand((1, 10))
self._assert_consistency(func, tensor)
def test_mask_along_axis(self):
def func(tensor):
mask_param = 100
mask_value = 30.
axis = 2
return F.mask_along_axis(tensor, mask_param, mask_value, axis)
tensor = torch.randn(2, 1025, 400)
self._assert_consistency(func, tensor)
def test_mask_along_axis_iid(self):
def func(tensor):
mask_param = 100
mask_value = 30.
axis = 2
return F.mask_along_axis_iid(tensor, mask_param, mask_value, axis)
tensor = torch.randn(4, 2, 1025, 400)
self._assert_consistency(func, tensor)
def test_gain(self):
def func(tensor):
gainDB = 2.0
return F.gain(tensor, gainDB)
tensor = torch.rand((1, 1000))
self._assert_consistency(func, tensor)
def test_dither_TPDF(self):
def func(tensor):
return F.dither(tensor, 'TPDF')
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor, shape_only=True)
def test_dither_RPDF(self):
def func(tensor):
return F.dither(tensor, 'RPDF')
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor, shape_only=True)
def test_dither_GPDF(self):
def func(tensor):
return F.dither(tensor, 'GPDF')
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor, shape_only=True)
def test_dither_noise_shaping(self):
def func(tensor):
return F.dither(tensor, noise_shaping=True)
tensor = common_utils.get_whitenoise(n_channels=2)
self._assert_consistency(func, tensor)
def test_lfilter(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise()
def func(tensor):
# Design an IIR lowpass filter using scipy.signal filter design
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.iirdesign.html#scipy.signal.iirdesign
#
# Example
# >>> from scipy.signal import iirdesign
# >>> b, a = iirdesign(0.2, 0.3, 1, 60)
b_coeffs = torch.tensor(
[
0.00299893,
-0.0051152,
0.00841964,
-0.00747802,
0.00841964,
-0.0051152,
0.00299893,
],
device=tensor.device,
dtype=tensor.dtype,
)
a_coeffs = torch.tensor(
[
1.0,
-4.8155751,
10.2217618,
-12.14481273,
8.49018171,
-3.3066882,
0.56088705,
],
device=tensor.device,
dtype=tensor.dtype,
)
return F.lfilter(tensor, a_coeffs, b_coeffs)
self._assert_consistency(func, waveform)
def test_filtfilt(self):
def func(tensor):
torch.manual_seed(296)
b_coeffs = torch.rand(4, device=tensor.device, dtype=tensor.dtype)
a_coeffs = torch.rand(4, device=tensor.device, dtype=tensor.dtype)
return F.filtfilt(tensor, a_coeffs, b_coeffs)
waveform = common_utils.get_whitenoise(sample_rate=8000)
self._assert_consistency(func, waveform)
def test_lowpass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
cutoff_freq = 3000.
return F.lowpass_biquad(tensor, sample_rate, cutoff_freq)
self._assert_consistency(func, waveform)
def test_highpass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
cutoff_freq = 2000.
return F.highpass_biquad(tensor, sample_rate, cutoff_freq)
self._assert_consistency(func, waveform)
def test_allpass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
return F.allpass_biquad(tensor, sample_rate, central_freq, q)
self._assert_consistency(func, waveform)
def test_bandpass_with_csg(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
const_skirt_gain = True
return F.bandpass_biquad(tensor, sample_rate, central_freq, q, const_skirt_gain)
self._assert_consistency(func, waveform)
def test_bandpass_without_csg(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
            const_skirt_gain = False
return F.bandpass_biquad(tensor, sample_rate, central_freq, q, const_skirt_gain)
self._assert_consistency(func, waveform)
def test_bandreject(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
return F.bandreject_biquad(tensor, sample_rate, central_freq, q)
self._assert_consistency(func, waveform)
def test_band_with_noise(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
noise = True
return F.band_biquad(tensor, sample_rate, central_freq, q, noise)
self._assert_consistency(func, waveform)
def test_band_without_noise(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
central_freq = 1000.
q = 0.707
noise = False
return F.band_biquad(tensor, sample_rate, central_freq, q, noise)
self._assert_consistency(func, waveform)
def test_treble(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
gain = 40.
central_freq = 1000.
q = 0.707
return F.treble_biquad(tensor, sample_rate, gain, central_freq, q)
self._assert_consistency(func, waveform)
def test_bass(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
gain = 40.
central_freq = 1000.
q = 0.707
return F.bass_biquad(tensor, sample_rate, gain, central_freq, q)
self._assert_consistency(func, waveform)
def test_deemph(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
return F.deemph_biquad(tensor, sample_rate)
self._assert_consistency(func, waveform)
def test_riaa(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
return F.riaa_biquad(tensor, sample_rate)
self._assert_consistency(func, waveform)
def test_equalizer(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
sample_rate = 44100
center_freq = 300.
gain = 1.
q = 0.707
return F.equalizer_biquad(tensor, sample_rate, center_freq, gain, q)
self._assert_consistency(func, waveform)
def test_perf_biquad_filtering(self):
if self.dtype == torch.float64:
raise unittest.SkipTest("This test is known to fail for float64")
waveform = common_utils.get_whitenoise()
def func(tensor):
a = torch.tensor([0.7, 0.2, 0.6], device=tensor.device, dtype=tensor.dtype)
b = torch.tensor([0.4, 0.2, 0.9], device=tensor.device, dtype=tensor.dtype)
return F.lfilter(tensor, a, b)
self._assert_consistency(func, waveform)
def test_sliding_window_cmn(self):
def func(tensor):
cmn_window = 600
min_cmn_window = 100
center = False
norm_vars = False
a = torch.tensor(
[
[
-1.915875792503357,
1.147700309753418
],
[
1.8242558240890503,
1.3869990110397339
]
],
device=tensor.device,
dtype=tensor.dtype
)
return F.sliding_window_cmn(a, cmn_window, min_cmn_window, center, norm_vars)
b = torch.tensor(
[
[
-1.8701,
-0.1196
],
[
1.8701,
0.1196
]
]
)
self._assert_consistency(func, b)
def test_contrast(self):
waveform = common_utils.get_whitenoise()
def func(tensor):
enhancement_amount = 80.
return F.contrast(tensor, enhancement_amount)
self._assert_consistency(func, waveform)
def test_dcshift(self):
waveform = common_utils.get_whitenoise()
def func(tensor):
shift = 0.5
limiter_gain = 0.05
return F.dcshift(tensor, shift, limiter_gain)
self._assert_consistency(func, waveform)
def test_overdrive(self):
waveform = common_utils.get_whitenoise()
def func(tensor):
gain = 30.
colour = 50.
return F.overdrive(tensor, gain, colour)
self._assert_consistency(func, waveform)
def test_phaser(self):
waveform = common_utils.get_whitenoise(sample_rate=44100)
def func(tensor):
gain_in = 0.5
gain_out = 0.8
delay_ms = 2.0
decay = 0.4
speed = 0.5
sample_rate = 44100
return F.phaser(tensor, sample_rate, gain_in, gain_out, delay_ms, decay, speed, sinusoidal=True)
self._assert_consistency(func, waveform)
def test_flanger(self):
torch.random.manual_seed(40)
waveform = torch.rand(2, 100) - 0.5
def func(tensor):
delay = 0.8
depth = 0.88
regen = 3.0
width = 0.23
speed = 1.3
phase = 60.
sample_rate = 44100
return F.flanger(tensor, sample_rate, delay, depth, regen, width, speed,
phase, modulation='sinusoidal', interpolation='linear')
self._assert_consistency(func, waveform)
def test_spectral_centroid(self):
def func(tensor):
sample_rate = 44100
n_fft = 400
ws = 400
hop = 200
pad = 0
window = torch.hann_window(ws, device=tensor.device, dtype=tensor.dtype)
return F.spectral_centroid(tensor, sample_rate, pad, window, n_fft, hop, ws)
tensor = common_utils.get_whitenoise(sample_rate=44100)
self._assert_consistency(func, tensor)
@common_utils.skipIfNoKaldi
def test_compute_kaldi_pitch(self):
if self.dtype != torch.float32 or self.device != torch.device('cpu'):
raise unittest.SkipTest("Only float32, cpu is supported.")
def func(tensor):
sample_rate: float = 44100.
return F.compute_kaldi_pitch(tensor, sample_rate)
tensor = common_utils.get_whitenoise(sample_rate=44100)
self._assert_consistency(func, tensor)
def test_resample_sinc(self):
def func(tensor):
sr1, sr2 = 16000, 8000
return F.resample(tensor, sr1, sr2, resampling_method="sinc_interpolation")
tensor = common_utils.get_whitenoise(sample_rate=16000)
self._assert_consistency(func, tensor)
def test_resample_kaiser(self):
def func(tensor):
sr1, sr2 = 16000, 8000
return F.resample(tensor, sr1, sr2, resampling_method="kaiser_window")
def func_beta(tensor):
sr1, sr2 = 16000, 8000
beta = 6.
return F.resample(tensor, sr1, sr2, resampling_method="kaiser_window", beta=beta)
tensor = common_utils.get_whitenoise(sample_rate=16000)
self._assert_consistency(func, tensor)
self._assert_consistency(func_beta, tensor)
def test_phase_vocoder(self):
def func(tensor):
n_freq = tensor.size(-2)
rate = 0.5
hop_length = 256
phase_advance = torch.linspace(
0,
3.14 * hop_length,
n_freq,
dtype=torch.real(tensor).dtype,
device=tensor.device,
)[..., None]
return F.phase_vocoder(tensor, rate, phase_advance)
tensor = torch.view_as_complex(torch.randn(2, 1025, 400, 2))
self._assert_consistency_complex(func, tensor)
class FunctionalFloat32Only(TestBaseMixin):
def test_rnnt_loss(self):
def func(tensor):
targets = torch.tensor([[1, 2]], device=tensor.device, dtype=torch.int32)
logit_lengths = torch.tensor([2], device=tensor.device, dtype=torch.int32)
target_lengths = torch.tensor([2], device=tensor.device, dtype=torch.int32)
return F.rnnt_loss(tensor, targets, logit_lengths, target_lengths)
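        # Logits below have shape (batch=1, max_T=2, max_U=3, num_classes=5);
        # rnnt_loss expects (batch, max seq length, max target length + 1, class),
        # so max_U = len(targets) + 1 accounts for the blank transition.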
logits = torch.tensor([[[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.1, 0.1],
[0.7, 0.1, 0.2, 0.1, 0.1]]]])
tensor = logits.to(device=self.device, dtype=torch.float32)
self._assert_consistency(func, tensor)
|
from parameterized import parameterized
import torch
import torchaudio.functional as F
from torchaudio_unittest.common_utils import (
get_sinusoid,
load_params,
save_wav,
skipIfNoExec,
TempDirMixin,
TestBaseMixin,
)
from torchaudio_unittest.common_utils.kaldi_utils import (
convert_args,
run_kaldi,
)
class Kaldi(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@skipIfNoExec('apply-cmvn-sliding')
def test_sliding_window_cmn(self):
"""sliding_window_cmn should be numerically compatible with apply-cmvn-sliding"""
kwargs = {
'cmn_window': 600,
'min_cmn_window': 100,
'center': False,
'norm_vars': False,
}
tensor = torch.randn(40, 10, dtype=self.dtype, device=self.device)
result = F.sliding_window_cmn(tensor, **kwargs)
command = ['apply-cmvn-sliding'] + convert_args(**kwargs) + ['ark:-', 'ark:-']
kaldi_result = run_kaldi(command, 'ark', tensor)
self.assert_equal(result, expected=kaldi_result)
class KaldiCPUOnly(TempDirMixin, TestBaseMixin):
def assert_equal(self, output, *, expected, rtol=None, atol=None):
expected = expected.to(dtype=self.dtype, device=self.device)
self.assertEqual(output, expected, rtol=rtol, atol=atol)
@parameterized.expand(load_params('kaldi_test_pitch_args.jsonl'))
@skipIfNoExec('compute-kaldi-pitch-feats')
def test_pitch_feats(self, kwargs):
"""compute_kaldi_pitch produces numerically compatible result with compute-kaldi-pitch-feats"""
sample_rate = kwargs['sample_rate']
waveform = get_sinusoid(dtype='float32', sample_rate=sample_rate)
result = F.compute_kaldi_pitch(waveform[0], **kwargs)
waveform = get_sinusoid(dtype='int16', sample_rate=sample_rate)
wave_file = self.get_temp_path('test.wav')
save_wav(wave_file, waveform, sample_rate)
command = ['compute-kaldi-pitch-feats'] + convert_args(**kwargs) + ['scp:-', 'ark:-']
kaldi_result = run_kaldi(command, 'scp', wave_file)
self.assert_equal(result, expected=kaldi_result)
|
import torch
from .autograd_impl import Autograd, AutogradFloat32
from torchaudio_unittest import common_utils
class TestAutogradLfilterCPU(Autograd, common_utils.PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
class TestAutogradRNNTCPU(AutogradFloat32, common_utils.PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
|
"""Test numerical consistency among single input and batched input."""
import itertools
import math
from parameterized import parameterized, parameterized_class
import torch
import torchaudio.functional as F
from torchaudio_unittest import common_utils
def _name_from_args(func, _, params):
"""Return a parameterized test name, based on parameter values."""
return "{}_{}".format(
func.__name__,
"_".join(str(arg) for arg in params.args))
@parameterized_class([
# Single-item batch isolates problems that come purely from adding a
# dimension (rather than processing multiple items)
{"batch_size": 1},
{"batch_size": 3},
])
class TestFunctional(common_utils.TorchaudioTestCase):
"""Test functions defined in `functional` module"""
backend = 'default'
def assert_batch_consistency(
self, functional, batch, *args, atol=1e-8, rtol=1e-5, seed=42,
**kwargs):
n = batch.size(0)
# Compute items separately, then batch the result
torch.random.manual_seed(seed)
items_input = batch.clone()
items_result = torch.stack([
functional(items_input[i], *args, **kwargs) for i in range(n)
])
# Batch the input and run
torch.random.manual_seed(seed)
batch_input = batch.clone()
batch_result = functional(batch_input, *args, **kwargs)
self.assertEqual(items_input, batch_input, rtol=rtol, atol=atol)
self.assertEqual(items_result, batch_result, rtol=rtol, atol=atol)
def test_griffinlim(self):
n_fft = 400
ws = 400
hop = 200
window = torch.hann_window(ws)
power = 2
momentum = 0.99
n_iter = 32
length = 1000
torch.random.manual_seed(0)
batch = torch.rand(self.batch_size, 1, 201, 6)
self.assert_batch_consistency(
F.griffinlim, batch, window, n_fft, hop, ws, power,
n_iter, momentum, length, 0, atol=5e-5)
@parameterized.expand(list(itertools.product(
[8000, 16000, 44100],
[1, 2],
)), name_func=_name_from_args)
def test_detect_pitch_frequency(self, sample_rate, n_channels):
# Use different frequencies to ensure each item in the batch returns a
# different answer.
torch.manual_seed(0)
frequencies = torch.randint(100, 1000, [self.batch_size])
waveforms = torch.stack([
common_utils.get_sinusoid(
frequency=frequency, sample_rate=sample_rate,
n_channels=n_channels, duration=5)
for frequency in frequencies
])
self.assert_batch_consistency(
F.detect_pitch_frequency, waveforms, sample_rate)
def test_amplitude_to_DB(self):
torch.manual_seed(0)
spec = torch.rand(self.batch_size, 2, 100, 100) * 200
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
# Test with & without a `top_db` clamp
self.assert_batch_consistency(
F.amplitude_to_DB, spec, amplitude_mult,
amin, db_mult, top_db=None)
self.assert_batch_consistency(
F.amplitude_to_DB, spec, amplitude_mult,
amin, db_mult, top_db=40.)
def test_amplitude_to_DB_itemwise_clamps(self):
"""Ensure that the clamps are separate for each spectrogram in a batch.
The clamp was determined per-batch in a prior implementation, which
meant it was determined by the loudest item, thus items weren't
independent. See:
https://github.com/pytorch/audio/issues/994
"""
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
top_db = 20.
# Make a batch of noise
torch.manual_seed(0)
spec = torch.rand([2, 2, 100, 100]) * 200
# Make one item blow out the other
spec[0] += 50
batchwise_dbs = F.amplitude_to_DB(spec, amplitude_mult, amin,
db_mult, top_db=top_db)
itemwise_dbs = torch.stack([
F.amplitude_to_DB(item, amplitude_mult, amin,
db_mult, top_db=top_db)
for item in spec
])
self.assertEqual(batchwise_dbs, itemwise_dbs)
def test_amplitude_to_DB_not_channelwise_clamps(self):
"""Check that clamps are applied per-item, not per channel."""
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
top_db = 40.
torch.manual_seed(0)
spec = torch.rand([1, 2, 100, 100]) * 200
# Make one channel blow out the other
spec[:, 0] += 50
specwise_dbs = F.amplitude_to_DB(spec, amplitude_mult, amin,
db_mult, top_db=top_db)
channelwise_dbs = torch.stack([
F.amplitude_to_DB(spec[:, i], amplitude_mult, amin,
db_mult, top_db=top_db)
for i in range(spec.size(-3))
])
# Just check channelwise gives a different answer.
difference = (specwise_dbs - channelwise_dbs).abs()
assert (difference >= 1e-5).any()
def test_contrast(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
self.assert_batch_consistency(
F.contrast, waveforms, enhancement_amount=80.)
def test_dcshift(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
self.assert_batch_consistency(
F.dcshift, waveforms, shift=0.5, limiter_gain=0.05)
def test_overdrive(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
self.assert_batch_consistency(
F.overdrive, waveforms, gain=45, colour=30)
def test_phaser(self):
sample_rate = 44100
n_channels = 2
waveform = common_utils.get_whitenoise(
sample_rate=sample_rate, n_channels=self.batch_size * n_channels,
duration=1)
batch = waveform.view(self.batch_size, n_channels, waveform.size(-1))
self.assert_batch_consistency(F.phaser, batch, sample_rate)
def test_flanger(self):
torch.random.manual_seed(0)
waveforms = torch.rand(self.batch_size, 2, 100) - 0.5
sample_rate = 44100
self.assert_batch_consistency(F.flanger, waveforms, sample_rate)
@parameterized.expand(list(itertools.product(
[True, False], # center
[True, False], # norm_vars
)), name_func=_name_from_args)
def test_sliding_window_cmn(self, center, norm_vars):
torch.manual_seed(0)
spectrogram = torch.rand(self.batch_size, 2, 1024, 1024) * 200
self.assert_batch_consistency(
F.sliding_window_cmn, spectrogram, center=center,
norm_vars=norm_vars)
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform(self, resampling_method):
num_channels = 3
sr = 16000
new_sr = sr // 2
multi_sound = common_utils.get_whitenoise(sample_rate=sr, n_channels=num_channels, duration=0.5,)
self.assert_batch_consistency(
F.resample, multi_sound, orig_freq=sr, new_freq=new_sr,
resampling_method=resampling_method, rtol=1e-4, atol=1e-7)
@common_utils.skipIfNoKaldi
def test_compute_kaldi_pitch(self):
sample_rate = 44100
n_channels = 2
waveform = common_utils.get_whitenoise(
sample_rate=sample_rate, n_channels=self.batch_size * n_channels)
batch = waveform.view(self.batch_size, n_channels, waveform.size(-1))
self.assert_batch_consistency(
F.compute_kaldi_pitch, batch, sample_rate=sample_rate)
def test_lfilter(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
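        # With batching=True, lfilter treats each row of ``a``/``b`` as the
        # filter coefficients for the corresponding batch item; the loop below
        # reproduces that behavior item by item.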
batchwise_output = F.lfilter(x, a, b, batching=True)
itemwise_output = torch.stack([
F.lfilter(x[i], a[i], b[i])
for i in range(self.batch_size)
])
self.assertEqual(batchwise_output, itemwise_output)
def test_filtfilt(self):
signal_length = 2048
torch.manual_seed(2434)
x = torch.randn(self.batch_size, signal_length)
a = torch.rand(self.batch_size, 3)
b = torch.rand(self.batch_size, 3)
batchwise_output = F.filtfilt(x, a, b)
itemwise_output = torch.stack([
F.filtfilt(x[i], a[i], b[i])
for i in range(self.batch_size)
])
self.assertEqual(batchwise_output, itemwise_output)
|
import unittest
from distutils.version import StrictVersion
import torch
from parameterized import param
import torchaudio.functional as F
from torchaudio._internal.module_utils import is_module_available
LIBROSA_AVAILABLE = is_module_available('librosa')
if LIBROSA_AVAILABLE:
import numpy as np
import librosa
from torchaudio_unittest.common_utils import (
TestBaseMixin,
nested_params,
get_whitenoise,
get_spectrogram,
)
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class Functional(TestBaseMixin):
"""Test suite for functions in `functional` module."""
dtype = torch.float64
@nested_params([0, 0.99])
def test_griffinlim(self, momentum):
# FFT params
n_fft = 400
win_length = n_fft
hop_length = n_fft // 4
window = torch.hann_window(win_length, device=self.device)
power = 1
# GriffinLim params
n_iter = 8
waveform = get_whitenoise(device=self.device, dtype=self.dtype)
specgram = get_spectrogram(
waveform, n_fft=n_fft, hop_length=hop_length, power=power,
win_length=win_length, window=window)
result = F.griffinlim(
specgram,
window=window,
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
power=power,
n_iter=n_iter,
momentum=momentum,
length=waveform.size(1),
rand_init=False)
expected = librosa.griffinlim(
specgram[0].cpu().numpy(),
n_iter=n_iter,
hop_length=hop_length,
momentum=momentum,
init=None,
length=waveform.size(1))[None, ...]
self.assertEqual(result, torch.from_numpy(expected), atol=5e-5, rtol=1e-07)
@nested_params(
[
param(),
param(n_mels=128, sample_rate=44100),
param(n_mels=128, fmin=2000.0, fmax=5000.0),
param(n_mels=56, fmin=100.0, fmax=9000.0),
param(n_mels=56, fmin=800.0, fmax=900.0),
param(n_mels=56, fmin=1900.0, fmax=900.0),
param(n_mels=10, fmin=1900.0, fmax=900.0),
],
[param(norm=n) for n in [None, 'slaney']],
[param(mel_scale=s) for s in ['htk', 'slaney']],
)
def test_create_mel_fb(self, n_mels=40, sample_rate=22050, n_fft=2048,
fmin=0.0, fmax=8000.0, norm=None, mel_scale="htk"):
if (norm == "slaney" and StrictVersion(librosa.__version__) < StrictVersion("0.7.2")):
self.skipTest('Test is known to fail with older versions of librosa.')
if self.device != 'cpu':
self.skipTest('No need to run this test on CUDA')
expected = librosa.filters.mel(
sr=sample_rate,
n_fft=n_fft,
n_mels=n_mels,
fmax=fmax,
fmin=fmin,
htk=mel_scale == "htk",
norm=norm).T
result = F.melscale_fbanks(
sample_rate=sample_rate,
n_mels=n_mels,
f_max=fmax,
f_min=fmin,
n_freqs=(n_fft // 2 + 1),
norm=norm,
mel_scale=mel_scale)
self.assertEqual(result, torch.from_numpy(expected), atol=7e-5, rtol=1.3e-6)
def test_amplitude_to_DB_power(self):
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
multiplier = 10.0
spec = get_spectrogram(get_whitenoise(device=self.device, dtype=self.dtype), power=2)
result = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
expected = librosa.core.power_to_db(spec[0].cpu().numpy())[None, ...]
self.assertEqual(result, torch.from_numpy(expected))
def test_amplitude_to_DB(self):
amin = 1e-10
db_multiplier = 0.0
top_db = 80.0
multiplier = 20.0
spec = get_spectrogram(get_whitenoise(device=self.device, dtype=self.dtype), power=1)
result = F.amplitude_to_DB(spec, multiplier, amin, db_multiplier, top_db)
expected = librosa.core.amplitude_to_db(spec[0].cpu().numpy())[None, ...]
self.assertEqual(result, torch.from_numpy(expected))
@unittest.skipIf(not LIBROSA_AVAILABLE, "Librosa not available")
class FunctionalComplex(TestBaseMixin):
@nested_params([0.5, 1.01, 1.3])
def test_phase_vocoder(self, rate):
hop_length = 256
num_freq = 1025
num_frames = 400
torch.random.manual_seed(42)
        # Due to the cumulative sum, numerical error from torch.float32 would
        # cause the bottom-right values of the stretched spectrogram to not
        # match librosa's output, so complex128 is used here.
spec = torch.randn(num_freq, num_frames, device=self.device, dtype=torch.complex128)
phase_advance = torch.linspace(
0,
np.pi * hop_length,
num_freq,
device=self.device,
dtype=torch.float64)[..., None]
stretched = F.phase_vocoder(spec, rate=rate, phase_advance=phase_advance)
expected_stretched = librosa.phase_vocoder(
spec.cpu().numpy(),
rate=rate,
hop_length=hop_length)
self.assertEqual(stretched, torch.from_numpy(expected_stretched))
|
import torch
from torchaudio_unittest.common_utils import skipIfNoCuda, PytorchTestCase
from .torchscript_consistency_impl import Functional, FunctionalFloat32Only
@skipIfNoCuda
class TestFunctionalFloat32(Functional, FunctionalFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@skipIfNoCuda
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .torchscript_consistency_impl import Functional, FunctionalFloat32Only
class TestFunctionalFloat32(Functional, FunctionalFloat32Only, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .kaldi_compatibility_test_impl import Kaldi
@skipIfNoCuda
class TestKaldiFloat32(Kaldi, PytorchTestCase):
dtype = torch.float32
device = torch.device('cuda')
@skipIfNoCuda
class TestKaldiFloat64(Kaldi, PytorchTestCase):
dtype = torch.float64
device = torch.device('cuda')
|
import torch
from torchaudio_unittest.common_utils import PytorchTestCase
from .kaldi_compatibility_test_impl import Kaldi, KaldiCPUOnly
class TestKaldiCPUOnly(KaldiCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat32(Kaldi, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
class TestKaldiFloat64(Kaldi, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
|
from torchaudio_unittest.common_utils import PytorchTestCase, skipIfNoCuda
from .librosa_compatibility_test_impl import Functional, FunctionalComplex
@skipIfNoCuda
class TestFunctionalCUDA(Functional, PytorchTestCase):
device = 'cuda'
@skipIfNoCuda
class TestFunctionalComplexCUDA(FunctionalComplex, PytorchTestCase):
device = 'cuda'
|
from torchaudio_unittest.common_utils import PytorchTestCase
from .librosa_compatibility_test_impl import Functional, FunctionalComplex
class TestFunctionalCPU(Functional, PytorchTestCase):
device = 'cpu'
class TestFunctionalComplexCPU(FunctionalComplex, PytorchTestCase):
device = 'cpu'
|
"""Test definition common to CPU and CUDA"""
import math
import itertools
import warnings
import numpy as np
import torch
import torchaudio.functional as F
from parameterized import parameterized
from scipy import signal
from torchaudio_unittest.common_utils import (
TestBaseMixin,
get_sinusoid,
nested_params,
get_whitenoise,
rnnt_utils,
)
class Functional(TestBaseMixin):
def _test_resample_waveform_accuracy(self, up_scale_factor=None, down_scale_factor=None,
resampling_method="sinc_interpolation", atol=1e-1, rtol=1e-4):
# resample the signal and compare it to the ground truth
n_to_trim = 20
sample_rate = 1000
new_sample_rate = sample_rate
if up_scale_factor is not None:
new_sample_rate = int(new_sample_rate * up_scale_factor)
if down_scale_factor is not None:
new_sample_rate = int(new_sample_rate / down_scale_factor)
duration = 5 # seconds
original_timestamps = torch.arange(0, duration, 1.0 / sample_rate)
sound = 123 * torch.cos(2 * math.pi * 3 * original_timestamps).unsqueeze(0)
estimate = F.resample(sound, sample_rate, new_sample_rate,
resampling_method=resampling_method).squeeze()
new_timestamps = torch.arange(0, duration, 1.0 / new_sample_rate)[:estimate.size(0)]
ground_truth = 123 * torch.cos(2 * math.pi * 3 * new_timestamps)
# trim the first/last n samples as these points have boundary effects
ground_truth = ground_truth[..., n_to_trim:-n_to_trim]
estimate = estimate[..., n_to_trim:-n_to_trim]
self.assertEqual(estimate, ground_truth, atol=atol, rtol=rtol)
def _test_costs_and_gradients(
self, data, ref_costs, ref_gradients, atol=1e-6, rtol=1e-2
):
logits_shape = data["logits"].shape
costs, gradients = rnnt_utils.compute_with_pytorch_transducer(data=data)
self.assertEqual(costs, ref_costs, atol=atol, rtol=rtol)
self.assertEqual(logits_shape, gradients.shape)
self.assertEqual(gradients, ref_gradients, atol=atol, rtol=rtol)
def test_lfilter_simple(self):
"""
Create a very basic signal,
Then make a simple 4th order delay
The output should be same as the input but shifted
"""
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs)
self.assertEqual(output_waveform[:, 3:], waveform[:, 0:-3], atol=1e-5, rtol=1e-5)
def test_lfilter_clamp(self):
input_signal = torch.ones(1, 44100 * 1, dtype=self.dtype, device=self.device)
b_coeffs = torch.tensor([1, 0], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, -0.95], dtype=self.dtype, device=self.device)
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=True)
assert output_signal.max() <= 1
output_signal = F.lfilter(input_signal, a_coeffs, b_coeffs, clamp=False)
assert output_signal.max() > 1
@parameterized.expand([
((44100,), (4,), (44100,)),
((3, 44100), (4,), (3, 44100,)),
((2, 3, 44100), (4,), (2, 3, 44100,)),
((1, 2, 3, 44100), (4,), (1, 2, 3, 44100,)),
((44100,), (2, 4), (2, 44100)),
((3, 44100), (1, 4), (3, 1, 44100)),
((1, 2, 44100), (3, 4), (1, 2, 3, 44100))
])
def test_lfilter_shape(self, input_shape, coeff_shape, target_shape):
torch.random.manual_seed(42)
waveform = torch.rand(*input_shape, dtype=self.dtype, device=self.device)
b_coeffs = torch.rand(*coeff_shape, dtype=self.dtype, device=self.device)
a_coeffs = torch.rand(*coeff_shape, dtype=self.dtype, device=self.device)
output_waveform = F.lfilter(waveform, a_coeffs, b_coeffs, batching=False)
assert input_shape == waveform.size()
assert target_shape == output_waveform.size()
def test_lfilter_9th_order_filter_stability(self):
"""
Validate the precision of lfilter against reference scipy implementation when using high order filter.
The reference implementation use cascaded second-order filters so is more numerically accurate.
"""
# create an impulse signal
x = torch.zeros(1024, dtype=self.dtype, device=self.device)
x[0] = 1
# get target impulse response
sos = signal.butter(9, 850, 'hp', fs=22050, output='sos')
y = torch.from_numpy(signal.sosfilt(sos, x.cpu().numpy())).to(self.dtype).to(self.device)
# get lfilter coefficients
b, a = signal.butter(9, 850, 'hp', fs=22050, output='ba')
b, a = torch.from_numpy(b).to(self.dtype).to(self.device), torch.from_numpy(
a).to(self.dtype).to(self.device)
# predict impulse response
yhat = F.lfilter(x, a, b, False)
self.assertEqual(yhat, y, atol=1e-4, rtol=1e-5)
def test_filtfilt_simple(self):
"""
Check that, for an arbitrary signal, applying filtfilt with filter coefficients
corresponding to a pure delay filter imparts no time delay.
"""
waveform = get_whitenoise(sample_rate=8000, n_channels=2, dtype=self.dtype).to(
device=self.device
)
b_coeffs = torch.tensor([0, 0, 0, 1], dtype=self.dtype, device=self.device)
a_coeffs = torch.tensor([1, 0, 0, 0], dtype=self.dtype, device=self.device)
padded_waveform = torch.cat(
(waveform, torch.zeros(2, 3, dtype=self.dtype, device=self.device)), axis=1
)
output_waveform = F.filtfilt(padded_waveform, a_coeffs, b_coeffs)
self.assertEqual(output_waveform, padded_waveform, atol=1e-5, rtol=1e-5)
def test_filtfilt_filter_sinusoid(self):
"""
Check that, for a signal comprising two sinusoids, applying filtfilt
with appropriate filter coefficients correctly removes the higher-frequency
sinusoid while imparting no time delay.
"""
T = 1.0
samples = 1000
waveform_k0 = get_sinusoid(
frequency=5, sample_rate=samples // T, dtype=self.dtype, device=self.device
).squeeze(0)
waveform_k1 = get_sinusoid(
frequency=200,
sample_rate=samples // T,
dtype=self.dtype,
device=self.device,
).squeeze(0)
waveform = waveform_k0 + waveform_k1
# Transfer function numerator and denominator polynomial coefficients
# corresponding to 8th-order Butterworth filter with 100-cycle/T cutoff.
# Generated with
# >>> from scipy import signal
# >>> b_coeffs, a_coeffs = signal.butter(8, 0.2)
b_coeffs = torch.tensor(
[
2.39596441e-05,
1.91677153e-04,
6.70870035e-04,
1.34174007e-03,
1.67717509e-03,
1.34174007e-03,
6.70870035e-04,
1.91677153e-04,
2.39596441e-05,
],
dtype=self.dtype,
device=self.device,
)
a_coeffs = torch.tensor(
[
1.0,
-4.78451489,
10.44504107,
-13.45771989,
11.12933104,
-6.0252604,
2.0792738,
-0.41721716,
0.0372001,
],
dtype=self.dtype,
device=self.device,
)
# Extend waveform in each direction, preserving periodicity.
padded_waveform = torch.cat((waveform[:-1], waveform, waveform[1:]))
output_waveform = F.filtfilt(padded_waveform, a_coeffs, b_coeffs)
# Remove padding from output waveform; confirm that result
# closely matches waveform_k0.
self.assertEqual(
output_waveform[samples - 1: 2 * samples - 1],
waveform_k0,
atol=1e-3,
rtol=1e-3,
)
@parameterized.expand([(0., ), (1., ), (2., ), (3., )])
def test_spectrogram_grad_at_zero(self, power):
"""The gradient of power spectrogram should not be nan but zero near x=0
https://github.com/pytorch/audio/issues/993
"""
x = torch.zeros(1, 22050, requires_grad=True)
spec = F.spectrogram(
x,
pad=0,
window=None,
n_fft=2048,
hop_length=None,
win_length=None,
power=power,
normalized=False,
)
spec.sum().backward()
assert not x.grad.isnan().sum()
def test_compute_deltas_one_channel(self):
specgram = torch.tensor([[[1.0, 2.0, 3.0, 4.0]]], dtype=self.dtype, device=self.device)
expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5]]], dtype=self.dtype, device=self.device)
computed = F.compute_deltas(specgram, win_length=3)
self.assertEqual(computed, expected)
def test_compute_deltas_two_channels(self):
specgram = torch.tensor([[[1.0, 2.0, 3.0, 4.0],
[1.0, 2.0, 3.0, 4.0]]], dtype=self.dtype, device=self.device)
expected = torch.tensor([[[0.5, 1.0, 1.0, 0.5],
[0.5, 1.0, 1.0, 0.5]]], dtype=self.dtype, device=self.device)
computed = F.compute_deltas(specgram, win_length=3)
self.assertEqual(computed, expected)
@parameterized.expand([(100,), (440,)])
def test_detect_pitch_frequency_pitch(self, frequency):
sample_rate = 44100
test_sine_waveform = get_sinusoid(
frequency=frequency, sample_rate=sample_rate, duration=5
)
freq = F.detect_pitch_frequency(test_sine_waveform, sample_rate)
threshold = 1
s = ((freq - frequency).abs() > threshold).sum()
self.assertFalse(s)
@parameterized.expand([([100, 100],), ([2, 100, 100],), ([2, 2, 100, 100],)])
def test_amplitude_to_DB_reversible(self, shape):
"""Round trip between amplitude and db should return the original for various shape
This implicitly also tests `DB_to_amplitude`.
"""
amplitude_mult = 20.
power_mult = 10.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
torch.manual_seed(0)
spec = torch.rand(*shape, dtype=self.dtype, device=self.device) * 200
# Spectrogram amplitude -> DB -> amplitude
db = F.amplitude_to_DB(spec, amplitude_mult, amin, db_mult, top_db=None)
x2 = F.DB_to_amplitude(db, ref, 0.5)
self.assertEqual(x2, spec, atol=5e-5, rtol=1e-5)
# Spectrogram power -> DB -> power
db = F.amplitude_to_DB(spec, power_mult, amin, db_mult, top_db=None)
x2 = F.DB_to_amplitude(db, ref, 1.)
self.assertEqual(x2, spec)
@parameterized.expand([([100, 100],), ([2, 100, 100],), ([2, 2, 100, 100],)])
def test_amplitude_to_DB_top_db_clamp(self, shape):
"""Ensure values are properly clamped when `top_db` is supplied."""
amplitude_mult = 20.
amin = 1e-10
ref = 1.0
db_mult = math.log10(max(amin, ref))
top_db = 40.
torch.manual_seed(0)
# A random tensor is used for increased entropy, but the max and min for
# each spectrogram still need to be predictable. The max determines the
# decibel cutoff, and the distance from the min must be large enough
# that it triggers a clamp.
spec = torch.rand(*shape, dtype=self.dtype, device=self.device)
# Ensure each spectrogram has a min of 0 and a max of 1.
spec -= spec.amin([-2, -1])[..., None, None]
spec /= spec.amax([-2, -1])[..., None, None]
# Expand the range to (0, 200) - wide enough to properly test clamping.
spec *= 200
decibels = F.amplitude_to_DB(spec, amplitude_mult, amin,
db_mult, top_db=top_db)
# Ensure the clamp was applied
below_limit = decibels < 6.0205
assert not below_limit.any(), (
"{} decibel values were below the expected cutoff:\n{}".format(
below_limit.sum().item(), decibels
)
)
# Ensure it didn't over-clamp
close_to_limit = decibels < 6.0207
assert close_to_limit.any(), (
f"No values were close to the limit. Did it over-clamp?\n{decibels}"
)
@parameterized.expand(
list(itertools.product([(2, 1025, 400), (1, 201, 100)], [100], [0., 30.], [1, 2]))
)
def test_mask_along_axis(self, shape, mask_param, mask_value, axis):
torch.random.manual_seed(42)
specgram = torch.randn(*shape, dtype=self.dtype, device=self.device)
mask_specgram = F.mask_along_axis(specgram, mask_param, mask_value, axis)
other_axis = 1 if axis == 2 else 2
masked_columns = (mask_specgram == mask_value).sum(other_axis)
num_masked_columns = (masked_columns == mask_specgram.size(other_axis)).sum()
num_masked_columns = torch.div(
num_masked_columns, mask_specgram.size(0), rounding_mode='floor')
assert mask_specgram.size() == specgram.size()
assert num_masked_columns < mask_param
@parameterized.expand(list(itertools.product([100], [0., 30.], [2, 3])))
def test_mask_along_axis_iid(self, mask_param, mask_value, axis):
torch.random.manual_seed(42)
specgrams = torch.randn(4, 2, 1025, 400, dtype=self.dtype, device=self.device)
mask_specgrams = F.mask_along_axis_iid(specgrams, mask_param, mask_value, axis)
other_axis = 2 if axis == 3 else 3
masked_columns = (mask_specgrams == mask_value).sum(other_axis)
num_masked_columns = (masked_columns == mask_specgrams.size(other_axis)).sum(-1)
assert mask_specgrams.size() == specgrams.size()
assert (num_masked_columns < mask_param).sum() == num_masked_columns.numel()
@parameterized.expand(
list(itertools.product([(2, 1025, 400), (1, 201, 100)], [100], [0., 30.], [1, 2]))
)
def test_mask_along_axis_preserve(self, shape, mask_param, mask_value, axis):
"""mask_along_axis should not alter original input Tensor
Test is run 5 times to bound the probability of no masking occurring to 1e-10
See https://github.com/pytorch/audio/issues/1478
"""
torch.random.manual_seed(42)
for _ in range(5):
specgram = torch.randn(*shape, dtype=self.dtype, device=self.device)
specgram_copy = specgram.clone()
F.mask_along_axis(specgram, mask_param, mask_value, axis)
self.assertEqual(specgram, specgram_copy)
@parameterized.expand(list(itertools.product([100], [0., 30.], [2, 3])))
def test_mask_along_axis_iid_preserve(self, mask_param, mask_value, axis):
"""mask_along_axis_iid should not alter original input Tensor
Test is run 5 times to bound the probability of no masking occurring to 1e-10
See https://github.com/pytorch/audio/issues/1478
"""
torch.random.manual_seed(42)
for _ in range(5):
specgrams = torch.randn(4, 2, 1025, 400, dtype=self.dtype, device=self.device)
specgrams_copy = specgrams.clone()
F.mask_along_axis_iid(specgrams, mask_param, mask_value, axis)
self.assertEqual(specgrams, specgrams_copy)
@parameterized.expand(list(itertools.product(
["sinc_interpolation", "kaiser_window"],
[16000, 44100],
)))
def test_resample_identity(self, resampling_method, sample_rate):
waveform = get_whitenoise(sample_rate=sample_rate, duration=1)
resampled = F.resample(waveform, sample_rate, sample_rate)
self.assertEqual(waveform, resampled)
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform_upsample_size(self, resampling_method):
sr = 16000
waveform = get_whitenoise(sample_rate=sr, duration=0.5,)
upsampled = F.resample(waveform, sr, sr * 2, resampling_method=resampling_method)
assert upsampled.size(-1) == waveform.size(-1) * 2
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform_downsample_size(self, resampling_method):
sr = 16000
waveform = get_whitenoise(sample_rate=sr, duration=0.5,)
downsampled = F.resample(waveform, sr, sr // 2, resampling_method=resampling_method)
assert downsampled.size(-1) == waveform.size(-1) // 2
@parameterized.expand([("sinc_interpolation"), ("kaiser_window")])
def test_resample_waveform_identity_size(self, resampling_method):
sr = 16000
waveform = get_whitenoise(sample_rate=sr, duration=0.5,)
resampled = F.resample(waveform, sr, sr, resampling_method=resampling_method)
assert resampled.size(-1) == waveform.size(-1)
@parameterized.expand(list(itertools.product(
["sinc_interpolation", "kaiser_window"],
list(range(1, 20)),
)))
def test_resample_waveform_downsample_accuracy(self, resampling_method, i):
self._test_resample_waveform_accuracy(down_scale_factor=i * 2, resampling_method=resampling_method)
@parameterized.expand(list(itertools.product(
["sinc_interpolation", "kaiser_window"],
list(range(1, 20)),
)))
def test_resample_waveform_upsample_accuracy(self, resampling_method, i):
self._test_resample_waveform_accuracy(up_scale_factor=1.0 + i / 20.0, resampling_method=resampling_method)
@nested_params([0.5, 1.01, 1.3])
def test_phase_vocoder_shape(self, rate):
"""Verify the output shape of phase vocoder"""
hop_length = 256
num_freq = 1025
num_frames = 400
batch_size = 2
torch.random.manual_seed(42)
spec = torch.randn(
batch_size, num_freq, num_frames, dtype=self.complex_dtype, device=self.device)
phase_advance = torch.linspace(
0,
np.pi * hop_length,
num_freq,
dtype=self.dtype, device=self.device)[..., None]
spec_stretch = F.phase_vocoder(spec, rate=rate, phase_advance=phase_advance)
assert spec.dim() == spec_stretch.dim()
expected_shape = torch.Size([batch_size, num_freq, int(np.ceil(num_frames / rate))])
output_shape = spec_stretch.shape
assert output_shape == expected_shape
@parameterized.expand(
[
# words
["", "", 0], # equal
["abc", "abc", 0],
["ᑌᑎIᑕO", "ᑌᑎIᑕO", 0],
["abc", "", 3], # deletion
["aa", "aaa", 1],
["aaa", "aa", 1],
["ᑌᑎI", "ᑌᑎIᑕO", 2],
["aaa", "aba", 1], # substitution
["aba", "aaa", 1],
["aba", " ", 3],
["abc", "bcd", 2], # mix deletion and substitution
["0ᑌᑎI", "ᑌᑎIᑕO", 3],
# sentences
[["hello", "", "Tᕮ᙭T"], ["hello", "", "Tᕮ᙭T"], 0], # equal
[[], [], 0],
[["hello", "world"], ["hello", "world", "!"], 1], # deletion
[["hello", "world"], ["world"], 1],
[["hello", "world"], [], 2],
[["Tᕮ᙭T", ], ["world"], 1], # substitution
[["Tᕮ᙭T", "XD"], ["world", "hello"], 2],
[["", "XD"], ["world", ""], 2],
["aba", " ", 3],
[["hello", "world"], ["world", "hello", "!"], 2], # mix deletion and substitution
[["Tᕮ᙭T", "world", "LOL", "XD"], ["world", "hello", "ʕ•́ᴥ•̀ʔっ"], 3],
]
)
def test_simple_case_edit_distance(self, seq1, seq2, distance):
assert F.edit_distance(seq1, seq2) == distance
assert F.edit_distance(seq2, seq1) == distance
@nested_params(
[-4, -2, 0, 2, 4],
)
def test_pitch_shift_shape(self, n_steps):
sample_rate = 16000
torch.random.manual_seed(42)
waveform = torch.rand(2, 44100 * 1, dtype=self.dtype, device=self.device)
waveform_shift = F.pitch_shift(waveform, sample_rate, n_steps)
assert waveform.size() == waveform_shift.size()
def test_rnnt_loss_basic_backward(self):
logits, targets, logit_lengths, target_lengths = rnnt_utils.get_basic_data(self.device)
loss = F.rnnt_loss(logits, targets, logit_lengths, target_lengths)
loss.backward()
def test_rnnt_loss_basic_forward_no_grad(self):
"""In early stage, calls to `rnnt_loss` resulted in segmentation fault when
`logits` have `requires_grad = False`. This test makes sure that this no longer
occurs and the functional call runs without error.
See https://github.com/pytorch/audio/pull/1707
"""
logits, targets, logit_lengths, target_lengths = rnnt_utils.get_basic_data(self.device)
logits.requires_grad_(False)
F.rnnt_loss(logits, targets, logit_lengths, target_lengths)
@parameterized.expand([
(rnnt_utils.get_B1_T2_U3_D5_data, torch.float32, 1e-6, 1e-2),
(rnnt_utils.get_B2_T4_U3_D3_data, torch.float32, 1e-6, 1e-2),
(rnnt_utils.get_B1_T2_U3_D5_data, torch.float16, 1e-3, 1e-2),
(rnnt_utils.get_B2_T4_U3_D3_data, torch.float16, 1e-3, 1e-2),
])
def test_rnnt_loss_costs_and_gradients(self, data_func, dtype, atol, rtol):
data, ref_costs, ref_gradients = data_func(
dtype=dtype,
device=self.device,
)
self._test_costs_and_gradients(
data=data,
ref_costs=ref_costs,
ref_gradients=ref_gradients,
atol=atol,
rtol=rtol,
)
def test_rnnt_loss_costs_and_gradients_random_data_with_numpy_fp32(self):
seed = 777
for i in range(5):
data = rnnt_utils.get_random_data(dtype=torch.float32, device=self.device, seed=(seed + i))
ref_costs, ref_gradients = rnnt_utils.compute_with_numpy_transducer(data=data)
self._test_costs_and_gradients(
data=data, ref_costs=ref_costs, ref_gradients=ref_gradients
)
class FunctionalCPUOnly(TestBaseMixin):
def test_melscale_fbanks_no_warning_high_n_freq(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.melscale_fbanks(288, 0, 8000, 128, 16000)
assert len(w) == 0
def test_melscale_fbanks_no_warning_low_n_mels(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.melscale_fbanks(201, 0, 8000, 89, 16000)
assert len(w) == 0
def test_melscale_fbanks_warning(self):
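        # melscale_fbanks emits a warning when at least one mel filterbank row
        # ends up all-zero, which happens when n_mels is large relative to n_freqs.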
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
F.melscale_fbanks(201, 0, 8000, 128, 16000)
assert len(w) == 1
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os
import re
import pytorch_sphinx_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.6'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinxcontrib.katex',
'sphinxcontrib.bibtex',
'sphinx_gallery.gen_gallery',
]
# katex options
#
#
katex_options = r'''
delimiters : [
{left: "$$", right: "$$", display: true},
{left: "\\(", right: "\\)", display: false},
{left: "\\[", right: "\\]", display: true}
]
'''
bibtex_bibfiles = ['refs.bib']
def _get_var(var, default=False):
if var not in os.environ:
return default
val = os.environ.get(var, '0')
trues = ['1', 'true', 'TRUE', 'on', 'ON', 'yes', 'YES']
falses = ['0', 'false', 'FALSE', 'off', 'OFF', 'no', 'NO']
if val in trues:
return True
if val not in falses:
print(
f' --- WARNING: Unexpected environment variable value `{var}={val}`. '
f'Expected one of {trues + falses}')
return False
def _get_pattern():
pattern = os.getenv('GALLERY_PATTERN')
    # If BUILD_GALLERY is falsy -> no build
    # If BUILD_GALLERY is truthy -> build
    # If BUILD_GALLERY is undefined:
    #     If GALLERY_PATTERN is defined -> build
    #     If GALLERY_PATTERN is not defined -> no build
if not _get_var('BUILD_GALLERY', default=False if pattern is None else True):
if pattern is not None:
print(
' --- WARNING: "GALLERY_PATTERN" is provided, but "BUILD_GALLERY" value is falsy. '
'Sphinx galleries are not built. To build galleries, set `BUILD_GALLERY=1`.'
)
return {
'ignore_pattern': r'\.py',
}
ret = {'filename_pattern': 'tutorial.py'}
if os.getenv('GALLERY_PATTERN'):
# See https://github.com/pytorch/tutorials/blob/cbf2238df0e78d84c15bd94288966d2f4b2e83ae/conf.py#L75-L83
ret['ignore_pattern'] = r'/(?!' + re.escape(os.getenv('GALLERY_PATTERN')) + r')[^/]+$'
return ret
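# Illustrative outcomes of _get_pattern() (the values below are hypothetical):
#   BUILD_GALLERY unset, GALLERY_PATTERN unset  -> {'ignore_pattern': r'\.py'} (nothing is built)
#   BUILD_GALLERY=1,     GALLERY_PATTERN unset  -> {'filename_pattern': 'tutorial.py'}
#   BUILD_GALLERY=1,     GALLERY_PATTERN=foo.py -> filename_pattern plus an ignore_pattern
#                                                  matching every file except foo.py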
sphinx_gallery_conf = {
'examples_dirs': [
'../../examples/tutorials',
],
'gallery_dirs': [
'tutorials',
],
**_get_pattern(),
'backreferences_dir': 'gen_modules/backreferences',
'first_notebook_cell': None,
'doc_module': ('torchaudio',),
}
autosummary_generate = True
napoleon_use_ivar = True
napoleon_numpy_docstring = False
napoleon_google_docstring = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Torchaudio'
copyright = '2018, Torchaudio Contributors'
author = 'Torchaudio Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
version = 'main '
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = 'main'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['*/index.rst']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'pytorch_project': 'audio',
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
'navigation_with_keys': True,
'analytics_id': 'UA-117752657-2',
}
html_logo = '_static/img/pytorch-logo-dark.svg'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css'
]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TorchAudiodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pytorch.tex', 'Torchaudio Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Torchaudio', 'Torchaudio Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Torchaudio', 'Torchaudio Documentation',
author, 'Torchaudio', 'Load audio files into pytorch tensors.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'torch': ('https://pytorch.org/docs/stable/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# type: (list, str, tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
# NOTE: using .pop() here to prevent a single type node to be
# inserted twice into the doctree, which leads to
# inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
typename = typename.replace('int', 'python:int')
typename = typename.replace('long', 'python:long')
typename = typename.replace('float', 'python:float')
typename = typename.replace('type', 'python:type')
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
|
#!/usr/bin/env python3
"""
Create a data preprocess pipeline that can be run with libtorchaudio
"""
import os
import argparse
import torch
import torchaudio
class Pipeline(torch.nn.Module):
"""Example audio process pipeline.
This example load waveform from a file then apply effects and save it to a file.
"""
def __init__(self, rir_path: str):
super().__init__()
rir, sample_rate = torchaudio.load(rir_path)
self.register_buffer('rir', rir)
self.rir_sample_rate: int = sample_rate
def forward(self, input_path: str, output_path: str):
torchaudio.sox_effects.init_sox_effects()
# 1. load audio
waveform, sample_rate = torchaudio.load(input_path)
# 2. Add background noise
alpha = 0.01
waveform = alpha * torch.randn_like(waveform) + (1 - alpha) * waveform
        # 3. Resample the RIR filter to match the audio sample rate
rir, _ = torchaudio.sox_effects.apply_effects_tensor(
self.rir, self.rir_sample_rate, effects=[["rate", str(sample_rate)]])
rir = rir / torch.norm(rir, p=2)
rir = torch.flip(rir, [1])
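        # Note: conv1d computes cross-correlation, so the RIR is time-reversed
        # above to perform a true convolution.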
# 4. Apply RIR filter
waveform = torch.nn.functional.pad(waveform, (rir.shape[1] - 1, 0))
waveform = torch.nn.functional.conv1d(waveform[None, ...], rir[None, ...])[0]
# Save
torchaudio.save(output_path, waveform, sample_rate)
def _create_jit_pipeline(rir_path, output_path):
module = torch.jit.script(Pipeline(rir_path))
print("*" * 40)
print("* Pipeline code")
print("*" * 40)
print()
print(module.code)
print("*" * 40)
module.save(output_path)
def _get_path(*paths):
return os.path.join(os.path.dirname(__file__), *paths)
def _parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--rir-path",
default=_get_path("..", "data", "rir.wav"),
help="Audio dara for room impulse response."
)
parser.add_argument(
"--output-path",
default=_get_path("pipeline.zip"),
help="Output JIT file."
)
return parser.parse_args()
def _main():
args = _parse_args()
_create_jit_pipeline(args.rir_path, args.output_path)
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python
"""Parse a directory contains VoxForge dataset.
Recursively search for "PROMPTS" file in the given directory and print out
`<ID>\\t<AUDIO_PATH>\\t<TRANSCRIPTION>`
example: python parse_voxforge.py voxforge/de/Helge-20150608-aku
de5-001\t/datasets/voxforge/de/guenter-20140214-afn/wav/de5-001.wav\tES SOLL ETWA FÜNFZIGTAUSEND VERSCHIEDENE SORTEN GEBEN
...
Dataset can be obtained from http://www.repository.voxforge1.org/downloads/de/Trunk/Audio/Main/16kHz_16bit/
""" # noqa: E501
import os
import argparse
from pathlib import Path
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'input_dir',
type=Path,
        help='Directory in which `PROMPTS` files are searched.'
)
return parser.parse_args()
def _parse_prompts(path):
base_dir = path.parent.parent
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if not line:
continue
id_, transcript = line.split(' ', maxsplit=1)
if not transcript:
continue
transcript = transcript.upper()
filename = id_.split('/')[-1]
audio_path = base_dir / 'wav' / f'{filename}.wav'
if os.path.exists(audio_path):
yield id_, audio_path, transcript
def _parse_directory(root_dir: Path):
for prompt_file in root_dir.glob('**/PROMPTS'):
try:
yield from _parse_prompts(prompt_file)
except UnicodeDecodeError:
pass
def _main():
args = _parse_args()
for id_, path, transcription in _parse_directory(args.input_dir):
print(f'{id_}\t{path}\t{transcription}')
if __name__ == '__main__':
_main()
|
import torch
class Decoder(torch.nn.Module):
def __init__(self, labels):
super().__init__()
self.labels = labels
def forward(self, logits: torch.Tensor) -> str:
"""Given a sequence logits over labels, get the best path string
Args:
logits (Tensor): Logit tensors. Shape `[num_seq, num_label]`.
Returns:
str: The resulting transcript
"""
best_path = torch.argmax(logits, dim=-1) # [num_seq,]
best_path = torch.unique_consecutive(best_path, dim=-1)
hypothesis = ''
for i in best_path:
char = self.labels[i]
if char in ['<s>', '<pad>']:
continue
if char == '|':
char = ' '
hypothesis += char
return hypothesis
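# A minimal usage sketch (the labels and logits below are hypothetical, for
# illustration only; a real label set comes from the model's dictionary):
#
#   >>> import torch
#   >>> labels = ['<s>', '<pad>', '</s>', '<unk>', '|', 'A', 'B', 'C']
#   >>> decoder = Decoder(labels)
#   >>> logits = torch.randn(50, len(labels))  # [num_seq, num_label]
#   >>> transcript = decoder(logits)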
|
#!/usr/bin/env python3
"""Build Speech Recognition pipeline based on fairseq's wav2vec2.0 and dump it to TorchScript file.
To use this script, you need `fairseq`.
"""
import os
import argparse
import logging
from typing import Tuple
import torch
from torch.utils.mobile_optimizer import optimize_for_mobile
import torchaudio
from torchaudio.models.wav2vec2.utils.import_fairseq import import_fairseq_model
import fairseq
from greedy_decoder import Decoder
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
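# `torch.quantization` was moved to `torch.ao.quantization` in PyTorch 1.10,
# so pick the import location based on the installed version.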
if TORCH_VERSION >= (1, 10):
import torch.ao.quantization as tq
else:
import torch.quantization as tq
_LG = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--model-file',
required=True,
help='Path to the input pretrained weight file.'
)
parser.add_argument(
'--dict-dir',
help=(
'Path to the directory in which `dict.ltr.txt` file is found. '
'Required only when the model is finetuned.'
)
)
parser.add_argument(
'--output-path',
        help='Path to the directory where the TorchScript-ed pipelines are saved.',
)
parser.add_argument(
'--test-file',
help='Path to a test audio file.',
)
parser.add_argument(
'--debug',
action='store_true',
help=(
'When enabled, individual components are separately tested '
'for the numerical compatibility and TorchScript compatibility.'
)
)
parser.add_argument(
'--quantize',
action='store_true',
help='Apply quantization to model.'
)
parser.add_argument(
'--optimize-for-mobile',
action='store_true',
        help='Apply optimization for mobile.'
)
return parser.parse_args()
class Loader(torch.nn.Module):
def forward(self, audio_path: str) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(audio_path)
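        # wav2vec 2.0 models are trained on 16 kHz audio, so resample
        # anything else before returning.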
if sample_rate != 16000:
waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
return waveform
class Encoder(torch.nn.Module):
def __init__(self, encoder: torch.nn.Module):
super().__init__()
self.encoder = encoder
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
result, _ = self.encoder(waveform)
return result[0]
def _get_decoder():
labels = [
"<s>",
"<pad>",
"</s>",
"<unk>",
"|",
"E",
"T",
"A",
"O",
"N",
"I",
"H",
"S",
"R",
"D",
"L",
"U",
"M",
"W",
"C",
"F",
"G",
"Y",
"P",
"B",
"V",
"K",
"'",
"X",
"J",
"Q",
"Z",
]
return Decoder(labels)
def _load_fairseq_model(input_file, data_dir=None):
overrides = {}
if data_dir:
overrides['data'] = data_dir
model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[input_file], arg_overrides=overrides
)
model = model[0]
return model
def _get_model(model_file, dict_dir):
original = _load_fairseq_model(model_file, dict_dir)
model = import_fairseq_model(original.w2v_encoder)
return model
def _main():
args = _parse_args()
_init_logging(args.debug)
loader = Loader()
model = _get_model(args.model_file, args.dict_dir).eval()
encoder = Encoder(model)
decoder = _get_decoder()
_LG.info(encoder)
if args.quantize:
_LG.info('Quantizing the model')
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
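        # `__prepare_scriptable__` detaches the weight-norm hook from the
        # positional convolution so that the quantized module remains
        # TorchScript-compatible.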
encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
# test
if args.test_file:
_LG.info('Testing with %s', args.test_file)
waveform = loader(args.test_file)
emission = encoder(waveform)
transcript = decoder(emission)
_LG.info(transcript)
torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
scripted = torch.jit.script(encoder)
if args.optimize_for_mobile:
scripted = optimize_for_mobile(scripted)
scripted.save(os.path.join(args.output_path, 'encoder.zip'))
def _init_logging(debug=False):
level = logging.DEBUG if debug else logging.INFO
format_ = (
'%(message)s' if not debug else
'%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
)
logging.basicConfig(level=level, format=format_)
if __name__ == '__main__':
_main()
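# A minimal usage sketch for the saved artifacts (paths are hypothetical):
#
#   loader = torch.jit.load('out/loader.zip')
#   encoder = torch.jit.load('out/encoder.zip')
#   decoder = torch.jit.load('out/decoder.zip')
#   print(decoder(encoder(loader('test.wav'))))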
|
#!/usr/bin/env python3
"""Parse a directory contains Librispeech dataset.
Recursively search for "*.trans.txt" file in the given directory and print out
`<ID>\\t<AUDIO_PATH>\\t<TRANSCRIPTION>`
example: python parse_librispeech.py LibriSpeech/test-clean
1089-134691-0000\t/LibriSpeech/test-clean/1089/134691/1089-134691-0000.flac\tHE COULD WAIT NO LONGER
...
Dataset can be obtained from https://www.openslr.org/12
"""
import argparse
from pathlib import Path
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawTextHelpFormatter,
)
parser.add_argument(
'input_dir',
type=Path,
help='Directory where `*.trans.txt` files are searched.'
)
return parser.parse_args()
def _parse_transcript(path):
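    # Each line of a `*.trans.txt` file is `<ID> <TRANSCRIPTION>`, e.g.
    # `1089-134691-0000 HE COULD WAIT NO LONGER`.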
with open(path) as trans_fileobj:
for line in trans_fileobj:
line = line.strip()
if line:
yield line.split(' ', maxsplit=1)
def _parse_directory(root_dir: Path):
for trans_file in root_dir.glob('**/*.trans.txt'):
trans_dir = trans_file.parent
for id_, transcription in _parse_transcript(trans_file):
audio_path = trans_dir / f'{id_}.flac'
yield id_, audio_path, transcription
def _main():
args = _parse_args()
for id_, path, transcription in _parse_directory(args.input_dir):
print(f'{id_}\t{path}\t{transcription}')
if __name__ == '__main__':
_main()
|
#!/usr/bin/env python3
import argparse
import logging
import os
from typing import Tuple
import torch
import torchaudio
from torchaudio.models.wav2vec2.utils.import_huggingface import import_huggingface_model
from greedy_decoder import Decoder
TORCH_VERSION: Tuple[int, ...] = tuple(int(x) for x in torch.__version__.split(".")[:2])
if TORCH_VERSION >= (1, 10):
import torch.ao.quantization as tq
else:
import torch.quantization as tq
_LG = logging.getLogger(__name__)
def _parse_args():
parser = argparse.ArgumentParser(
description=__doc__,
)
parser.add_argument(
'--model',
required=True,
help='Path to the input pretrained weight file.'
)
parser.add_argument(
'--output-path',
        help='Path to the directory where the TorchScript-ed pipelines are saved.',
)
parser.add_argument(
'--test-file',
help='Path to a test audio file.',
)
parser.add_argument(
'--quantize',
action='store_true',
help='Quantize the model.',
)
parser.add_argument(
'--debug',
action='store_true',
help=(
'When enabled, individual components are separately tested '
'for the numerical compatibility and TorchScript compatibility.'
)
)
return parser.parse_args()
class Loader(torch.nn.Module):
def forward(self, audio_path: str) -> torch.Tensor:
waveform, sample_rate = torchaudio.load(audio_path)
if sample_rate != 16000:
waveform = torchaudio.functional.resample(waveform, float(sample_rate), 16000.)
return waveform
class Encoder(torch.nn.Module):
def __init__(self, encoder: torch.nn.Module):
super().__init__()
self.encoder = encoder
def forward(self, waveform: torch.Tensor) -> torch.Tensor:
result, _ = self.encoder(waveform)
return result[0]
def _get_model(model_id):
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
tokenizer = Wav2Vec2Processor.from_pretrained(model_id).tokenizer
labels = [k for k, v in sorted(tokenizer.get_vocab().items(), key=lambda kv: kv[1])]
original = Wav2Vec2ForCTC.from_pretrained(model_id)
model = import_huggingface_model(original)
return model.eval(), labels
def _get_decoder(labels):
return Decoder(labels)
def _main():
args = _parse_args()
_init_logging(args.debug)
_LG.info('Loading model: %s', args.model)
model, labels = _get_model(args.model)
_LG.info('Labels: %s', labels)
_LG.info('Building pipeline')
loader = Loader()
encoder = Encoder(model)
decoder = _get_decoder(labels)
_LG.info(encoder)
if args.quantize:
_LG.info('Quantizing the model')
model.encoder.transformer.pos_conv_embed.__prepare_scriptable__()
encoder = tq.quantize_dynamic(
encoder, qconfig_spec={torch.nn.Linear}, dtype=torch.qint8)
_LG.info(encoder)
# test
if args.test_file:
_LG.info('Testing with %s', args.test_file)
waveform = loader(args.test_file)
emission = encoder(waveform)
transcript = decoder(emission)
_LG.info(transcript)
torch.jit.script(loader).save(os.path.join(args.output_path, 'loader.zip'))
torch.jit.script(encoder).save(os.path.join(args.output_path, 'encoder.zip'))
torch.jit.script(decoder).save(os.path.join(args.output_path, 'decoder.zip'))
def _init_logging(debug=False):
level = logging.DEBUG if debug else logging.INFO
format_ = (
'%(message)s' if not debug else
'%(asctime)s: %(levelname)7s: %(funcName)10s: %(message)s'
)
logging.basicConfig(level=level, format=format_)
if __name__ == '__main__':
_main()
|