python_code (stringlengths 0–679k) | repo_name (stringlengths 9–41) | file_path (stringlengths 6–149)
---|---|---|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.tts.parts.utils.helpers import regulate_len, sort_tensor, unsort_tensor
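# Helper: builds per-token durations (group_size for every token, with a single duration bumped up by one
# in item 0 and down by one in item 2), random integer encoder-like features, and per-item lengths
# for the duration-regulation tests below.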
def sample_duration_input(max_length=64, group_size=2, batch_size=3):
generator = torch.Generator()
generator.manual_seed(0)
lengths = torch.randint(max_length // 4, max_length - 7, (batch_size,), generator=generator)
durs = torch.ones(batch_size, max_length) * group_size
durs[0, lengths[0]] += 1
durs[2, lengths[2]] -= 1
enc = torch.randint(16, 64, (batch_size, max_length, 17))
return durs, enc, lengths
@pytest.mark.unit
def test_sort_unsort():
durs_in, enc_in, dur_lens = sample_duration_input(batch_size=13)
print("In: ", enc_in)
sorted_enc, sorted_len, sorted_ids = sort_tensor(enc_in, dur_lens)
unsorted_enc = unsort_tensor(sorted_enc, sorted_ids)
print("Out: ", unsorted_enc)
assert torch.all(unsorted_enc == enc_in)
@pytest.mark.unit
def test_regulate_len():
group_size = 2
durs_in, enc_in, dur_lens = sample_duration_input(group_size=group_size)
enc_out, lens_out = regulate_len(durs_in, enc_in, group_size=group_size, dur_lens=dur_lens)
# make sure each entry of lens_out is rounded to a multiple of group_size
sum_diff = lens_out - torch.mul(lens_out // group_size, group_size)
assert sum_diff.sum(dim=0) == 0
# make sure each round-up adds strictly less than group_size
diff = lens_out - durs_in.sum(dim=1)
assert torch.max(diff) < group_size
| NeMo-main | tests/collections/tts/test_helpers.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.tts.models import WaveGlowModel
from nemo.core.classes import typecheck
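# Minimal WaveGlow module and mel-spectrogram preprocessor configs used to build the model under test.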
mcfg = DictConfig(
{
"_target_": "nemo.collections.tts.modules.waveglow.WaveGlowModule",
"n_flows": 12,
"n_group": 8,
"n_mel_channels": 80,
"n_early_every": 4,
"n_early_size": 2,
"n_wn_channels": 512,
"n_wn_layers": 8,
"wn_kernel_size": 3,
}
)
pcfg = DictConfig(
{
"_target_": "nemo.collections.asr.parts.preprocessing.features.FilterbankFeatures",
"dither": 0.0,
"nfilt": 80,
"stft_conv": False,
}
)
wcfg = DictConfig({"waveglow": mcfg, "sigma": 1.0, "preprocessor": pcfg,})
def input_example(sz):
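# mel is a dummy spectrogram batch (80 mel bins, sz frames); z is the Gaussian latent,
# sized assuming 256 audio samples per spectrogram frame folded into n_group=8 groups (see mcfg above).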
mel = torch.randn(1, 1, 80, sz).cuda().half()
z = torch.randn(1, 8, sz * 256 // 8, 1).cuda().half()
return (
mel,
z,
)
def taco2wg(spec, z):
spec = spec.permute(0, 3, 2, 1).contiguous()
return spec.view(spec.size(0), spec.size(1), -1), z.view(z.size(0), z.size(1), -1)
# Wrapper to convert Tacotron2-style spectrogram output to WaveGlow input and run inference
def forward_wrapper(self, spec, z=None):
spec, z = taco2wg(spec, z)
audio = self.waveglow.norm_dist_to_audio(spec=spec, sigma=1.0, z=z)
return audio
class TestWaveGlow:
@pytest.mark.pleasefixme
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_export_to_onnx(self):
model = WaveGlowModel(wcfg)
model = model.cuda().half()
typecheck.set_typecheck_enabled(enabled=False)
with tempfile.TemporaryDirectory() as tmpdir, model.nemo_infer():
tmp_file_name = os.path.join(tmpdir, "waveglow.onnx")
n_mels = 80
# Test export.
inp = input_example(n_mels)
inp1 = taco2wg(*inp)
inp2 = inp1
res1 = model.waveglow(*inp1)
res2 = model.waveglow(*inp2)
assert torch.allclose(res1, res2, rtol=0.01, atol=0.1)
WaveGlowModel.forward_for_export = forward_wrapper
model.export(
tmp_file_name, input_example=inp, verbose=False, check_trace=False, do_constant_folding=True,
)
if __name__ == "__main__":
t = TestWaveGlow()
t.test_export_to_onnx()
| NeMo-main | tests/collections/tts/test_waveglow.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from pathlib import Path
import pytest
import torch
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import EnglishPhonemesTokenizer
from nemo.collections.tts.data.dataset import TTSDataset
from nemo.collections.tts.g2p.models.en_us_arpabet import EnglishG2p
from nemo.collections.tts.parts.utils.tts_dataset_utils import get_base_dir
class TestTTSDataset:
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
def test_dataset(self, test_data_dir):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
sup_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/sup')
dataset = TTSDataset(
manifest_filepath=manifest_path,
sample_rate=22050,
sup_data_types=["pitch"],
sup_data_path=sup_path,
text_tokenizer=EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
space=' ',
apostrophe=True,
pad_with_space=True,
g2p=EnglishG2p(),
),
)
dataloader = torch.utils.data.DataLoader(dataset, 2, collate_fn=dataset._collate_fn)
data, _, _, _, _, _ = next(iter(dataloader))
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
def test_raise_exception_on_not_supported_sup_data_types(self, test_data_dir):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
sup_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/sup')
with pytest.raises(NotImplementedError):
dataset = TTSDataset(
manifest_filepath=manifest_path,
sample_rate=22050,
sup_data_types=["not_supported_sup_data_type"],
sup_data_path=sup_path,
text_tokenizer=EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
space=' ',
apostrophe=True,
pad_with_space=True,
g2p=EnglishG2p(),
),
)
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
def test_raise_exception_on_not_supported_window(self, test_data_dir):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
sup_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/sup')
with pytest.raises(NotImplementedError):
dataset = TTSDataset(
manifest_filepath=manifest_path,
sample_rate=22050,
sup_data_types=["pitch"],
sup_data_path=sup_path,
window="not_supported_window",
text_tokenizer=EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
space=' ',
apostrophe=True,
pad_with_space=True,
g2p=EnglishG2p(),
),
)
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
@pytest.mark.parametrize("sup_data_type", ["voiced_mask", "p_voiced"])
def test_raise_exception_on_missing_pitch_sup_data_type_if_use_voiced(self, test_data_dir, sup_data_type):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
sup_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/sup')
with pytest.raises(ValueError):
dataset = TTSDataset(
manifest_filepath=manifest_path,
sample_rate=22050,
sup_data_types=[sup_data_type],
sup_data_path=sup_path,
window="hann",
text_tokenizer=EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
space=' ',
apostrophe=True,
pad_with_space=True,
g2p=EnglishG2p(),
),
)
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
@pytest.mark.parametrize(
"sup_data_types, output_indices",
[
(["p_voiced", "pitch", "voiced_mask"], [-4, -3, -1]),
(["voiced_mask", "pitch"], [-3, -2]),
(["pitch", "p_voiced"], [-3, -1]),
(["pitch"], [-2]),
],
)
def test_save_voiced_items_if_pt_file_not_exist(self, test_data_dir, sup_data_types, output_indices, tmp_path):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
sup_path = tmp_path / "sup_data"
print(f"sup_path={sup_path}")
dataset = TTSDataset(
manifest_filepath=manifest_path,
sample_rate=22050,
sup_data_types=sup_data_types,
sup_data_path=sup_path,
text_tokenizer=EnglishPhonemesTokenizer(
punct=True,
stresses=True,
chars=True,
space=' ',
apostrophe=True,
pad_with_space=True,
g2p=EnglishG2p(),
),
)
# load manifest
audio_filepaths = []
with open(manifest_path, 'r', encoding="utf-8") as fjson:
for line in fjson:
audio_filepaths.append(json.loads(line)["audio_filepath"])
base_data_dir = get_base_dir(audio_filepaths)
dataloader = torch.utils.data.DataLoader(dataset=dataset, batch_size=1, collate_fn=dataset._collate_fn)
for batch, audio_filepath in zip(dataloader, audio_filepaths):
rel_audio_path = Path(audio_filepath).relative_to(base_data_dir).with_suffix("")
rel_audio_path_as_text_id = str(rel_audio_path).replace("/", "_")
for sup_data_type, output_index in zip(sup_data_types, output_indices):
sup_data = batch[output_index]
sup_data = sup_data.squeeze(0)
assert sup_data is not None
assert torch.equal(sup_data, torch.load(f"{sup_path}/{sup_data_type}/{rel_audio_path_as_text_id}.pt"))
if sup_data_type == "pitch":
pitch_lengths = batch[output_index + 1]
pitch_lengths = pitch_lengths.squeeze(0)
assert pitch_lengths is not None
# test pitch, voiced_mask, and p_voiced do not have the same values.
if len(sup_data_types) == 3:
x = torch.load(f"{sup_path}/{sup_data_types[0]}/{rel_audio_path_as_text_id}.pt")
y = torch.load(f"{sup_path}/{sup_data_types[1]}/{rel_audio_path_as_text_id}.pt")
z = torch.load(f"{sup_path}/{sup_data_types[2]}/{rel_audio_path_as_text_id}.pt")
assert not torch.equal(x, y)
assert not torch.equal(x, z)
| NeMo-main | tests/collections/tts/test_torch_tts.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.tts.losses.audio_codec_loss import MaskedMAELoss, MaskedMSELoss
class TestAudioCodecLoss:
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_masked_loss_l1(self):
loss_fn = MaskedMAELoss()
target = torch.tensor([[[1.0], [2.0], [0.0]], [[3.0], [0.0], [0.0]]]).transpose(1, 2)
predicted = torch.tensor([[[0.5], [1.0], [0.0]], [[4.5], [0.0], [0.0]]]).transpose(1, 2)
target_len = torch.tensor([2, 1])
loss = loss_fn(predicted=predicted, target=target, target_len=target_len)
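# Expected value: item 0 averages |0.5 - 1.0| + |1.0 - 2.0| over its 2 valid frames (0.75),
# item 1 is |4.5 - 3.0| over 1 valid frame (1.5); the batch mean is (0.75 + 1.5) / 2 = 1.125.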
assert loss == 1.125
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_masked_loss_l2(self):
loss_fn = MaskedMSELoss()
target = torch.tensor([[[1.0], [2.0], [4.0]], [[3.0], [0.0], [0.0]]]).transpose(1, 2)
predicted = torch.tensor([[[0.5], [1.0], [4.0]], [[4.5], [0.0], [0.0]]]).transpose(1, 2)
target_len = torch.tensor([3, 1])
loss = loss_fn(predicted=predicted, target=target, target_len=target_len)
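# Expected value: item 0 averages 0.25 + 1.0 + 0.0 over its 3 valid frames, item 1 is 2.25 over 1 frame;
# the batch mean is (1.25 / 3 + 2.25) / 2 = 4 / 3.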
assert loss == (4 / 3)
| NeMo-main | tests/collections/tts/losses/test_audio_codec_loss.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unicodedata
import pytest
from nemo.collections.tts.g2p.models.i18n_ipa import IpaG2p
from nemo.collections.tts.g2p.utils import GRAPHEME_CASE_LOWER, GRAPHEME_CASE_MIXED, GRAPHEME_CASE_UPPER
class TestIpaG2p:
PHONEME_DICT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "phoneme_dict")
PHONEME_DICT_PATH_DE = os.path.join(PHONEME_DICT_DIR, "test_dict_de.txt")
PHONEME_DICT_PATH_EN = os.path.join(PHONEME_DICT_DIR, "test_dict_en.txt")
PHONEME_DICT_PATH_ES = os.path.join(PHONEME_DICT_DIR, "test_dict_es.txt")
GRAPHEME_PREFIX = "#"
@staticmethod
def _create_g2p(
phoneme_dict=PHONEME_DICT_PATH_EN,
locale=None,
apply_to_oov_word=lambda x: x,
use_chars=False,
phoneme_probability=None,
grapheme_case=GRAPHEME_CASE_UPPER,
grapheme_prefix="",
):
return IpaG2p(
phoneme_dict,
locale=locale,
apply_to_oov_word=apply_to_oov_word,
use_chars=use_chars,
phoneme_probability=phoneme_probability,
grapheme_case=grapheme_case,
grapheme_prefix=grapheme_prefix,
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_dict_with_phonemes(self):
# fmt: off
expected_symbols = {
'h', 'ə', 'ˈ', 'ɫ', 'o', 'ʊ',
'ˈ', 'w', 'ɝ', 'ɫ', 'd',
'ˈ', 'l', 'ɛ', 'd',
'ˈ', 'l', 'i', 'd',
'ɛ', 'n', 'ˈ', 'v', 'ɪ', 'd', 'i', 'ə',
'ˈ', 'd', 'ʒ', 'o', 'ʊ', 'n', 'z',
'ˈ', 'ɛ', 'ɹ', 'ˌ', 'p', 'ɔ', 'ɹ', 't',
}
# fmt: on
g2p = self._create_g2p()
assert expected_symbols == g2p.symbols
assert len(g2p.phoneme_dict["HELLO"]) == 1
assert len(g2p.phoneme_dict["WORLD"]) == 1
assert len(g2p.phoneme_dict["LEAD"]) == 2
assert len(g2p.phoneme_dict["NVIDIA"]) == 1
assert len(g2p.phoneme_dict["JONES"]) == 1
assert len(g2p.phoneme_dict["AIRPORT"]) == 1
assert g2p.phoneme_dict["HELLO"][0] == list("həˈɫoʊ")
assert g2p.phoneme_dict["WORLD"][0] == list("ˈwɝɫd")
assert g2p.phoneme_dict["LEAD"] == [list("ˈlɛd"), list("ˈlid")]
assert g2p.phoneme_dict["NVIDIA"][0] == list("ɛnˈvɪdiə")
assert g2p.phoneme_dict["JONES"][0] == list("ˈdʒoʊnz")
assert g2p.phoneme_dict["AIRPORT"][0] == list("ˈɛɹˌpɔɹt")
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_dict_with_graphemes_and_phonemes(self):
# fmt: off
expected_symbols = {
f"{self.GRAPHEME_PREFIX}{char}"
for char in {
'H', 'E', 'L', 'L', 'O',
'W', 'O', 'R', 'L', 'D',
'L', 'E', 'A', 'D',
'N', 'V', 'I', 'D', 'I', 'A',
'J', 'O', 'N', 'E', 'S',
'A', 'I', 'R', 'P', 'O', 'R', 'T',
}
}.union(
{
'h', 'ə', 'ˈ', 'ɫ', 'o', 'ʊ',
'ˈ', 'w', 'ɝ', 'ɫ', 'd',
'ˈ', 'l', 'ɛ', 'd',
'ˈ', 'l', 'i', 'd',
'ɛ', 'n', 'ˈ', 'v', 'ɪ', 'd', 'i', 'ə',
'ˈ', 'd', 'ʒ', 'o', 'ʊ', 'n', 'z',
'ˈ', 'ɛ', 'ɹ', 'ˌ', 'p', 'ɔ', 'ɹ', 't',
}
)
# fmt: on
g2p = self._create_g2p(use_chars=True, grapheme_prefix=self.GRAPHEME_PREFIX)
assert expected_symbols == g2p.symbols
assert len(g2p.phoneme_dict["HELLO"]) == 1
assert len(g2p.phoneme_dict["WORLD"]) == 1
assert len(g2p.phoneme_dict["LEAD"]) == 2
assert len(g2p.phoneme_dict["NVIDIA"]) == 1
assert len(g2p.phoneme_dict["JONES"]) == 1
assert len(g2p.phoneme_dict["AIRPORT"]) == 1
assert g2p.phoneme_dict["HELLO"][0] == list("həˈɫoʊ")
assert g2p.phoneme_dict["WORLD"][0] == list("ˈwɝɫd")
assert g2p.phoneme_dict["LEAD"] == [list("ˈlɛd"), list("ˈlid")]
assert g2p.phoneme_dict["NVIDIA"][0] == list("ɛnˈvɪdiə")
assert g2p.phoneme_dict["JONES"][0] == list("ˈdʒoʊnz")
assert g2p.phoneme_dict["AIRPORT"][0] == list("ˈɛɹˌpɔɹt")
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_replace_symbols(self):
g2p = self._create_g2p(use_chars=True, grapheme_prefix=self.GRAPHEME_PREFIX)
# fmt: off
# Get full vocab without 'i' (phoneme) and 'J' (grapheme)
fixed_symbols = {
f"{self.GRAPHEME_PREFIX}{char}"
for char in {
'H', 'E', 'L', 'L', 'O',
'W', 'O', 'R', 'L', 'D',
'L', 'E', 'A', 'D',
'N', 'V', 'I', 'D', 'I', 'A',
'O', 'N', 'E', 'S',
'A', 'I', 'R', 'P', 'O', 'R', 'T',
}
}.union(
{
'h', 'ə', 'ˈ', 'ɫ', 'o', 'ʊ',
'ˈ', 'w', 'ɝ', 'ɫ', 'd',
'ˈ', 'l', 'ɛ', 'd',
'ˈ', 'l', 'd',
'ɛ', 'n', 'ˈ', 'v', 'ɪ', 'd', 'ə',
'ˈ', 'd', 'ʒ', 'o', 'ʊ', 'n', 'z',
'ˈ', 'ɛ', 'ɹ', 'ˌ', 'p', 'ɔ', 'ɹ', 't',
}
)
# fmt: on
assert len(g2p.phoneme_dict["LEAD"]) == 2
assert len(g2p.phoneme_dict["JONES"]) == 1
assert len(g2p.phoneme_dict["NVIDIA"]) == 1
# Test with keep_alternate set to True (default)
g2p.replace_symbols(symbols=fixed_symbols, keep_alternate=True)
# Check that the alternate pron of "LEAD" was kept
assert len(g2p.phoneme_dict["LEAD"]) == 1
assert g2p.phoneme_dict["LEAD"][0] == list("ˈlɛd")
# Check that filtering was done for unique entries, both grapheme and phoneme
assert "JONES" not in g2p.phoneme_dict
assert "NVIDIA" not in g2p.phoneme_dict
# Check that other words weren't affected
assert g2p.phoneme_dict["HELLO"][0] == list("həˈɫoʊ")
assert g2p.phoneme_dict["WORLD"][0] == list("ˈwɝɫd")
assert g2p.phoneme_dict["AIRPORT"][0] == list("ˈɛɹˌpɔɹt")
# Test with keep_alternate set to False
g2p = self._create_g2p(use_chars=True, grapheme_prefix=self.GRAPHEME_PREFIX)
g2p.replace_symbols(symbols=fixed_symbols, keep_alternate=False)
# Check that both "LEAD" entries were removed
assert "LEAD" not in g2p.phoneme_dict
# Other checks remain the same
assert "JONES" not in g2p.phoneme_dict
assert "NVIDIA" not in g2p.phoneme_dict
assert g2p.phoneme_dict["HELLO"][0] == list("həˈɫoʊ")
assert g2p.phoneme_dict["WORLD"][0] == list("ˈwɝɫd")
assert g2p.phoneme_dict["AIRPORT"][0] == list("ˈɛɹˌpɔɹt")
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call(self):
input_text = "Hello world."
expected_output = [char for char in "həˈɫoʊ ˈwɝɫd."]
g2p = self._create_g2p()
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_file_or_object_dict_type(self):
input_text = "Hello world."
expected_output = [char for char in "həˈɫoʊ ˈwɝɫd."]
phoneme_dict = {"HELLO": ["həˈɫoʊ"], "WORLD": ["ˈwɝɫd"], "LEAD": ["ˈlɛd", "ˈlid"], "NVIDIA": ["ɛnˈvɪdiə"]}
g2p_file = self._create_g2p()
g2p_dict = self._create_g2p(phoneme_dict=phoneme_dict)
phonemes_file = g2p_file(input_text)
phonemes_dict = g2p_dict(input_text)
assert phonemes_dict == expected_output
assert phonemes_file == phonemes_dict
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_oov_word(self):
input_text = "Hello Kitty!"
expected_output = list("həˈɫoʊ") + [" "] + list("KITTY") + ["!"]
g2p = self._create_g2p()
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_oov_func(self):
input_text = "Hello Kitty!"
expected_output = list("həˈɫoʊ") + [" "] + list("test!")
g2p = self._create_g2p(apply_to_oov_word=lambda x: "test")
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_uppercase_grapheme_only(self):
input_text = "Hello world."
expected_output = [self.GRAPHEME_PREFIX + char if char not in " ." else char for char in input_text.upper()]
g2p = self._create_g2p(
use_chars=True,
phoneme_probability=0.0,
grapheme_case=GRAPHEME_CASE_UPPER,
grapheme_prefix=self.GRAPHEME_PREFIX,
)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_lowercase_grapheme_only(self):
input_text = "Hello world."
expected_output = [self.GRAPHEME_PREFIX + char if char not in " ." else char for char in input_text.lower()]
g2p = self._create_g2p(
use_chars=True,
phoneme_probability=0.0,
grapheme_case=GRAPHEME_CASE_LOWER,
grapheme_prefix=self.GRAPHEME_PREFIX,
)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_mixed_case_grapheme_only(self):
input_text = "Hello world."
expected_output = [self.GRAPHEME_PREFIX + char if char not in " ." else char for char in input_text]
g2p = self._create_g2p(
use_chars=True,
phoneme_probability=0.0,
grapheme_case=GRAPHEME_CASE_MIXED,
grapheme_prefix=self.GRAPHEME_PREFIX,
)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_uppercase_grapheme_and_get_phoneme_only(self):
input_text = "Hello world."
expected_output = ["h", "ə", "ˈ", "ɫ", "o", "ʊ", " ", "ˈ", "w", "ɝ", "ɫ", "d", "."]
g2p = self._create_g2p(use_chars=True, phoneme_probability=1.0, grapheme_case=GRAPHEME_CASE_UPPER)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_lowercase_grapheme_and_get_phoneme_only(self):
input_text = "Hello world."
expected_output = ["h", "ə", "ˈ", "ɫ", "o", "ʊ", " ", "ˈ", "w", "ɝ", "ɫ", "d", "."]
g2p = self._create_g2p(use_chars=True, phoneme_probability=1.0, grapheme_case=GRAPHEME_CASE_LOWER)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_mixed_case_grapheme_and_get_phoneme_only(self):
input_text = "Hello world."
expected_output = ["h", "ə", "ˈ", "ɫ", "o", "ʊ", " ", "ˈ", "w", "ɝ", "ɫ", "d", "."]
g2p = self._create_g2p(use_chars=True, phoneme_probability=1.0, grapheme_case=GRAPHEME_CASE_MIXED)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_with_escaped_characters(self):
input_text = "Hello |wo rld|."
expected_output = [
"h",
"ə",
"ˈ",
"ɫ",
"o",
"ʊ",
" ",
f"{self.GRAPHEME_PREFIX}wo",
f"{self.GRAPHEME_PREFIX}rld",
".",
]
g2p = self._create_g2p(grapheme_prefix=self.GRAPHEME_PREFIX)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_instantiate_unsupported_locale(self):
with pytest.raises(ValueError, match="Unsupported locale"):
self._create_g2p(locale="en-USA")
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_de_de(self):
input_text = "Hallo „welt“" + " " + "Weg" + " " + unicodedata.normalize("NFD", "Abendröte!" + " " + "weg")
expected_output = (
list("hˈaloː „vˈɛlt“")
+ [" "]
+ list("vˈeːk")
+ [" "]
+ [f"{self.GRAPHEME_PREFIX}{char}" for char in unicodedata.normalize("NFC", "Abendröte")]
+ ["!"]
+ [" "]
+ list("vˈɛk")
)
g2p = self._create_g2p(
use_chars=True,
phoneme_dict=self.PHONEME_DICT_PATH_DE,
locale="de-DE",
grapheme_case=GRAPHEME_CASE_MIXED,
grapheme_prefix=self.GRAPHEME_PREFIX,
apply_to_oov_word=None,
)
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_en_us(self):
input_text = "Hello NVIDIA'S airport's Jones's airports worlds Kitty!"
g2p_upper = self._create_g2p(locale="en-US", grapheme_case=GRAPHEME_CASE_UPPER)
expected_output_upper = [char for char in "həˈɫoʊ ɛnˈvɪdiəz ˈɛɹˌpɔɹts ˈdʒoʊnzɪz ˈɛɹˌpɔɹts ˈwɝɫdz KITTY!"]
g2p_lower = self._create_g2p(
locale="en-US",
grapheme_case=GRAPHEME_CASE_LOWER,
grapheme_prefix=self.GRAPHEME_PREFIX,
apply_to_oov_word=None,
)
expected_output_lower = (
[char for char in "həˈɫoʊ ɛnˈvɪdiəz ˈɛɹˌpɔɹts ˈdʒoʊnzɪz ˈɛɹˌpɔɹts ˈwɝɫdz"]
+ [" "]
+ [f"{self.GRAPHEME_PREFIX}{char}" for char in "kitty"]
+ ["!"]
)
g2p_mixed = self._create_g2p(
locale="en-US",
grapheme_case=GRAPHEME_CASE_MIXED,
grapheme_prefix=self.GRAPHEME_PREFIX,
apply_to_oov_word=None,
)
expected_output_mixed = (
[char for char in "həˈɫoʊ ɛnˈvɪdiəz ˈɛɹˌpɔɹts ˈdʒoʊnzɪz ˈɛɹˌpɔɹts ˈwɝɫdz"]
+ [" "]
+ [f"{self.GRAPHEME_PREFIX}{char}" for char in "kitty"]
+ ["!"]
)
for g2p, expected_output in zip(
[g2p_upper, g2p_lower, g2p_mixed], [expected_output_upper, expected_output_lower, expected_output_mixed]
):
phonemes = g2p(input_text)
assert phonemes == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_forward_call_es_es(self):
input_text = "¿Hola mundo, amigo?"
expected_output = [char for char in "¿ˈola mˈundo, AMIGO?"]
g2p = self._create_g2p(phoneme_dict=self.PHONEME_DICT_PATH_ES, locale="es-ES")
phonemes = g2p(input_text)
assert phonemes == expected_output
| NeMo-main | tests/collections/tts/g2p/test_modules.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo.collections.tts.g2p.utils import get_heteronym_spans
class TestG2pDataUtils:
@staticmethod
def _create_expected_output(words):
return [([word], False) for word in words]
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_get_heteronym_spans(self):
supported_heteronyms = ["live", "read", "protest", "diffuse", "desert"]
sentences = [
"I live in California. I READ a book. Only people who have already gained something are willing to protest."
" He reads a book!",
"Yesterday, I read a book.",
"He read a book last night and pre-diffuse and LivE-post and pre-desert-post.",
"the soldier deserted the desert in desert.",
]
expected_start_end = [
[(2, 6), (24, 28), (98, 105)],
[(13, 17)],
[(3, 7), (34, 41), (46, 50), (64, 70)],
[(25, 31), (35, 41)],
]
expected_heteronyms = [
["live", "read", "protest"],
['read'],
['read', 'diffuse', 'live', 'desert'],
['desert', 'desert'],
]
out_start_end, out_heteronyms = get_heteronym_spans(sentences, supported_heteronyms)
assert out_start_end == expected_start_end, "start-end spans do not match"
assert out_heteronyms == expected_heteronyms, "heteronym spans do not match"
| NeMo-main | tests/collections/tts/g2p/data/test_g2p_data_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained AlignerModel NGC checkpoints and generating Mel-spectrograms.
In general, each test for a single model takes ~24 seconds on an NVIDIA RTX A6000.
"""
import pytest
import torch
from nemo.collections.tts.models import AlignerModel
available_models = [model.pretrained_model_name for model in AlignerModel.list_available_models()]
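# `get_language_id_from_pretrained_model_name` and `audio_text_pair_example_english` are assumed to be
# fixtures provided by the test suite's shared conftest.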
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = AlignerModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, audio_text_pair_example_english):
model, _ = pretrained_model
audio, audio_len, text_raw = audio_text_pair_example_english
# Generate mel-spectrogram
spec, spec_len = model.preprocessor(input_signal=audio, length=audio_len)
# Process text
text_normalized = model.normalizer.normalize(text_raw, punct_post_process=True)
text_tokens = model.tokenizer(text_normalized)
text = torch.tensor(text_tokens, device=spec.device).unsqueeze(0).long()
text_len = torch.tensor(len(text_tokens), device=spec.device).unsqueeze(0).long()
# Run the Aligner
_, _ = model(spec=spec, spec_len=spec_len, text=text, text_len=text_len)
| NeMo-main | tests/collections/tts/models/test_aligner.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained UnivNet NGC checkpoints and converting Mel-spectrograms into
audio. In general, each test for a single model takes ~2 seconds on an NVIDIA RTX A6000.
"""
import pytest
from nemo.collections.tts.models import UnivNetModel
available_models = [model.pretrained_model_name for model in UnivNetModel.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = UnivNetModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, mel_spec_example):
model, _ = pretrained_model
_ = model.convert_spectrogram_to_audio(spec=mel_spec_example)
| NeMo-main | tests/collections/tts/models/test_univNet.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained MixerTTS NGC checkpoints and generating Mel-spectrograms.
In general, each test for a single model takes ~25 seconds on an NVIDIA RTX A6000.
"""
import pytest
from nemo.collections.tts.models import MixerTTSModel
available_models = [model.pretrained_model_name for model in MixerTTSModel.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = MixerTTSModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, language_specific_text_example):
model, language_id = pretrained_model
text = language_specific_text_example[language_id]
parsed_text = model.parse(text)
_ = model.generate_spectrogram(tokens=parsed_text, raw_texts=[text])
| NeMo-main | tests/collections/tts/models/test_mixerTTS.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained Tacotron2 NGC checkpoints and generating Mel-spectrograms.
In general, each test for a single model takes ~28 seconds on an NVIDIA RTX A6000.
"""
import pytest
from nemo.collections.tts.models import Tacotron2Model
available_models = [model.pretrained_model_name for model in Tacotron2Model.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = Tacotron2Model.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, language_specific_text_example):
model, language_id = pretrained_model
text = language_specific_text_example[language_id]
parsed_text = model.parse(text)
_ = model.generate_spectrogram(tokens=parsed_text)
| NeMo-main | tests/collections/tts/models/test_tacotron2.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained VITS NGC checkpoints and synthesizing audio waveforms.
In general, each test for a single model takes ~34 seconds on an NVIDIA RTX A6000.
"""
import pytest
from nemo.collections.tts.models import VitsModel
available_models = [model.pretrained_model_name for model in VitsModel.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = VitsModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, language_specific_text_example):
model, language_id = pretrained_model
text = language_specific_text_example[language_id]
parsed_text = model.parse(text)
_ = model.convert_text_to_waveform(tokens=parsed_text)
| NeMo-main | tests/collections/tts/models/test_vits.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained HiFiGAN NGC checkpoints and converting Mel-spectrograms into
audio. In general, each test for a single model takes ~3.5 seconds on an NVIDIA RTX A6000.
"""
import pytest
from nemo.collections.tts.models import HifiGanModel
available_models = [model.pretrained_model_name for model in HifiGanModel.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = HifiGanModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, mel_spec_example):
model, _ = pretrained_model
_ = model.convert_spectrogram_to_audio(spec=mel_spec_example)
| NeMo-main | tests/collections/tts/models/test_hifigan.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained FastPitch NGC checkpoints and generating Mel-spectrograms.
In general, each test for a single model takes ~25 seconds on an NVIDIA RTX A6000.
"""
import random
import pytest
import torch
from nemo.collections.tts.models import FastPitchModel
available_models = [model.pretrained_model_name for model in FastPitchModel.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = FastPitchModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, language_specific_text_example):
model, language_id = pretrained_model
text = language_specific_text_example[language_id]
parsed_text = model.parse(text)
# Multi-Speaker
speaker_id = None
reference_spec = None
reference_spec_lens = None
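# If the model has a speaker embedding table (or a speaker-encoder lookup module), pass a speaker ID;
# if it has a GST-style reference encoder, also pass a random reference spectrogram and its lengths.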
if hasattr(model.fastpitch, 'speaker_emb'):
speaker_id = 0
if hasattr(model.fastpitch, 'speaker_encoder'):
if hasattr(model.fastpitch.speaker_encoder, 'lookup_module'):
speaker_id = 0
if hasattr(model.fastpitch.speaker_encoder, 'gst_module'):
bs, lens, t_spec = parsed_text.shape[0], random.randint(50, 100), model.cfg.n_mel_channels
reference_spec = torch.rand(bs, lens, t_spec)
reference_spec_lens = torch.tensor([lens]).long().expand(bs)
_ = model.generate_spectrogram(
tokens=parsed_text, speaker=speaker_id, reference_spec=reference_spec, reference_spec_lens=reference_spec_lens
)
| NeMo-main | tests/collections/tts/models/test_fastpitch.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file implements unit tests for loading all pretrained WaveGlow NGC checkpoints and converting Mel-spectrograms into
audio. In general, each test for a single model takes ~4 seconds on an NVIDIA RTX A6000.
"""
import pytest
from nemo.collections.tts.models import WaveGlowModel
available_models = [model.pretrained_model_name for model in WaveGlowModel.list_available_models()]
@pytest.fixture(params=available_models, ids=available_models)
@pytest.mark.run_only_on('GPU')
def pretrained_model(request, get_language_id_from_pretrained_model_name):
model_name = request.param
language_id = get_language_id_from_pretrained_model_name(model_name)
model = WaveGlowModel.from_pretrained(model_name=model_name)
return model, language_id
@pytest.mark.nightly
@pytest.mark.run_only_on('GPU')
def test_inference(pretrained_model, mel_spec_example):
model, _ = pretrained_model
_ = model.convert_spectrogram_to_audio(spec=mel_spec_example)
| NeMo-main | tests/collections/tts/models/test_waveGlow.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import numpy as np
import pytest
import torch
from nemo.collections.tts.parts.utils.tts_dataset_utils import (
filter_dataset_by_duration,
get_abs_rel_paths,
get_audio_filepaths,
normalize_volume,
stack_tensors,
)
class TestTTSDatasetUtils:
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_get_abs_rel_paths_input_abs(self):
input_path = Path("/home/data/audio/test")
base_path = Path("/home/data")
abs_path, rel_path = get_abs_rel_paths(input_path=input_path, base_path=base_path)
assert abs_path == input_path
assert rel_path == Path("audio/test")
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_get_abs_rel_paths_input_rel(self):
input_path = Path("audio/test")
base_path = Path("/home/data")
abs_path, rel_path = get_abs_rel_paths(input_path=input_path, base_path=base_path)
assert abs_path == Path("/home/data/audio/test")
assert rel_path == input_path
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_get_audio_paths(self):
audio_dir = Path("/home/audio")
audio_rel_path = Path("examples/example.wav")
manifest_entry = {"audio_filepath": str(audio_rel_path)}
abs_path, rel_path = get_audio_filepaths(manifest_entry=manifest_entry, audio_dir=audio_dir)
assert abs_path == Path("/home/audio/examples/example.wav")
assert rel_path == audio_rel_path
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume(self):
input_audio = np.array([0.0, 0.1, 0.3, 0.5])
expected_output = np.array([0.0, 0.18, 0.54, 0.9])
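# The peak sample (0.5) is scaled to the 0.9 target, so every sample is multiplied by 1.8.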
output_audio = normalize_volume(audio=input_audio, volume_level=0.9)
np.testing.assert_array_almost_equal(output_audio, expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume_negative_peak(self):
input_audio = np.array([0.0, 0.1, -0.3, -1.0, 0.5])
expected_output = np.array([0.0, 0.05, -0.15, -0.5, 0.25])
output_audio = normalize_volume(audio=input_audio, volume_level=0.5)
np.testing.assert_array_almost_equal(output_audio, expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume_zero(self):
input_audio = np.array([0.0, 0.1, 0.3, 0.5])
expected_output = np.array([0.0, 0.0, 0.0, 0.0])
output_audio = normalize_volume(audio=input_audio, volume_level=0.0)
np.testing.assert_array_almost_equal(output_audio, expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume_max(self):
input_audio = np.array([0.0, 0.1, 0.3, 0.5])
expected_output = np.array([0.0, 0.2, 0.6, 1.0])
output_audio = normalize_volume(audio=input_audio, volume_level=1.0)
np.testing.assert_array_almost_equal(output_audio, expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume_zeros(self):
input_audio = np.array([0.0, 0.0, 0.0])
output_audio = normalize_volume(audio=input_audio, volume_level=0.5)
np.testing.assert_array_almost_equal(output_audio, input_audio)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume_empty(self):
input_audio = np.array([])
output_audio = normalize_volume(audio=input_audio, volume_level=1.0)
np.testing.assert_array_almost_equal(output_audio, input_audio)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_normalize_volume_out_of_range(self):
input_audio = np.array([0.0, 0.1, 0.3, 0.5])
with pytest.raises(ValueError, match="Volume must be in range"):
normalize_volume(audio=input_audio, volume_level=2.0)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_stack_tensors(self):
tensors = [torch.ones([2]), torch.ones([4]), torch.ones([3])]
max_lens = [6]
expected_output = torch.tensor(
[[1, 1, 0, 0, 0, 0], [1, 1, 1, 1, 0, 0], [1, 1, 1, 0, 0, 0]], dtype=torch.float32
)
stacked_tensor = stack_tensors(tensors=tensors, max_lens=max_lens)
torch.testing.assert_close(stacked_tensor, expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_stack_tensors_3d(self):
tensors = [torch.ones([2, 2]), torch.ones([1, 3])]
max_lens = [4, 2]
expected_output = torch.tensor(
[[[1, 1, 0, 0], [1, 1, 0, 0]], [[1, 1, 1, 0], [0, 0, 0, 0]]], dtype=torch.float32
)
stacked_tensor = stack_tensors(tensors=tensors, max_lens=max_lens)
torch.testing.assert_close(stacked_tensor, expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_filter_dataset_by_duration(self):
min_duration = 1.0
max_duration = 10.0
entries = [
{"duration": 0.5},
{"duration": 10.0},
{"duration": 20.0},
{"duration": 0.1},
{"duration": 100.0},
{"duration": 5.0},
]
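# Only the 10.0 s and 5.0 s entries fall inside [min_duration, max_duration];
# total_hours and filtered_hours below are 135.6 s and 15.0 s expressed in hours.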
filtered_entries, total_hours, filtered_hours = filter_dataset_by_duration(
entries=entries, min_duration=min_duration, max_duration=max_duration
)
assert len(filtered_entries) == 2
assert filtered_entries[0]["duration"] == 10.0
assert filtered_entries[1]["duration"] == 5.0
assert total_hours == (135.6 / 3600.0)
assert filtered_hours == (15.0 / 3600.0)
| NeMo-main | tests/collections/tts/parts/utils/test_tts_dataset_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import tempfile
from pathlib import Path
import numpy as np
import pytest
import soundfile as sf
import torch
from nemo.collections.tts.parts.preprocessing.features import (
EnergyFeaturizer,
MelSpectrogramFeaturizer,
PitchFeaturizer,
)
class TestTTSFeatures:
def setup_class(self):
self.audio_filename = "test.wav"
self.spec_dim = 80
self.hop_len = 100
self.audio_len = 10000
self.sample_rate = 20000
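# Expected number of frames, assuming center-padded STFT framing: floor(audio_len / hop_len) + 1.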
self.spec_len = 1 + (self.audio_len // self.hop_len)
self.manifest_entry = {"audio_filepath": self.audio_filename}
@contextlib.contextmanager
def _create_test_dir(self):
test_audio = np.random.uniform(size=[self.audio_len])
temp_dir = tempfile.TemporaryDirectory()
try:
test_dir = Path(temp_dir.name)
audio_path = test_dir / self.audio_filename
sf.write(audio_path, test_audio, self.sample_rate)
yield test_dir
finally:
temp_dir.cleanup()
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_compute_mel_spectrogram(self):
mel_featurizer = MelSpectrogramFeaturizer(
mel_dim=self.spec_dim, hop_length=self.hop_len, sample_rate=self.sample_rate
)
with self._create_test_dir() as test_dir:
spec = mel_featurizer.compute_mel_spec(manifest_entry=self.manifest_entry, audio_dir=test_dir)
assert len(spec.shape) == 2
assert spec.dtype == torch.float32
assert spec.shape[0] == self.spec_dim
assert spec.shape[1] == self.spec_len
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_save_and_load_mel_spectrogram(self):
mel_name = "mel_test"
mel_featurizer = MelSpectrogramFeaturizer(
feature_name=mel_name, mel_dim=self.spec_dim, hop_length=self.hop_len, sample_rate=self.sample_rate
)
with self._create_test_dir() as test_dir:
feature_dir = test_dir / "feature"
mel_featurizer.save(manifest_entry=self.manifest_entry, audio_dir=test_dir, feature_dir=feature_dir)
mel_dict = mel_featurizer.load(
manifest_entry=self.manifest_entry, audio_dir=test_dir, feature_dir=feature_dir
)
mel_spec = mel_dict[mel_name]
assert len(mel_spec.shape) == 2
assert mel_spec.dtype == torch.float32
assert mel_spec.shape[0] == self.spec_dim
assert mel_spec.shape[1] == self.spec_len
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_compute_pitch(self):
pitch_featurizer = PitchFeaturizer(hop_length=self.hop_len, sample_rate=self.sample_rate)
with self._create_test_dir() as test_dir:
pitch, voiced, voiced_prob = pitch_featurizer.compute_pitch(
manifest_entry=self.manifest_entry, audio_dir=test_dir
)
assert len(pitch.shape) == 1
assert pitch.shape[0] == self.spec_len
assert pitch.dtype == torch.float32
assert len(voiced.shape) == 1
assert voiced.shape[0] == self.spec_len
assert voiced.dtype == torch.bool
assert len(voiced_prob.shape) == 1
assert voiced_prob.shape[0] == self.spec_len
assert voiced_prob.dtype == torch.float32
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_save_and_load_pitch(self):
pitch_name = "pitch_test"
voiced_mask_name = "voiced_mask_test"
voiced_prob_name = "voiced_prob_test"
pitch_featurizer = PitchFeaturizer(
pitch_name=pitch_name,
voiced_mask_name=voiced_mask_name,
voiced_prob_name=voiced_prob_name,
hop_length=self.hop_len,
sample_rate=self.sample_rate,
)
with self._create_test_dir() as test_dir:
feature_dir = test_dir / "feature"
pitch_featurizer.save(manifest_entry=self.manifest_entry, audio_dir=test_dir, feature_dir=feature_dir)
pitch_dict = pitch_featurizer.load(
manifest_entry=self.manifest_entry, audio_dir=test_dir, feature_dir=feature_dir
)
pitch = pitch_dict[pitch_name]
voiced_mask = pitch_dict[voiced_mask_name]
voiced_prob = pitch_dict[voiced_prob_name]
assert len(pitch.shape) == 1
assert pitch.shape[0] == self.spec_len
assert pitch.dtype == torch.float32
assert len(voiced_mask.shape) == 1
assert voiced_mask.shape[0] == self.spec_len
assert voiced_mask.dtype == torch.bool
assert len(voiced_prob.shape) == 1
assert voiced_prob.shape[0] == self.spec_len
assert voiced_prob.dtype == torch.float32
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_compute_energy(self):
mel_featurizer = MelSpectrogramFeaturizer(
mel_dim=self.spec_dim, hop_length=self.hop_len, sample_rate=self.sample_rate
)
energy_featurizer = EnergyFeaturizer(spec_featurizer=mel_featurizer)
with self._create_test_dir() as test_dir:
energy = energy_featurizer.compute_energy(manifest_entry=self.manifest_entry, audio_dir=test_dir)
assert len(energy.shape) == 1
assert energy.shape[0] == self.spec_len
assert energy.dtype == torch.float32
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_save_and_load_energy(self):
energy_name = "energy_test"
mel_featurizer = MelSpectrogramFeaturizer(
mel_dim=self.spec_dim, hop_length=self.hop_len, sample_rate=self.sample_rate
)
energy_featurizer = EnergyFeaturizer(feature_name=energy_name, spec_featurizer=mel_featurizer)
with self._create_test_dir() as test_dir:
feature_dir = test_dir / "feature"
energy_featurizer.save(manifest_entry=self.manifest_entry, audio_dir=test_dir, feature_dir=feature_dir)
energy_dict = energy_featurizer.load(
manifest_entry=self.manifest_entry, audio_dir=test_dir, feature_dir=feature_dir
)
energy = energy_dict[energy_name]
assert len(energy.shape) == 1
assert energy.shape[0] == self.spec_len
assert energy.dtype == torch.float32
| NeMo-main | tests/collections/tts/parts/preprocessing/test_features.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from nemo.collections.tts.parts.preprocessing.audio_trimming import (
get_start_and_end_of_speech_frames,
pad_sample_indices,
)
class TestAudioTrimming:
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_get_start_and_end_of_speech_frames_frames(self):
# First speech frame is index 2 (inclusive) and last one is index 8 (exclusive).
is_speech = np.array([True, False, True, True, False, True, True, True, False, True, False])
speech_frame_threshold = 2
start_frame, end_frame = get_start_and_end_of_speech_frames(
is_speech=is_speech, speech_frame_threshold=speech_frame_threshold
)
assert start_frame == 2
assert end_frame == 8
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_get_start_and_end_of_speech_frames_not_frames_found(self):
is_speech = np.array([False, True, True, False])
speech_frame_threshold = 3
start_frame, end_frame = get_start_and_end_of_speech_frames(
is_speech=is_speech, speech_frame_threshold=speech_frame_threshold, audio_id="test"
)
assert start_frame == 0
assert end_frame == 0
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_pad_sample_indices(self):
start_sample, end_sample = pad_sample_indices(
start_sample=1000, end_sample=2000, max_sample=5000, sample_rate=100, pad_seconds=3
)
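# 3 s of padding at 100 Hz adds 300 samples on each side: 1000 - 300 = 700 and 2000 + 300 = 2300.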
assert start_sample == 700
assert end_sample == 2300
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_pad_sample_indices_boundaries(self):
start_sample, end_sample = pad_sample_indices(
start_sample=100, end_sample=1000, max_sample=1150, sample_rate=100, pad_seconds=2
)
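# The 200-sample padding is clamped to the valid range [0, max_sample].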
assert start_sample == 0
assert end_sample == 1150
| NeMo-main | tests/collections/tts/parts/preprocessing/test_audio_trimming.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import tempfile
from pathlib import Path
import numpy as np
import pytest
import torch
from nemo.collections.tts.parts.preprocessing.feature_processors import (
FeatureScaler,
LogCompression,
MeanVarianceNormalization,
MeanVarianceSpeakerNormalization,
)
class TestTTSFeatureProcessors:
@contextlib.contextmanager
def _write_test_dict(self, test_dict, filename):
temp_dir = tempfile.TemporaryDirectory()
try:
test_dir = Path(temp_dir.name)
test_dict_filepath = test_dir / filename
with open(test_dict_filepath, 'w', encoding="utf-8") as stats_f:
json.dump(test_dict, stats_f, indent=4)
yield test_dict_filepath
finally:
temp_dir.cleanup()
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_feature_scalar(self):
field = "test_feat"
input_tensor = torch.tensor([-2.5, 0.0, 1.0], dtype=torch.float32)
expected_tensor = torch.tensor([0.0, 2.0, 2.8], dtype=torch.float32)
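# Expected values correspond to (x + add_value) / div_value: (-2.5 + 2.5) / 1.25 = 0.0,
# (0.0 + 2.5) / 1.25 = 2.0, and (1.0 + 2.5) / 1.25 = 2.8.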
processor = FeatureScaler(field, add_value=2.5, div_value=1.25)
training_example = {field: input_tensor}
processor.process(training_example)
output_tensor = training_example[field]
torch.testing.assert_close(output_tensor, expected_tensor)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_log_compression(self):
field = "test_feat"
input_tensor = torch.tensor([-0.5, 0.0, 2.0], dtype=torch.float32)
expected_tensor = torch.tensor([np.log(0.5), 0.0, np.log(3.0)], dtype=torch.float32)
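# Expected values correspond to log(x + 1.0), i.e. an additive zero guard of 1.0 on the input.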
processor = LogCompression(field)
training_example = {field: input_tensor}
processor.process(training_example)
output_tensor = training_example[field]
torch.testing.assert_close(output_tensor, expected_tensor)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_log_compression_clamp(self):
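        # With the clamp guard, values are clamped to at least 0.5 before the log: 0.1 -> log(0.5), 1.0 -> 0.0, 2.0 -> log(2.0).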
field = "test_feat"
input_tensor = torch.tensor([0.1, 1.0, 2.0], dtype=torch.float32)
expected_tensor = torch.tensor([np.log(0.5), 0.0, np.log(2.0)], dtype=torch.float32)
processor = LogCompression(field, log_zero_guard_type="clamp", log_zero_guard_value=0.5)
training_example = {field: input_tensor}
processor.process(training_example)
output_tensor = training_example[field]
torch.testing.assert_close(output_tensor, expected_tensor)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_mean_variance_normalization(self):
field = "test_feat"
filename = "stats.json"
stat_dict = {"default": {"test_feat_mean": 1.5, "test_feat_std": 0.5}}
input_tensor = torch.tensor([0.0, 1.5, 2.0], dtype=torch.float32)
expected_tensor = torch.tensor([-3.0, 0.0, 1.0], dtype=torch.float32)
training_example = {field: input_tensor}
with self._write_test_dict(stat_dict, filename=filename) as stat_dict_filepath:
processor = MeanVarianceNormalization(field, stats_path=stat_dict_filepath, mask_field=None)
processor.process(training_example)
output_tensor = training_example[field]
torch.testing.assert_close(output_tensor, expected_tensor)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_mean_variance_normalization_masked(self):
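        # Only positions where the mask is True are normalized with (x - mean) / std; masked-out positions are set to zero.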
field = "test_feat"
mask_field = "mask"
filename = "stats.json"
stat_dict = {"default": {"test_feat_mean": 1.0, "test_feat_std": 0.5}}
input_tensor = torch.tensor([2.0, 3.0, 4.0, 5.0], dtype=torch.float32)
input_mask = torch.tensor([True, False, False, True], dtype=torch.bool)
expected_tensor = torch.tensor([2.0, 0.0, 0.0, 8.0], dtype=torch.float32)
training_example = {field: input_tensor, mask_field: input_mask}
with self._write_test_dict(stat_dict, filename=filename) as stat_dict_filepath:
processor = MeanVarianceNormalization(field, stats_path=stat_dict_filepath, mask_field=mask_field)
processor.process(training_example)
output_tensor = training_example[field]
torch.testing.assert_close(output_tensor, expected_tensor)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_mean_variance_speaker_normalization(self):
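        # Each example is normalized with its speaker's statistics; the unknown speaker falls back to the "default" stats because fallback_to_default=True.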
field = "pitch"
filename = "stats.json"
stat_dict = {
"default": {"pitch_mean": 1.5, "pitch_std": 0.5},
"speaker1": {"pitch_mean": 0.5, "pitch_std": 1.0},
"speaker2": {"pitch_mean": 0.0, "pitch_std": 2.0},
}
input_tensor = torch.tensor([0.0, 1.0], dtype=torch.float32)
training_example1 = {field: input_tensor, "speaker": "speaker1"}
training_example2 = {field: input_tensor, "speaker": "speaker2"}
training_example3 = {field: input_tensor, "speaker": "unknown"}
expected_tensor1 = torch.tensor([-0.5, 0.5], dtype=torch.float32)
expected_tensor2 = torch.tensor([0.0, 0.5], dtype=torch.float32)
expected_tensor3 = torch.tensor([-3.0, -1.0], dtype=torch.float32)
with self._write_test_dict(stat_dict, filename=filename) as stat_dict_filepath:
processor = MeanVarianceSpeakerNormalization(
field, stats_path=stat_dict_filepath, mask_field=None, fallback_to_default=True
)
processor.process(training_example1)
processor.process(training_example2)
processor.process(training_example3)
output_tensor1 = training_example1[field]
output_tensor2 = training_example2[field]
output_tensor3 = training_example3[field]
torch.testing.assert_close(output_tensor1, expected_tensor1)
torch.testing.assert_close(output_tensor2, expected_tensor2)
torch.testing.assert_close(output_tensor3, expected_tensor3)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_mean_variance_speaker_normalization_masked(self):
field = "test_feat"
mask_field = "test_mask"
filename = "stats.json"
stat_dict = {"steve": {"test_feat_mean": -1.0, "test_feat_std": 2.0}}
input_tensor = torch.tensor([1.0, 2.0, 3.0, 4.0], dtype=torch.float32)
input_mask = torch.tensor([False, True, False, True], dtype=torch.bool)
expected_tensor = torch.tensor([0.0, 1.5, 0.0, 2.5], dtype=torch.float32)
training_example = {field: input_tensor, "speaker": "steve", mask_field: input_mask}
with self._write_test_dict(stat_dict, filename=filename) as stat_dict_filepath:
processor = MeanVarianceSpeakerNormalization(field, stats_path=stat_dict_filepath, mask_field=mask_field)
processor.process(training_example)
output_tensor = training_example[field]
torch.testing.assert_close(output_tensor, expected_tensor)
| NeMo-main | tests/collections/tts/parts/preprocessing/test_feature_processors.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.tts.modules.audio_codec_modules import (
Conv1dNorm,
ConvTranspose1dNorm,
get_down_sample_padding,
get_up_sample_padding,
)
class TestAudioCodecModules:
def setup_class(self):
self.in_channels = 8
self.out_channels = 16
self.batch_size = 2
self.len1 = 4
self.len2 = 8
self.max_len = 10
self.kernel_size = 3
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_conv1d(self):
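        # The convolution masks positions past each sequence length, so outputs there should be exactly zero.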
inputs = torch.rand([self.batch_size, self.in_channels, self.max_len])
lengths = torch.tensor([self.len1, self.len2], dtype=torch.int32)
conv = Conv1dNorm(in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=self.kernel_size)
out = conv(inputs, lengths)
assert out.shape == (self.batch_size, self.out_channels, self.max_len)
assert torch.all(out[0, :, : self.len1] != 0.0)
assert torch.all(out[0, :, self.len1 :] == 0.0)
assert torch.all(out[1, :, : self.len2] != 0.0)
assert torch.all(out[1, :, self.len2 :] == 0.0)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_conv1d_downsample(self):
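        # With stride 2 and matching padding the output length is max_len // stride; positions past each downsampled length should be zero.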
stride = 2
out_len = self.max_len // stride
out_len_1 = self.len1 // stride
out_len_2 = self.len2 // stride
inputs = torch.rand([self.batch_size, self.in_channels, self.max_len])
lengths = torch.tensor([out_len_1, out_len_2], dtype=torch.int32)
padding = get_down_sample_padding(kernel_size=self.kernel_size, stride=stride)
conv = Conv1dNorm(
in_channels=self.in_channels,
out_channels=self.out_channels,
kernel_size=self.kernel_size,
stride=stride,
padding=padding,
)
out = conv(inputs, lengths)
assert out.shape == (self.batch_size, self.out_channels, out_len)
assert torch.all(out[0, :, :out_len_1] != 0.0)
assert torch.all(out[0, :, out_len_1:] == 0.0)
assert torch.all(out[1, :, :out_len_2] != 0.0)
assert torch.all(out[1, :, out_len_2:] == 0.0)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_conv1d_transpose_upsample(self):
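        # Transposed convolution with stride 2 doubles the length; positions past each upsampled length should be zero.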
stride = 2
out_len = self.max_len * stride
out_len_1 = self.len1 * stride
out_len_2 = self.len2 * stride
inputs = torch.rand([self.batch_size, self.in_channels, self.max_len])
lengths = torch.tensor([out_len_1, out_len_2], dtype=torch.int32)
conv = ConvTranspose1dNorm(
in_channels=self.in_channels, out_channels=self.out_channels, kernel_size=self.kernel_size, stride=stride
)
out = conv(inputs, lengths)
assert out.shape == (self.batch_size, self.out_channels, out_len)
assert torch.all(out[0, :, :out_len_1] != 0.0)
assert torch.all(out[0, :, out_len_1:] == 0.0)
assert torch.all(out[1, :, :out_len_2] != 0.0)
assert torch.all(out[1, :, out_len_2:] == 0.0)
| NeMo-main | tests/collections/tts/modules/test_audio_codec_modules.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.tts.modules import submodules
@pytest.mark.unit
def test_conditional_layer_norm():
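    # With no conditioning embedding provided, ConditionalLayerNorm is expected to match torch.nn.LayerNorm exactly.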
# NLP Example
batch, sentence_length, embedding_dim = 20, 5, 10
embedding = torch.randn(batch, sentence_length, embedding_dim)
ln = torch.nn.LayerNorm(embedding_dim)
cln = submodules.ConditionalLayerNorm(embedding_dim)
assert torch.all(ln(embedding) == cln(embedding))
weight = torch.nn.Parameter(torch.randn(embedding_dim))
bias = torch.nn.Parameter(torch.randn(embedding_dim))
ln.weight, ln.bias = weight, bias
cln.weight, cln.bias = weight, bias
assert torch.all(ln(embedding) == cln(embedding)) # Simulate trained weights
# Image Example
N, C, H, W = 20, 5, 10, 10
image = torch.randn(N, C, H, W)
ln = torch.nn.LayerNorm([C, H, W])
cln = submodules.ConditionalLayerNorm([C, H, W])
assert torch.all(ln(image) == cln(image))
weight = torch.nn.Parameter(torch.randn(C, H, W))
bias = torch.nn.Parameter(torch.randn(C, H, W))
ln.weight, ln.bias = weight, bias
cln.weight, cln.bias = weight, bias
assert torch.all(ln(image) == cln(image)) # Simulate trained weights
| NeMo-main | tests/collections/tts/modules/test_submodules.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pytest
from nemo.collections.nlp.data.language_modeling import text_memmap_dataset
@pytest.fixture
def jsonl_file(tmp_path):
# Create a temporary file path
file_path = tmp_path / "data.jsonl"
# Generate data to write to the JSONL file
data = [
{"name": "John", "age": 30},
{"name": "Jane", "age": 25},
{"name": "Bob", "age": 35},
]
# Write data to the JSONL file
with open(file_path, mode="w") as file:
for item in data:
json.dump(item, file)
file.write("\n")
# Provide the file path to the test function
yield str(file_path)
# Optional: Clean up the temporary file after the test
file_path.unlink()
@pytest.fixture
def csv_file(tmp_path):
# Create a temporary file path
file_path = tmp_path / "data.csv"
# Generate data to write to the CSV file
data = [["ID", "Name"], [1, "John"], [2, "Jane"], [3, "Bob"]]
# Write data to the CSV file
with open(file_path, mode="w", newline="") as file:
writer = csv.writer(file)
writer.writerows(data)
# Provide the file path to the test function
yield str(file_path)
# Optional: Clean up the temporary file after the test
file_path.unlink()
def test_jsonl_mem_map_dataset(jsonl_file):
"""Test for JSONL memory-mapped datasets."""
indexed_dataset = text_memmap_dataset.JSONLMemMapDataset(dataset_paths=[jsonl_file], header_lines=0)
assert indexed_dataset[0] == {"name": "John", "age": 30}
assert indexed_dataset[1] == {"name": "Jane", "age": 25}
assert indexed_dataset[2] == {"name": "Bob", "age": 35}
def test_csv_mem_map_dataset(csv_file):
"""Test for CSV memory-mapped datasets."""
indexed_dataset = text_memmap_dataset.CSVMemMapDataset(dataset_paths=[csv_file], data_col=1, header_lines=1)
assert indexed_dataset[0].strip() == "John"
assert indexed_dataset[1].strip() == "Jane"
assert indexed_dataset[2].strip() == "Bob"
def test_csv_fields_mem_map_dataset(csv_file):
"""Test for CSV memory-mapped datasets."""
indexed_dataset = text_memmap_dataset.CSVFieldsMemmapDataset(
dataset_paths=[csv_file], data_fields={"ID": 0, "Name": 1}, header_lines=1
)
assert isinstance(indexed_dataset[0], dict)
assert sorted(indexed_dataset[0].keys()) == ["ID", "Name"]
assert indexed_dataset[0]["ID"] == "1" and indexed_dataset[1]["ID"] == "2" and indexed_dataset[2]["ID"] == "3"
assert (
indexed_dataset[0]["Name"].strip() == "John"
and indexed_dataset[1]["Name"].strip() == "Jane"
and indexed_dataset[2]["Name"].strip() == "Bob"
)
@pytest.mark.parametrize(
"dataset_class", [text_memmap_dataset.JSONLMemMapDataset, text_memmap_dataset.CSVMemMapDataset],
)
@pytest.mark.parametrize("use_alternative_index_mapping_dir", [True, False])
@pytest.mark.parametrize("relative_index_fn", [True, False])
def test_mem_map_dataset_index_mapping_dir(
tmp_path, dataset_class, jsonl_file, use_alternative_index_mapping_dir, relative_index_fn,
):
"""Test for index_mapping_dir."""
if relative_index_fn:
jsonl_file = os.path.relpath(jsonl_file)
else:
jsonl_file = os.path.abspath(jsonl_file)
if use_alternative_index_mapping_dir:
index_mapping_dir = tmp_path / "subdir"
dataset_class(dataset_paths=[jsonl_file], header_lines=0, index_mapping_dir=str(index_mapping_dir))
# Index files should not be created in default location.
assert not os.path.isfile(f"{jsonl_file}.idx.npy")
assert not os.path.isfile(f"{jsonl_file}.idx.info")
if relative_index_fn:
# Remove leading ".." sequences.
while jsonl_file.startswith(("../")):
jsonl_file = jsonl_file.lstrip("../")
idx_fn = f"{str(index_mapping_dir)}/{jsonl_file}.idx"
assert os.path.isfile(f"{idx_fn}.npy")
assert os.path.isfile(f"{idx_fn}.info")
else:
text_memmap_dataset.JSONLMemMapDataset(dataset_paths=[jsonl_file], header_lines=0)
assert os.path.isfile(f"{jsonl_file}.idx.npy")
assert os.path.isfile(f"{jsonl_file}.idx.info")
| NeMo-main | tests/collections/nlp/test_mem_map_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
import torch
from megatron.core import ModelParallelConfig
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.modules.common.megatron.attention import CoreAttention
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.modules.common.megatron.utils import build_attention_mask_3d
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
import flash_attn
HAVE_FA = True
except (ImportError, ModuleNotFoundError):
HAVE_FA = False
try:
import triton
HAVE_TRITON = True
except (ImportError, ModuleNotFoundError):
HAVE_TRITON = False
try:
import pynvml
HAVE_PYNVML = True
except (ImportError, ModuleNotFoundError):
HAVE_PYNVML = False
def HAVE_AMPERE_GPU():
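    # Triton-based flash attention is only tested on Ampere GPUs (see the skipif markers below), so detect the device architecture with pynvml.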
if HAVE_PYNVML:
pynvml.nvmlInit()
handle = pynvml.nvmlDeviceGetHandleByIndex(0)
device_arch = pynvml.nvmlDeviceGetArchitecture(handle)
pynvml.nvmlShutdown()
return device_arch == pynvml.NVML_DEVICE_ARCH_AMPERE
else:
return False
@pytest.mark.pleasefixme
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(not HAVE_APEX, reason="apex is not installed")
class TestFlashAttention:
@classmethod
def setup_class(cls):
if not torch.cuda.is_available():
return
GPUS = 1
TP_SIZE = GPUS
PP_SIZE = 1
MB_SIZE = 4
GB_SIZE = 8
SEED = 1234
trainer = Trainer(strategy=NLPDDPStrategy(), devices=GPUS, accelerator='gpu', num_nodes=1, logger=None,)
initialize_model_parallel_for_nemo(
world_size=trainer.world_size,
global_rank=trainer.global_rank,
local_rank=trainer.local_rank,
tensor_model_parallel_size=TP_SIZE,
pipeline_model_parallel_size=PP_SIZE,
micro_batch_size=MB_SIZE,
global_batch_size=GB_SIZE,
seed=SEED,
apex_transformer_log_level=30,
)
@pytest.fixture()
def cfg(self):
cfg = {
'bz': random.randint(1, 7),
'sq': random.randint(2, 7),
'sk': random.randint(2, 7),
'head': random.randint(1, 7),
'layer_number': random.randint(1, 7),
'device': torch.cuda.current_device(),
}
# flash attention requires head dimensions are multiples of 8
head_dim = random.randint(1, 7) * 8
cfg['hidden'] = cfg['head'] * head_dim
return cfg
@pytest.fixture()
def model_parallel_config(self, cfg):
config = ModelParallelConfig()
return config
@pytest.mark.pleasefixme
@pytest.mark.skipif(not HAVE_FA, reason="flash-attention is not installed")
@pytest.mark.unit
def test_flash_self_attention(self, cfg, model_parallel_config):
device = cfg['device']
layer_number = cfg['layer_number']
bz, sl, np, h = cfg['bz'], cfg['sq'], cfg['head'], cfg['hidden']
hn = h // np
q = torch.rand(sl, bz, np, hn, device=device).half()
k = torch.rand(sl, bz, np, hn, device=device).half()
v = torch.rand(sl, bz, np, hn, device=device).half()
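        # Random per-sequence lengths define a 2D padding mask (True = valid token), which is expanded below into the 3D padding and causal masks consumed by CoreAttention.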
attention_mask_2d = torch.arange(sl, device=device).unsqueeze(0) < torch.randint(
1, sl, (bz,), device=device
).unsqueeze(1)
attention_mask_padding_3d = build_attention_mask_3d(
source_mask=attention_mask_2d, target_mask=attention_mask_2d, attn_mask_type=AttnMaskType.padding
).unsqueeze(1)
attention_mask_causal_3d = build_attention_mask_3d(
source_mask=attention_mask_2d, target_mask=attention_mask_2d, attn_mask_type=AttnMaskType.causal
).unsqueeze(1)
# Non-causal
attention = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
)
attention_fa = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
use_flash_attention=True,
)
out = attention(q, k, v, attention_mask_padding_3d)
out_fa = attention_fa(q, k, v, attention_mask_padding_3d)
torch.testing.assert_close(out, out_fa)
out_fa = attention_fa(q, k, v, ~attention_mask_2d)
torch.testing.assert_close(out, out_fa)
# Causal
attention = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.causal,
attention_dropout=0.0,
apply_query_key_layer_scaling=False,
)
attention_fa = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.causal,
attention_dropout=0.0,
use_flash_attention=True,
)
out = attention(q, k, v, attention_mask_causal_3d)
out_fa = attention_fa(q, k, v, attention_mask_causal_3d)
torch.testing.assert_close(out, out_fa)
out_fa = attention_fa(q, k, v, ~attention_mask_2d)
torch.testing.assert_close(out, out_fa)
@pytest.mark.pleasefixme
@pytest.mark.skipif(not HAVE_FA, reason="flash-attention is not installed")
@pytest.mark.unit
def test_flash_cross_attention(self, cfg, model_parallel_config):
device = cfg['device']
layer_number = cfg['layer_number']
bz, sq, sk, np, h = cfg['bz'], cfg['sq'], cfg['sk'], cfg['head'], cfg['hidden']
hn = h // np
q = torch.rand(sq, bz, np, hn, device=device).half()
k = torch.rand(sk, bz, np, hn, device=device).half()
v = torch.rand(sk, bz, np, hn, device=device).half()
attention_mask_2d_q = torch.arange(sq, device=device).unsqueeze(0) < torch.randint(
1, sq, (bz,), device=device
).unsqueeze(1)
attention_mask_2d_k = torch.arange(sk, device=device).unsqueeze(0) < torch.randint(
1, sk, (bz,), device=device
).unsqueeze(1)
attention_mask_padding_3d = build_attention_mask_3d(
source_mask=attention_mask_2d_q, target_mask=attention_mask_2d_k, attn_mask_type=AttnMaskType.padding
).unsqueeze(1)
attention = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
apply_query_key_layer_scaling=False,
)
attention_fa = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
use_flash_attention=True,
)
out = attention(q, k, v, attention_mask_padding_3d)
out_fa = attention_fa(q, k, v, attention_mask_padding_3d)
torch.testing.assert_close(out, out_fa)
@pytest.mark.skipif(not HAVE_FA, reason="flash-attention is not installed")
@pytest.mark.skipif(not HAVE_TRITON, reason="triton is not installed")
@pytest.mark.skipif(
not HAVE_AMPERE_GPU(),
reason="should only run on AMPERE GPU. Please see https://github.com/HazyResearch/flash-attention/issues/245",
)
@pytest.mark.unit
def test_flash_self_attention_triton(self, cfg, model_parallel_config):
device = cfg['device']
layer_number = cfg['layer_number']
bz, sl, np, h = cfg['bz'], cfg['sq'], cfg['head'], cfg['hidden']
hn = h // np
q = torch.rand(sl, bz, np, hn, device=device).half()
k = torch.rand(sl, bz, np, hn, device=device).half()
v = torch.rand(sl, bz, np, hn, device=device).half()
attention_mask_2d = torch.arange(sl, device=device).unsqueeze(0) < torch.randint(
1, sl, (bz,), device=device
).unsqueeze(1)
attention_mask_padding_3d = build_attention_mask_3d(
source_mask=attention_mask_2d, target_mask=attention_mask_2d, attn_mask_type=AttnMaskType.padding
).unsqueeze(1)
attention_mask_causal_3d = build_attention_mask_3d(
source_mask=attention_mask_2d, target_mask=attention_mask_2d, attn_mask_type=AttnMaskType.causal
).unsqueeze(1)
attention_bias = torch.rand(bz, np, sl, sl, device=device)
# Non-causal
attention = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
apply_query_key_layer_scaling=False,
)
attention_fa = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
use_flash_attention=True,
)
out = attention(q, k, v, attention_mask_padding_3d, relative_position_bias=attention_bias)
out_fa = attention_fa(q, k, v, attention_mask_padding_3d, relative_position_bias=attention_bias)
torch.testing.assert_close(out, out_fa, rtol=1e-3, atol=1e-3)
out_fa = attention_fa(q, k, v, ~attention_mask_2d, relative_position_bias=attention_bias)
torch.testing.assert_close(out, out_fa, rtol=1e-3, atol=1e-3)
# Causal
attention = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.causal,
attention_dropout=0.0,
apply_query_key_layer_scaling=False,
)
attention_fa = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.causal,
attention_dropout=0.0,
use_flash_attention=True,
)
out = attention(q, k, v, attention_mask_causal_3d, relative_position_bias=attention_bias)
out_fa = attention_fa(q, k, v, attention_mask_causal_3d, relative_position_bias=attention_bias)
torch.testing.assert_close(out, out_fa, rtol=1e-3, atol=1e-3)
out_fa = attention_fa(q, k, v, ~attention_mask_2d, relative_position_bias=attention_bias)
torch.testing.assert_close(out, out_fa, rtol=1e-3, atol=1e-3)
@pytest.mark.skipif(not HAVE_FA, reason="flash-attention is not installed")
@pytest.mark.skipif(not HAVE_TRITON, reason="triton is not installed")
@pytest.mark.skipif(
not HAVE_AMPERE_GPU(),
reason="should only run on AMPERE GPU. Please see https://github.com/HazyResearch/flash-attention/issues/245",
)
@pytest.mark.unit
def test_flash_cross_attention_triton(self, cfg, model_parallel_config):
device = cfg['device']
layer_number = cfg['layer_number']
bz, sq, sk, np, h = cfg['bz'], cfg['sq'], cfg['sk'], cfg['head'], cfg['hidden']
hn = h // np
q = torch.rand(sq, bz, np, hn, device=device).half()
k = torch.rand(sk, bz, np, hn, device=device).half()
v = torch.rand(sk, bz, np, hn, device=device).half()
attention_mask_2d_q = torch.arange(sq, device=device).unsqueeze(0) < torch.randint(
1, sq, (bz,), device=device
).unsqueeze(1)
attention_mask_2d_k = torch.arange(sk, device=device).unsqueeze(0) < torch.randint(
1, sk, (bz,), device=device
).unsqueeze(1)
attention_mask_padding_3d = build_attention_mask_3d(
source_mask=attention_mask_2d_q, target_mask=attention_mask_2d_k, attn_mask_type=AttnMaskType.padding
).unsqueeze(1)
attention_bias = torch.rand(bz, np, sq, sk, device=device)
attention = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
apply_query_key_layer_scaling=False,
)
attention_fa = CoreAttention(
config=model_parallel_config,
layer_number=layer_number,
num_attention_heads=np,
hidden_size=h,
attn_mask_type=AttnMaskType.padding,
attention_dropout=0.0,
use_flash_attention=True,
)
out = attention(q, k, v, attention_mask_padding_3d, relative_position_bias=attention_bias)
out_fa = attention_fa(q, k, v, attention_mask_padding_3d, relative_position_bias=attention_bias)
torch.testing.assert_close(out, out_fa, rtol=1e-3, atol=1e-3)
| NeMo-main | tests/collections/nlp/test_flash_attention.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
try:
import apex.transformer.pipeline_parallel.utils
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
DEVICE_CAPABILITY = None
if torch.cuda.is_available():
DEVICE_CAPABILITY = torch.cuda.get_device_capability()
def reset_microbatch_calculator():
apex.transformer.pipeline_parallel.utils._GLOBAL_NUM_MICROBATCHES_CALCULATOR = None
@pytest.fixture()
def model_cfg(test_data_dir):
model_cfg = {
'precision': 16,
'micro_batch_size': 4,
'global_batch_size': 16,
'rampup_batch_size': [4, 4, 100],
'tensor_model_parallel_size': 1,
'pipeline_model_parallel_size': 1,
'resume_from_checkpoint': None,
'encoder_seq_length': 512,
'max_position_embeddings': 512,
'num_layers': 1,
'hidden_size': 128,
'ffn_hidden_size': 512,
'num_attention_heads': 2,
'init_method_std': 0.02,
'hidden_dropout': 0.1,
'kv_channels': None,
'apply_query_key_layer_scaling': True,
'layernorm_epsilon': 1e-5,
'make_vocab_size_divisible_by': 128,
'pre_process': True,
'post_process': True,
'persist_layer_norm': True,
'gradient_as_bucket_view': True,
'tokenizer': {
'library': 'megatron',
'type': 'GPT2BPETokenizer',
'model': None,
'vocab_file': os.path.join(test_data_dir, 'nlp/gpt_vocab_merges/vocab.json'),
'merge_file': os.path.join(test_data_dir, 'nlp/gpt_vocab_merges/merges.txt'),
'delimiter': None,
},
'native_amp_init_scale': 4294967296,
'native_amp_growth_interval': 1000,
'hysteresis': 2,
'fp32_residual_connection': False,
'fp16_lm_cross_entropy': False,
'megatron_amp_O2': False,
'seed': 1234,
'use_cpu_initialization': False,
'onnx_safe': False,
'apex_transformer_log_level': 30,
'activations_checkpoint_method': None,
'activations_checkpoint_num_layers': 1,
'data': {
'data_prefix': '???',
'index_mapping_dir': None,
'data_impl': 'mmap',
'splits_string': '900,50,50',
'seq_length': 512,
'skip_warmup': True,
'num_workers': 2,
'dataloader_type': 'single',
'reset_position_ids': False,
'reset_attention_mask': False,
'eod_mask_loss': False,
},
'optim': {
'name': 'fused_adam',
'lr': 2e-4,
'weight_decay': 0.01,
'betas': [0.9, 0.98],
'sched': {'name': 'CosineAnnealing', 'warmup_steps': 500, 'constant_steps': 50000, 'min_lr': '2e-5'},
},
}
return model_cfg
@pytest.fixture()
def trainer_cfg():
trainer_cfg = {
'devices': 1,
'num_nodes': 1,
'accelerator': 'gpu',
'precision': 16,
'logger': False,
'enable_checkpointing': False,
'use_distributed_sampler': False,
'max_epochs': 1,
'max_steps': 150,
'log_every_n_steps': 10,
'val_check_interval': 100,
'limit_val_batches': 50,
'limit_test_batches': 500,
'accumulate_grad_batches': 1,
'gradient_clip_val': 1.0,
}
return trainer_cfg
@pytest.fixture()
def gpt_model(model_cfg, trainer_cfg):
strategy = NLPDDPStrategy()
trainer = Trainer(strategy=strategy, **trainer_cfg)
cfg = DictConfig(model_cfg)
reset_microbatch_calculator()
model = MegatronGPTModel(cfg, trainer)
return model
@pytest.fixture()
def rampup_batch_size():
return [4, 4, 100]
@pytest.fixture()
def rampup_batch_size_schedule():
return [4, 8, 12, 16]
@pytest.mark.run_only_on('GPU')
class TestRampupBatchSize:
@pytest.mark.unit
def test_rampup_bs(self, gpt_model, rampup_batch_size):
assert gpt_model.cfg.rampup_batch_size == rampup_batch_size
@pytest.mark.unit
def test_rampup_bs_schedule(self, gpt_model, trainer_cfg, rampup_batch_size_schedule):
num_microbatch_calculator = apex.transformer.pipeline_parallel.utils._GLOBAL_NUM_MICROBATCHES_CALCULATOR
micro_batch_size = gpt_model.cfg.micro_batch_size
num_devices = trainer_cfg["devices"]
num_nodes = trainer_cfg["num_nodes"]
max_steps = trainer_cfg["max_steps"]
global_batch_size_schedule = []
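        # Step through training, updating the Apex microbatch calculator and recording each distinct global batch size produced by the ramp-up schedule.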
step, consumed_samples = 0, 0
while step <= max_steps:
step += 1
current_global_batch_size = get_num_microbatches() * micro_batch_size * num_devices * num_nodes
consumed_samples += current_global_batch_size
num_microbatch_calculator.update(consumed_samples=consumed_samples, consistency_check=True)
if current_global_batch_size not in global_batch_size_schedule:
global_batch_size_schedule.append(current_global_batch_size)
reset_microbatch_calculator()
assert global_batch_size_schedule == rampup_batch_size_schedule
| NeMo-main | tests/collections/nlp/test_rampup_batch_size.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from omegaconf import DictConfig
from pytorch_lightning import Trainer
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.megatron.utils import get_ltor_masks_and_position_ids
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
DEVICE_CAPABILITY = None
if torch.cuda.is_available():
DEVICE_CAPABILITY = torch.cuda.get_device_capability()
@pytest.fixture()
def model_cfg(test_data_dir):
model_cfg = {
'precision': 16,
'micro_batch_size': 4,
'global_batch_size': 8,
'tensor_model_parallel_size': 1,
'pipeline_model_parallel_size': 1,
'resume_from_checkpoint': None,
'encoder_seq_length': 512,
'max_position_embeddings': 512,
'num_layers': 1,
'hidden_size': 128,
'ffn_hidden_size': 512,
'num_attention_heads': 2,
'init_method_std': 0.02,
'hidden_dropout': 0.1,
'kv_channels': None,
'apply_query_key_layer_scaling': True,
'layernorm_epsilon': 1e-5,
'make_vocab_size_divisible_by': 128,
'pre_process': True,
'post_process': True,
'persist_layer_norm': True,
'gradient_as_bucket_view': True,
'tokenizer': {
'library': 'megatron',
'type': 'GPT2BPETokenizer',
'model': None,
'vocab_file': os.path.join(test_data_dir, 'nlp/gpt_vocab_merges/vocab.json'),
'merge_file': os.path.join(test_data_dir, 'nlp/gpt_vocab_merges/merges.txt'),
'delimiter': None,
},
'native_amp_init_scale': 4294967296,
'native_amp_growth_interval': 1000,
'hysteresis': 2,
'fp32_residual_connection': False,
'fp16_lm_cross_entropy': False,
'megatron_amp_O2': False,
'seed': 1234,
'use_cpu_initialization': False,
'onnx_safe': False,
'apex_transformer_log_level': 30,
'activations_checkpoint_method': None,
'activations_checkpoint_num_layers': 1,
'data': {
'data_prefix': '???',
'index_mapping_dir': None,
'data_impl': 'mmap',
'splits_string': '900,50,50',
'seq_length': 512,
'skip_warmup': True,
'num_workers': 2,
'dataloader_type': 'single',
'reset_position_ids': False,
'reset_attention_mask': False,
'eod_mask_loss': False,
},
'optim': {
'name': 'fused_adam',
'lr': 2e-4,
'weight_decay': 0.01,
'betas': [0.9, 0.98],
'sched': {'name': 'CosineAnnealing', 'warmup_steps': 500, 'constant_steps': 50000, 'min_lr': '2e-5'},
},
}
return model_cfg
@pytest.fixture()
def trainer_cfg():
trainer_cfg = {
'devices': 1,
'num_nodes': 1,
'accelerator': 'gpu',
'precision': 16,
'logger': False,
'enable_checkpointing': False,
'use_distributed_sampler': False,
'max_epochs': 1000,
'max_steps': 100000,
'log_every_n_steps': 10,
'val_check_interval': 100,
'limit_val_batches': 50,
'limit_test_batches': 500,
'accumulate_grad_batches': 1,
'gradient_clip_val': 1.0,
}
return trainer_cfg
@pytest.fixture()
def precision():
return 32
@pytest.fixture()
def gpt_model(model_cfg, trainer_cfg, precision):
model_cfg['precision'] = precision
trainer_cfg['precision'] = precision
strategy = NLPDDPStrategy()
trainer = Trainer(strategy=strategy, **trainer_cfg)
cfg = DictConfig(model_cfg)
model = MegatronGPTModel(cfg=cfg, trainer=trainer)
return model
@pytest.fixture()
def test_text():
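    # Note: "rediculously" is misspelled in the test data; do not "fix" it, since the ground-truth token IDs in test_tokenizer depend on the exact strings.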
test_text = [
"hello, world",
"four score and seven years ago",
"Your time is limited",
"If you set goals rediculously high",
]
return test_text
@pytest.mark.run_only_on('GPU')
class TestGPTModel:
@pytest.mark.unit
def test_constructor(self, gpt_model):
assert isinstance(gpt_model, MegatronGPTModel)
num_weights = gpt_model.num_weights
assert num_weights == 6702976
@pytest.mark.unit
def test_tokenizer(self, gpt_model, test_text):
assert isinstance(gpt_model.tokenizer, AutoTokenizer)
assert gpt_model.tokenizer.name == 'GPT2Tokenizer'
assert gpt_model.tokenizer.vocab_size == 50257
ids = [gpt_model.tokenizer.text_to_ids(text) for text in test_text]
true_ids = [
[31373, 11, 995],
[14337, 4776, 290, 3598, 812, 2084],
[7120, 640, 318, 3614],
[1532, 345, 900, 4661, 2266, 291, 18117, 1029],
]
assert sum([id_list == true_id_list for id_list, true_id_list in zip(ids, true_ids)]) == 4
@pytest.mark.parametrize(
"precision",
[
32,
16,
pytest.param(
"bf16",
marks=pytest.mark.skipif(
not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8,
reason='bfloat16 is not supported on this device',
),
),
],
)
@pytest.mark.unit
def test_forward(self, gpt_model, test_text):
dtype = gpt_model.torch_dtype
gpt_model.eval()
ids = [gpt_model.tokenizer.text_to_ids(text) for text in test_text]
id_tensors = [torch.unsqueeze(torch.LongTensor(id_list), dim=0) for id_list in ids]
masks_and_position_ids = [
get_ltor_masks_and_position_ids(id_tensor, gpt_model.tokenizer.eos_id, False, False, False)
for id_tensor in id_tensors
]
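        # get_ltor_masks_and_position_ids returns (attention_mask, loss_mask, position_ids); only the attention mask and position ids are used below.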
output_tensors = []
with torch.no_grad():
for tokens, attn_mask_and_pos_ids in zip(id_tensors, masks_and_position_ids):
attn_mask, _, pos_ids = attn_mask_and_pos_ids
assert tokens.shape == pos_ids.shape
assert attn_mask.shape[2] == attn_mask.shape[3] == tokens.shape[1] == pos_ids.shape[1]
with torch.autocast('cuda', dtype=dtype):
output_tensor = gpt_model.forward(
tokens=tokens.cuda(),
text_position_ids=pos_ids.cuda(),
attention_mask=attn_mask.cuda(),
labels=None,
)
# output is [b s h]
assert output_tensor.shape[0] == 1
assert output_tensor.shape[1] == tokens.shape[1]
assert output_tensor.shape[2] == gpt_model.padded_vocab_size
assert output_tensor.dtype == dtype
output_tensors.append(output_tensor)
| NeMo-main | tests/collections/nlp/test_gpt_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from sklearn.metrics import precision_recall_fscore_support
from nemo.collections.nlp.metrics.classification_report import ClassificationReport
class TestClassificationReport:
num_classes = 3
label_ids = {'a': 0, 'b': 1, 'c': 2}
@pytest.mark.unit
def test_classification_report(self):
preds = torch.Tensor([0, 1, 1, 1, 2, 2, 0])
labels = torch.Tensor([1, 0, 0, 1, 2, 1, 0])
def __convert_to_tensor(sklearn_metric):
return torch.Tensor([round(sklearn_metric * 100)])[0]
for mode in ['macro', 'micro', 'weighted']:
classification_report_nemo = ClassificationReport(
num_classes=self.num_classes, label_ids=self.label_ids, mode=mode
)
precision, recall, f1, _ = classification_report_nemo(preds, labels)
tp, fp, fn = classification_report_nemo.tp, classification_report_nemo.fp, classification_report_nemo.fn
pr_sklearn, recall_sklearn, f1_sklearn, _ = precision_recall_fscore_support(labels, preds, average=mode)
            assert torch.round(precision) == __convert_to_tensor(pr_sklearn), f'wrong precision for {mode}'
            assert torch.round(recall) == __convert_to_tensor(recall_sklearn), f'wrong recall for {mode}'
            assert torch.round(f1) == __convert_to_tensor(f1_sklearn), f'wrong f1 for {mode}'
| NeMo-main | tests/collections/nlp/test_classification_report.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.language_modeling.megatron_gpt_model import MegatronGPTModel
from nemo.collections.nlp.modules.common.transformer.text_generation import LengthParam, SamplingParam
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
class TestGPTEval:
@pytest.mark.run_only_on('GPU')
def setup_method(self, test_method):
trainer_config = {
"devices": 1,
"num_nodes": 1,
"accelerator": "gpu",
"logger": False,
"precision": 16,
}
tensor_model_parallel_size = 1
pipeline_model_parallel_size = 1
model_file = '/home/TestData/nlp/megatron_gpt/125M/megatron_gpt.nemo'
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **trainer_config)
assert (
trainer_config["devices"] * trainer_config['num_nodes']
== tensor_model_parallel_size * pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
model = MegatronGPTModel.restore_from(restore_path=model_file, trainer=trainer)
model.freeze()
# has to turn off activations_checkpoint_method for inference
try:
model.model.language_model.encoder.activations_checkpoint_method = None
except AttributeError:
pass
self.model = model
# @pytest.mark.skipif(not os.path.exists('/home/TestData/nlp'), reason='Not a Jenkins machine')
    # Skip this unit test for now; the numerical issue needs to be investigated.
@pytest.mark.skipif(True, reason='skip')
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
@pytest.mark.skip()
    # TODO: re-enable the test
def test_gpt_eval(self):
# test greedy
length_params: LengthParam = {
"max_length": 30,
"min_length": 0,
}
sampling_params: SamplingParam = {
"use_greedy": True,
"temperature": 1.0,
"top_k": 0,
"top_p": 1.0,
"repetition_penalty": 1.0,
"add_BOS": True,
"all_probs": False,
"compute_logprob": False,
"end_strings": ["<|endoftext|>"],
}
# test logprob
sampling_params["compute_logprob"] = True
sentence = 'run gpt in inference mode'
response = self.model.generate(inputs=[sentence], length_params=length_params, sampling_params=sampling_params)
assert response["sentences"][0] == sentence
gt_token_ids = [5143, 308, 457, 287, 32278, 4235]
assert np.array_equal(np.array(response['token_ids'][0]), gt_token_ids)
assert len(response['full_logprob'][0]) == 5
gt_log_prob = [
-7.9579081535339355,
-7.195970058441162,
-5.269130706787109,
-12.75404167175293,
-4.631799697875977,
]
assert np.allclose(np.array(response['logprob'][0]), gt_log_prob, atol=1e-4)
gt_offsets = [0, 3, 5, 7, 10, 20]
assert np.array_equal(np.array(response['offsets'][0]), gt_offsets)
# # test top_p
sampling_params["compute_logprob"] = False
sampling_params["use_greedy"] = False
sampling_params["top_p"] = 0.8
sampling_params["repetition_penalty"] = 1.2
gt_token_ids = [
50256,
15,
59,
198,
59,
2,
16,
59,
2,
17,
58,
57,
59,
62,
37,
7,
39,
15437,
90,
92,
357,
2481,
8,
3467,
2,
18,
30109,
9,
43215,
13,
5416,
]
gt_text = '0\\\n\\#1\\#2[Z\\_F(H)]{} (21) \\#3[[*Phys. Rev'
response = self.model.generate(inputs=[''], length_params=length_params, sampling_params=sampling_params)
assert np.array_equal(np.array(response['token_ids'][0]), gt_token_ids)
assert response['sentences'][0] == gt_text
| NeMo-main | tests/collections/nlp/test_gpt_eval.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pytest
import torch
from nemo.collections.nlp.data.language_modeling.megatron.gpt_prompt_learning_dataset import GPTPromptLearningDataset
from nemo.collections.nlp.models.language_modeling.megatron_gpt_prompt_learning_model import get_pseudo_tokens
from nemo.collections.nlp.modules.common import VirtualPromptSource
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.core import Dataset
def get_prompt_tuning_dataset(
dataset_path, tokenizer, virtual_prompt_source, task_templates, pseudo_tokens,
):
dataset = GPTPromptLearningDataset(
data=[dataset_path],
tokenizer=tokenizer,
virtual_prompt_source=virtual_prompt_source,
task_templates=task_templates,
pseudo_tokens=pseudo_tokens,
pad_token_id=tokenizer.unk_id,
max_seq_length=512,
min_seq_length=1,
)
return dataset
def create_temp_dataset():
example_dataset_a = [
{'taskname': 'task name A', 'text': 'Test sentence one, Answer: ', 'answer': 'test'} for i in range(24)
]
example_dataset_b = [
{'taskname': 'task name B', 'question': 'This is a question', 'answer': 'test'} for i in range(13)
]
example_dataset = example_dataset_a + example_dataset_b
temp_file_name = 'temp_dataset_file.jsonl'
with open(temp_file_name, 'w') as temp:
for example in example_dataset:
temp.write(json.dumps(example) + '\n')
return temp_file_name
def get_task_templates():
task_templates = {}
task_templates['task name A'] = {
"prompt_template": "<|VIRTUAL_PROMPT_0|>{text}{answer}",
"prompt_template_fields": ['text', 'answer'],
"total_virtual_tokens": 5,
"virtual_token_splits": [5],
"truncate_field": None,
"answer_only_loss": True,
"answer_field": "answer",
"task_id_num": 0,
}
task_templates['task name B'] = {
"prompt_template": "<|VIRTUAL_PROMPT_0|>{question}<|VIRTUAL_PROMPT_1|>{answer}{extra}",
"prompt_template_fields": ['question', 'answer', 'extra'],
"total_virtual_tokens": 10,
"virtual_token_splits": [7, 3],
"truncate_field": None,
"answer_only_loss": False,
"answer_field": None,
"task_id_num": 1,
}
return task_templates
class TestMegatronGPTPromptLearningDataset:
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_init_prompt_learning_dataset(self):
tokenizer = get_nmt_tokenizer(library='megatron', model_name='GPT2BPETokenizer')
task_templates = get_task_templates()
dataset_path = create_temp_dataset()
# Setup virtual token place holders
total_virtual_tokens = 10
pseudo_tokens = get_pseudo_tokens(total_virtual_tokens)
tokenizer.add_special_tokens({'additional_special_tokens': pseudo_tokens})
dataset = get_prompt_tuning_dataset(
dataset_path, tokenizer, VirtualPromptSource.PROMPT_ENCODER, task_templates, pseudo_tokens,
)
print(type(dataset))
assert isinstance(dataset, Dataset)
os.remove(dataset_path)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_prompt_learning_dataset_collate_fn_prompt_encoder(self):
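        # collate_fn should pack the batch into six tensors, the last of which contains the tokenized task names.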
tokenizer = get_nmt_tokenizer(library='megatron', model_name='GPT2BPETokenizer')
task_templates = get_task_templates()
dataset_path = create_temp_dataset()
# Setup virtual token place holders
total_virtual_tokens = 10
pseudo_tokens = get_pseudo_tokens(total_virtual_tokens)
tokenizer.add_special_tokens({'additional_special_tokens': pseudo_tokens})
dataset = get_prompt_tuning_dataset(
dataset_path, tokenizer, VirtualPromptSource.PROMPT_ENCODER, task_templates, pseudo_tokens,
)
batch = [dataset[i] for i in range(8)]
batch = dataset.collate_fn(batch)
assert len(batch) == 6
_, _, _, _, _, taskname_ids = batch
assert list(taskname_ids[0].numpy()) == tokenizer.text_to_ids("task name A")
os.remove(dataset_path)
if __name__ == "__main__":
t = TestMegatronGPTPromptLearningDataset()
t.test_init_prompt_learning_dataset()
t.test_prompt_learning_dataset_collate_fn_prompt_encoder()
print('-' * 50 + '\nALL PROMPT TUNING UNIT TESTS PASS!\n' + '-' * 50)
| NeMo-main | tests/collections/nlp/test_prompt_learning.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pytest
import torch
from nemo.collections.nlp.data.question_answering.dataset.qa_dataset import QADataset
from nemo.collections.nlp.data.question_answering.dataset.qa_gpt_dataset import GPTQADataset
from nemo.collections.nlp.metrics.qa_metrics import QAMetrics
@pytest.mark.unit
def test_remove_articles():
sentences = [
"this is an apple",
"this is the apple",
"this is a fruit",
]
expected_article_removed_sents = ["this is apple", "this is apple", "this is fruit"]
article_removed_sents = [QAMetrics.remove_articles(sent) for sent in sentences]
assert article_removed_sents == expected_article_removed_sents
@pytest.mark.unit
def test_white_space_fix():
sentences = [
"sentence with a space",
"sentence with multiple spaces",
]
expected_white_space_fixed_sents = [
"sentence with a space",
"sentence with multiple spaces",
]
white_space_fixed_sents = [QAMetrics.white_space_fix(sent) for sent in sentences]
assert white_space_fixed_sents == expected_white_space_fixed_sents
@pytest.mark.unit
def test_remove_punc():
sentence = "this, is. a! sentence: with; punctuations?"
expected_punc_removed_sent = "this is a sentence with punctuations"
punc_removed_sent = QAMetrics.remove_punc(sentence)
assert punc_removed_sent == expected_punc_removed_sent
@pytest.mark.unit
def test_get_normalized_tokens():
sentence = 'I am happy'
tokens = ['i', 'am', 'happy']
assert tokens == QAMetrics._get_normalized_tokens(sentence)
sentence = 'I am a person'
tokens = ['i', 'am', 'person']
assert tokens == QAMetrics._get_normalized_tokens(sentence)
sentence = 'I am a person.'
tokens = ['i', 'am', 'person']
assert tokens == QAMetrics._get_normalized_tokens(sentence)
@pytest.mark.unit
def test_get_one_f1():
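    # Token-level F1 over normalized tokens: 3 of 4 tokens match, so precision = recall = F1 = 0.75.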
generated_field = 'That is so good'
ground_truth_field = 'That is so awesome'
f1 = QAMetrics.get_one_f1(generated_field, ground_truth_field)
assert f1 == 0.75
generated_field = ''
ground_truth_field = 'That'
f1 = QAMetrics.get_one_f1(generated_field, ground_truth_field)
assert f1 == 0
@pytest.mark.unit
def test_get_one_exact_match():
generated_field = 'That is so good'
ground_truth_field = 'That is so awesome'
em = QAMetrics.get_one_exact_match(generated_field, ground_truth_field)
assert em == 0
generated_field = 'That is so good!'
ground_truth_field = 'That is so good.'
em = QAMetrics.get_one_exact_match(generated_field, ground_truth_field)
assert em == 1
generated_field = 'That is so good'
ground_truth_field = 'that is so good'
em = QAMetrics.get_one_exact_match(generated_field, ground_truth_field)
assert em == 1
@pytest.mark.unit
def test_split_into_words():
text = 'hi yo'
char_to_word_offset = [0, 0, 0, 1, 1]
doc_tokens = ["hi", "yo"]
output = QADataset.split_into_words(text)
assert output[0] == doc_tokens
assert output[1] == char_to_word_offset
text = 'i am good'
char_to_word_offset = [0, 0, 1, 1, 1, 2, 2, 2, 2]
doc_tokens = ["i", "am", 'good']
output = QADataset.split_into_words(text)
assert output[0] == doc_tokens
assert output[1] == char_to_word_offset
@pytest.mark.unit
def test_get_doc_spans():
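    # 15 tokens with a 10-token window and stride 5 produce two overlapping spans starting at 0 and 5.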
all_doc_tokens = ['a'] * 15
max_tokens_for_doc = 10
doc_stride = 5
doc_spans = QADataset.get_docspans(all_doc_tokens, max_tokens_for_doc, doc_stride)
assert len(doc_spans) == 2
assert doc_spans[0].start == 0
assert doc_spans[0].length == 10
assert doc_spans[1].start == 5
assert doc_spans[1].length == 10
@pytest.mark.unit
def test_get_average_dist_to_tok_start_and_end():
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_span = _DocSpan(start=0, length=5)
tok_start_position = 1
tok_end_position = 3
assert 2 == QADataset.get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position)
doc_span = _DocSpan(start=5, length=5)
tok_start_position = 1
tok_end_position = 2
assert 6 == QADataset.get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position)
doc_span = _DocSpan(start=5, length=4)
tok_start_position = 1
tok_end_position = 2
assert 5 == QADataset.get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position)
@pytest.mark.unit
def test_keep_relevant_docspans():
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = 1
tok_end_position = 2
mode = 'all'
assert doc_spans == QADataset.keep_relevant_docspans(doc_spans, tok_start_position, tok_end_position, mode)
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = -1
tok_end_position = -1
mode = 'only_positive'
expected_doc_spans = []
assert expected_doc_spans == QADataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, mode
)
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = 1
tok_end_position = 2
mode = 'only_positive'
expected_doc_spans = [_DocSpan(start=0, length=5), _DocSpan(start=1, length=5)]
assert expected_doc_spans == QADataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, mode
)
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = 1
tok_end_position = 2
mode = 'limited_negative'
expected_doc_spans = [_DocSpan(start=start, length=5) for start in range(10)]
assert expected_doc_spans == QADataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, mode
)
@pytest.mark.unit
def test_gpt_no_pad_loss_masking():
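    # Labels for the prompt region (before training_mask_end) and for padding are set to -100 so they are ignored by the loss; answer tokens plus the first EOS/pad token inside the attention mask keep their original ids.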
input_ids = [1] * 15 + [50257] * 15
input_ids = torch.tensor(input_ids)
input_attn_mask = [1] * 16 + [0] * 14
input_attn_mask = torch.Tensor(input_attn_mask)
training_mask_end = 10
expected_labels = [-100] * 10 + [1] * 5 + [50257] + [-100] * 14
expected_labels = torch.tensor(expected_labels)
labels = GPTQADataset.update_labels_for_no_pad_loss(input_ids, training_mask_end, input_attn_mask)
assert torch.all(labels.eq(expected_labels))
| NeMo-main | tests/collections/nlp/test_qna.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import pytest
from nemo.collections.common.tokenizers.regex_tokenizer import RegExTokenizer
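# Tokenization pattern for SMILES strings: bracketed atoms, one- and two-letter element symbols, aromatic atoms, bonds, branches, ring closures, and digits.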
DEFAULT_REGEX = (
r"""\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9]"""
)
class TestRegexTokenizer:
def create_test_data(self):
data_file = tempfile.NamedTemporaryFile(mode='w+', delete=False)
data_file.writelines(
"""zinc_id,smiles
ZINC000510438538,FC(F)Oc1ccc([C@H](NCc2cnc3ccccn23)C(F)(F)F)cc1
"""
)
data_file_path = str(data_file.name)
data_file.close()
return data_file_path
@pytest.mark.unit
def test_create_vocab(self):
data_file_path = self.create_test_data()
tokenizer = RegExTokenizer(regex=DEFAULT_REGEX)
tokenizer.build_vocab_from_csv(data_csv_file=data_file_path)
assert len(tokenizer.vocab) == 18
@pytest.mark.unit
def test_text_2_tokens(self):
tokenizer = RegExTokenizer(regex=DEFAULT_REGEX)
tokens = tokenizer.text_to_tokens("Zc")
assert ''.join(tokens) == 'Zc'
@pytest.mark.unit
def test_text_2_ids(self):
data_file_path = self.create_test_data()
tokenizer = RegExTokenizer(regex=DEFAULT_REGEX)
tokenizer.build_vocab_from_csv(data_csv_file=data_file_path)
ids = tokenizer.text_to_ids("Zc")
        assert ','.join(map(str, ids)) == '1,11'
@pytest.mark.unit
def test_tokens_2_text(self):
tokenizer = RegExTokenizer(regex=DEFAULT_REGEX)
        text = tokenizer.tokens_to_text(['^', 'Z', 'c', '&'])
        assert ''.join(text) == 'Zc'
| NeMo-main | tests/collections/nlp/test_regex_tokenizer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import onnx
import pytest
import pytorch_lightning as pl
import torch
import wget
from omegaconf import DictConfig, OmegaConf
from nemo.collections import nlp as nemo_nlp
from nemo.collections.nlp.models import IntentSlotClassificationModel
from nemo.collections.nlp.modules.common import (
SequenceClassifier,
SequenceRegression,
SequenceTokenClassifier,
TokenClassifier,
)
def classifier_export(obj):
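    # Export the classifier module to ONNX in a temporary directory; the test passes if export completes without raising.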
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, obj.__class__.__name__ + '.onnx')
obj = obj.cuda()
obj.export(output=filename)
class TestExportableClassifiers:
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_token_classifier_export_to_onnx(self):
for num_layers in [1, 2, 4]:
classifier_export(TokenClassifier(hidden_size=256, num_layers=num_layers, num_classes=16))
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_bert_pretraining_export_to_onnx(self):
for num_layers in [1, 2, 4]:
classifier_export(TokenClassifier(hidden_size=256, num_layers=num_layers, num_classes=16))
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_sequence_token_classifier_export_to_onnx(self):
for num_layers in [1, 2, 4]:
classifier_export(
SequenceTokenClassifier(hidden_size=256, num_slots=8, num_intents=8, num_layers=num_layers)
)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_sequence_classifier_export_to_onnx(self):
for num_layers in [1, 2, 4]:
classifier_export(SequenceClassifier(hidden_size=256, num_classes=16, num_layers=num_layers))
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_sequence_regression_export_to_onnx(self):
for num_layers in [1, 2, 4]:
classifier_export(SequenceRegression(hidden_size=256, num_layers=num_layers))
def setup_method(self):
self.dict_config = DictConfig(
{
"trainer": {
"devices": 1,
"num_nodes": 1,
"max_epochs": 50,
"max_steps": -1,
"accumulate_grad_batches": 1,
"precision": 32,
"accelerator": "gpu",
"strategy": 'auto',
"log_every_n_steps": 1,
"val_check_interval": 1,
"enable_checkpointing": False,
"logger": False,
},
"model": {
"nemo_path": None,
"data_dir": "???",
"class_labels": {"intent_labels_file": "intent_labels.csv", "slot_labels_file": "slot_labels.csv"},
"class_balancing": None,
"intent_loss_weight": 0.6,
"pad_label": -1,
"ignore_extra_tokens": False,
"ignore_start_end": True,
"train_ds": {
"prefix": "train",
"batch_size": 32,
"shuffle": True,
"num_samples": -1,
"num_workers": 2,
"drop_last": False,
"pin_memory": False,
},
"validation_ds": {
"prefix": "test",
"batch_size": 32,
"shuffle": False,
"num_samples": -1,
"num_workers": 2,
"drop_last": False,
"pin_memory": False,
},
"test_ds": {
"prefix": "test",
"batch_size": 32,
"shuffle": False,
"num_samples": -1,
"num_workers": 2,
"drop_last": False,
"pin_memory": False,
},
"tokenizer": {
"tokenizer_name": "bert-base-uncased",
"vocab_file": None,
"tokenizer_model": None,
"special_tokens": None,
},
"language_model": {
"max_seq_length": 50,
"pretrained_model_name": "bert-base-uncased",
"lm_checkpoint": None,
"config_file": None,
"config": None,
},
"head": {"num_output_layers": 2, "fc_dropout": 0.1},
"optim": {
"name": "adam",
"lr": 0.00002,
"args": {"name": "auto", "params": {"weight_decay": 0.01}},
"sched": {
"name": "WarmupAnnealing",
"iters_per_batch": None,
"max_steps": -1,
"monitor": "val_loss",
"reduce_on_plateau": False,
"args": {
"name": "auto",
"params": {"warmup_steps": None, "warmup_ratio": 0.1, "last_epoch": -1},
},
},
},
},
"exp_manager": {
"exp_dir": None,
"name": "IntentSlot",
"create_tensorboard_logger": False,
"create_checkpoint_callback": False,
},
"hydra": {"run": {"dir": "."}, "job_logging": {"root": {"handlers": None}}},
}
)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_IntentSlotClassificationModel_export_to_onnx(self, dummy_data):
with tempfile.TemporaryDirectory() as tmpdir:
self.setup_method()
config = self.dict_config
config.model.data_dir = dummy_data
trainer = pl.Trainer(**config.trainer)
model = IntentSlotClassificationModel(config.model, trainer=trainer)
filename = os.path.join(tmpdir, 'isc.onnx')
model.export(output=filename, check_trace=True)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'input_ids'
assert onnx_model.graph.input[1].name == 'attention_mask'
assert onnx_model.graph.input[2].name == 'token_type_ids'
assert onnx_model.graph.output[0].name == 'intent_logits'
assert onnx_model.graph.output[1].name == 'slot_logits'
@pytest.mark.with_downloads()
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_TokenClassificationModel_export_to_onnx(self):
model = nemo_nlp.models.TokenClassificationModel.from_pretrained(model_name="ner_en_bert")
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'ner.onnx')
model.export(output=filename, check_trace=True)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'input_ids'
assert onnx_model.graph.input[1].name == 'attention_mask'
assert onnx_model.graph.input[2].name == 'token_type_ids'
assert onnx_model.graph.output[0].name == 'logits'
@pytest.mark.with_downloads()
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_PunctuationCapitalizationModel_export_to_onnx(self):
model = nemo_nlp.models.PunctuationCapitalizationModel.from_pretrained(model_name="punctuation_en_distilbert")
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'puncap.onnx')
model.export(output=filename, check_trace=True)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'input_ids'
assert onnx_model.graph.input[1].name == 'attention_mask'
assert onnx_model.graph.output[0].name == 'punct_logits'
assert onnx_model.graph.output[1].name == 'capit_logits'
@pytest.mark.with_downloads()
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_QAModel_export_to_onnx(self):
model = nemo_nlp.models.QAModel.from_pretrained(model_name="qa_squadv2.0_bertbase")
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'qa.onnx')
model.export(output=filename, check_trace=True)
onnx_model = onnx.load(filename)
assert onnx_model.graph.input[0].name == 'input_ids'
assert onnx_model.graph.input[1].name == 'attention_mask'
assert onnx_model.graph.input[2].name == 'token_type_ids'
assert onnx_model.graph.output[0].name == 'logits'
@pytest.fixture()
def dummy_data(test_data_dir):
return os.path.join(test_data_dir, 'nlp', 'dummy_data')
| NeMo-main | tests/collections/nlp/test_nlp_exportables.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from einops import rearrange
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.modules.common.megatron.attention import ParallelChunkedCrossAttention
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.modules.common.megatron.position_embedding import RotaryEmbedding
from nemo.collections.nlp.modules.common.megatron.retrieval_token_level_encoder_decoder import (
MegatronRetrievalTokenLevelEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.retrieval_transformer import (
MegatronRetrievalTransformerDecoderModule,
MegatronRetrievalTransformerEncoderModule,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
build_attention_mask_3d,
init_method_normal,
scaled_init_method_normal,
)
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig
from megatron.core.enums import ModelType
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
@pytest.fixture()
def model_parallel_config():
config = ModelParallelConfig()
return config
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(not HAVE_APEX or not HAVE_MEGATRON_CORE, reason="apex or megatron-core is not installed")
class TestRetrievalModule:
@classmethod
def setup_class(cls):
if not torch.cuda.is_available():
return
GPUS = 1
TP_SIZE = GPUS
PP_SIZE = 1
MB_SIZE = 4
GB_SIZE = 8
SEED = 1234
trainer = Trainer(strategy=NLPDDPStrategy(), devices=GPUS, accelerator='gpu', num_nodes=1, logger=None,)
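        # Initialize NeMo's Megatron model-parallel state (TP=1, PP=1) on a single GPU so the
        # retrieval transformer modules in the tests below can be constructed.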
initialize_model_parallel_for_nemo(
world_size=trainer.world_size,
global_rank=trainer.global_rank,
local_rank=trainer.local_rank,
tensor_model_parallel_size=TP_SIZE,
pipeline_model_parallel_size=PP_SIZE,
micro_batch_size=MB_SIZE,
global_batch_size=GB_SIZE,
seed=SEED,
apex_transformer_log_level=30,
)
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
torch.distributed.barrier()
@pytest.mark.unit
def test_cross_attn(self, model_parallel_config):
num_layers = 1
init_method_std = 0.02
batch = 2
neighbors = 2
# rotary pos emb dim
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
context_chunk_size = 2 * text_chunk_size
input_length = chunks * text_chunk_size
vocab_size = 20000
rot_dim = dim // num_attention_heads
rotary_pos_emb = RotaryEmbedding(rot_dim).cuda().half()
        hidden = torch.randint(0, vocab_size, (input_length, batch)).cuda()  # (seq, batch)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(input_length, batch, dim).cuda().half() # (seq, batch, dim)
retrieved = torch.randint(0, vocab_size, (chunks, neighbors, context_chunk_size, batch)).cuda()
# retrieved tokens - (num chunks, num retrieved neighbors, retrieved chunk with continuation, batch)
# context attention mask [b, np, sq, sk]
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(chunks, neighbors, context_chunk_size, batch, dim).cuda().half()
# retrieved tokens - (num chunks, num retrieved neighbors, retrieved chunk with continuation, batch, hidden)
# need to add extra chunk size, since it will be shifted
cross_attn_q_pos_emb = rotary_pos_emb(text_chunk_size + text_chunk_size - 1, offset=0)
cross_attn_k_pos_emb = rotary_pos_emb(context_chunk_size)
cross_attn_pos_emb = (cross_attn_q_pos_emb, cross_attn_k_pos_emb)
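        # Fold the chunk dimension into the batch dimension for both the decoder tokens and the
        # retrieved neighbors, then build a padding-style 3D mask between each chunk and its context.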
dec_attn_mask = rearrange(hidden_mask, '(k n) b -> (b k) n', k=chunks)
context_attn_mask = rearrange(context_mask, 'k r n b -> (b k) (r n)')
enc_dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=context_attn_mask, attn_mask_type=AttnMaskType.padding,
)
enc_dec_attn_mask_3d = enc_dec_attn_mask_3d[:, None, :, :]
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
cross_attn = (
ParallelChunkedCrossAttention(
config=model_parallel_config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
layer_number=1,
num_attention_heads=num_attention_heads,
hidden_size=dim,
precision=16,
chunk_size=text_chunk_size,
)
.cuda()
.half()
)
out, bias = cross_attn(
hidden_emb, enc_dec_attn_mask_3d, encoder_output=retrieved_emb, rotary_pos_emb=cross_attn_pos_emb
)
assert out.shape == torch.Size([input_length, batch, dim])
assert bias.shape == torch.Size([dim])
@pytest.mark.unit
def test_retrieval_encoder(self, model_parallel_config):
init_method_std = 0.02
batch = 2
neighbors = 2
# rotary pos emb dim
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
        hidden = torch.randint(0, vocab_size, (batch, input_length)).cuda()  # (batch, seq)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(batch, input_length, dim).cuda().half() # (batch, seq, dim)
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(batch, chunks, neighbors, 2 * text_chunk_size, dim).cuda().half()
layer_type = [LayerType.encoder, LayerType.retrieval_encoder, LayerType.encoder, LayerType.retrieval_encoder]
num_layers = len(layer_type)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
encoder = (
MegatronRetrievalTransformerEncoderModule(
config=model_parallel_config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=dim,
ffn_hidden_size=dim * 4,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
precision=16,
chunk_size=text_chunk_size,
layer_type=layer_type,
)
.cuda()
.half()
)
out = encoder(retrieved_emb, context_mask, context_attn_mask=hidden_mask, encoder_output=hidden_emb)
assert out.shape == torch.Size([batch, chunks, neighbors, 2 * text_chunk_size, dim])
@pytest.mark.unit
def test_retrieval_decoder(self, model_parallel_config):
init_method_std = 0.02
# rotary pos emb dim
batch = 2
neighbors = 2
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
# rot_dim = dim // num_attention_heads
# rotary_pos_emb = RotaryEmbedding(rot_dim).cuda().half()
        hidden = torch.randint(0, vocab_size, (batch, input_length)).cuda()  # (batch, seq)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(batch, input_length, dim).cuda().half() # (batch, seq, dim)
# context_chunk_size = 128
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
# retrieved tokens - (batch, num chunks, num retrieved neighbors, retrieved chunk with continuation)
# context attention mask [b, np, sq, sk]
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(batch, chunks, neighbors, 2 * text_chunk_size, dim).cuda().half()
# retrieved tokens - (batch, num chunks, num retrieved neighbors, retrieved chunk with continuation, hidden)
layer_type = [LayerType.encoder, LayerType.retrieval_decoder, LayerType.encoder, LayerType.retrieval_decoder]
num_layers = len(layer_type)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
decoder = (
MegatronRetrievalTransformerDecoderModule(
config=model_parallel_config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=dim,
ffn_hidden_size=dim * 4,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
precision=16,
chunk_size=text_chunk_size,
layer_type=layer_type,
)
.cuda()
.half()
)
out = decoder(hidden_emb, hidden_mask, retrieved_attn_mask=context_mask, retrieved_emb=retrieved_emb)
assert out.shape == torch.Size([input_length, batch, dim])
@pytest.mark.unit
def test_encoder_decoder_module(self, model_parallel_config):
# rotary pos emb dim
batch = 2
neighbors = 2
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
enc_num_layers = 4
dec_num_layers = 6
enc_cross_attention = [3] # layer numbers for cross attention
dec_cross_attention = [3, 5] # layer numbers for cross attention
        all_tokens = torch.randint(0, vocab_size, (batch, input_length + 1)).cuda()  # (batch, seq + 1)
hidden = all_tokens[:, :-1]
labels = all_tokens[:, 1:]
hidden_mask = (hidden != pad_id).cuda()
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
class FakeTokenizer:
eos_id = vocab_size - 2
tokenizer = FakeTokenizer()
encoder_decoder = (
MegatronRetrievalTokenLevelEncoderDecoderModule(
config=model_parallel_config,
vocab_size=vocab_size,
hidden_size=dim,
max_position_embeddings=input_length,
num_attention_heads=num_attention_heads,
ffn_hidden_size=dim * 4,
precision=16,
chunk_size=text_chunk_size,
enc_num_layers=enc_num_layers,
dec_num_layers=dec_num_layers,
enc_cross_attention=enc_cross_attention,
dec_cross_attention=dec_cross_attention,
add_position_embedding=False,
tokenizer=tokenizer,
)
.cuda()
.half()
)
out = encoder_decoder(
hidden, hidden_mask, retrieved_ids=retrieved, retrieved_attn_mask=context_mask, labels=labels
)
assert out.shape == torch.Size([batch, input_length])
# verify the attention mask matrix is correct
all_tokens = torch.tensor([[1, 2, vocab_size - 2, 3, vocab_size - 1, vocab_size - 2, 3, 4, 5]]).cuda()
encoder_decoder = (
MegatronRetrievalTokenLevelEncoderDecoderModule(
config=model_parallel_config,
vocab_size=vocab_size,
hidden_size=dim,
max_position_embeddings=8,
num_attention_heads=num_attention_heads,
ffn_hidden_size=dim * 4,
precision=16,
chunk_size=4,
enc_num_layers=enc_num_layers,
dec_num_layers=dec_num_layers,
enc_cross_attention=enc_cross_attention,
dec_cross_attention=dec_cross_attention,
add_position_embedding=False,
tokenizer=tokenizer,
)
.cuda()
.half()
)
hidden = all_tokens[:, :-1]
labels = all_tokens[:, 1:]
hidden_mask = (hidden != pad_id).cuda()
retrieved = torch.randint(0, vocab_size, (1, 2, neighbors, 8)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
out = encoder_decoder(
hidden, hidden_mask, retrieved_ids=retrieved, retrieved_attn_mask=context_mask, labels=labels
)
mask3d = encoder_decoder.pre_decoder._calculate_dec_att_mask(
hidden_mask, torch.where(hidden == vocab_size - 2)
)
expected = torch.tensor(
[
[
[
[False, True, True, True, True, True, True, True],
[False, False, True, True, True, True, True, True],
[False, False, False, True, True, True, True, True],
[True, True, True, False, True, True, True, True],
[True, True, True, True, True, True, True, True],
[True, True, True, False, True, False, True, True],
[True, True, True, True, True, True, False, True],
[True, True, True, True, True, True, False, False],
]
]
]
).cuda()
assert (mask3d == expected).all()
| NeMo-main | tests/collections/nlp/test_retrieval_module.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import torch.nn.functional as F
from einops import rearrange
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.modules.common.megatron.attention import ParallelChunkedCrossAttention
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.megatron_init import initialize_model_parallel_for_nemo
from nemo.collections.nlp.modules.common.megatron.position_embedding import RotaryEmbedding
from nemo.collections.nlp.modules.common.megatron.retrieval_token_level_encoder_decoder import (
MegatronRetrievalTokenLevelEncoderDecoderModule,
)
from nemo.collections.nlp.modules.common.megatron.retrieval_transformer import (
MegatronRetrievalTransformerDecoderModule,
MegatronRetrievalTransformerEncoderModule,
)
from nemo.collections.nlp.modules.common.megatron.utils import (
build_attention_mask_3d,
init_method_normal,
scaled_init_method_normal,
)
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
try:
from apex.transformer.enums import AttnMaskType
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
HAVE_APEX = False
try:
from megatron.core import ModelParallelConfig
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
@pytest.fixture()
def model_parallel_config():
config = ModelParallelConfig()
return config
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(not HAVE_APEX or not HAVE_MEGATRON_CORE, reason="apex or megatron-core is not installed")
class TestRetrievalModuleInference:
@classmethod
def setup_class(cls):
if not torch.cuda.is_available():
return
GPUS = 1
TP_SIZE = GPUS
PP_SIZE = 1
MB_SIZE = 4
GB_SIZE = 8
SEED = 1234
trainer = Trainer(strategy=NLPDDPStrategy(), devices=GPUS, accelerator='gpu', num_nodes=1, logger=None,)
initialize_model_parallel_for_nemo(
world_size=trainer.world_size,
global_rank=trainer.global_rank,
local_rank=trainer.local_rank,
tensor_model_parallel_size=TP_SIZE,
pipeline_model_parallel_size=PP_SIZE,
micro_batch_size=MB_SIZE,
global_batch_size=GB_SIZE,
seed=SEED,
apex_transformer_log_level=30,
)
def dummy():
return
if trainer.strategy.launcher is not None:
trainer.strategy.launcher.launch(dummy, trainer=trainer)
trainer.strategy.setup_environment()
torch.distributed.barrier()
@pytest.mark.unit
def test_retrieval_encoder_inference(self, model_parallel_config):
init_method_std = 0.02
batch = 2
neighbors = 2
# rotary pos emb dim
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
        hidden = torch.randint(0, vocab_size, (batch, input_length)).cuda()  # (batch, seq)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(batch, input_length, dim).cuda().half() # (batch, seq, dim)
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(batch, chunks, neighbors, 2 * text_chunk_size, dim).cuda().half()
layer_type = [LayerType.encoder, LayerType.retrieval_encoder, LayerType.encoder, LayerType.retrieval_encoder]
num_layers = len(layer_type)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
encoder = (
MegatronRetrievalTransformerEncoderModule(
config=model_parallel_config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=dim,
ffn_hidden_size=dim * 4,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
precision=16,
chunk_size=text_chunk_size,
layer_type=layer_type,
hidden_dropout=0.0,
attention_dropout=0.0,
)
.cuda()
.half()
)
out_gt = encoder(retrieved_emb, context_mask, context_attn_mask=hidden_mask, encoder_output=hidden_emb)
assert out_gt.shape == torch.Size([batch, chunks, neighbors, 2 * text_chunk_size, dim])
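        # Incremental inference: the first call primes the key/value memory cache, later calls feed
        # one token (or one chunk) at a time, and outputs are checked against the full forward pass out_gt.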
out_1 = encoder(
None,
None,
context_attn_mask=hidden_mask[:, :62],
encoder_output=hidden_emb[:, :62, :],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert out_1 is None
out_1 = encoder(
None,
None,
context_attn_mask=hidden_mask[:, :63],
encoder_output=hidden_emb[:, 62:63],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert out_1 is None
out_2 = encoder(
retrieved_emb[:, :1],
context_mask[:, :1],
context_attn_mask=hidden_mask[:, :64],
encoder_output=hidden_emb[:, 63:64],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, :64]).abs().max().item() < 1e-5
assert (out_gt[:, 0,] - out_2[:, 0]).abs().max().item() < 1e-2
out_test = encoder(
retrieved_emb[:, :1],
context_mask[:, :1],
context_attn_mask=hidden_mask[:, :64],
encoder_output=hidden_emb[:, :64],
)
assert (out_gt[:, 0,] - out_test[:, 0]).abs().max().item() < 1e-2
assert (out_gt[:, 0,] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(64, 127):
out_3 = encoder(
retrieved_emb[:, :1],
context_mask[:, :1],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
i = 127
out_3 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, 64:128]).abs().max().item() < 1e-5
assert (out_gt[:, :2,] - out_3).abs().max().item() < 1e-2
# test inference
for i in range(128, 191):
out_4 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
i = 191
out_4 = encoder(
retrieved_emb[:, :3],
context_mask[:, :3],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, 128:192]).abs().max().item() < 1e-5
assert (out_gt[:, :3,] - out_4).abs().max().item() < 1e-2
out_2 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, :130],
encoder_output=hidden_emb[:, :130, :],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
for i in range(130, 191):
out_2 = encoder(
retrieved_emb[:, :2],
context_mask[:, :2],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
i = 191
out_4 = encoder(
retrieved_emb[:, :3],
context_mask[:, :3],
context_attn_mask=hidden_mask[:, : i + 1],
encoder_output=hidden_emb[:, i : i + 1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (encoder.encoder_output - hidden_emb[:, 128:192]).abs().max().item() < 1e-5
assert (out_gt[:, :3,] - out_4).abs().max().item() < 1e-2
@pytest.mark.unit
def test_cross_attn_inference(self, model_parallel_config):
num_layers = 1
init_method_std = 0.02
batch = 2
neighbors = 2
# rotary pos emb dim
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
context_chunk_size = 2 * text_chunk_size
input_length = chunks * text_chunk_size
vocab_size = 20000
rot_dim = dim // num_attention_heads
rotary_pos_emb = RotaryEmbedding(rot_dim).cuda().half()
        hidden = torch.randint(0, vocab_size, (input_length, batch)).cuda()  # (seq, batch)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(input_length, batch, dim).cuda().half() # (seq, batch, dim)
retrieved = torch.randint(0, vocab_size, (chunks, neighbors, context_chunk_size, batch)).cuda()
# retrieved tokens - (num chunks, num retrieved neighbors, retrieved chunk with continuation, batch)
# context attention mask [b, np, sq, sk]
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(chunks, neighbors, context_chunk_size, batch, dim).cuda().half()
# retrieved tokens - (num chunks, num retrieved neighbors, retrieved chunk with continuation, batch, hidden)
# need to add extra chunk size, since it will be shifted
cross_attn_q_pos_emb = rotary_pos_emb(text_chunk_size + text_chunk_size - 1, offset=-text_chunk_size + 1)
cross_attn_k_pos_emb = rotary_pos_emb(context_chunk_size)
cross_attn_pos_emb = (cross_attn_q_pos_emb, cross_attn_k_pos_emb)
def get_attn_mask_3d(hidden_mask, context_mask, chunks):
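            # Drop the first chunk_size - 1 positions (causal shift) and pad the tail with False so the
            # remaining length is a multiple of the chunk size, then fold chunks into the batch dimension
            # and build a padding mask against the retrieved neighbors.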
causal_padding = text_chunk_size - 1
reminder = (text_chunk_size - (hidden_mask.shape[0] + 1)) % text_chunk_size
hidden_mask = F.pad(hidden_mask, (0, 0, -causal_padding, reminder), value=False)
dec_attn_mask = rearrange(hidden_mask, '(k n) b -> (b k) n', k=chunks)
context_attn_mask = rearrange(context_mask, 'k r n b -> (b k) (r n)')
enc_dec_attn_mask_3d = build_attention_mask_3d(
source_mask=dec_attn_mask, target_mask=context_attn_mask, attn_mask_type=AttnMaskType.padding,
)
enc_dec_attn_mask_3d = enc_dec_attn_mask_3d[:, None, :, :]
return enc_dec_attn_mask_3d
enc_dec_attn_mask_3d = get_attn_mask_3d(hidden_mask, context_mask, chunks)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
cross_attn = (
ParallelChunkedCrossAttention(
config=model_parallel_config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
layer_number=1,
num_attention_heads=num_attention_heads,
hidden_size=dim,
precision=16,
chunk_size=text_chunk_size,
masked_softmax_fusion=False,
)
.cuda()
.half()
)
out, bias = cross_attn(
hidden_emb, enc_dec_attn_mask_3d, encoder_output=retrieved_emb, rotary_pos_emb=cross_attn_pos_emb
)
assert out.shape == torch.Size([input_length, batch, dim])
assert bias.shape == torch.Size([dim])
attn_mask_3d = None
out_1, b = cross_attn(
hidden_emb[:62],
attn_mask_3d,
encoder_output=None,
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out_1 - torch.zeros_like(hidden_emb[:62])).abs().max() == 0
out_1, b = cross_attn(
hidden_emb[62:63],
attn_mask_3d,
encoder_output=None,
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out_1 - torch.zeros_like(hidden_emb[62:63])).abs().max() == 0
attn_mask_3d = get_attn_mask_3d(hidden_mask[:64], context_mask[:1], 1)
out_2, b = cross_attn(
hidden_emb[63:64],
attn_mask_3d,
encoder_output=retrieved_emb[:1],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[63] - out_2[0]).abs().max().item() < 1e-2
for i in range(64, 127):
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:1], 1)
out_2, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:1],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
i = 127
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:2], 2)
out_3, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:2],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_3[0]).abs().max().item() < 1e-2
attn_mask_3d = get_attn_mask_3d(hidden_mask[:130], context_mask[:2], 2)
out_1, b = cross_attn(
hidden_emb[:130],
attn_mask_3d,
encoder_output=retrieved_emb[:2],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out[:130] - out_1[:130]).abs().max().item() < 1e-2
for i in range(130, 191):
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:2], 2)
out_2, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:2],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
i = 191
attn_mask_3d = get_attn_mask_3d(hidden_mask[: i + 1], context_mask[:3], 3)
out_4, b = cross_attn(
hidden_emb[i : i + 1],
attn_mask_3d,
encoder_output=retrieved_emb[:3],
rotary_pos_emb=cross_attn_pos_emb,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_4[0]).abs().max().item() < 1e-2
@pytest.mark.unit
def test_retrieval_decoder_inference(self, model_parallel_config):
init_method_std = 0.02
# rotary pos emb dim
batch = 2
neighbors = 2
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
# rot_dim = dim // num_attention_heads
# rotary_pos_emb = RotaryEmbedding(rot_dim).cuda().half()
        hidden = torch.randint(0, vocab_size, (batch, input_length)).cuda()  # (batch, seq)
hidden_mask = (hidden != pad_id).cuda()
hidden_emb = torch.rand(batch, input_length, dim).cuda().half() # (batch, seq, dim)
# context_chunk_size = 128
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
# retrieved tokens - (batch, num chunks, num retrieved neighbors, retrieved chunk with continuation)
# context attention mask [b, np, sq, sk]
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
retrieved_emb = torch.rand(batch, chunks, neighbors, 2 * text_chunk_size, dim).cuda().half()
# retrieved tokens - (batch, num chunks, num retrieved neighbors, retrieved chunk with continuation, hidden)
layer_type = [LayerType.encoder, LayerType.retrieval_decoder, LayerType.encoder, LayerType.retrieval_decoder]
num_layers = len(layer_type)
init_method = init_method_normal(init_method_std)
scaled_init_method = scaled_init_method_normal(init_method_std, num_layers)
decoder = (
MegatronRetrievalTransformerDecoderModule(
config=model_parallel_config,
init_method=init_method,
output_layer_init_method=scaled_init_method,
hidden_size=dim,
ffn_hidden_size=dim * 4,
num_layers=num_layers,
num_attention_heads=num_attention_heads,
precision=16,
chunk_size=text_chunk_size,
layer_type=layer_type,
hidden_dropout=0.0,
attention_dropout=0.0,
)
.cuda()
.half()
)
out = decoder(hidden_emb, hidden_mask, retrieved_attn_mask=context_mask, retrieved_emb=retrieved_emb)
assert out.shape == torch.Size([input_length, batch, dim])
out_1 = decoder(
hidden_emb[:, :62],
hidden_mask[:, :62],
retrieved_attn_mask=None,
retrieved_emb=None,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out[:62] - out_1[:62]).abs().max().item() < 1e-2
out_1 = decoder(
hidden_emb[:, 62:63],
hidden_mask[:, :63],
retrieved_attn_mask=None,
retrieved_emb=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[62] - out_1[0]).abs().max().item() < 1e-2
out_2 = decoder(
hidden_emb[:, 63:64],
hidden_mask[:, :64],
retrieved_attn_mask=context_mask[:, :1],
retrieved_emb=retrieved_emb[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[63] - out_2[0]).abs().max().item() < 1e-2
for i in range(64, 127):
out_2 = decoder(
hidden_emb[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_attn_mask=context_mask[:, :1],
retrieved_emb=retrieved_emb[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_2[0]).abs().max().item() < 1e-2
for i in range(127, 191):
out_3 = decoder(
hidden_emb[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_attn_mask=context_mask[:, :2],
retrieved_emb=retrieved_emb[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_3[0]).abs().max().item() < 1e-2
out_1 = decoder(
hidden_emb[:, :130],
hidden_mask[:, :130],
retrieved_attn_mask=context_mask[:, :2],
retrieved_emb=retrieved_emb[:, :2],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
)
assert (out[:130] - out_1[:130]).abs().max().item() < 1e-2
for i in range(130, 191):
out_3 = decoder(
hidden_emb[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_attn_mask=context_mask[:, :2],
retrieved_emb=retrieved_emb[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
)
assert (out[i] - out_3[0]).abs().max().item() < 1e-2
@pytest.mark.unit
def test_encoder_decoder_module_inference(self, model_parallel_config):
# rotary pos emb dim
batch = 2
neighbors = 2
dim = 128
pad_id = 19999
num_attention_heads = 8
chunks = 32
text_chunk_size = 64
input_length = chunks * text_chunk_size
vocab_size = 20000
enc_num_layers = 4
dec_num_layers = 6
enc_cross_attention = [3] # layer numbers for cross attention
dec_cross_attention = [3, 5] # layer numbers for cross attention
        all_tokens = torch.randint(0, vocab_size, (batch, input_length + 1)).cuda()  # (batch, seq + 1)
hidden = all_tokens[:, :-1]
labels = all_tokens[:, 1:]
hidden_mask = (hidden != pad_id).cuda()
retrieved = torch.randint(0, vocab_size, (batch, chunks, neighbors, 2 * text_chunk_size)).cuda()
pad_id = vocab_size - 1
context_mask = (retrieved != pad_id).cuda()
class FakeTokenizer:
eos_id = vocab_size - 2
tokenizer = FakeTokenizer()
encoder_decoder = (
MegatronRetrievalTokenLevelEncoderDecoderModule(
config=model_parallel_config,
vocab_size=vocab_size,
hidden_size=dim,
max_position_embeddings=input_length,
num_attention_heads=num_attention_heads,
ffn_hidden_size=dim * 4,
precision=16,
chunk_size=text_chunk_size,
enc_num_layers=enc_num_layers,
dec_num_layers=dec_num_layers,
enc_cross_attention=enc_cross_attention,
dec_cross_attention=dec_cross_attention,
add_position_embedding=False,
tokenizer=tokenizer,
hidden_dropout=0.0,
attention_dropout=0.0,
)
.cuda()
.half()
)
out = encoder_decoder(hidden, hidden_mask, retrieved_ids=retrieved, retrieved_attn_mask=context_mask)
out_1 = encoder_decoder(
hidden[:, :62],
hidden_mask[:, :62],
retrieved_attn_mask=None,
retrieved_ids=None,
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, :62] - out_1[:, :62]).abs().max().item() < 1e-2
out_1 = encoder_decoder(
hidden[:, 62:63],
hidden_mask[:, :63],
retrieved_attn_mask=None,
retrieved_ids=None,
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, 62] - out_1[:, 0]).abs().max().item() < 1e-2
out_2 = encoder_decoder(
hidden[:, 63:64],
hidden_mask[:, :64],
retrieved_ids=retrieved[:, :1],
retrieved_attn_mask=context_mask[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, 63] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(64, 127):
out_2 = encoder_decoder(
hidden[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_ids=retrieved[:, :1],
retrieved_attn_mask=context_mask[:, :1],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, i] - out_2[:, 0]).abs().max().item() < 1e-2
for i in range(127, 191):
out_3 = encoder_decoder(
hidden[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_ids=retrieved[:, :2],
retrieved_attn_mask=context_mask[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, i] - out_3[:, 0]).abs().max().item() < 1e-2
out_1 = encoder_decoder(
hidden[:, :130],
hidden_mask[:, :130],
retrieved_ids=retrieved[:, :2],
retrieved_attn_mask=context_mask[:, :2],
set_inference_key_value_memory=True,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, :130] - out_1[:, :130]).abs().max().item() < 1e-2
for i in range(130, 191):
out_3 = encoder_decoder(
hidden[:, i : i + 1],
hidden_mask[:, : i + 1],
retrieved_ids=retrieved[:, :2],
retrieved_attn_mask=context_mask[:, :2],
set_inference_key_value_memory=False,
inference_max_sequence_len=input_length,
neighbors=neighbors,
)
assert (out[:, i] - out_3[:, 0]).abs().max().item() < 1e-2
| NeMo-main | tests/collections/nlp/test_retrieval_module_inference.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import MTEncDecModel
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import AAYNBaseConfig
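# Helper: exports an MTEncDecModel and checks that separate encoder-* and decoder-* artifacts
# are written next to the requested output file.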
def export_test(model, suffix):
with tempfile.TemporaryDirectory() as restore_folder:
filename = os.path.join(restore_folder, 'nmt' + suffix)
enc_filename = os.path.join(restore_folder, 'encoder-nmt' + suffix)
dec_filename = os.path.join(restore_folder, 'decoder-nmt' + suffix)
model.export(output=filename, check_trace=True)
assert os.path.exists(enc_filename)
assert os.path.exists(dec_filename)
def get_cfg():
cfg = AAYNBaseConfig()
cfg.encoder_tokenizer.tokenizer_name = 'yttm'
cfg.encoder_tokenizer.tokenizer_model = 'tests/.data/yttm.4096.en-de.model'
cfg.decoder_tokenizer.tokenizer_name = 'yttm'
cfg.decoder_tokenizer.tokenizer_model = 'tests/.data/yttm.4096.en-de.model'
cfg.train_ds = None
cfg.validation_ds = None
cfg.test_ds = None
return cfg
class TestMTEncDecModel:
@pytest.mark.unit
def test_creation_saving_restoring(self):
model = MTEncDecModel(cfg=get_cfg())
assert isinstance(model, MTEncDecModel)
# Create a new temporary directory
with tempfile.TemporaryDirectory() as restore_folder:
with tempfile.TemporaryDirectory() as save_folder:
save_folder_path = save_folder
# Where model will be saved
model_save_path = os.path.join(save_folder, f"{model.__class__.__name__}.nemo")
model.save_to(save_path=model_save_path)
# Where model will be restored from
model_restore_path = os.path.join(restore_folder, f"{model.__class__.__name__}.nemo")
shutil.copy(model_save_path, model_restore_path)
# at this point save_folder should not exist
assert save_folder_path is not None and not os.path.exists(save_folder_path)
assert not os.path.exists(model_save_path)
assert os.path.exists(model_restore_path)
# attempt to restore
model_copy = model.__class__.restore_from(restore_path=model_restore_path)
assert model.num_weights == model_copy.num_weights
@pytest.mark.unit
def test_no_artifact_name_collision(self):
model = MTEncDecModel(cfg=get_cfg())
assert isinstance(model, MTEncDecModel)
        with tempfile.TemporaryDirectory() as tmpdir1:
            nemo_file = os.path.join(tmpdir1, "nmt_model.nemo")
            model.save_to(nemo_file)
            with tempfile.TemporaryDirectory() as tmpdir:
                model._save_restore_connector._unpack_nemo_file(path2file=nemo_file, out_folder=tmpdir)
conf = OmegaConf.load(os.path.join(tmpdir, "model_config.yaml"))
# Make sure names now differ in saved config
assert conf.encoder_tokenizer.tokenizer_model != conf.decoder_tokenizer.tokenizer_model
# Make sure names in config start with "nemo:" prefix
assert conf.encoder_tokenizer.tokenizer_model.startswith("nemo:")
assert conf.decoder_tokenizer.tokenizer_model.startswith("nemo:")
# Check if both tokenizers were included
assert os.path.exists(os.path.join(tmpdir, conf.encoder_tokenizer.tokenizer_model[5:]))
assert os.path.exists(os.path.join(tmpdir, conf.decoder_tokenizer.tokenizer_model[5:]))
@pytest.mark.unit
def test_train_eval_loss(self):
cfg = get_cfg()
cfg.label_smoothing = 0.5
model = MTEncDecModel(cfg=cfg)
assert isinstance(model, MTEncDecModel)
batch_size = 10
time = 32
vocab_size = 32000
torch.manual_seed(42)
tgt_ids = torch.LongTensor(batch_size, time).random_(1, model.decoder_tokenizer.vocab_size)
logits = torch.FloatTensor(batch_size, time, vocab_size).random_(-1, 1)
log_probs = torch.nn.functional.log_softmax(logits, dim=-1)
train_loss = model.loss_fn(log_probs=log_probs, labels=tgt_ids)
eval_loss = model.eval_loss_fn(log_probs=log_probs, labels=tgt_ids)
assert not torch.allclose(train_loss, eval_loss) # , (train_loss, eval_loss)
cfg.label_smoothing = 0
model = MTEncDecModel(cfg=cfg)
# Train loss == val loss when label smoothing = 0
train_loss = model.loss_fn(log_probs=log_probs, labels=tgt_ids)
eval_loss = model.eval_loss_fn(log_probs=log_probs, labels=tgt_ids)
assert torch.allclose(train_loss, eval_loss)
@pytest.mark.skipif(not os.path.exists('/home/TestData/nlp'), reason='Not a Jenkins machine')
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_gpu_export_ts(self):
model = MTEncDecModel(cfg=get_cfg()).cuda()
assert isinstance(model, MTEncDecModel)
export_test(model, ".ts")
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_gpu_export_onnx(self):
model = MTEncDecModel(cfg=get_cfg()).cuda()
assert isinstance(model, MTEncDecModel)
export_test(model, ".onnx")
if __name__ == "__main__":
t = TestMTEncDecModel()
# t.test_gpu_export_ts()
t.test_train_eval_loss()
| NeMo-main | tests/collections/nlp/test_nmt_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import pytest
import torch
from nemo.collections.nlp.modules.common.megatron.layer_type import LayerType
from nemo.collections.nlp.modules.common.megatron.position_embedding import (
ALiBiRelativePositionEmbedding,
KERPLERelativePositionEmbedding,
RotaryEmbedding,
SandwichRelativePositionEmbedding,
T5RelativePositionEmbedding,
XPOSPositionEmbedding,
)
from nemo.collections.nlp.modules.common.megatron.position_embedding.rotary_position_embedding import (
apply_rotary_pos_emb,
)
from nemo.collections.nlp.modules.common.megatron.utils import init_method_normal
@pytest.fixture()
def cfg():
cfg = {
'max_seq_len': 8,
'num_attention_heads': 2,
'layer_type': LayerType.encoder,
'hidden_size': 4,
'rpe_init_method_std': 0.02,
'rpe_num_buckets': 6,
'rpe_max_distance': 16,
}
return cfg
@pytest.mark.unit
def test_alibi(cfg):
# non-causal
PE_nc = ALiBiRelativePositionEmbedding(
bidirectional=True,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
max_seq_len=cfg['max_seq_len'],
)
# causal
PE_c = ALiBiRelativePositionEmbedding(
bidirectional=False,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
max_seq_len=cfg['max_seq_len'],
)
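    # Non-causal ALiBi produces a symmetric (q, k) bias matrix; the causal variant only materializes
    # the last query row of that matrix.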
q_len = k_len = random.randint(1, cfg['max_seq_len'] * 2)
bias_nc = PE_nc(q_len, k_len)
assert bias_nc.shape == (1, cfg['num_attention_heads'], q_len, k_len)
assert torch.equal(bias_nc, bias_nc.transpose(2, 3))
bias_c = PE_c(q_len, k_len)
assert bias_c.shape == (1, cfg['num_attention_heads'], 1, k_len)
assert torch.equal(bias_c, bias_nc[:, :, -1:, :])
@pytest.mark.unit
def test_sandwich(cfg):
# non-causal
PE_nc = SandwichRelativePositionEmbedding(
bidirectional=True,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
max_seq_len=cfg['max_seq_len'],
hidden_size=cfg['hidden_size'],
)
# causal
PE_c = SandwichRelativePositionEmbedding(
bidirectional=False,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
max_seq_len=cfg['max_seq_len'],
hidden_size=cfg['hidden_size'],
)
q_len = k_len = random.randint(1, cfg['max_seq_len'] * 2)
bias_nc = PE_nc(q_len, k_len)
assert bias_nc.shape == (1, cfg['num_attention_heads'], q_len, k_len)
assert torch.equal(bias_nc, bias_nc.transpose(2, 3))
bias_c = PE_c(q_len, k_len)
assert bias_c.shape == (1, cfg['num_attention_heads'], q_len, k_len)
assert torch.all(torch.triu(bias_c, diagonal=0) == 0)
@pytest.mark.unit
def test_kerple(cfg):
# non-causal
PE_nc = KERPLERelativePositionEmbedding(
bidirectional=True,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
max_seq_len=cfg['max_seq_len'],
)
# causal
PE_c = KERPLERelativePositionEmbedding(
bidirectional=False,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
max_seq_len=cfg['max_seq_len'],
)
q_len = k_len = random.randint(1, cfg['max_seq_len'] * 2)
bias_nc = PE_nc(q_len, k_len)
assert bias_nc.shape == (1, cfg['num_attention_heads'], q_len, k_len)
assert torch.equal(bias_nc, bias_nc.transpose(2, 3))
bias_c = PE_c(q_len, k_len)
assert bias_c.shape == (1, cfg['num_attention_heads'], q_len, k_len)
assert torch.all(torch.triu(bias_c, diagonal=0) == 0)
@pytest.mark.unit
def test_t5relative(cfg):
# non-causal
PE_nc = T5RelativePositionEmbedding(
bidirectional=True,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
init_method=init_method_normal(cfg['rpe_init_method_std']),
relative_position_num_buckets=cfg['rpe_num_buckets'],
relative_position_max_distance=cfg['rpe_max_distance'],
)
# causal
PE_c = T5RelativePositionEmbedding(
bidirectional=False,
num_attention_heads=cfg['num_attention_heads'],
layer_type=cfg['layer_type'],
init_method=init_method_normal(cfg['rpe_init_method_std']),
relative_position_num_buckets=cfg['rpe_num_buckets'],
relative_position_max_distance=cfg['rpe_max_distance'],
)
q_len = k_len = random.randint(1, cfg['max_seq_len'] * 2)
bias_nc = PE_nc(q_len, k_len)
assert bias_nc.shape == (1, cfg['num_attention_heads'], q_len, k_len)
bias_c = PE_c(q_len, k_len)
assert bias_c.shape == (1, cfg['num_attention_heads'], q_len, k_len)
    # triu keeps one distinct bias value per head in the causal upper triangle, plus the zeros
    # masked out below the diagonal (which only appear when q_len > 1)
    expected_unique = cfg['num_attention_heads'] + 1 if q_len > 1 else cfg['num_attention_heads']
    assert len(torch.triu(bias_c, diagonal=0).unique()) == expected_unique
@pytest.mark.unit
def test_rotary(cfg):
PE = RotaryEmbedding(dim=cfg['hidden_size'])
rotary_embedding = PE(cfg['max_seq_len'])
x = torch.rand(cfg['max_seq_len'], 1, cfg['num_attention_heads'], cfg['hidden_size'])
x_rotary = apply_rotary_pos_emb(x, rotary_embedding)
assert x_rotary.shape == x.shape
hd = cfg['hidden_size'] // 2
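    # Manual reference: rotary embedding rotates each (first-half, second-half) feature pair by the
    # cached angles, i.e. [x1*cos(t) - x2*sin(t), x1*sin(t) + x2*cos(t)].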
x_rotary_test = torch.cat(
(
x[..., :hd] * rotary_embedding[..., :hd].cos() + x[..., hd:] * rotary_embedding[..., hd:].sin() * -1,
x[..., :hd] * rotary_embedding[..., :hd].sin() + x[..., hd:] * rotary_embedding[..., hd:].cos(),
),
dim=-1,
)
assert torch.equal(x_rotary, x_rotary_test)
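    # An embedding computed with an offset should reproduce the slice of the zero-offset embedding
    # starting at that position.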
offset = random.choice(range(1, cfg['max_seq_len']))
rotary_embedding_offset = PE(cfg['max_seq_len'], offset=offset)
x_rotary = apply_rotary_pos_emb(x[: offset + 1], rotary_embedding[: offset + 1])
x_rotary_offset = apply_rotary_pos_emb(x[offset : offset + 1], rotary_embedding_offset[:1])
assert torch.equal(x_rotary[-1], x_rotary_offset[0])
@pytest.mark.unit
def test_xpos(cfg):
PE = XPOSPositionEmbedding(head_dim=cfg['hidden_size'])
x = torch.rand(cfg['max_seq_len'], 1, cfg['num_attention_heads'], cfg['hidden_size'])
x_rotary = PE(x)
assert x_rotary.shape == x.shape
offset = random.choice(range(1, cfg['max_seq_len']))
x_rotary = PE(x[: offset + 1])
x_rotary_offset = PE(x[offset : offset + 1], offset=offset)
assert torch.equal(x_rotary[-1], x_rotary_offset[0])
| NeMo-main | tests/collections/nlp/test_position_embedding.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import wget
from omegaconf import OmegaConf
from nemo.collections.nlp.models import EntityLinkingModel
def get_cfg():
language_model = OmegaConf.create(
{"pretrained_model_name": "bert-base-uncased", "config_file": None, "config": None, "lm_checkpoint": None}
)
tokenizer = OmegaConf.create(
{"tokenizer_name": "bert-base-uncased", "vocab_file": None, "tokenizer_model": None, "do_lower_case": True}
)
model = OmegaConf.create(
{
"nemo_path": "sap_entity_linking.nemo",
"max_seq_length": 128,
"language_model": language_model,
"tokenizer": tokenizer,
"train_ds": None,
"validation_ds": None,
}
)
cfg = OmegaConf.create({"model": model})
return cfg
class TestEntityLinkingModel:
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_creation_saving_restoring(self):
# Create a new temporary directory
with tempfile.TemporaryDirectory() as restore_dir:
with tempfile.TemporaryDirectory() as save_dir:
model = EntityLinkingModel(cfg=get_cfg().model)
assert isinstance(model, EntityLinkingModel)
save_dir_path = save_dir
# Where model will be saved
model_save_path = os.path.join(save_dir, f"{model.__class__.__name__}.nemo")
model.save_to(save_path=model_save_path)
# Where model will be restored from
model_restore_path = os.path.join(restore_dir, f"{model.__class__.__name__}.nemo")
shutil.copy(model_save_path, model_restore_path)
# at this point save_dir should not exist
assert save_dir_path is not None and not os.path.exists(save_dir_path)
assert not os.path.exists(model_save_path)
assert os.path.exists(model_restore_path)
# attempt to restore
model_copy = model.__class__.restore_from(restore_path=model_restore_path)
assert model.num_weights == model_copy.num_weights
if __name__ == "__main__":
t = TestEntityLinkingModel()
t.test_creation_saving_restoring()
| NeMo-main | tests/collections/nlp/test_entity_linking_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import apex
apex_available = True
except Exception:
apex_available = False
import os
import tempfile
import onnx
import pytest
import torch
from omegaconf import OmegaConf
import nemo.collections.nlp as nemo_nlp
from nemo.core.classes import typecheck
def get_pretrained_bert_345m_uncased_model():
model_name = "megatron-bert-345m-uncased"
config = {"language_model": {"pretrained_model_name": model_name}, "tokenizer": {}}
omega_conf = OmegaConf.create(config)
model = nemo_nlp.modules.get_lm_model(cfg=omega_conf)
if torch.cuda.is_available():
model = model.cuda()
return model
class TestMegatron:
@pytest.mark.skip("This test was written for megatron-lm")
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_list_pretrained_models(self):
pretrained_lm_models = nemo_nlp.modules.get_pretrained_lm_models_list()
assert len(pretrained_lm_models) > 0
@pytest.mark.with_downloads()
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
@pytest.mark.skip("Only one Megatron model is allowed")
def test_get_model(self):
model = get_pretrained_bert_345m_uncased_model()
assert isinstance(model, nemo_nlp.modules.MegatronBertEncoder)
typecheck.set_typecheck_enabled(enabled=False)
inp = model.input_example()
out = model.forward(*inp)
typecheck.set_typecheck_enabled(enabled=True)
@pytest.mark.skipif(not os.path.exists('/home/TestData/nlp'), reason='Not a Jenkins machine')
@pytest.mark.with_downloads()
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
@pytest.mark.skip("Megatron-LM BERT support deprecated. Supported in NeMo < 1.5")
def test_onnx_export(self):
model = get_pretrained_bert_345m_uncased_model()
assert model
with tempfile.TemporaryDirectory() as tmpdir:
# Generate filename in the temporary directory.
# Test export.
            model.export(os.path.join(tmpdir, "megatron.onnx"))
if __name__ == "__main__":
t = TestMegatron()
t.test_onnx_export()
| NeMo-main | tests/collections/nlp/test_megatron.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from transformers import AutoTokenizer
from nemo.collections.nlp.data.spellchecking_asr_customization.bert_example import BertExampleBuilder
from nemo.collections.nlp.data.spellchecking_asr_customization.utils import (
apply_replacements_to_text,
substitute_replacements_in_text,
)
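# Each replacement is a (start_offset, end_offset, replacement_text, probability) span over the input text.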
@pytest.mark.unit
def test_substitute_replacements_in_text():
text = "we began the further diversification of our revenue base with the protterra supply agreement and the navastar joint development agreement"
replacements = [(66, 75, 'pro-terra', 0.99986), (101, 109, 'navistar', 0.996)]
gold_text = "we began the further diversification of our revenue base with the pro-terra supply agreement and the navistar joint development agreement"
corrected_text = substitute_replacements_in_text(text, replacements, replace_hyphen_to_space=False)
assert corrected_text == gold_text
gold_text_no_hyphen = "we began the further diversification of our revenue base with the pro terra supply agreement and the navistar joint development agreement"
corrected_text = substitute_replacements_in_text(text, replacements, replace_hyphen_to_space=True)
assert corrected_text == gold_text_no_hyphen
@pytest.mark.unit
def test_apply_replacements_to_text():
    # apply_replacements_to_text defaults: min_prob=0.5, dp_data=None, min_dp_score_per_symbol=-99.9
# test more than one fragment to replace, test multiple same replacements
text = "we began the further diversification of our revenue base with the protterra supply agreement and the navastar joint development agreement"
replacements = [
(66, 75, 'proterra', 0.99986),
(66, 75, 'proterra', 0.9956),
(101, 109, 'navistar', 0.93),
(101, 109, 'navistar', 0.91),
(101, 109, 'navistar', 0.92),
]
gold_text = "we began the further diversification of our revenue base with the proterra supply agreement and the navistar joint development agreement"
corrected_text = apply_replacements_to_text(
text, replacements, min_prob=0.5, replace_hyphen_to_space=False, dp_data=None
)
assert corrected_text == gold_text
# test that min_prob works
gold_text = "we began the further diversification of our revenue base with the proterra supply agreement and the navastar joint development agreement"
corrected_text = apply_replacements_to_text(
text, replacements, min_prob=0.95, replace_hyphen_to_space=False, dp_data=None
)
assert corrected_text == gold_text
@pytest.fixture()
def bert_example_builder():
tokenizer = AutoTokenizer.from_pretrained("huawei-noah/TinyBERT_General_6L_768D")
label_map = {"0": 0, "1": 1, "2": 2, "3": 3, "4": 4, "5": 5, "6": 6, "7": 7, "8": 8, "9": 9, "10": 10}
semiotic_classes = {"PLAIN": 0, "CUSTOM": 1}
max_seq_len = 256
builder = BertExampleBuilder(label_map, semiotic_classes, tokenizer, max_seq_len)
return builder
@pytest.mark.skip("Doesn't work download when testing on github, for unknown reason")
@pytest.mark.with_downloads
@pytest.mark.unit
def test_creation(bert_example_builder):
assert bert_example_builder._tokenizer is not None
@pytest.mark.skip("Doesn't work download when testing on github, for unknown reason")
@pytest.mark.with_downloads
@pytest.mark.unit
def test_builder_get_spans(bert_example_builder):
span_info_parts = ["CUSTOM 37 41", "CUSTOM 47 52", "CUSTOM 42 46", "CUSTOM 0 7"]
gold_sorted_spans = [(1, 1, 8), (1, 38, 42), (1, 43, 47), (1, 48, 53)]
spans = bert_example_builder._get_spans(span_info_parts)
spans.sort()
assert spans == gold_sorted_spans
@pytest.mark.skip("Doesn't work download when testing on github, for unknown reason")
@pytest.mark.with_downloads
@pytest.mark.unit
def test_builder_get_fragment_indices(bert_example_builder):
hyp = "a b o u t _ o u r _ s h i p e r s _ b u t _ y o u _ k n o w"
targets = [1]
# a b o u t _ o u r _ s h i p e r s _ b u t _ y o u _ k n o w
# 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0
span_info_parts = ["CUSTOM 8 17"]
gold_sorted_fragment_indices = [(7, 18, 1), (11, 18, 1)]
fragment_indices = bert_example_builder._get_fragment_indices(hyp, targets, span_info_parts)
fragment_indices.sort()
assert fragment_indices == gold_sorted_fragment_indices
# a b o u t _ o u r _ s h i p e r s _ b u t _ y o u _ k n o w
# 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
span_info_parts = ["CUSTOM 10 16"]
gold_sorted_fragment_indices = [(11, 18, 1)]
fragment_indices = bert_example_builder._get_fragment_indices(hyp, targets, span_info_parts)
fragment_indices.sort()
assert fragment_indices == gold_sorted_fragment_indices
@pytest.mark.skip("Doesn't work download when testing on github, for unknown reason")
@pytest.mark.with_downloads
@pytest.mark.unit
def test_builder_get_input_features(bert_example_builder):
hyp = "a s t r o n o m e r s _ d i d i e _ s o m o n _ a n d _ t r i s t i a n _ g l l o"
ref = "d i d i e r _ s a u m o n;a s t r o n o m i e;t r i s t a n _ g u i l l o t;t r i s t e s s e;m o n a d e;c h r i s t i a n;a s t r o n o m e r;s o l o m o n;d i d i d i d i d i;m e r c y"
targets = [1, 3]
span_info_parts = ["CUSTOM 12 23", "CUSTOM 28 41"]
gold_tags = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
]
gold_input_ids = [
101,
1037,
1055,
1056,
1054,
1051,
1050,
1051,
1049,
1041,
1054,
1055,
1035,
1040,
1045,
1040,
1045,
1041,
1035,
1055,
1051,
1049,
1051,
1050,
1035,
1037,
1050,
1040,
1035,
1056,
1054,
1045,
1055,
1056,
1045,
1037,
1050,
1035,
1043,
1048,
1048,
1051,
102,
1040,
1045,
1040,
1045,
1041,
1054,
1035,
1055,
1037,
1057,
1049,
1051,
1050,
102,
1037,
1055,
1056,
1054,
1051,
1050,
1051,
1049,
1045,
1041,
102,
1056,
1054,
1045,
1055,
1056,
1037,
1050,
1035,
1043,
1057,
1045,
1048,
1048,
1051,
1056,
102,
1056,
1054,
1045,
1055,
1056,
1041,
1055,
1055,
1041,
102,
1049,
1051,
1050,
1037,
1040,
1041,
102,
1039,
1044,
1054,
1045,
1055,
1056,
1045,
1037,
1050,
102,
1037,
1055,
1056,
1054,
1051,
1050,
1051,
1049,
1041,
1054,
102,
1055,
1051,
1048,
1051,
1049,
1051,
1050,
102,
1040,
1045,
1040,
1045,
1040,
1045,
1040,
1045,
1040,
1045,
102,
1049,
1041,
1054,
1039,
1061,
102,
]
gold_input_mask = [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
]
gold_segment_ids = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
2,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
3,
4,
4,
4,
4,
4,
4,
4,
4,
4,
4,
5,
5,
5,
5,
5,
5,
5,
6,
6,
6,
6,
6,
6,
6,
6,
6,
6,
7,
7,
7,
7,
7,
7,
7,
7,
7,
7,
7,
8,
8,
8,
8,
8,
8,
8,
8,
9,
9,
9,
9,
9,
9,
9,
9,
9,
9,
9,
10,
10,
10,
10,
10,
10,
]
gold_labels_mask = [
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
gold_input_ids_for_subwords = [
101,
26357,
2106,
2666,
2061,
8202,
1998,
13012,
16643,
2319,
1043,
7174,
102,
2106,
3771,
7842,
2819,
2239,
102,
28625,
3630,
9856,
102,
9822,
26458,
7174,
2102,
102,
13012,
13473,
11393,
102,
13813,
3207,
102,
3017,
102,
15211,
102,
9168,
102,
2106,
28173,
4305,
4305,
102,
8673,
102,
]
gold_input_mask_for_subwords = [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
]
gold_segment_ids_for_subwords = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
2,
2,
2,
2,
3,
3,
3,
3,
3,
4,
4,
4,
4,
5,
5,
5,
6,
6,
7,
7,
8,
8,
9,
9,
9,
9,
9,
10,
10,
]
gold_character_pos_to_subword_pos = [
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
2,
2,
3,
3,
3,
4,
4,
5,
5,
5,
5,
6,
6,
6,
6,
7,
7,
7,
8,
8,
8,
9,
9,
9,
10,
11,
11,
11,
12,
13,
13,
13,
14,
14,
14,
14,
15,
15,
16,
16,
17,
17,
18,
19,
19,
19,
19,
19,
20,
20,
21,
21,
21,
22,
23,
23,
23,
23,
23,
23,
23,
23,
24,
24,
24,
25,
25,
25,
26,
27,
28,
28,
28,
29,
29,
29,
30,
30,
30,
31,
32,
32,
32,
32,
33,
33,
34,
35,
35,
35,
35,
35,
35,
35,
35,
35,
36,
37,
37,
37,
37,
37,
37,
37,
37,
37,
37,
38,
39,
39,
39,
39,
39,
39,
39,
40,
41,
41,
41,
42,
42,
42,
43,
43,
44,
44,
45,
46,
46,
46,
46,
46,
47,
]
tags = [0 for _ in hyp.split()]
for p, t in zip(span_info_parts, targets):
c, start, end = p.split(" ")
start = int(start)
end = int(end)
tags[start:end] = [t for i in range(end - start)]
# get input features for characters
(input_ids, input_mask, segment_ids, labels_mask, labels, _, _,) = bert_example_builder._get_input_features(
hyp=hyp, ref=ref, tags=tags
)
# get input features for words
hyp_with_words = hyp.replace(" ", "").replace("_", " ")
ref_with_words = ref.replace(" ", "").replace("_", " ")
(
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
_,
_,
_,
_,
) = bert_example_builder._get_input_features(hyp=hyp_with_words, ref=ref_with_words, tags=None)
character_pos_to_subword_pos = bert_example_builder._map_characters_to_subwords(input_ids, input_ids_for_subwords)
assert tags == gold_tags
assert input_ids == gold_input_ids
assert input_mask == gold_input_mask
assert segment_ids == gold_segment_ids
assert labels_mask == gold_labels_mask
assert input_ids_for_subwords == gold_input_ids_for_subwords
assert input_mask_for_subwords == gold_input_mask_for_subwords
assert segment_ids_for_subwords == gold_segment_ids_for_subwords
assert character_pos_to_subword_pos == gold_character_pos_to_subword_pos
| NeMo-main | tests/collections/nlp/test_spellchecking_asr_customization.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import torch
from numpy.testing import assert_array_equal
from omegaconf import OmegaConf
from scripts.nlp_language_modeling.build_knn_map_index import build_map, dedup
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
MMapRetrievalIndexedDatasetBuilder,
merge_knn_files,
)
from nemo.collections.nlp.data.language_modeling.megatron.retro_dataset import RETRODataset
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(not HAVE_MEGATRON_CORE, reason="megatron-core is not installed")
class TestRetrievalIndexFiles:
@classmethod
def setup_class(cls):
init_method = 'tcp://'
master_ip = 'localhost'
master_port = '6000'
init_method += master_ip + ':' + master_port
torch.distributed.init_process_group(backend='gloo', world_size=1, rank=0, init_method=init_method)
parallel_state.initialize_model_parallel(1, 1)
@pytest.mark.unit
def test_index(self):
chunk_size = 64
stride = 32
sizes = np.array([128, 256], dtype=np.int32)
dtype = np.int64
itemsize = dtype().itemsize
index_file = '/tmp/test.idx'
try:
with MMapRetrievalIndexedDataset.Index.writer(index_file, dtype, False) as index:
index.write(sizes, chunk_size, stride=stride)
index_load = MMapRetrievalIndexedDataset.Index(index_file)
assert index_load.chunk_size == chunk_size
assert not index_load.retrieval_db
assert np.array_equal(index_load.sizes, sizes)
assert np.array_equal(
index_load._chunk_id_start,
np.array([0, len(range(0, sizes[0] - chunk_size + 1, stride))], dtype=np.int64),
)
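            # Expected chunk byte addresses: one chunk start every `stride` tokens within each document;
            # the second document's addresses begin right after the first document's tokens.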
add1 = [i * itemsize for i in list(range(0, sizes[0] - chunk_size + 1, stride))]
start = max(add1) + chunk_size * itemsize
add2 = [i * itemsize + start for i in list(range(0, sizes[1] - chunk_size + 1, stride))]
addr = add1 + add2
assert np.array_equal(index_load._chunk_address, np.array(addr, dtype=np.int64))
assert np.array_equal(index_load._pointers, np.array([0, sizes[0] * itemsize], dtype=np.int64))
assert len(index_load._chunk_address) == index_load.num_chunks
finally:
os.remove(index_file)
chunk_size = 64
stride = 64
sizes = np.array([128, 256], dtype=np.int32)
dtype = np.int64
itemsize = dtype().itemsize
index_file = '/tmp/test.idx'
try:
with MMapRetrievalIndexedDataset.Index.writer(index_file, dtype, False) as index:
index.write(sizes, chunk_size, stride=stride)
index_load = MMapRetrievalIndexedDataset.Index(index_file)
assert index_load.chunk_size == chunk_size
assert not index_load.retrieval_db
assert np.array_equal(index_load.sizes, sizes)
assert np.array_equal(
index_load._chunk_id_start,
np.array([0, len(range(0, sizes[0] - chunk_size + 1, stride))], dtype=np.int64),
)
add1 = [i * itemsize for i in list(range(0, sizes[0] - chunk_size + 1, stride))]
start = max(add1) + chunk_size * itemsize
add2 = [i * itemsize + start for i in list(range(0, sizes[1] - chunk_size + 1, stride))]
addr = add1 + add2
assert np.array_equal(index_load._chunk_address, np.array(addr, dtype=np.int64))
assert np.array_equal(index_load._pointers, np.array([0, sizes[0] * itemsize], dtype=np.int64))
assert len(index_load._chunk_address) == index_load.num_chunks
finally:
os.remove(index_file)
@pytest.mark.unit
def test_create_data_index_stride32(self):
chunk_size = 64
pad_id = 0
stride = 32
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence1) % chunk_size)
gt1 = np.pad(sentence1, (0, padded_size), 'constant', constant_values=pad_id)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence2) % chunk_size)
gt2 = np.pad(sentence2, (0, padded_size), 'constant', constant_values=pad_id)
data_file = '/tmp/test'
index_file = data_file + '.idx'
bin_file = data_file + '.bin'
try:
builder = MMapRetrievalIndexedDatasetBuilder(
bin_file, chunk_size, pad_id, False, dtype=np.int64, stride=stride
)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(index_file)
# load the data
ds = MMapRetrievalIndexedDataset(data_file)
assert np.array_equal(ds.get(0), gt1)
assert np.array_equal(ds.get(1), gt2)
fetch1, fetch2 = ds[0:2]
assert np.array_equal(fetch1, gt1)
assert np.array_equal(fetch2, gt2)
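            # With stride=32, doc 0 (128 padded tokens) has chunk starts at tokens 0/32/64, so offset 64 is chunk id 2.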
chunk_id = ds.get_chunk_id(0, 64)
assert chunk_id == 2
assert ds.from_chunk_id_to_doc_id(0) == 0
assert ds.from_chunk_id_to_doc_id(1) == 0
assert ds.from_chunk_id_to_doc_id(2) == 0
with pytest.raises(ValueError):
ds.get_chunk_id(0, 128)
assert np.array_equal(ds.get_chunk(chunk_id), gt1[64 : 64 + 64])
chunk_id = ds.get_chunk_id(1, 0)
assert chunk_id == 3
assert ds.from_chunk_id_to_doc_id(3) == 1
assert ds.from_chunk_id_to_doc_id(4) == 1
assert ds.from_chunk_id_to_doc_id(5) == 1
assert ds.from_chunk_id_to_doc_id(6) == 1
assert ds.from_chunk_id_to_doc_id(7) == 1
assert ds.from_chunk_id_to_doc_id(8) == 1
assert ds.from_chunk_id_to_doc_id(9) == 1
with pytest.raises(ValueError):
ds.from_chunk_id_to_doc_id(10)
assert np.array_equal(ds.get_chunk(chunk_id), gt2[0:64])
assert np.array_equal(ds.get_chunk(chunk_id + 1), gt2[stride : stride + chunk_size])
assert np.array_equal(ds.get_chunk(chunk_id + 2), gt2[stride * 2 : stride * 2 + chunk_size])
assert np.array_equal(ds.get_chunk(chunk_id + 3), gt2[stride * 3 : stride * 3 + chunk_size])
assert ds.get_chunk_id(1, 64) == 5
assert ds.get_chunk_id(1, 128) == 7
assert ds.get_chunk_id(1, 192) == 9
with pytest.raises(ValueError):
ds.get_chunk_id(0, 256)
finally:
os.remove(index_file)
os.remove(bin_file)
@pytest.mark.unit
def test_create_data_index(self):
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence1) % chunk_size)
gt1 = np.pad(sentence1, (0, padded_size), 'constant', constant_values=pad_id)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence2) % chunk_size)
gt2 = np.pad(sentence2, (0, padded_size), 'constant', constant_values=pad_id)
data_file = '/tmp/test'
index_file = data_file + '.idx'
bin_file = data_file + '.bin'
try:
builder = MMapRetrievalIndexedDatasetBuilder(bin_file, chunk_size, pad_id, False)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(index_file)
# load the data
ds = MMapRetrievalIndexedDataset(data_file)
assert np.array_equal(ds.get(0), gt1)
assert np.array_equal(ds.get(1), gt2)
fetch1, fetch2 = ds[0:2]
assert np.array_equal(fetch1, gt1)
assert np.array_equal(fetch2, gt2)
chunk_id = ds.get_chunk_id(0, 64)
assert chunk_id == 1
assert ds.from_chunk_id_to_doc_id(0) == 0
assert ds.from_chunk_id_to_doc_id(1) == 0
with pytest.raises(ValueError):
ds.get_chunk_id(0, 128)
assert np.array_equal(ds.get_chunk(chunk_id), gt1[64 : 64 + 64])
chunk_id = ds.get_chunk_id(1, 0)
assert chunk_id == 2
assert ds.from_chunk_id_to_doc_id(2) == 1
assert ds.from_chunk_id_to_doc_id(3) == 1
assert ds.from_chunk_id_to_doc_id(4) == 1
assert ds.from_chunk_id_to_doc_id(5) == 1
with pytest.raises(ValueError):
ds.from_chunk_id_to_doc_id(6)
assert np.array_equal(ds.get_chunk(chunk_id), gt2[0:64])
assert np.array_equal(ds.get_chunk(chunk_id + 1), gt2[64:128])
assert np.array_equal(ds.get_chunk(chunk_id + 2), gt2[128:192])
assert np.array_equal(ds.get_chunk(chunk_id + 3), gt2[192:256])
assert ds.get_chunk_id(1, 64) == 3
assert ds.get_chunk_id(1, 128) == 4
assert ds.get_chunk_id(1, 192) == 5
with pytest.raises(ValueError):
ds.get_chunk_id(0, 256)
finally:
os.remove(index_file)
os.remove(bin_file)
@pytest.mark.unit
def test_create_retrieval_data_index_stride32(self):
stride = 32
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence1) % chunk_size)
gt1 = np.pad(sentence1, (0, padded_size), 'constant', constant_values=pad_id)
padded_gt1 = np.pad(sentence1, (0, padded_size + chunk_size), 'constant', constant_values=pad_id)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence2) % chunk_size)
gt2 = np.pad(sentence2, (0, padded_size), 'constant', constant_values=pad_id)
padded_gt2 = np.pad(sentence2, (0, padded_size + chunk_size), 'constant', constant_values=pad_id)
data_file = '/tmp/test'
index_file = data_file + '.idx'
bin_file = data_file + '.bin'
try:
builder = MMapRetrievalIndexedDatasetBuilder(bin_file, chunk_size, pad_id, True, stride=stride)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(index_file)
# load the data
ds = MMapRetrievalIndexedDataset(data_file)
assert np.array_equal(ds.get(0), gt1)
assert np.array_equal(ds.get(1), gt2)
fetch1, fetch2 = ds[0:2]
assert np.array_equal(fetch1, gt1)
assert np.array_equal(fetch2, gt2)
chunk_id = ds.get_chunk_id(0, 64)
assert chunk_id == 2
assert ds.from_chunk_id_to_doc_id(0) == 0
assert ds.from_chunk_id_to_doc_id(1) == 0
assert ds.from_chunk_id_to_doc_id(2) == 0
with pytest.raises(ValueError):
ds.get_chunk_id(0, 128)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt1[64 : 64 + 64 * 2])
chunk_id = ds.get_chunk_id(1, 0)
assert chunk_id == 3
assert ds.from_chunk_id_to_doc_id(3) == 1
assert ds.from_chunk_id_to_doc_id(4) == 1
assert ds.from_chunk_id_to_doc_id(5) == 1
assert ds.from_chunk_id_to_doc_id(6) == 1
assert ds.from_chunk_id_to_doc_id(7) == 1
assert ds.from_chunk_id_to_doc_id(8) == 1
assert ds.from_chunk_id_to_doc_id(9) == 1
with pytest.raises(ValueError):
ds.from_chunk_id_to_doc_id(10)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt2[0 : chunk_size * 2])
assert np.array_equal(ds.get_chunk(chunk_id + 1), gt2[stride : stride + chunk_size * 2])
assert np.array_equal(ds.get_chunk(chunk_id + 2), gt2[stride * 2 : stride * 2 + chunk_size * 2])
assert np.array_equal(ds.get_chunk(chunk_id + 3), gt2[stride * 3 : stride * 3 + chunk_size * 2])
assert ds.get_chunk_id(1, 64) == 5
assert ds.get_chunk_id(1, 128) == 7
assert ds.get_chunk_id(1, 192) == 9
with pytest.raises(ValueError):
ds.get_chunk_id(0, 256)
chunk_id = ds.get_chunk_id(1, 64)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt2[64:192])
multi_chunks = ds.get_chunk(slice(0, ds.chunks))
assert np.array_equal(multi_chunks[0], padded_gt1[0 : chunk_size * 2])
assert np.array_equal(multi_chunks[1], padded_gt1[stride : stride + chunk_size * 2])
assert np.array_equal(multi_chunks[2], padded_gt1[stride * 2 : stride * 2 + chunk_size * 2])
assert np.array_equal(multi_chunks[3], padded_gt2[0 : chunk_size * 2])
assert np.array_equal(multi_chunks[4], padded_gt2[stride : stride + chunk_size * 2])
assert np.array_equal(multi_chunks[5], padded_gt2[stride * 2 : stride * 2 + chunk_size * 2])
assert np.array_equal(multi_chunks[6], padded_gt2[stride * 3 : stride * 3 + chunk_size * 2])
assert np.array_equal(multi_chunks[7], padded_gt2[stride * 4 : stride * 4 + chunk_size * 2])
assert np.array_equal(multi_chunks[8], padded_gt2[stride * 5 : stride * 5 + chunk_size * 2])
assert np.array_equal(multi_chunks[9], padded_gt2[stride * 6 : stride * 6 + chunk_size * 2])
finally:
os.remove(index_file)
os.remove(bin_file)
@pytest.mark.unit
def test_create_retrieval_data_index(self):
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence1) % chunk_size)
gt1 = np.pad(sentence1, (0, padded_size), 'constant', constant_values=pad_id)
padded_gt1 = np.pad(sentence1, (0, padded_size + chunk_size), 'constant', constant_values=pad_id)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
padded_size = chunk_size - (len(sentence2) % chunk_size)
gt2 = np.pad(sentence2, (0, padded_size), 'constant', constant_values=pad_id)
padded_gt2 = np.pad(sentence2, (0, padded_size + chunk_size), 'constant', constant_values=pad_id)
data_file = '/tmp/test'
index_file = data_file + '.idx'
bin_file = data_file + '.bin'
try:
builder = MMapRetrievalIndexedDatasetBuilder(bin_file, chunk_size, pad_id, True)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(index_file)
# load the data
ds = MMapRetrievalIndexedDataset(data_file)
assert np.array_equal(ds.get(0), gt1)
assert np.array_equal(ds.get(1), gt2)
fetch1, fetch2 = ds[0:2]
assert np.array_equal(fetch1, gt1)
assert np.array_equal(fetch2, gt2)
chunk_id = ds.get_chunk_id(0, 64)
assert chunk_id == 1
assert ds.from_chunk_id_to_doc_id(0) == 0
assert ds.from_chunk_id_to_doc_id(1) == 0
with pytest.raises(ValueError):
ds.get_chunk_id(0, 128)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt1[64 : 64 + 64 * 2])
chunk_id = ds.get_chunk_id(1, 0)
assert chunk_id == 2
assert ds.from_chunk_id_to_doc_id(2) == 1
assert ds.from_chunk_id_to_doc_id(3) == 1
assert ds.from_chunk_id_to_doc_id(4) == 1
assert ds.from_chunk_id_to_doc_id(5) == 1
with pytest.raises(ValueError):
ds.from_chunk_id_to_doc_id(6)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt2[0:128])
assert np.array_equal(ds.get_chunk(chunk_id + 1), padded_gt2[64:192])
assert np.array_equal(ds.get_chunk(chunk_id + 2), padded_gt2[128:256])
assert np.array_equal(ds.get_chunk(chunk_id + 3), padded_gt2[192:320])
assert ds.get_chunk_id(1, 64) == 3
assert ds.get_chunk_id(1, 128) == 4
assert ds.get_chunk_id(1, 192) == 5
with pytest.raises(ValueError):
ds.get_chunk_id(0, 256)
chunk_id = ds.get_chunk_id(1, 64)
assert np.array_equal(ds.get_chunk(chunk_id), padded_gt2[64:192])
multi_chunks = ds.get_chunk(slice(0, ds.chunks))
assert np.array_equal(multi_chunks[0], padded_gt1[0:128])
assert np.array_equal(multi_chunks[1], padded_gt1[64 : 64 + 128])
assert np.array_equal(multi_chunks[2], padded_gt2[0:128])
assert np.array_equal(multi_chunks[3], padded_gt2[64 : 64 + 128])
assert np.array_equal(multi_chunks[4], padded_gt2[128 : 128 + 128])
assert np.array_equal(multi_chunks[5], padded_gt2[192 : 192 + 128])
finally:
os.remove(index_file)
os.remove(bin_file)
@pytest.mark.unit
def test_knn_index(self):
data_file = '/tmp/test'
index_file = data_file + '.idx'
K = 8
index_files = [f'{data_file}_{i}.idx' for i in range(3)]
merged_file = '/tmp/merged.idx'
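        # KNNIndex maps each chunk id to the ids of its K nearest retrieval chunks; shard files
        # written with an offset can later be combined with merge_knn_files.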
try:
with KNNIndex.writer(index_file, K) as w:
map_np0 = np.random.randint(0, 100, (50, K))
w.write(map_np0)
map_np1 = np.random.randint(0, 100, (50, K))
w.write(map_np1)
map_np2 = np.random.randint(0, 100, (50, K))
w.write(map_np2)
f = KNNIndex(index_file)
assert f.K == K
assert f.len == map_np0.shape[0] + map_np1.shape[0] + map_np2.shape[0]
assert np.array_equal(map_np0, f.knn_map[:50])
assert np.array_equal(map_np1, f.knn_map[50:100])
assert np.array_equal(map_np2, f.knn_map[100:])
assert np.array_equal(f.get_KNN_chunk_ids(5), map_np0[5])
assert f.chunk_start_id == 0
assert f.chunk_end_id == f.len
with KNNIndex.writer(index_file, K, 100) as w:
map_np0 = np.random.randint(0, 100, (50, K))
w.write(map_np0)
map_np1 = np.random.randint(0, 100, (50, K))
w.write(map_np1)
map_np2 = np.random.randint(0, 100, (50, K))
w.write(map_np2)
f = KNNIndex(index_file)
assert f.K == K
assert f.len == map_np0.shape[0] + map_np1.shape[0] + map_np2.shape[0]
assert np.array_equal(map_np0, f.knn_map[:50])
assert np.array_equal(map_np1, f.knn_map[50:100])
assert np.array_equal(map_np2, f.knn_map[100:])
assert np.array_equal(f.get_KNN_chunk_ids(5 + 100), map_np0[5])
assert f.chunk_start_id == 100
assert f.chunk_end_id == f.len + 100
# test multiple sharding indices
inputs = []
start = 0
for i in range(3):
with KNNIndex.writer(index_files[i], K, offset=start) as w:
map_np0 = np.random.randint(0, 100, (50, K))
inputs.append(map_np0)
w.write(map_np0)
map_np1 = np.random.randint(0, 100, (50, K))
inputs.append(map_np1)
w.write(map_np1)
f = KNNIndex(index_files[i])
start += f.len
merge_knn_files(index_files, merged_file)
f = KNNIndex(merged_file)
input_array = np.vstack(inputs)
assert f.len == 100 * 3
for i in range(300):
assert np.array_equal(f.get_KNN_chunk_ids(i), input_array[i])
assert f.chunk_start_id == 0
assert f.chunk_end_id == f.len
assert f.K == K
finally:
os.remove(index_file)
for i in range(3):
os.remove(index_files[i])
os.remove(merged_file)
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_MEGATRON_CORE, reason="megatron-core is not installed")
def test_retro_dataset(self):
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
sentence3 = torch.arange(0, 300, 2, dtype=torch.int64)
sentence4 = torch.arange(1, 400, 2, dtype=torch.int64)
        # test the case where training data and retrieval data are different
data_file = '/tmp/test_data'
data_index_file = data_file + '.idx'
data_bin_file = data_file + '.bin'
db_file = '/tmp/test_db_data'
db_index_file = db_file + '.idx'
db_bin_file = db_file + '.bin'
K = 8
map_index_file = '/tmp/test_map.idx'
index_path = '/tmp'
cfg = OmegaConf.create({'data': {"index_mapping_dir": index_path}})
# dummy tokenizer
class Tokenizer:
eos_id = 1
pad_id = 0
tokenizer = Tokenizer()
num_samples = 100
seq_len = 192
name = 'test'
data_prefix = 'pref'
seed = 1
_filename = index_path + '/' + data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_len)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
try:
builder = MMapRetrievalIndexedDatasetBuilder(data_bin_file, chunk_size, pad_id, False)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(data_index_file)
builder = MMapRetrievalIndexedDatasetBuilder(db_bin_file, chunk_size, pad_id, True)
builder.add_item(sentence3)
builder.add_item(sentence4)
builder.finalize(db_index_file)
# load the data
data_index = MMapRetrievalIndexedDataset(data_file)
db_index = MMapRetrievalIndexedDataset(db_file)
with KNNIndex.writer(map_index_file, K) as w:
map_np = np.random.randint(-3, db_index.chunks, (data_index.chunks, K))
w.write(map_np)
map_index = KNNIndex(map_index_file)
documents = np.arange(0, data_index.sizes.shape[0])
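            # Each RETRO sample carries seq_len tokens/labels and, for every chunk, K retrieved
            # neighbors of length 2 * chunk_size (the chunk plus its continuation).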
d = RETRODataset(
cfg,
None,
tokenizer,
name,
data_prefix,
documents,
data_index,
num_samples,
seq_len,
seed,
map_index,
db_index,
)
for i in range(len(d)):
record = d[i]
assert record['tokens'].shape[0] == seq_len
assert record['labels'].shape[0] == seq_len
assert record['retrieved_ids'].shape[0] == seq_len // chunk_size
assert record['retrieved_ids'].shape[1] == K
assert record['retrieved_ids'].shape[2] == chunk_size * 2
assert record['tokens_mask'].shape[0] == seq_len
finally:
os.remove(data_bin_file)
os.remove(data_index_file)
os.remove(db_bin_file)
os.remove(db_index_file)
os.remove(map_index_file)
os.remove(doc_idx_filename)
os.remove(sample_idx_filename)
os.remove(shuffle_idx_filename)
        # test the case where training data and retrieval data are the same
try:
builder = MMapRetrievalIndexedDatasetBuilder(db_bin_file, chunk_size, pad_id, True)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.add_item(sentence3)
builder.add_item(sentence4)
builder.finalize(db_index_file)
# load the data
data_index = MMapRetrievalIndexedDataset(db_file)
db_index = MMapRetrievalIndexedDataset(db_file)
with KNNIndex.writer(map_index_file, K) as w:
map_np = np.random.randint(-3, db_index.chunks, (data_index.chunks, K))
w.write(map_np)
map_index = KNNIndex(map_index_file)
documents = np.arange(0, data_index.sizes.shape[0])
d = RETRODataset(
cfg,
None,
tokenizer,
name,
data_prefix,
documents,
data_index,
num_samples,
seq_len,
seed,
map_index,
db_index,
)
for i in range(len(d)):
record = d[i]
assert record['tokens'].shape[0] == seq_len
assert record['labels'].shape[0] == seq_len
assert record['retrieved_ids'].shape[0] == seq_len // chunk_size
assert record['retrieved_ids'].shape[1] == K
assert record['retrieved_ids'].shape[2] == chunk_size * 2
assert record['tokens_mask'].shape[0] == seq_len
finally:
os.remove(db_bin_file)
os.remove(db_index_file)
os.remove(map_index_file)
os.remove(doc_idx_filename)
os.remove(sample_idx_filename)
os.remove(shuffle_idx_filename)
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_MEGATRON_CORE, reason="megatron-core is not installed")
def test_retro_dataset_stride32(self):
chunk_size = 64
pad_id = 0
sentence1 = torch.arange(0, 200, 2, dtype=torch.int64)
sentence2 = torch.arange(1, 500, 2, dtype=torch.int64)
sentence3 = torch.arange(0, 300, 2, dtype=torch.int64)
sentence4 = torch.arange(1, 400, 2, dtype=torch.int64)
        # test the case where training data and retrieval data are different
data_file = '/tmp/test_data'
data_index_file = data_file + '.idx'
data_bin_file = data_file + '.bin'
db_file = '/tmp/test_db_data'
db_index_file = db_file + '.idx'
db_bin_file = db_file + '.bin'
K = 8
map_index_file = '/tmp/test_map.idx'
index_path = '/tmp'
cfg = OmegaConf.create({'data': {"index_mapping_dir": index_path}})
# dummy tokenizer
class Tokenizer:
eos_id = 1
pad_id = 0
tokenizer = Tokenizer()
num_samples = 100
stride = 32
seq_len = 192
name = 'test'
data_prefix = 'pref'
seed = 1
_filename = index_path + '/' + data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_len)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
try:
builder = MMapRetrievalIndexedDatasetBuilder(data_bin_file, chunk_size, pad_id, False, stride=32)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.finalize(data_index_file)
builder = MMapRetrievalIndexedDatasetBuilder(db_bin_file, chunk_size, pad_id, True, stride=32)
builder.add_item(sentence3)
builder.add_item(sentence4)
builder.finalize(db_index_file)
# load the data
data_index = MMapRetrievalIndexedDataset(data_file)
db_index = MMapRetrievalIndexedDataset(db_file)
with KNNIndex.writer(map_index_file, K) as w:
map_np = np.random.randint(-3, db_index.chunks, (data_index.chunks, K))
w.write(map_np)
map_index = KNNIndex(map_index_file)
documents = np.arange(0, data_index.sizes.shape[0])
d = RETRODataset(
cfg,
None,
tokenizer,
name,
data_prefix,
documents,
data_index,
num_samples,
seq_len,
seed,
map_index,
db_index,
)
for i in range(len(d)):
record = d[i]
assert record['tokens'].shape[0] == seq_len
assert record['labels'].shape[0] == seq_len
assert record['retrieved_ids'].shape[0] == seq_len // chunk_size
assert record['retrieved_ids'].shape[1] == K
assert record['retrieved_ids'].shape[2] == chunk_size * 2
assert record['tokens_mask'].shape[0] == seq_len
finally:
os.remove(data_bin_file)
os.remove(data_index_file)
os.remove(db_bin_file)
os.remove(db_index_file)
os.remove(map_index_file)
os.remove(doc_idx_filename)
os.remove(sample_idx_filename)
os.remove(shuffle_idx_filename)
        # test the case where training data and retrieval data are the same
try:
builder = MMapRetrievalIndexedDatasetBuilder(db_bin_file, chunk_size, pad_id, True, stride=32)
builder.add_item(sentence1)
builder.add_item(sentence2)
builder.add_item(sentence3)
builder.add_item(sentence4)
builder.finalize(db_index_file)
# load the data
data_index = MMapRetrievalIndexedDataset(db_file)
db_index = MMapRetrievalIndexedDataset(db_file)
with KNNIndex.writer(map_index_file, K) as w:
map_np = np.random.randint(-3, db_index.chunks, (data_index.chunks, K))
w.write(map_np)
map_index = KNNIndex(map_index_file)
documents = np.arange(0, data_index.sizes.shape[0])
d = RETRODataset(
cfg,
None,
tokenizer,
name,
data_prefix,
documents,
data_index,
num_samples,
seq_len,
seed,
map_index,
db_index,
)
for i in range(len(d)):
record = d[i]
assert record['tokens'].shape[0] == seq_len
assert record['labels'].shape[0] == seq_len
assert record['retrieved_ids'].shape[0] == seq_len // chunk_size
assert record['retrieved_ids'].shape[1] == K
assert record['retrieved_ids'].shape[2] == chunk_size * 2
assert record['tokens_mask'].shape[0] == seq_len
finally:
os.remove(db_bin_file)
os.remove(db_index_file)
os.remove(map_index_file)
os.remove(doc_idx_filename)
os.remove(sample_idx_filename)
os.remove(shuffle_idx_filename)
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_MEGATRON_CORE, reason="megatron-core is not installed")
def test_dedup(self):
total = 1000
id_start = np.array([0, 100, 200, 300, 500, 900])
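        # id_start lists document-start chunk ids; build_map should map every chunk id in [beg, end)
        # to the [doc_start, doc_end) interval of its containing document.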
beg = 30
end = 210
chunk_id_to_doc_id_map = np.zeros((end - beg, 2), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, total, beg, end)
for i in range(30, 100):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[0:2])
for i in range(100, 200):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[1:3])
for i in range(200, 210):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[2:4])
beg = 5
end = 100
chunk_id_to_doc_id_map = np.zeros((end - beg, 2), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, total, beg, end)
for i in range(beg, end):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[0:2])
beg = 100
end = 200
chunk_id_to_doc_id_map = np.zeros((end - beg, 2), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, total, beg, end)
for i in range(beg, end):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[1:3])
beg = 900
end = 1000
chunk_id_to_doc_id_map = np.zeros((end - beg, 2), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, total, beg, end)
for i in range(beg, end):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], np.array([900, 1000]))
beg = 150
end = 250
chunk_id_to_doc_id_map = np.zeros((end - beg, 2), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, total, beg, end)
for i in range(beg, 200):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[1:3])
for i in range(200, end):
assert_array_equal(chunk_id_to_doc_id_map[i - beg], id_start[2:4])
I = np.arange(1000)[None, :]
tmp_neighbors = np.ones_like(I) * -1
with pytest.raises(ValueError):
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, 0, beg)
I = np.arange(1000)[None, :]
tmp_neighbors = np.ones_like(I) * -1
with pytest.raises(ValueError):
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, 250, beg)
for i in range(beg, 200):
I = np.arange(1000)[None, :]
tmp_neighbors = np.ones_like(I) * -1
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, i, beg)
gt = np.array(list(range(100)) + list(range(200, 1000)) + ([-1] * 100))
assert_array_equal(tmp_neighbors[0], gt)
for i in range(200, 250):
I = np.arange(1000)[None, :]
tmp_neighbors = np.ones_like(I) * -1
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, i, beg)
gt = np.array(list(range(200)) + list(range(300, 1000)) + ([-1] * 100))
assert_array_equal(tmp_neighbors[0], gt)
I = np.arange(1000)[None, :]
I = np.repeat(I, 70, axis=0)
tmp_neighbors = np.ones_like(I) * -1
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, 180, beg)
gt0 = np.array(list(range(100)) + list(range(200, 1000)) + ([-1] * 100))
gt1 = np.array(list(range(200)) + list(range(300, 1000)) + ([-1] * 100))
for i in range(20):
assert_array_equal(tmp_neighbors[i], gt0)
for i in range(20, 70):
assert_array_equal(tmp_neighbors[i], gt1)
| NeMo-main | tests/collections/nlp/test_indexed_retrieval_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import numpy as np
import pytest
from nemo.collections.common.tokenizers.column_coder import ColumnCodes
from nemo.collections.common.tokenizers.tabular_tokenizer import TabularTokenizer
class TestTabularTokenizer:
def setup_method(self, test_method):
column_configs = [
{
"name": "col_a",
"code_type": "float",
"args": {"code_len": 4, "base": 16, "fillall": False, "hasnan": True, "transform": 'yeo-johnson'},
},
{
"name": "col_b",
"code_type": "float",
"args": {"code_len": 4, "base": 177, "fillall": True, "hasnan": True, "transform": 'quantile'},
},
{
"name": "col_c",
"code_type": "int",
"args": {"code_len": 3, "base": 12, "fillall": True, "hasnan": True},
},
{"name": "col_d", "code_type": "category",},
]
example_arrays = {}
np.random.seed(1234)
array = np.random.random(100)
example_arrays['col_a'] = array
array = np.random.random(100)
example_arrays['col_b'] = array
array = np.random.randint(3, 1000, 100)
example_arrays['col_c'] = array
ALPHABET = np.array(list(string.ascii_lowercase + ' '))
array = np.char.add(np.random.choice(ALPHABET, 1000), np.random.choice(ALPHABET, 1000))
example_arrays['col_d'] = array
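        # ColumnCodes fits a per-column codec (float / int / category) from the example arrays above.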
self.cc = ColumnCodes.get_column_codes(column_configs, example_arrays)
@pytest.mark.unit
def test_tabular_tokenizer(self):
tab = TabularTokenizer(self.cc, delimiter=',')
text = "0.323, 0.1, 232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
r = tab.text_to_tokens(text)
assert len(r) == 10
assert tab.eod == 1351
assert tab.eor == 1352
assert tab.num_columns == 4
assert self.cc.vocab_size == 1351
assert tab.vocab_size == 1353
r = tab.text_to_ids(text)
assert (sum(self.cc.sizes) + 1) * 2 == len(r)
assert np.array_equal(
np.array(r[0:13]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1352])
)
assert np.array_equal(
np.array(r[13:]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
)
reversed_text = tab.ids_to_text(r)
assert reversed_text == '0.3230,0.0999998,232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
text = "xy\n0.323, 0.1, 232, xy<|endoftext|>"
r = tab.text_to_tokens(text)
assert len(r) == 7
r = tab.text_to_ids(text)
assert sum(self.cc.sizes) + 1 + 2 == len(r)
assert np.array_equal(np.array(r[0:2]), np.array([1313, 1352]))
assert np.array_equal(
np.array(r[2:15]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
)
reversed_text = tab.ids_to_text(r)
assert reversed_text == 'xy\n0.3230,0.0999998,232,xy<|endoftext|>'
text = "\n0.323, 0.1, 232, xy<|endoftext|>"
r = tab.text_to_tokens(text)
assert len(r) == 5
r = tab.text_to_ids(text)
assert sum(self.cc.sizes) + 1 == len(r)
assert np.array_equal(
np.array(r[0:13]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
)
reversed_text = tab.ids_to_text(r)
assert reversed_text == '0.3230,0.0999998,232,xy<|endoftext|>'
text = "232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
r = tab.text_to_tokens(text)
assert len(r) == 8
r = tab.text_to_ids(text)
assert sum(self.cc.sizes) + 1 + 5 == len(r)
assert np.array_equal(np.array(r[0:5]), np.array([787, 780, 773, 1313, 1352]))
assert np.array_equal(
np.array(r[5:18]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
)
reversed_text = tab.ids_to_text(r)
assert reversed_text == '232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
text = "0.1, 232, xy\n0.323, 0.1, 232, xy<|endoftext|>"
r = tab.text_to_tokens(text)
assert len(r) == 9
r = tab.text_to_ids(text)
assert sum(self.cc.sizes) + 1 + 9 == len(r)
assert np.array_equal(np.array(r[0:9]), np.array([584, 417, 305, 76, 787, 780, 773, 1313, 1352]))
assert np.array_equal(
np.array(r[9:22]), np.array([49, 32, 29, 15, 584, 417, 305, 76, 787, 780, 773, 1313, 1351])
)
reversed_text = tab.ids_to_text(r)
assert reversed_text == '0.0999998,232,xy\n0.3230,0.0999998,232,xy<|endoftext|>'
| NeMo-main | tests/collections/nlp/test_tabular_tokenizer.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import pytest
from nemo.collections.nlp.data.question_answering_squad.qa_dataset import SquadDataset
from nemo.collections.nlp.data.question_answering_squad.qa_squad_processing import (
_get_tokens,
exact_match_score,
f1_score,
)
@pytest.mark.unit
def test_get_tokens():
sentence = 'I am happy'
tokens = ['i', 'am', 'happy']
assert tokens == _get_tokens(sentence)
sentence = 'I am a person'
tokens = ['i', 'am', 'person']
assert tokens == _get_tokens(sentence)
sentence = 'I am a person.'
tokens = ['i', 'am', 'person']
assert tokens == _get_tokens(sentence)
@pytest.mark.unit
def test_f1_score():
generated_field = 'That is so good'
ground_truth_field = 'That is so awesome'
f1 = f1_score(generated_field, ground_truth_field)
assert f1 == 0.75
generated_field = ''
ground_truth_field = 'That'
f1 = f1_score(generated_field, ground_truth_field)
assert f1 == 0
@pytest.mark.unit
def test_exact_match_score():
generated_field = 'That is so good'
ground_truth_field = 'That is so awesome'
em = exact_match_score(generated_field, ground_truth_field)
assert em == 0
generated_field = 'That is so good!'
ground_truth_field = 'That is so good.'
em = exact_match_score(generated_field, ground_truth_field)
assert em == 1
generated_field = 'That is so good'
ground_truth_field = 'that is so good'
em = exact_match_score(generated_field, ground_truth_field)
assert em == 1
@pytest.mark.unit
def test_split_into_words():
text = 'hi yo'
char_to_word_offset = [0, 0, 0, 1, 1]
doc_tokens = ["hi", "yo"]
output = SquadDataset.split_into_words(text)
assert output[0] == doc_tokens
assert output[1] == char_to_word_offset
text = 'i am good'
char_to_word_offset = [0, 0, 1, 1, 1, 2, 2, 2, 2]
doc_tokens = ["i", "am", 'good']
output = SquadDataset.split_into_words(text)
assert output[0] == doc_tokens
assert output[1] == char_to_word_offset
@pytest.mark.unit
def test_get_doc_spans():
all_doc_tokens = ['a'] * 15
max_tokens_for_doc = 10
doc_stride = 5
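    # With 15 tokens, a 10-token window, and stride 5, two overlapping spans are expected: [0, 10) and [5, 15).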
doc_spans = SquadDataset.get_docspans(all_doc_tokens, max_tokens_for_doc, doc_stride)
assert len(doc_spans) == 2
assert doc_spans[0].start == 0
assert doc_spans[0].length == 10
assert doc_spans[1].start == 5
assert doc_spans[1].length == 10
@pytest.mark.unit
def test_get_average_dist_to_tok_start_and_end():
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_span = _DocSpan(start=0, length=5)
tok_start_position = 1
tok_end_position = 3
assert 2 == SquadDataset.get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position)
doc_span = _DocSpan(start=5, length=5)
tok_start_position = 1
tok_end_position = 2
assert 6 == SquadDataset.get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position)
doc_span = _DocSpan(start=5, length=4)
tok_start_position = 1
tok_end_position = 2
assert 5 == SquadDataset.get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position)
@pytest.mark.unit
def test_keep_relevant_docspans():
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = 1
tok_end_position = 2
mode = 'all'
assert doc_spans == SquadDataset.keep_relevant_docspans(doc_spans, tok_start_position, tok_end_position, mode)
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = -1
tok_end_position = -1
mode = 'only_positive'
expected_doc_spans = []
assert expected_doc_spans == SquadDataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, mode
)
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = 1
tok_end_position = 2
mode = 'only_positive'
expected_doc_spans = [_DocSpan(start=0, length=5), _DocSpan(start=1, length=5)]
assert expected_doc_spans == SquadDataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, mode
)
doc_spans = [_DocSpan(start=start, length=5) for start in range(15)]
tok_start_position = 1
tok_end_position = 2
mode = 'limited_negative'
expected_doc_spans = [_DocSpan(start=start, length=5) for start in range(10)]
assert expected_doc_spans == SquadDataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, mode
)
| NeMo-main | tests/collections/nlp/test_question_answering.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
from shutil import rmtree
from unittest import TestCase
import pytest
import pytorch_lightning as pl
from omegaconf import OmegaConf
import nemo.collections.nlp.models as models
def get_metrics(data_dir, model):
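    """Run trainer.test on the text_dev/labels_dev split in `data_dir` and return the resulting metrics dict."""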
trainer = pl.Trainer(devices=[0], accelerator='gpu')
model.set_trainer(trainer)
model.update_data_dir(data_dir)
test_ds = OmegaConf.create(
{
'text_file': 'text_dev.txt',
'labels_file': 'labels_dev.txt',
'shuffle': False,
'num_samples': -1,
'batch_size': 8,
}
)
model._cfg.dataset.use_cache = False
model.setup_test_data(test_data_config=test_ds)
metrics = trainer.test(model)[0]
return metrics
def get_metrics_new_format(data_dir, model):
trainer = pl.Trainer(devices=[0], accelerator='gpu')
model.set_trainer(trainer)
test_ds = OmegaConf.create(
{
'use_tarred_dataset': False,
'ds_item': data_dir,
'text_file': 'text_dev.txt',
'labels_file': 'labels_dev.txt',
'shuffle': False,
'num_samples': -1,
'tokens_in_batch': 512,
'use_cache': False,
}
)
model.setup_test_data(test_data_config=test_ds)
metrics = trainer.test(model)[0]
return metrics
def data_exists(data_dir):
return os.path.exists(data_dir)
class TestPretrainedModelPerformance:
@pytest.mark.with_downloads()
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(
not data_exists('/home/TestData/nlp/token_classification_punctuation/fisher'), reason='Not a Jenkins machine'
)
def test_punct_capit_with_bert(self):
data_dir = '/home/TestData/nlp/token_classification_punctuation/fisher'
model = models.PunctuationCapitalizationModel.from_pretrained("punctuation_en_bert")
metrics = get_metrics_new_format(data_dir, model)
assert abs(metrics['test_punct_precision'] - 52.3024) < 0.001
assert abs(metrics['test_punct_recall'] - 58.9220) < 0.001
assert abs(metrics['test_punct_f1'] - 53.2976) < 0.001
assert abs(metrics['test_capit_precision'] - 87.0707) < 0.001
assert abs(metrics['test_capit_recall'] - 87.0707) < 0.001
assert abs(metrics['test_capit_f1'] - 87.0707) < 0.001
assert int(model.metrics['test']['punct_class_report'][0].total_examples) == 128
preds_512 = model.add_punctuation_capitalization(['what can i do for you today'], max_seq_length=512)[0]
assert preds_512 == 'What can I do for you today?'
preds_5 = model.add_punctuation_capitalization(['what can i do for you today'], max_seq_length=5, margin=0)[0]
assert preds_5 == 'What can I? Do for you. Today.'
preds_5_step_1 = model.add_punctuation_capitalization(
['what can i do for you today'], max_seq_length=5, margin=0, step=1
)[0]
assert preds_5_step_1 == 'What Can I do for you today.'
preds_6_step_1_margin_6 = model.add_punctuation_capitalization(
['what can i do for you today'], max_seq_length=6, margin=1, step=1
)[0]
assert preds_6_step_1_margin_6 == 'What can I do for you today.'
@pytest.mark.with_downloads()
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(
not data_exists('/home/TestData/nlp/token_classification_punctuation/fisher'), reason='Not a Jenkins machine'
)
def test_punct_capit_with_distilbert(self):
data_dir = '/home/TestData/nlp/token_classification_punctuation/fisher'
model = models.PunctuationCapitalizationModel.from_pretrained("punctuation_en_distilbert")
metrics = get_metrics_new_format(data_dir, model)
assert abs(metrics['test_punct_precision'] - 53.0826) < 0.001
assert abs(metrics['test_punct_recall'] - 56.2905) < 0.001
assert abs(metrics['test_punct_f1'] - 52.4225) < 0.001
assert int(model.metrics['test']['punct_class_report'][0].total_examples) == 128
@pytest.mark.with_downloads()
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
@pytest.mark.skipif(
not data_exists('/home/TestData/nlp/token_classification_punctuation/gmb'), reason='Not a Jenkins machine'
)
def test_ner_model(self):
data_dir = '/home/TestData/nlp/token_classification_punctuation/gmb'
model = models.TokenClassificationModel.from_pretrained("ner_en_bert")
metrics = get_metrics(data_dir, model)
assert abs(metrics['precision'] - 96.0937) < 0.001
assert abs(metrics['recall'] - 96.0146) < 0.001
assert abs(metrics['f1'] - 95.6076) < 0.001
assert int(model.classification_report.total_examples) == 202
| NeMo-main | tests/collections/nlp/test_pretrained_models_performance.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import random
import pytest
from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_chat_dataset import GPTSFTChatDataset
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
TOKENIZER_FILE_43B = '/home/TestData/nlp/megatron_sft/tokenizer.model'
MERGE_FILE = '/home/TestData/nlp/megatron_sft/merges.txt'
VOCAB_FILE = '/home/TestData/nlp/megatron_sft/vocab.json'
def ids_to_text(tokenizer, ids):
tokens = tokenizer.ids_to_tokens(ids)
text = tokenizer.tokens_to_text(tokens)
return text
def get_random_sentence():
nouns = ("puppy", "car", "rabbit", "girl", "monkey")
verbs = ("runs", "hits", "jumps", "drives", "barfs")
adv = ("crazily.", "dutifully.", "foolishly.", "merrily.", "occasionally.")
num1 = random.randrange(0, 5)
num2 = random.randrange(0, 5)
num3 = random.randrange(0, 5)
return nouns[num1] + ' ' + verbs[num2] + ' ' + adv[num3]
def get_random_label():
keys = ["quality", "toxicity", "humor", "creativity", "violence", "helpfulness", "not_appropriate"]
values = [random.randrange(0, 5) for i in range(len(keys))]
return ",".join([k + ":" + str(v) for k, v in zip(keys, values)])
def create_data_points(mask_user, turn_num, records, temp_file, t2v, label=True):
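    """Write `records` random conversations of `turn_num` turns to `temp_file` in the chat-SFT
    jsonl format (TEXT_TO_VALUE or VALUE_TO_TEXT depending on `t2v`) and return them as dicts."""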
data_points = []
with open(temp_file, 'w', encoding='utf-8') as f:
for r in range(records):
record = {}
record['system'] = 'a chat\n\n'
record['type'] = 'TEXT_TO_VALUE' if t2v else 'VALUE_TO_TEXT'
record['mask'] = 'User' if mask_user else 'Assistant'
turns = []
record['conversations'] = turns
for i in range(turn_num):
turn = {}
turn['from'] = 'User' if i % 2 == 0 else 'Assistant'
turn['value'] = get_random_sentence()
if label:
turn['label'] = get_random_label()
turns.append(turn)
f.write(json.dumps(record, ensure_ascii=False) + '\n')
data_points.append(record)
return data_points
class TestGPTSFTChatDataset:
@classmethod
def setup_class(cls):
pass
@pytest.mark.unit
def test_43B_tokenizer_mask_user(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(True, turn_num, records, temp_file, t2v=False)
tokenizer = get_nmt_tokenizer(library='sentencepiece', tokenizer_model=TOKENIZER_FILE_43B)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = tokenizer.ids_to_text(input_ids[mask].tolist())
expected_text = ''
for j in range(1, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['value'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_43B_tokenizer_mask_assistant(self):
random.seed(3)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(False, turn_num, records, temp_file, t2v=False)
tokenizer = get_nmt_tokenizer(library='sentencepiece', tokenizer_model=TOKENIZER_FILE_43B)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = tokenizer.ids_to_text(input_ids[mask].tolist())
expected_text = ''
for j in range(2, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['value'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_43B_tokenizer_mask_user_t2v(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(True, turn_num, records, temp_file, t2v=True)
tokenizer = get_nmt_tokenizer(library='sentencepiece', tokenizer_model=TOKENIZER_FILE_43B)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = tokenizer.ids_to_text(input_ids[mask].tolist())
expected_text = ''
for j in range(1, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['label'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_43B_tokenizer_mask_assistant_t2v(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(False, turn_num, records, temp_file, t2v=True)
tokenizer = get_nmt_tokenizer(library='sentencepiece', tokenizer_model=TOKENIZER_FILE_43B)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = tokenizer.ids_to_text(input_ids[mask].tolist())
expected_text = ''
for j in range(0, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['label'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_mpt_tokenizer_mask_user(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(True, turn_num, records, temp_file, t2v=False)
tokenizer = get_nmt_tokenizer(
library='huggingface', model_name='gpt2', merges_file=MERGE_FILE, vocab_file=VOCAB_FILE, use_fast=True
)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']}
)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = ids_to_text(tokenizer, input_ids[mask].tolist())
expected_text = ''
for j in range(1, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['value'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_mpt_tokenizer_mask_assistant(self):
random.seed(3)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(False, turn_num, records, temp_file, t2v=False)
tokenizer = get_nmt_tokenizer(
library='huggingface', model_name='gpt2', merges_file=MERGE_FILE, vocab_file=VOCAB_FILE, use_fast=True
)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']}
)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = ids_to_text(tokenizer, input_ids[mask].tolist())
expected_text = ''
for j in range(2, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['value'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_mpt_tokenizer_mask_user_t2v(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(True, turn_num, records, temp_file, t2v=True)
tokenizer = get_nmt_tokenizer(
library='huggingface', model_name='gpt2', merges_file=MERGE_FILE, vocab_file=VOCAB_FILE, use_fast=True
)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']}
)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = ids_to_text(tokenizer, input_ids[mask].tolist())
expected_text = ''
for j in range(1, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['label'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_mpt_tokenizer_mask_assistant_t2v(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(False, turn_num, records, temp_file, t2v=True)
tokenizer = get_nmt_tokenizer(
library='huggingface', model_name='gpt2', merges_file=MERGE_FILE, vocab_file=VOCAB_FILE, use_fast=True
)
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<extra_id_0>', '<extra_id_1>', '<extra_id_2>']}
)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = ids_to_text(tokenizer, input_ids[mask].tolist())
expected_text = ''
for j in range(0, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['label'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_43B_tokenizer_mask_user_nolabel(self):
random.seed(5)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(True, turn_num, records, temp_file, t2v=False, label=False)
tokenizer = get_nmt_tokenizer(library='sentencepiece', tokenizer_model=TOKENIZER_FILE_43B)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = tokenizer.ids_to_text(input_ids[mask].tolist())
expected_text = ''
for j in range(1, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['value'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
@pytest.mark.unit
def test_43B_tokenizer_mask_assistant_nolabel(self):
random.seed(3)
temp_file = '/tmp/test_file.jsonl'
turn_num = 5
records = 5
try:
data_points = create_data_points(False, turn_num, records, temp_file, t2v=False, label=False)
tokenizer = get_nmt_tokenizer(library='sentencepiece', tokenizer_model=TOKENIZER_FILE_43B)
d = GPTSFTChatDataset(temp_file, tokenizer, 4096, 1, index_mapping_dir='/tmp/', hf_dataset=True)
for i in range(len(d)):
result = d[i]
input_ids = result['input_ids']
mask = result['mask']
text = tokenizer.ids_to_text(input_ids[mask].tolist())
expected_text = ''
for j in range(2, turn_num, 2):
expected_text += data_points[i]['conversations'][j]['value'] + '\n' + '<extra_id_1>'
assert text == expected_text
finally:
os.remove(temp_file)
| NeMo-main | tests/collections/nlp/test_chat_sft_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
import numpy as np
import pytest
from nemo.collections.common.tokenizers.column_coder import CategoryCode, ColumnCodes, FloatCode, IntCode
class TestColumnCoder:
@pytest.mark.unit
def test_float(self):
np.random.seed(1234)
series = np.random.random(100)
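        # FloatCode's positional arguments here appear to be: column name, code length, start id,
        # fillall, base, and hasnan (optionally followed by a transform name) - inferred from the
        # keyword configs in test_column_coder below; treat the exact parameter names as an assumption.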
float_coder = FloatCode('t', 5, 0, False, 10, True)
float_coder.compute_code(data_series=series)
r = float_coder.encode('0.323')
assert np.array_equal(np.array(r), np.array([40, 32, 25, 17, 3]))
decoded = float_coder.decode(r)
assert decoded == '0.32290'
r = float_coder.encode('1.323')
assert np.array_equal(np.array(r), np.array([41, 30, 20, 10, 0]))
decoded = float_coder.decode(r)
assert decoded == '0.99208'
r = float_coder.encode('nan')
assert np.array_equal(np.array(r), np.array([42, 39, 29, 19, 9]))
decoded = float_coder.decode(r)
assert decoded == 'nan'
float_coder = FloatCode('t', 5, 0, False, 10, True, 'yeo-johnson')
float_coder.compute_code(data_series=series)
r = float_coder.encode('0.323')
assert np.array_equal(np.array(r), np.array([41, 30, 25, 14, 5]))
decoded = float_coder.decode(r)
assert decoded == '0.32300'
r = float_coder.encode('1.323')
assert np.array_equal(np.array(r), np.array([43, 39, 21, 16, 3]))
decoded = float_coder.decode(r)
assert decoded == '1.08064'
r = float_coder.encode('nan')
assert np.array_equal(np.array(r), np.array([44, 39, 29, 19, 9]))
decoded = float_coder.decode(r)
assert decoded == 'nan'
float_coder = FloatCode('t', 5, 0, False, 10, True, 'robust')
float_coder.compute_code(data_series=series)
r = float_coder.encode('0.323')
assert np.array_equal(np.array(r), np.array([40, 37, 24, 10, 8]))
decoded = float_coder.decode(r)
assert decoded == '0.32299'
r = float_coder.encode('1.323')
assert np.array_equal(np.array(r), np.array([42, 30, 27, 19, 3]))
decoded = float_coder.decode(r)
assert decoded == '0.89536'
r = float_coder.encode('nan')
assert np.array_equal(np.array(r), np.array([43, 39, 29, 19, 9]))
decoded = float_coder.decode(r)
assert decoded == 'nan'
float_coder = FloatCode('t', 5, 0, True, 377, True)
float_coder.compute_code(data_series=series)
r = float_coder.encode('0.323')
assert np.array_equal(np.array(r), np.array([1508, 1228, 765, 663, 194]))
decoded = float_coder.decode(r)
assert decoded == '0.32299999994'
r = float_coder.encode('nan')
assert np.array_equal(np.array(r), np.array([1885, 1507, 1130, 753, 376]))
decoded = float_coder.decode(r)
assert decoded == 'nan'
assert float_coder.end_id == 1886
assert float_coder.code_range[0] == (1508, 1886)
assert float_coder.code_range[1] == (1131, 1508)
assert float_coder.code_range[2] == (754, 1131)
assert float_coder.code_range[3] == (377, 754)
assert float_coder.code_range[4] == (0, 377)
float_coder = FloatCode('t', 5, 0, True, 377, False)
float_coder.compute_code(data_series=series)
assert float_coder.end_id == 1885
assert float_coder.code_range[0] == (1508, 1885)
assert float_coder.code_range[1] == (1131, 1508)
assert float_coder.code_range[2] == (754, 1131)
assert float_coder.code_range[3] == (377, 754)
assert float_coder.code_range[4] == (0, 377)
try:
float_coder.encode('nan')
except ValueError as e:
assert str(e) == 'colum t cannot handle nan, please set hasnan=True'
@pytest.mark.unit
def test_int(self):
np.random.seed(1234)
array = np.random.randint(3, 1000, 100)
int_coder = IntCode('i', 3, 0, False, 16, True)
int_coder.compute_code(array)
r = int_coder.encode('232')
assert np.array_equal(np.array(r), np.array([32, 30, 2]))
decoded = int_coder.decode(r)
assert decoded == '232'
r = int_coder.encode('nan')
assert np.array_equal(np.array(r), np.array([36, 31, 15]))
decoded = int_coder.decode(r)
assert decoded == 'nan'
@pytest.mark.unit
def test_category(self):
np.random.seed(1234)
ALPHABET = np.array(list(string.ascii_lowercase + ' '))
array = np.char.add(np.random.choice(ALPHABET, 1000), np.random.choice(ALPHABET, 1000))
int_coder = CategoryCode('c', 0)
int_coder.compute_code(array)
r = int_coder.encode('xy')
assert np.array_equal(np.array(r), np.array([509]))
decoded = int_coder.decode(r)
assert decoded == 'xy'
@pytest.mark.unit
def test_column_coder(self):
column_configs = [
{
"name": "col_a",
"code_type": "float",
"args": {"code_len": 4, "base": 16, "fillall": False, "hasnan": True, "transform": 'yeo-johnson'},
},
{
"name": "col_b",
"code_type": "float",
"args": {"code_len": 4, "base": 177, "fillall": True, "hasnan": True, "transform": 'quantile'},
},
{
"name": "col_c",
"code_type": "int",
"args": {"code_len": 3, "base": 12, "fillall": True, "hasnan": True},
},
{"name": "col_d", "code_type": "category",},
]
example_arrays = {}
np.random.seed(1234)
array = np.random.random(100)
example_arrays['col_a'] = array
array = np.random.random(100)
example_arrays['col_b'] = array
array = np.random.randint(3, 1000, 100)
example_arrays['col_c'] = array
ALPHABET = np.array(list(string.ascii_lowercase + ' '))
array = np.char.add(np.random.choice(ALPHABET, 1000), np.random.choice(ALPHABET, 1000))
example_arrays['col_d'] = array
cc = ColumnCodes.get_column_codes(column_configs, example_arrays)
rr = cc.encode('col_a', '0.323')
assert np.array_equal(np.array(rr), np.array([49, 32, 29, 15]))
decoded = cc.decode('col_a', rr)
assert decoded == '0.3230'
rr = cc.encode('col_b', '0.323')
assert np.array_equal(np.array(rr), np.array([584, 457, 235, 110]))
decoded = cc.decode('col_b', rr)
assert decoded == '0.3229999'
rr = cc.encode('col_c', '232')
assert np.array_equal(np.array(rr), np.array([787, 780, 773]))
decoded = cc.decode('col_c', rr)
assert decoded == '232'
rr = cc.encode('col_d', 'xy')
assert np.array_equal(np.array(rr), np.array([1313]))
decoded = cc.decode('col_d', rr)
assert decoded == 'xy'
# assert cc.vocab_size == 1343
| NeMo-main | tests/collections/nlp/test_num_encoding.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import pytest
import torch
from omegaconf import OmegaConf
import nemo.collections.nlp as nemo_nlp
from nemo.collections.common.tokenizers.huggingface.auto_tokenizer import AutoTokenizer
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
def do_export(model, name: str):
with tempfile.TemporaryDirectory() as tmpdir:
# Generate filename in the temporary directory.
tmp_file_name = os.path.join(tmpdir, name + '.onnx')
# Test export.
if torch.cuda.is_available():
model = model.cuda()
model.export(tmp_file_name)
class TestHuggingFace:
config = {"language_model": {"pretrained_model_name": ""}, "tokenizer": {}}
omega_conf = OmegaConf.create(config)
@pytest.mark.unit
def test_list_pretrained_models(self):
pretrained_lm_models = nemo_nlp.modules.get_pretrained_lm_models_list()
assert len(pretrained_lm_models) > 0
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_get_pretrained_bert_model(self):
self.omega_conf.language_model.pretrained_model_name = 'bert-base-uncased'
model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf)
assert isinstance(model, nemo_nlp.modules.BertEncoder)
do_export(model, "bert-base-uncased")
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_get_pretrained_distilbert_model(self):
self.omega_conf.language_model.pretrained_model_name = 'distilbert-base-uncased'
model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf)
assert isinstance(model, nemo_nlp.modules.DistilBertEncoder)
do_export(model, "distilbert-base-uncased")
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_get_pretrained_roberta_model(self):
self.omega_conf.language_model.pretrained_model_name = 'roberta-base'
model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf)
assert isinstance(model, nemo_nlp.modules.RobertaEncoder)
do_export(model, "roberta-base-uncased")
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_get_pretrained_albert_model(self):
self.omega_conf.language_model.pretrained_model_name = 'albert-base-v1'
model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf)
assert isinstance(model, nemo_nlp.modules.AlbertEncoder)
do_export(model, "albert-base-v1")
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_get_pretrained_chinese_bert_wwm_model(self):
model_name = 'hfl/chinese-bert-wwm'
self.omega_conf.language_model.pretrained_model_name = model_name
model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf)
assert isinstance(model, nemo_nlp.modules.BertModule)
tokenizer = get_tokenizer(tokenizer_name=model_name)
assert isinstance(tokenizer, AutoTokenizer)
# model is not on HF anymore
# @pytest.mark.with_downloads()
# @pytest.mark.unit
# def test_get_pretrained_arabic_model(self):
# model_name = 'asafaya/bert-base-arabic'
# self.omega_conf.language_model.pretrained_model_name = model_name
# model = nemo_nlp.modules.get_lm_model(cfg=self.omega_conf)
# assert isinstance(model, nemo_nlp.modules.BertModule)
# tokenizer = get_tokenizer(tokenizer_name=model_name)
# assert isinstance(tokenizer, AutoTokenizer)
| NeMo-main | tests/collections/nlp/test_huggingface.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.nlp.data.dialogue.data_processor.assistant_data_processor import DialogueAssistantDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.data_processor.sgd_data_processor import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.dataset.dialogue_gpt_classification_dataset import (
DialogueGPTClassificationDataset,
)
from nemo.collections.nlp.data.dialogue.dataset.dialogue_s2s_generation_dataset import DialogueS2SGenerationDataset
from nemo.collections.nlp.data.dialogue.dataset.dialogue_sgd_bert_dataset import DialogueSGDBERTDataset
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueClassificationMetrics, DialogueGenerationMetrics
from nemo.collections.nlp.models.dialogue.dialogue_nearest_neighbour_model import DialogueNearestNeighbourModel
@pytest.mark.unit
def test_dialogue_metric_generation_f1():
generated_field = 'That is so good'
ground_truth_field = 'That is so awesome'
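    # the two sentences share 3 of their 4 words, so precision = recall = F1 = 75%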
precision, recall, f1 = DialogueGenerationMetrics._get_one_f1(generated_field, ground_truth_field)
assert precision == 75
assert recall == 75
assert f1 == 75
@pytest.mark.unit
def test_dialogue_metric_split_label_and_slots():
fields = ["reserve_restaurant\nslots: time_of_day(7pm), number_of_people(3)", "time_of_day(7pm)"]
labels, slots_list = DialogueClassificationMetrics.split_label_and_slots(fields, with_slots=True)
assert labels == ["reserve_restaurant", 'none']
assert slots_list == [["time_of_day(7pm)", "number_of_people(3)"], ["time_of_day(7pm)"]]
@pytest.mark.unit
def test_dialogue_metric_slot_filling_metrics():
generated_slots = [["time_of_day(7pm)", "number_of_people(3)"], ["time_of_day(7pm)"]]
ground_truth_slots = [["time_of_day(7pm)"], ["time_of_day(7pm)", "number_of_people(3)"]]
(
avg_precision,
avg_recall,
avg_f1,
avg_joint_goal_accuracy,
) = DialogueClassificationMetrics.get_slot_filling_metrics(generated_slots, ground_truth_slots)
assert avg_precision == 75
assert avg_recall == 75
assert avg_f1 == 75
assert avg_joint_goal_accuracy == 0
@pytest.mark.unit
def test_dialogue_assistant_data_processor_normalize_zero_shot_intent():
label0 = 'food_ordering.contextual_query'
normalized_label0 = 'contextual query'
label1 = 'food_ordering.nomatch'
normalized_label1 = 'no match'
label2 = 'food_ordering.no'
normalized_label2 = 'no'
assert normalized_label0 == DialogueAssistantDataProcessor.normalize_zero_shot_intent(label0)
assert normalized_label1 == DialogueAssistantDataProcessor.normalize_zero_shot_intent(label1)
assert normalized_label2 == DialogueAssistantDataProcessor.normalize_zero_shot_intent(label2)
@pytest.mark.unit
def test_dialogue_assistant_data_processor_get_continuous_slots():
slot_ids = [54, 54, 54, 19, 19, 18, 54, 54, 54]
empty_slot_id = 54
bio_slot_ids_to_unified_slot_ids = {18: 18, 19: 19, 54: 54}
continuous_slots = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids
)
assert continuous_slots == {19: [3, 5], 18: [5, 6]}
    # here 18 and 19 map to the same slot (originally variants of B-slot and I-slot)
slot_ids = [54, 54, 54, 19, 19, 18, 54, 54, 54]
empty_slot_id = 54
bio_slot_ids_to_unified_slot_ids = {18: 18, 19: 18, 54: 54}
continuous_slots = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids
)
assert continuous_slots == {18: [3, 6]}
# test if function works when non-empty slots are at boundary
slot_ids = [18, 54, 54, 19, 19]
empty_slot_id = 54
bio_slot_ids_to_unified_slot_ids = {18: 18, 19: 19, 54: 54}
continuous_slots = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids
)
assert continuous_slots == {18: [0, 1], 19: [3, 5]}
@pytest.mark.unit
def test_dialogue_assistant_map_bio_format_slots_to_unified_slots():
slots = ['B-time', 'I-time', 'B-alarm', 'I-alarm', 'O']
gt_bio_slot_ids_to_unified_slot_ids = {'0': '0', '1': '0', '2': '1', '3': '1', '4': '2'}
gt_unified_slots = ['time', 'alarm', 'O']
(
bio_slot_ids_to_unified_slot_ids,
unified_slots,
) = DialogueAssistantDataProcessor.map_bio_format_slots_to_unified_slots(slots)
assert gt_bio_slot_ids_to_unified_slot_ids == bio_slot_ids_to_unified_slot_ids
assert gt_unified_slots == unified_slots
# case in which BIOS scheme was not used in annotation
slots = ['time', 'alarm', 'O']
gt_bio_slot_ids_to_unified_slot_ids = {'0': '0', '1': '1', '2': '2'}
gt_unified_slots = ['time', 'alarm', 'O']
(
bio_slot_ids_to_unified_slot_ids,
unified_slots,
) = DialogueAssistantDataProcessor.map_bio_format_slots_to_unified_slots(slots)
assert gt_bio_slot_ids_to_unified_slot_ids == bio_slot_ids_to_unified_slot_ids
assert gt_unified_slots == unified_slots
@pytest.mark.unit
def test_dialogue_data_processor_get_relevant_idxs():
dataset_split = 'train'
dev_proportion = 10
n_samples = 1000
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, dev_proportion)
assert len(idxs) == 900
assert idxs != list(range(900))
dataset_split = 'dev'
dev_proportion = 40
n_samples = 1000
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, dev_proportion)
assert len(idxs) == 400
assert idxs != list(range(400))
dataset_split = 'test'
dev_proportion = 40
n_samples = 1000
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, dev_proportion)
assert len(idxs) == 1000
assert idxs == list(range(1000))
@pytest.mark.unit
def test_dialogue_sgd_data_processor_convert_camelcase_to_lower():
label = 'none'
gt_converted_label = 'none'
assert gt_converted_label == DialogueSGDDataProcessor.convert_camelcase_to_lower(label)
label = 'ReserveRestaurant'
gt_converted_label = 'reserve restaurant'
assert gt_converted_label == DialogueSGDDataProcessor.convert_camelcase_to_lower(label)
label = 'Alarm'
gt_converted_label = 'alarm'
assert gt_converted_label == DialogueSGDDataProcessor.convert_camelcase_to_lower(label)
@pytest.mark.unit
def test_dialogue_gpt_classification_dataset_linearize_slots():
slots = []
linearized_slots = 'None'
assert linearized_slots == DialogueGPTClassificationDataset.linearize_slots(slots)
slots = {'time': '7pm', 'place': 'field'}
linearized_slots = 'time(7pm), place(field)'
assert linearized_slots == DialogueGPTClassificationDataset.linearize_slots(slots)
slots = {'time': ['7pm', '1900'], 'place': 'field'}
linearized_slots = 'time(7pm), place(field)'
assert linearized_slots == DialogueGPTClassificationDataset.linearize_slots(slots)
@pytest.mark.unit
def test_dialogue_s2s_generation_dataset_format_actions():
actions = [
{'act': 'inform', 'slot': 'time', 'values': ['7pm', '1900']},
{'act': 'confirm', 'slot': 'place', 'values': ['hall']},
]
prompt_template = 'values'
formatted_actions = '7pm hall'
assert formatted_actions == DialogueS2SGenerationDataset.format_actions(prompt_template, actions)
prompt_template = 'slots_values'
formatted_actions = 'time (7pm) place (hall)'
assert formatted_actions == DialogueS2SGenerationDataset.format_actions(prompt_template, actions)
prompt_template = 'acts_slots_values'
formatted_actions = 'inform time (7pm) confirm place (hall)'
assert formatted_actions == DialogueS2SGenerationDataset.format_actions(prompt_template, actions)
@pytest.mark.unit
def test_dialogue_sgd_dataset_naive_tokenize():
utterance = 'I am feeling hungry so I would like to find a place to eat.'
tokens = [
'I',
' ',
'am',
' ',
'feeling',
' ',
'hungry',
' ',
'so',
' ',
'I',
' ',
'would',
' ',
'like',
' ',
'to',
' ',
'find',
' ',
'a',
' ',
'place',
' ',
'to',
' ',
'eat',
'.',
]
assert tokens == DialogueSGDBERTDataset._naive_tokenize(utterance)
@pytest.mark.unit
def test_dialogue_nearest_neighbour_mean_pooling():
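    # mean_pooling averages token embeddings over the positions permitted by the attention mask,
    # so an all-ones hidden state pools to ones, all-zeros pools to zeros, and a half-zeros/half-ones
    # sequence pools to 0.5.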
model_output = [torch.ones(8, 512, 768)]
attention_mask = torch.ones(8, 512)
assert torch.equal(
torch.ones(8, 768).float(), DialogueNearestNeighbourModel.mean_pooling(model_output, attention_mask)
)
model_output = [torch.zeros(8, 512, 768)]
attention_mask = torch.ones(8, 512)
assert torch.equal(
torch.zeros(8, 768).float(), DialogueNearestNeighbourModel.mean_pooling(model_output, attention_mask)
)
model_output = [torch.cat([torch.zeros(8, 256, 768), torch.ones(8, 256, 768)], axis=1)]
attention_mask = torch.ones(8, 512)
assert torch.equal(
torch.ones(8, 768).float() * 0.5, DialogueNearestNeighbourModel.mean_pooling(model_output, attention_mask)
)
| NeMo-main | tests/collections/nlp/test_dialogue.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import sys
from os import path
import pytest
class TestHydraRunner:
@pytest.mark.integration
def test_no_config(self):
""""Test app without config - fields missing causes error.
"""
# Create system call.
call = "python tests/hydra/my_app.py"
with pytest.raises(subprocess.CalledProcessError):
# Run the call as subprocess.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
@pytest.mark.integration
def test_config1(self):
""""Test injection of valid config1.
"""
# Create system call.
call = "python tests/hydra/my_app.py --config-name config1.yaml"
# Run the call as subprocess.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
# Make sure that .hydra dir is not present.
assert not path.exists(f".hydra")
# Make sure that default hydra log file is not present.
assert not path.exists(f"my_app.log")
@pytest.mark.integration
def test_config1_invalid(self):
""""Test injection of invalid config1.
"""
# Create system call.
call = "python tests/hydra/my_app.py --config-name config1_invalid.yaml"
with pytest.raises(subprocess.CalledProcessError):
# Run the call as subprocess.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
@pytest.mark.integration
def test_config2(self):
""""Test injection of valid config2 from a different folder.
"""
# Create system call.
call = "python tests/hydra/my_app.py --config-path config_subdir --config-name config2.yaml"
# Run the call as subprocess.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
# Make sure that .hydra dir is not present.
assert not path.exists(f".hydra")
# Make sure that default hydra log file is not present.
assert not path.exists(f"my_app.log")
@pytest.mark.integration
def test_config2_invalid(self):
""""Test injection of invalid config2 from a different folder.
"""
# Create system call.
call = "python tests/hydra/my_app.py --config-path config_subdir --config-name config2_invalid.yaml"
with pytest.raises(subprocess.CalledProcessError):
# Run the call as subprocess.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
@pytest.mark.integration
def test_config2_filepath_schema(self):
""""Test injection of valid config2 - using namepath with schema is prohibited.
"""
# Create system call.
call = "python tests/hydra/my_app.py --config-name config_subdir/config2_invalid.yaml"
with pytest.raises(subprocess.CalledProcessError):
# Run the call as subprocess.
subprocess.check_call(call, shell=True, stdout=sys.stdout, stderr=sys.stdout)
| NeMo-main | tests/hydra/test_hydra_runner.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import hydra
from omegaconf import MISSING, OmegaConf
from nemo.core.config import hydra_runner
@dataclass
class DefaultConfig:
"""
    This is the structured config for this application.
    It provides the schema used for validation of the user-written spec file,
    as well as default values of the selected parameters.
"""
# Dataset. Available options: [imdb, sst2]
dataset_name: str = MISSING
@hydra_runner(config_name="DefaultConfig", schema=DefaultConfig)
def my_app(cfg):
print(OmegaConf.to_yaml(cfg))
# Get dataset_name.
dataset_name = cfg.dataset_name
if __name__ == "__main__":
my_app()
| NeMo-main | tests/hydra/my_app.py |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import sys
import glob
import sphinx_book_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../.."))
sys.path.insert(0, os.path.abspath("../../nemo"))
from package_info import __version__
templates_path = ["_templates"]
autodoc_mock_imports = [
'torch',
'torch.nn',
'torch.utils',
'torch.optim',
'torch.utils.data',
'torch.utils.data.sampler',
'torchtext',
'torchvision',
'ruamel.yaml', # ruamel.yaml has ., which is troublesome for this regex
'hydra', # hydra-core in requirements, hydra during import
'dateutil', # part of core python
'transformers.tokenization_bert', # has ., troublesome for this regex
'sklearn', # scikit_learn in requirements, sklearn in import
'nemo_text_processing.inverse_text_normalization', # Not installed automatically
'nemo_text_processing.text_normalization', # Not installed automatically
'attr', # attrdict in requirements, attr in import
'torchmetrics', # inherited from PTL
'lightning_utilities', # inherited from PTL
'apex',
'megatron.core',
'transformer_engine',
'joblib', # inherited from optional code
'IPython',
'ipadic',
'psutil',
'regex',
]
_skipped_autodoc_mock_imports = ['wrapt', 'numpy']
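# Mock every package listed under requirements/ (except docs.txt and the small skip list above)
# so autodoc can import NeMo modules without the heavy dependencies being installed at docs build time.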
for req_path in sorted(list(glob.glob("../../requirements/*.txt"))):
if "docs.txt" in req_path:
continue
req_file = os.path.abspath(os.path.expanduser(req_path))
with open(req_file, 'r') as f:
for line in f:
line = line.replace("\n", "")
req = re.search(r"([a-zA-Z0-9-_]*)", line)
if req:
req = req.group(1)
req = req.replace("-", "_")
if req not in autodoc_mock_imports:
if req in _skipped_autodoc_mock_imports:
print(f"Skipping req : `{req}` (lib {line})")
continue
autodoc_mock_imports.append(req)
print(f"Adding req : `{req}` to autodoc mock requirements (lib {line})")
else:
print(f"`{req}` already added to autodoc mock requirements (lib {line})")
#
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.todo",
"sphinx.ext.coverage",
"sphinx.ext.mathjax",
"sphinx.ext.ifconfig",
"sphinx.ext.viewcode",
"sphinx.ext.napoleon",
"sphinx.ext.githubpages",
"sphinxcontrib.bibtex",
"sphinx.ext.inheritance_diagram",
"sphinx.ext.intersphinx",
"sphinx.ext.autosectionlabel",
"sphinxcontrib.bibtex",
"sphinx_copybutton",
"sphinxext.opengraph",
]
bibtex_bibfiles = [
'asr/asr_all.bib',
'nlp/nlp_all.bib',
'nlp/text_normalization/tn_itn_all.bib',
'tools/tools_all.bib',
'tts/tts_all.bib',
'text_processing/text_processing_all.bib',
'core/adapters/adapter_bib.bib',
]
intersphinx_mapping = {
'pytorch': ('https://pytorch.org/docs/stable', None),
'pytorch-lightning': ('https://pytorch-lightning.readthedocs.io/en/latest/', None),
}
# Set default flags for all classes.
autodoc_default_options = {'members': None, 'undoc-members': None, 'show-inheritance': True}
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "NVIDIA NeMo"
copyright = "© 2021-2022 NVIDIA Corporation & Affiliates. All rights reserved."
author = "NVIDIA CORPORATION"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
# The short X.Y version.
# version = "0.10.0"
version = __version__
# The full version, including alpha/beta/rc tags.
# release = "0.9.0"
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
### Previous NeMo theme
# # NVIDIA theme settings.
# html_theme = 'nvidia_theme'
# html_theme_path = ["."]
# html_theme_options = {
# 'display_version': True,
# 'project_version': version,
# 'project_name': project,
# 'logo_path': None,
# 'logo_only': True,
# }
# html_title = 'Introduction'
# html_logo = html_theme_options["logo_path"]
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "nemodoc"
### from TLT conf.py
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = "sphinx_book_theme"
html_logo = os.path.join('nv_logo.png')
html_title = 'NVIDIA NeMo'
html_theme_options = {
'logo_only': True,
'display_version': True,
# 'prev_next_buttons_location': 'bottom',
# 'style_external_links': False,
# 'style_nav_header_background': '#000000',
# Toc options
'collapse_navigation': False,
# 'sticky_navigation': False,
'navigation_depth': 10,
# 'includehidden': False,
# 'titles_only': False,
# Sphinx Book theme,
'repository_url': 'https://github.com/NVIDIA/NeMo',
'use_repository_button': True,
'show_navbar_depth': 1,
'show_toc_level': 10,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_favicon = 'favicon.ico'
html_static_path = ['_static']
html_last_updated_fmt = ''
def setup(app):
app.add_css_file('css/custom.css')
app.add_js_file('js/pk_scripts.js')
# html_css_files = [
# './custom.css',
# ]
# html_js_files = [
# './pk_scripts.js',
# ]
# OpenGraph settings
ogp_site_url = 'https://nvidia.github.io/NeMo/'
ogp_image = 'https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/_static/nv_logo.png'
# MathJax CDN
mathjax_path = "https://cdn.jsdelivr.net/npm/[email protected]/es5/mml-chtml.min.js"
| NeMo-main | docs/source/conf.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
import torch
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from nemo.collections.asr.data.audio_to_label import AudioToSpeechLabelDataset
from nemo.collections.asr.models import EncDecSpeakerLabelModel
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.core.config import hydra_runner
from nemo.utils import logging
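"""
Run offline speaker identification: match each test utterance against the enrolled speakers
using either a cosine-similarity backend on speaker embeddings or a neural classifier backend.

Example usage (a sketch; config keys follow conf/speaker_identification_infer.yaml, adjust paths to your setup):
    python speaker_identification_infer.py \
        data.enrollment_manifest=<enrollment_manifest> \
        data.test_manifest=<test_manifest> \
        data.out_manifest=<output_manifest> \
        backend.backend_model=cosine_similarity
"""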
seed_everything(42)
@hydra_runner(config_path="conf", config_name="speaker_identification_infer")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
device = 'cuda' if torch.cuda.is_available() else 'cpu'
enrollment_manifest = cfg.data.enrollment_manifest
test_manifest = cfg.data.test_manifest
out_manifest = cfg.data.out_manifest
sample_rate = cfg.data.sample_rate
backend = cfg.backend.backend_model.lower()
featurizer = WaveformFeaturizer(sample_rate=sample_rate)
dataset = AudioToSpeechLabelDataset(manifest_filepath=enrollment_manifest, labels=None, featurizer=featurizer)
enroll_id2label = dataset.id2label
if backend == 'cosine_similarity':
model_path = cfg.backend.cosine_similarity.model_path
batch_size = cfg.backend.cosine_similarity.batch_size
if model_path.endswith('.nemo'):
speaker_model = EncDecSpeakerLabelModel.restore_from(model_path)
else:
speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_path)
enroll_embs, _, enroll_truelabels, _ = speaker_model.batch_inference(
enrollment_manifest, batch_size, sample_rate, device=device,
)
test_embs, _, _, _ = speaker_model.batch_inference(test_manifest, batch_size, sample_rate, device=device,)
# length normalize
enroll_embs = enroll_embs / (np.linalg.norm(enroll_embs, ord=2, axis=-1, keepdims=True))
test_embs = test_embs / (np.linalg.norm(test_embs, ord=2, axis=-1, keepdims=True))
# reference embedding
reference_embs = []
keyslist = list(enroll_id2label.values())
for label_id in keyslist:
indices = np.where(enroll_truelabels == label_id)
            embedding = enroll_embs[indices].sum(axis=0).squeeze() / len(indices[0])  # average over this speaker's enrollment embeddings
reference_embs.append(embedding)
reference_embs = np.asarray(reference_embs)
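        # score each test embedding against every speaker's reference embedding via dot product
        # (test embeddings are length-normalized above)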
scores = np.matmul(test_embs, reference_embs.T)
matched_labels = scores.argmax(axis=-1)
elif backend == 'neural_classifier':
model_path = cfg.backend.neural_classifier.model_path
batch_size = cfg.backend.neural_classifier.batch_size
if model_path.endswith('.nemo'):
speaker_model = EncDecSpeakerLabelModel.restore_from(model_path)
else:
speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_path)
if speaker_model.decoder.final.out_features != len(enroll_id2label):
raise ValueError(
"number of labels mis match. Make sure you trained or finetuned neural classifier with labels from enrollement manifest_filepath"
)
_, test_logits, _, _ = speaker_model.batch_inference(test_manifest, batch_size, sample_rate, device=device,)
matched_labels = test_logits.argmax(axis=-1)
with open(test_manifest, 'rb') as f1, open(out_manifest, 'w', encoding='utf-8') as f2:
lines = f1.readlines()
for idx, line in enumerate(lines):
line = line.strip()
item = json.loads(line)
item['infer'] = enroll_id2label[matched_labels[idx]]
json.dump(item, f2)
f2.write('\n')
logging.info("Inference labels have been written to {} manifest file".format(out_manifest))
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/recognition/speaker_identification_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from nemo.collections.asr.models import EncDecSpeakerLabelModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
seed_everything(42)
@hydra_runner(config_path="conf", config_name="titanet-finetune.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
log_dir = exp_manager(trainer, cfg.get("exp_manager", None))
speaker_model = EncDecSpeakerLabelModel(cfg=cfg.model, trainer=trainer)
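    # initialize model weights from a pretrained checkpoint if the config provides
    # init_from_pretrained_model / init_from_nemo_model / init_from_ptl_ckpt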
speaker_model.maybe_init_from_pretrained_checkpoint(cfg)
# save labels to file
if log_dir is not None:
with open(os.path.join(log_dir, 'labels.txt'), 'w') as f:
if speaker_model.labels is not None:
for label in speaker_model.labels:
f.write(f'{label}\n')
trainer.fit(speaker_model)
torch.distributed.destroy_process_group()
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if trainer.is_global_zero:
trainer = pl.Trainer(devices=1, accelerator=cfg.trainer.accelerator, strategy=cfg.trainer.strategy)
if speaker_model.prepare_test(trainer):
trainer.test(speaker_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/recognition/speaker_reco_finetune.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import pickle as pkl
import sys
import numpy as np
from scipy.interpolate import interp1d
from scipy.optimize import brentq
from sklearn.metrics import roc_curve
from tqdm import tqdm
"""
This script facilitates computing the EER % based on cosine similarity
for the VoxCeleb dataset.
Args:
trial_file str: path to voxceleb trial file
emb : path to pickle file of embeddings dictionary (generated from spkr_get_emb.py)
save_kaldi_emb: if required pass this argument to save kaldi embeddings for KALDI PLDA training later
Note: order of audio files in manifest file should match the embeddings
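Example:
    python voxceleb_eval.py --trial_file <path/to/voxceleb_trial_file> --emb <path/to/embeddings.pkl>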
"""
def get_acc(trial_file='', emb='', save_kaldi_emb=False):
trial_score = open('trial_score.txt', 'w')
dirname = os.path.dirname(trial_file)
with open(emb, 'rb') as f:
emb = pkl.load(f)
trial_embs = []
keys = []
all_scores = []
all_keys = []
# for each trials in trial file
with open(trial_file, 'r') as f:
tmp_file = f.readlines()
for line in tqdm(tmp_file):
line = line.strip()
truth, x_speaker, y_speaker = line.split()
x_speaker = x_speaker.split('/')
x_speaker = '@'.join(x_speaker)
y_speaker = y_speaker.split('/')
y_speaker = '@'.join(y_speaker)
X = emb[x_speaker]
Y = emb[y_speaker]
if save_kaldi_emb and x_speaker not in keys:
keys.append(x_speaker)
trial_embs.extend([X])
if save_kaldi_emb and y_speaker not in keys:
keys.append(y_speaker)
trial_embs.extend([Y])
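            # cosine similarity between the two embeddings, rescaled from [-1, 1] to [0, 1]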
score = np.dot(X, Y) / ((np.dot(X, X) * np.dot(Y, Y)) ** 0.5)
score = (score + 1) / 2
all_scores.append(score)
trial_score.write(str(score) + "\t" + truth)
truth = int(truth)
all_keys.append(truth)
trial_score.write('\n')
trial_score.close()
if save_kaldi_emb:
np.save(dirname + '/all_embs_voxceleb.npy', np.asarray(trial_embs))
np.save(dirname + '/all_ids_voxceleb.npy', np.asarray(keys))
print("Saved KALDI PLDA related embeddings to {}".format(dirname))
return np.asarray(all_scores), np.asarray(all_keys)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--trial_file", help="path to voxceleb trial file", type=str, required=True)
parser.add_argument("--emb", help="path to numpy file of embeddings", type=str, required=True)
parser.add_argument(
"--save_kaldi_emb",
help=":save kaldi embeddings for KALDI PLDA training later",
required=False,
action='store_true',
)
args = parser.parse_args()
trial_file, emb, save_kaldi_emb = args.trial_file, args.emb, args.save_kaldi_emb
y_score, y = get_acc(trial_file=trial_file, emb=emb, save_kaldi_emb=save_kaldi_emb)
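    # EER is the operating point on the ROC curve where the false positive rate equals
    # the false negative rate (1 - TPR); brentq finds that crossing point.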
fpr, tpr, thresholds = roc_curve(y, y_score, pos_label=1)
eer = brentq(lambda x: 1.0 - x - interp1d(fpr, tpr)(x), 0.0, 1.0)
sys.stdout.write("{0:.2f}\n".format(eer * 100))
| NeMo-main | examples/speaker_tasks/recognition/voxceleb_eval.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from nemo.collections.asr.models import EncDecSpeakerLabelModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
Basic run (on GPU for 10 epochs for 2 class training):
EXP_NAME=sample_run
python ./speaker_reco.py --config-path='conf' --config-name='SpeakerNet_recognition_3x2x512.yaml' \
trainer.max_epochs=10 \
model.train_ds.batch_size=64 model.validation_ds.batch_size=64 \
model.train_ds.manifest_filepath="<train_manifest>" model.validation_ds.manifest_filepath="<dev_manifest>" \
model.test_ds.manifest_filepath="<test_manifest>" \
trainer.devices=1 \
model.decoder.params.num_classes=2 \
exp_manager.name=$EXP_NAME +exp_manager.use_datetime_version=False \
exp_manager.exp_dir='./speaker_exps'
See https://github.com/NVIDIA/NeMo/blob/main/tutorials/speaker_tasks/Speaker_Identification_Verification.ipynb for notebook tutorial
Optional: Use a tarred dataset to speed up data loading.
    Prepare ONE manifest that contains all training data you would like to include. Validation should use a non-tarred dataset.
    Note that tarred datasets may impact validation scores because they drop samples in order to have the same number of files per tarfile;
    scores might be off since some data is missing.
Use the `convert_to_tarred_audio_dataset.py` script under <NEMO_ROOT>/speech_recognition/scripts in order to prepare tarred audio dataset.
For details, please see TarredAudioToClassificationLabelDataset in <NEMO_ROOT>/nemo/collections/asr/data/audio_to_label.py
"""
seed_everything(42)
@hydra_runner(config_path="conf", config_name="SpeakerNet_verification_3x2x256.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
log_dir = exp_manager(trainer, cfg.get("exp_manager", None))
speaker_model = EncDecSpeakerLabelModel(cfg=cfg.model, trainer=trainer)
# save labels to file
if log_dir is not None:
with open(os.path.join(log_dir, 'labels.txt'), 'w') as f:
if speaker_model.labels is not None:
for label in speaker_model.labels:
f.write(f'{label}\n')
trainer.fit(speaker_model)
if not trainer.fast_dev_run:
model_path = os.path.join(log_dir, '..', 'spkr.nemo')
speaker_model.save_to(model_path)
torch.distributed.destroy_process_group()
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if trainer.is_global_zero:
trainer = pl.Trainer(devices=1, accelerator=cfg.trainer.accelerator, strategy=cfg.trainer.strategy)
if speaker_model.prepare_test(trainer):
trainer.test(speaker_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/recognition/speaker_reco.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is a helper script to extract speaker embeddings based on manifest file
Usage:
python extract_speaker_embeddings.py --manifest='/path/to/manifest/file'
--model_path='/path/to/.nemo/file'(optional)
--embedding_dir='/path/to/embedding/directory'
Args:
--manifest: path to manifest file containing audio_file paths for which embeddings need to be extracted
--model_path (optional): path to a .nemo speaker verification model file to extract embeddings; if not passed, the pretrained
titanet_large model is downloaded from NGC and used to extract embeddings
--embedding_dir (optional): path to the directory where embeddings will be stored (default: './')
"""
import json
import os
import pickle as pkl
from argparse import ArgumentParser
import numpy as np
import torch
from nemo.collections.asr.models.label_models import EncDecSpeakerLabelModel
from nemo.collections.asr.parts.utils.speaker_utils import embedding_normalize
from nemo.utils import logging
def get_embeddings(speaker_model, manifest_file, batch_size=1, embedding_dir='./', device='cuda'):
"""
save embeddings to pickle file
Args:
speaker_model: NeMo <EncDecSpeakerLabel> model
manifest_file: path to the manifest file containing the audio file path from which the
embeddings should be extracted
batch_size: batch_size for inference
embedding_dir: path to directory to store embeddings file
device: compute device to perform operations
"""
all_embs, _, _, _ = speaker_model.batch_inference(manifest_file, batch_size=batch_size, device=device)
all_embs = np.asarray(all_embs)
all_embs = embedding_normalize(all_embs)
out_embeddings = {}
with open(manifest_file, 'r', encoding='utf-8') as manifest:
for i, line in enumerate(manifest.readlines()):
line = line.strip()
dic = json.loads(line)
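            # build a unique key for each utterance from the last three components of its audio file path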
uniq_name = '@'.join(dic['audio_filepath'].split('/')[-3:])
out_embeddings[uniq_name] = all_embs[i]
embedding_dir = os.path.join(embedding_dir, 'embeddings')
if not os.path.exists(embedding_dir):
os.makedirs(embedding_dir, exist_ok=True)
prefix = manifest_file.split('/')[-1].rsplit('.', 1)[-2]
name = os.path.join(embedding_dir, prefix)
embeddings_file = name + '_embeddings.pkl'
pkl.dump(out_embeddings, open(embeddings_file, 'wb'))
logging.info("Saved embedding files to {}".format(embedding_dir))
def main():
parser = ArgumentParser()
parser.add_argument(
"--manifest", type=str, required=True, help="Path to manifest file",
)
parser.add_argument(
"--model_path",
type=str,
default='titanet_large',
required=False,
help="path to .nemo speaker verification model file to extract embeddings, if not passed SpeakerNet-M model would be downloaded from NGC and used to extract embeddings",
)
parser.add_argument(
"--batch_size", type=int, default=1, required=False, help="batch size",
)
parser.add_argument(
"--embedding_dir",
type=str,
default='./',
required=False,
help="path to directory where embeddings need to stored default:'./'",
)
args = parser.parse_args()
torch.set_grad_enabled(False)
if args.model_path.endswith('.nemo'):
logging.info(f"Using local speaker model from {args.model_path}")
speaker_model = EncDecSpeakerLabelModel.restore_from(restore_path=args.model_path)
elif args.model_path.endswith('.ckpt'):
speaker_model = EncDecSpeakerLabelModel.load_from_checkpoint(checkpoint_path=args.model_path)
else:
speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_name="titanet_large")
logging.info(f"using pretrained titanet_large speaker model from NGC")
device = 'cuda'
if not torch.cuda.is_available():
device = 'cpu'
logging.warning("Running model on CPU, for faster performance it is adviced to use atleast one NVIDIA GPUs")
get_embeddings(
speaker_model, args.manifest, batch_size=args.batch_size, embedding_dir=args.embedding_dir, device=device
)
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/recognition/extract_speaker_embeddings.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.asr.models.msdd_models import NeuralDiarizer
from nemo.core.config import hydra_runner
"""
Run the entire speaker diarization pipeline: VAD, clustering diarizer for initializing clustering then Multi-scale Diarization Decoder (MSDD).
python multiscale_diar_decoder_infer.py --config-path='../conf/inference' --config-name='diar_infer_telephonic.yaml' \
diarizer.vad.model_path=<NeMo VAD model path> \
diarizer.msdd_model.model_path=<NeMo MSDD model path> \
diarizer.oracle_vad=False \
diarizer.manifest_filepath=<test_manifest> \
diarizer.out_dir=<test_temp_dir> \
"""
@hydra_runner(config_path="../conf/inference", config_name="diar_infer_telephonic.yaml")
def main(cfg):
diarizer_model = NeuralDiarizer(cfg=cfg).to(cfg.device)
diarizer_model.diarize()
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/diarization/neural_diarizer/multiscale_diar_decoder_infer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from nemo.collections.asr.models import EncDecDiarLabelModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
Example training session (single GPU training on telephonic datasets)
python ./multiscale_diar_decoder.py --config-path='../conf/neural_diarizer' --config-name='msdd_5scl_15_05_50Povl_256x3x32x2.yaml' \
trainer.devices=1 \
model.base.diarizer.speaker_embeddings.model_path="titanet_large" \
model.train_ds.manifest_filepath="<train_manifest_path>" \
model.validation_ds.manifest_filepath="<dev_manifest_path>" \
model.train_ds.emb_dir="<train_temp_dir>" \
model.validation_ds.emb_dir="<dev_temp_dir>" \
exp_manager.name='sample_train' \
exp_manager.exp_dir='./msdd_exp'
"""
seed_everything(42)
@hydra_runner(config_path="../conf/neural_diarizer", config_name="msdd_5scl_15_05_50Povl_256x3x32x2.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
msdd_model = EncDecDiarLabelModel(cfg=cfg.model, trainer=trainer)
trainer.fit(msdd_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/diarization/neural_diarizer/multiscale_diar_decoder.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
from nemo.collections.asr.parts.utils.decoder_timestamps_utils import ASRDecoderTimeStamps
from nemo.collections.asr.parts.utils.diarization_utils import OfflineDiarWithASR
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
This script demonstrates how to run offline speaker diarization with asr.
Usage:
python offline_diar_with_asr_infer.py \
diarizer.manifest_filepath=<path to manifest file> \
diarizer.out_dir='demo_asr_output' \
diarizer.speaker_embeddings.model_path=<pretrained modelname or path to .nemo> \
diarizer.asr.model_path=<pretrained modelname or path to .nemo> \
diarizer.asr.parameters.asr_based_vad=True \
diarizer.speaker_embeddings.parameters.save_embeddings=False
Check out all parameters and their meanings in ../conf/inference/diar_infer_meeting.yaml (the config loaded by this script).
For details, have a look at <NeMo_git_root>/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
Currently, the following NGC models are supported:
stt_en_quartznet15x5
stt_en_citrinet*
stt_en_conformer_ctc*
"""
@hydra_runner(config_path="../conf/inference", config_name="diar_infer_meeting.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
# ASR inference for words and word timestamps
asr_decoder_ts = ASRDecoderTimeStamps(cfg.diarizer)
asr_model = asr_decoder_ts.set_asr_model()
word_hyp, word_ts_hyp = asr_decoder_ts.run_ASR(asr_model)
# Create a class instance for matching ASR and diarization results
asr_diar_offline = OfflineDiarWithASR(cfg.diarizer)
asr_diar_offline.word_ts_anchor_offset = asr_decoder_ts.word_ts_anchor_offset
# Diarization inference for speaker labels
diar_hyp, diar_score = asr_diar_offline.run_diarization(cfg, word_ts_hyp)
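    # Combine the diarization output with the ASR words to produce speaker-annotated transcripts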
trans_info_dict = asr_diar_offline.get_transcript_with_speaker_labels(diar_hyp, word_hyp, word_ts_hyp)
    # If reference RTTM files are provided, evaluate the diarization error rate (DER)
if diar_score is not None:
metric, mapping_dict, _ = diar_score
# Get session-level diarization error rate and speaker counting error
der_results = OfflineDiarWithASR.gather_eval_results(
diar_score=diar_score,
audio_rttm_map_dict=asr_diar_offline.AUDIO_RTTM_MAP,
trans_info_dict=trans_info_dict,
root_path=asr_diar_offline.root_path,
)
# Calculate WER and cpWER if reference CTM files exist
wer_results = OfflineDiarWithASR.evaluate(
hyp_trans_info_dict=trans_info_dict,
audio_file_list=asr_diar_offline.audio_file_list,
ref_ctm_file_list=asr_diar_offline.ctm_file_list,
)
# Print average DER, WER and cpWER
OfflineDiarWithASR.print_errors(der_results=der_results, wer_results=wer_results)
# Save detailed session-level evaluation results in `root_path`.
OfflineDiarWithASR.write_session_level_result_in_csv(
der_results=der_results,
wer_results=wer_results,
root_path=asr_diar_offline.root_path,
csv_columns=asr_diar_offline.csv_columns,
)
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/diarization/clustering_diarizer/offline_diar_with_asr_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from omegaconf import OmegaConf
from pytorch_lightning import seed_everything
from nemo.collections.asr.models import ClusteringDiarizer
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
This script demonstrates how to run speaker diarization.
Usage:
python offline_diar_infer.py \
diarizer.manifest_filepath=<path to manifest file> \
diarizer.out_dir='demo_output' \
diarizer.speaker_embeddings.model_path=<pretrained modelname or path to .nemo> \
diarizer.vad.model_path='vad_marblenet' \
diarizer.speaker_embeddings.parameters.save_embeddings=False
Check out all parameters and their meanings in ../conf/inference/diar_infer_meeting.yaml (the config loaded by this script).
For details, have a look at <NeMo_git_root>/tutorials/speaker_tasks/Speaker_Diarization_Inference.ipynb
"""
seed_everything(42)
@hydra_runner(config_path="../conf/inference", config_name="diar_infer_meeting.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
sd_model = ClusteringDiarizer(cfg=cfg).to(cfg.device)
sd_model.diarize()
if __name__ == '__main__':
main()
| NeMo-main | examples/speaker_tasks/diarization/clustering_diarizer/offline_diar_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to compute the Word or Character Error Rate of a given ASR model for a given manifest file for some dataset.
The manifest file must conform to standard ASR definition - containing `audio_filepath` and `text` as the ground truth.
Note: This script depends on the `transcribe_speech.py` script, and therefore both scripts should be located in the
same directory during execution.
# Arguments
<< All arguments of `transcribe_speech.py` are inherited by this script, so please refer to `transcribe_speech.py`
for full list of arguments >>
dataset_manifest: Required - path to dataset JSON manifest file (in NeMo format)
output_filename: Optional - output filename where the transcriptions will be written.
use_cer: Bool, whether to compute CER or WER
tolerance: Float, maximum WER/CER allowed; an error is raised if the computed metric exceeds this value.
only_score_manifest: Bool, when set will skip audio transcription and just calculate WER of provided manifest.
# Usage
## To score a dataset with a manifest file that does not contain previously transcribed `pred_text`.
python speech_to_text_eval.py \
model_path=null \
pretrained_name=null \
dataset_manifest=<Mandatory: Path to an ASR dataset manifest file> \
output_filename=<Optional: Some output filename which will hold the transcribed text as a manifest> \
batch_size=32 \
amp=True \
use_cer=False
## To score a manifest file which has been previously augmented with transcribed text as `pred_text`
This is useful when one uses `transcribe_speech_parallel.py` to transcribe larger datasets, and results are written
to a manifest which has the two keys `text` (for ground truth) and `pred_text` (for model's transcription)
python speech_to_text_eval.py \
dataset_manifest=<Mandatory: Path to an ASR dataset manifest file> \
use_cer=False \
only_score_manifest=True
"""
import json
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import torch
import transcribe_speech
from omegaconf import MISSING, OmegaConf, open_dict
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.parts.utils.transcribe_utils import PunctuationCapitalization, TextProcessingConfig
from nemo.core.config import hydra_runner
from nemo.utils import logging
@dataclass
class EvaluationConfig(transcribe_speech.TranscriptionConfig):
dataset_manifest: str = MISSING
output_filename: Optional[str] = "evaluation_transcripts.json"
# decoder type: ctc or rnnt, can be used to switch between CTC and RNNT decoder for Joint RNNT/CTC models
decoder_type: Optional[str] = None
# att_context_size can be set for cache-aware streaming models with multiple look-aheads
att_context_size: Optional[list] = None
use_cer: bool = False
tolerance: Optional[float] = None
only_score_manifest: bool = False
text_processing: Optional[TextProcessingConfig] = TextProcessingConfig(
punctuation_marks=".,?", separate_punctuation=False, do_lowercase=False, rm_punctuation=False,
)
@hydra_runner(config_name="EvaluationConfig", schema=EvaluationConfig)
def main(cfg: EvaluationConfig):
torch.set_grad_enabled(False)
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.audio_dir is not None:
raise RuntimeError(
"Evaluation script requires ground truth labels to be passed via a manifest file. "
"If manifest file is available, submit it via `dataset_manifest` argument."
)
if not os.path.exists(cfg.dataset_manifest):
raise FileNotFoundError(f"The dataset manifest file could not be found at path : {cfg.dataset_manifest}")
if not cfg.only_score_manifest:
# Transcribe speech into an output directory
transcription_cfg = transcribe_speech.main(cfg) # type: EvaluationConfig
# Release GPU memory if it was used during transcription
if torch.cuda.is_available():
torch.cuda.empty_cache()
logging.info("Finished transcribing speech dataset. Computing ASR metrics..")
else:
cfg.output_filename = cfg.dataset_manifest
transcription_cfg = cfg
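    # Collect ground-truth (`text`) and predicted (`pred_text`) transcripts from the output manifest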
ground_truth_text = []
predicted_text = []
invalid_manifest = False
with open(transcription_cfg.output_filename, 'r') as f:
for line in f:
data = json.loads(line)
if 'pred_text' not in data:
invalid_manifest = True
break
ground_truth_text.append(data['text'])
predicted_text.append(data['pred_text'])
pc = PunctuationCapitalization(cfg.text_processing.punctuation_marks)
if cfg.text_processing.separate_punctuation:
ground_truth_text = pc.separate_punctuation(ground_truth_text)
predicted_text = pc.separate_punctuation(predicted_text)
if cfg.text_processing.do_lowercase:
ground_truth_text = pc.do_lowercase(ground_truth_text)
predicted_text = pc.do_lowercase(predicted_text)
if cfg.text_processing.rm_punctuation:
ground_truth_text = pc.rm_punctuation(ground_truth_text)
predicted_text = pc.rm_punctuation(predicted_text)
# Test for invalid manifest supplied
if invalid_manifest:
raise ValueError(
f"Invalid manifest provided: {transcription_cfg.output_filename} does not "
f"contain value for `pred_text`."
)
    # Compute both WER and CER; the reported metric is selected below
cer = word_error_rate(hypotheses=predicted_text, references=ground_truth_text, use_cer=True)
wer = word_error_rate(hypotheses=predicted_text, references=ground_truth_text, use_cer=False)
if cfg.use_cer:
metric_name = 'CER'
metric_value = cer
else:
metric_name = 'WER'
metric_value = wer
if cfg.tolerance is not None:
if metric_value > cfg.tolerance:
raise ValueError(f"Got {metric_name} of {metric_value}, which was higher than tolerance={cfg.tolerance}")
logging.info(f'Got {metric_name} of {metric_value}. Tolerance was {cfg.tolerance}')
logging.info(f'Dataset WER/CER ' + str(round(100 * wer, 2)) + "%/" + str(round(100 * cer, 2)) + "%")
# Inject the metric name and score into the config, and return the entire config
with open_dict(cfg):
cfg.metric_name = metric_name
cfg.metric_value = metric_value
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/speech_to_text_eval.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from dataclasses import dataclass, is_dataclass
from typing import List, Optional, Union
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf, open_dict
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel, EncDecHybridRNNTCTCModel
from nemo.collections.asr.modules.conformer_encoder import ConformerChangeConfig
from nemo.collections.asr.parts.utils.eval_utils import cal_write_wer
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.collections.asr.parts.utils.transcribe_utils import (
compute_output_filename,
prepare_audio_data,
setup_model,
transcribe_partial_audio,
write_transcription,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
Transcribe audio file on a single CPU/GPU. Useful for transcription of moderate amounts of audio data.
# Arguments
model_path: path to .nemo ASR checkpoint
pretrained_name: name of pretrained ASR model (from NGC registry)
audio_dir: path to directory with audio files
dataset_manifest: path to dataset JSON manifest file (in NeMo format)
compute_timestamps: Bool to request greedy time stamp information (if the model supports it)
compute_langs: Bool to request language ID information (if the model supports it)
(Optionally: You can limit the type of timestamp computations using below overrides)
ctc_decoding.ctc_timestamp_type="all" # (default all, can be [all, char, word])
rnnt_decoding.rnnt_timestamp_type="all" # (default all, can be [all, char, word])
output_filename: Output filename where the transcriptions will be written
batch_size: batch size during inference
cuda: Optional int to enable or disable execution of model on certain CUDA device.
allow_mps: Bool to allow using MPS (Apple Silicon M-series GPU) device if available
amp: Bool to decide if Automatic Mixed Precision should be used during inference
audio_type: Str filetype of the audio. Supported = wav, flac, mp3
overwrite_transcripts: Bool which when set allows repeated transcriptions to overwrite previous results.
ctc_decoding: Decoding sub-config for CTC. Refer to documentation for specific values.
rnnt_decoding: Decoding sub-config for RNNT. Refer to documentation for specific values.
calculate_wer: Bool to decide whether to calculate wer/cer at end of this script
clean_groundtruth_text: Bool to clean groundtruth text
langid: Str used for convert_num_to_words during groundtruth cleaning
use_cer: Bool to use Character Error Rate (CER) or Word Error Rate (WER)
# Usage
ASR model can be specified by either "model_path" or "pretrained_name".
Data for transcription can be defined with either "audio_dir" or "dataset_manifest".
append_pred - optional. Allows you to add more than one prediction to an existing .json
pred_name_postfix - optional. The name you want to be written for the current model
Results are returned in a JSON manifest file.
python transcribe_speech.py \
model_path=null \
pretrained_name=null \
audio_dir="<remove or path to folder of audio files>" \
dataset_manifest="<remove or path to manifest>" \
output_filename="<remove or specify output filename>" \
clean_groundtruth_text=True \
langid='en' \
batch_size=32 \
compute_timestamps=False \
compute_langs=False \
cuda=0 \
amp=True \
append_pred=False \
pred_name_postfix="<remove or use another model name for output filename>"
"""
@dataclass
class ModelChangeConfig:
# Sub-config for changes specific to the Conformer Encoder
conformer: ConformerChangeConfig = ConformerChangeConfig()
@dataclass
class TranscriptionConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
channel_selector: Optional[
Union[int, str]
] = None # Used to select a single channel from multichannel audio, or use average across channels
audio_key: str = 'audio_filepath' # Used to override the default audio key in dataset_manifest
eval_config_yaml: Optional[str] = None # Path to a yaml file of config of evaluation
# General configs
output_filename: Optional[str] = None
batch_size: int = 32
num_workers: int = 0
    append_pred: bool = False  # If True, add the new predictions as an additional field to an existing output manifest.
pred_name_postfix: Optional[str] = None # If you need to use another model name, rather than standard one.
random_seed: Optional[int] = None # seed number going to be used in seed_everything()
# Set to True to output greedy timestamp information (only supported models)
compute_timestamps: bool = False
# set to True if need to return full alignment information
preserve_alignment: bool = False
# Set to True to output language ID information
compute_langs: bool = False
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
allow_mps: bool = False # allow to select MPS device (Apple Silicon M-series GPU)
amp: bool = False
amp_dtype: str = "float16" # can be set to "float16" or "bfloat16" when using amp
audio_type: str = "wav"
# Recompute model transcription, even if the output folder exists with scores.
overwrite_transcripts: bool = True
# Decoding strategy for CTC models
ctc_decoding: CTCDecodingConfig = CTCDecodingConfig()
# Decoding strategy for RNNT models
rnnt_decoding: RNNTDecodingConfig = RNNTDecodingConfig(fused_batch_size=-1)
# decoder type: ctc or rnnt, can be used to switch between CTC and RNNT decoder for Hybrid RNNT/CTC models
decoder_type: Optional[str] = None
# att_context_size can be set for cache-aware streaming models with multiple look-aheads
att_context_size: Optional[list] = None
# Use this for model-specific changes before transcription
model_change: ModelChangeConfig = ModelChangeConfig()
# Config for word / character error rate calculation
calculate_wer: bool = True
clean_groundtruth_text: bool = False
langid: str = "en" # specify this for convert_num_to_words step in groundtruth cleaning
use_cer: bool = False
# can be set to True to return list of transcriptions instead of the config
# if True, will also skip writing anything to the output file
return_transcriptions: bool = False
# Set to False to return text instead of hypotheses from the transcribe function, so as to save memory
return_hypotheses: bool = True
@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig)
def main(cfg: TranscriptionConfig) -> Union[TranscriptionConfig, List[Hypothesis]]:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
for key in cfg:
cfg[key] = None if cfg[key] == 'None' else cfg[key]
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.random_seed:
pl.seed_everything(cfg.random_seed)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
    # Load augmentor from an external yaml file which contains eval info; could be extended to other features such as VAD, P&C
augmentor = None
if cfg.eval_config_yaml:
eval_config = OmegaConf.load(cfg.eval_config_yaml)
augmentor = eval_config.test_ds.get("augmentor")
logging.info(f"Will apply on-the-fly augmentation on samples during transcription: {augmentor} ")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
map_location = torch.device('cuda:0')
elif cfg.allow_mps and hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
logging.warning(
"MPS device (Apple Silicon M-series GPU) support is experimental."
" Env variable `PYTORCH_ENABLE_MPS_FALLBACK=1` should be set in most cases to avoid failures."
)
device = [0]
accelerator = 'mps'
map_location = torch.device('mps')
else:
device = 1
accelerator = 'cpu'
map_location = torch.device('cpu')
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device(f'cuda:{cfg.cuda}')
logging.info(f"Inference will be done on device: {map_location}")
asr_model, model_name = setup_model(cfg, map_location)
trainer = pl.Trainer(devices=device, accelerator=accelerator)
asr_model.set_trainer(trainer)
asr_model = asr_model.eval()
# we will adjust this flag if the model does not support it
compute_timestamps = cfg.compute_timestamps
compute_langs = cfg.compute_langs
# has to be True if timestamps are required
preserve_alignment = True if cfg.compute_timestamps else cfg.preserve_alignment
# Check whether model and decoder type match
if isinstance(asr_model, EncDecCTCModel):
if cfg.decoder_type and cfg.decoder_type != 'ctc':
raise ValueError('CTC model only support ctc decoding!')
elif isinstance(asr_model, EncDecHybridRNNTCTCModel):
if cfg.decoder_type and cfg.decoder_type not in ['ctc', 'rnnt']:
raise ValueError('Hybrid model only support ctc or rnnt decoding!')
else: # rnnt model, there could be other models needs to be addressed.
if cfg.decoder_type and cfg.decoder_type != 'rnnt':
raise ValueError('RNNT model only support rnnt decoding!')
if cfg.decoder_type and hasattr(asr_model.encoder, 'set_default_att_context_size'):
asr_model.encoder.set_default_att_context_size(cfg.att_context_size)
# Setup decoding strategy
if hasattr(asr_model, 'change_decoding_strategy'):
if cfg.decoder_type is not None:
# TODO: Support compute_langs in CTC eventually
if cfg.compute_langs and cfg.decoder_type == 'ctc':
raise ValueError("CTC models do not support `compute_langs` at the moment")
decoding_cfg = cfg.rnnt_decoding if cfg.decoder_type == 'rnnt' else cfg.ctc_decoding
decoding_cfg.compute_timestamps = cfg.compute_timestamps # both ctc and rnnt support it
if 'preserve_alignments' in decoding_cfg:
decoding_cfg.preserve_alignments = preserve_alignment
if 'compute_langs' in decoding_cfg:
decoding_cfg.compute_langs = cfg.compute_langs
if hasattr(asr_model, 'cur_decoder'):
asr_model.change_decoding_strategy(decoding_cfg, decoder_type=cfg.decoder_type)
else:
asr_model.change_decoding_strategy(decoding_cfg)
# Check if ctc or rnnt model
elif hasattr(asr_model, 'joint'): # RNNT model
cfg.rnnt_decoding.fused_batch_size = -1
cfg.rnnt_decoding.compute_timestamps = cfg.compute_timestamps
cfg.rnnt_decoding.compute_langs = cfg.compute_langs
if 'preserve_alignments' in cfg.rnnt_decoding:
cfg.rnnt_decoding.preserve_alignments = preserve_alignment
asr_model.change_decoding_strategy(cfg.rnnt_decoding)
else:
if cfg.compute_langs:
raise ValueError("CTC models do not support `compute_langs` at the moment.")
cfg.ctc_decoding.compute_timestamps = cfg.compute_timestamps
asr_model.change_decoding_strategy(cfg.ctc_decoding)
# Setup decoding config based on model type and decoder_type
with open_dict(cfg):
if isinstance(asr_model, EncDecCTCModel) or (
isinstance(asr_model, EncDecHybridRNNTCTCModel) and cfg.decoder_type == "ctc"
):
cfg.decoding = cfg.ctc_decoding
else:
cfg.decoding = cfg.rnnt_decoding
    # prepare audio filepaths and decide whether it's partial audio
filepaths, partial_audio = prepare_audio_data(cfg)
# setup AMP (optional)
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
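        # AMP disabled or unavailable: fall back to a no-op autocast context manager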
@contextlib.contextmanager
def autocast(dtype=None):
yield
# Compute output filename
cfg = compute_output_filename(cfg, model_name)
# if transcripts should not be overwritten, and already exists, skip re-transcription step and return
if not cfg.return_transcriptions and not cfg.overwrite_transcripts and os.path.exists(cfg.output_filename):
logging.info(
f"Previous transcripts found at {cfg.output_filename}, and flag `overwrite_transcripts`"
f"is {cfg.overwrite_transcripts}. Returning without re-transcribing text."
)
return cfg
# transcribe audio
amp_dtype = torch.float16 if cfg.amp_dtype == "float16" else torch.bfloat16
with autocast(dtype=amp_dtype):
with torch.no_grad():
if partial_audio:
transcriptions = transcribe_partial_audio(
asr_model=asr_model,
path2manifest=cfg.dataset_manifest,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
return_hypotheses=cfg.return_hypotheses,
channel_selector=cfg.channel_selector,
augmentor=augmentor,
decoder_type=cfg.decoder_type,
)
else:
transcriptions = asr_model.transcribe(
paths2audio_files=filepaths,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
return_hypotheses=cfg.return_hypotheses,
channel_selector=cfg.channel_selector,
augmentor=augmentor,
)
logging.info(f"Finished transcribing {len(filepaths)} files !")
logging.info(f"Writing transcriptions into file: {cfg.output_filename}")
# if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
if type(transcriptions) == tuple and len(transcriptions) == 2:
transcriptions = transcriptions[0]
if cfg.return_transcriptions:
return transcriptions
# write audio transcriptions
output_filename, pred_text_attr_name = write_transcription(
transcriptions,
cfg,
model_name,
filepaths=filepaths,
compute_langs=compute_langs,
compute_timestamps=compute_timestamps,
)
logging.info(f"Finished writing predictions to {output_filename}!")
if cfg.calculate_wer:
output_manifest_w_wer, total_res, _ = cal_write_wer(
pred_manifest=output_filename,
pred_text_attr_name=pred_text_attr_name,
clean_groundtruth_text=cfg.clean_groundtruth_text,
langid=cfg.langid,
use_cer=cfg.use_cer,
output_filename=None,
)
if output_manifest_w_wer:
logging.info(f"Writing prediction and error rate of each sample to {output_manifest_w_wer}!")
logging.info(f"{total_res}")
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/transcribe_speech.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to fine-tune a speech-to-text model of any instance type when users want to
fine-tune an existing model without changing its core architecture but may change the tokenizer.
One can mention the pretrained model in two ways:
1) `init_from_nemo_model` or
2) `init_from_pretrained_model` in the configuration.
To update the model architecture in conjunction with other modifications, it is advisable to use the primary 'speech_to_text_rnnt/ctc_*.py' script.
Note: To create a single script for all model types, we currently only support two types of
initializations:
1) `init_from_nemo_model`, and
2) `init_from_pretrained_model`,
but not `init_from_ptl_ckpt`.
To train with the tokenizer of the base model, keep `model.tokenizer.update_tokenizer` as false; otherwise
set it to true and provide the tokenizer dir along with the tokenizer type.
To fine-tune the model, use the following commands:
For initialization from a NEMO model:
```sh
python <NEMO_ROOT>/examples/asr/speech_to_text_finetune.py \
init_from_nemo_model=<path_to_nemo_model>
```
For initialization from a pretrained model:
```sh
python <NEMO_ROOT>/examples/asr/speech_to_text_finetune.py \
init_from_pretrained_model=<pretrained_model_name>
```
# Fine-Tune a Model
For documentation on fine-tuning this model, please visit:
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from nemo.collections.asr.models import ASRModel
from nemo.core.config import hydra_runner
from nemo.utils import logging, model_utils
from nemo.utils.exp_manager import exp_manager
@rank_zero_only
def get_base_model(cfg):
"""
Returns the base model to be fine-tuned.
Currently supports two types of initializations:
1) `init_from_nemo_model`, and
2) `init_from_pretrained_model`.
Args:
cfg: config
Returns:
asr_model: ASRModel instance
"""
asr_model = None
nemo_model_path = cfg.get('init_from_nemo_model', None)
pretrained_name = cfg.get('init_from_pretrained_model', None)
if nemo_model_path is not None and pretrained_name is not None:
raise ValueError("Only pass `init_from_nemo_model` or `init_from_pretrained_model` but not both")
elif nemo_model_path is None and pretrained_name is None:
raise ValueError(
"Both `init_from_nemo_model` and `init_from_pretrained_model cannot be None, should pass atleast one of them"
)
elif nemo_model_path is not None:
asr_model = ASRModel.restore_from(restore_path=nemo_model_path)
elif pretrained_name is not None:
asr_model = ASRModel.from_pretrained(model_name=pretrained_name)
return asr_model
def check_vocabulary(asr_model, cfg):
"""
    Checks whether the decoder and vocabulary of the model need to be updated.
    If either of them needs to be updated, it updates them and returns the updated model;
    otherwise the vocabulary of the pre-trained model is reused.
Args:
asr_model: ASRModel instance
cfg: config
Returns:
asr_model: ASRModel instance with updated decoder and vocabulary
"""
if hasattr(cfg.model.tokenizer, 'update_tokenizer') and cfg.model.tokenizer.update_tokenizer:
if hasattr(cfg.model.char_labels, 'update_labels') and cfg.model.char_labels.update_labels:
raise ValueError(
"Both `model.tokenizer.update_tokenizer` and `model.char_labels.update_labels` cannot be passed together"
)
else:
asr_model = update_tokenizer(asr_model, cfg.model.tokenizer.dir, cfg.model.tokenizer.type)
elif hasattr(cfg.model, 'char_labels') and cfg.model.char_labels.update_labels:
asr_model.change_vocabulary(new_vocabulary=cfg.model.char_labels.labels)
logging.warning("The vocabulary of the model has been updated with provided char labels.")
else:
logging.info("Reusing the vocabulary from the pre-trained model.")
return asr_model
def update_tokenizer(asr_model, tokenizer_dir, tokenizer_type):
"""
Updates the tokenizer of the model and also reinitializes the decoder if the vocabulary size
of the new tokenizer differs from that of the loaded model.
Args:
asr_model: ASRModel instance
tokenizer_dir: tokenizer directory
tokenizer_type: tokenizer type
Returns:
asr_model: ASRModel instance with updated tokenizer and decoder
"""
vocab_size = asr_model.tokenizer.vocab_size
decoder = asr_model.decoder.state_dict()
if hasattr(asr_model, 'joint'):
joint_state = asr_model.joint.state_dict()
else:
joint_state = None
if tokenizer_dir is None:
raise ValueError("dir must be specified if update_tokenizer is True")
logging.info("Using the tokenizer provided through config")
asr_model.change_vocabulary(new_tokenizer_dir=tokenizer_dir, new_tokenizer_type=tokenizer_type)
if asr_model.tokenizer.vocab_size != vocab_size:
logging.warning(
"The vocabulary size of the new tokenizer differs from that of the loaded model. As a result, finetuning will proceed with the new vocabulary, and the decoder will be reinitialized."
)
else:
asr_model.decoder.load_state_dict(decoder)
if joint_state is not None:
asr_model.joint.load_state_dict(joint_state)
return asr_model
def setup_dataloaders(asr_model, cfg):
"""
Sets up the training, validation and test dataloaders for the model.
Args:
asr_model: ASRModel instance
cfg: config
Returns:
asr_model: ASRModel instance with updated dataloaders
"""
cfg = model_utils.convert_model_config_to_dict_config(cfg)
asr_model.setup_training_data(cfg.model.train_ds)
asr_model.setup_multiple_validation_data(cfg.model.validation_ds)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
asr_model.setup_multiple_test_data(cfg.model.test_ds)
return asr_model
@hydra_runner(config_path="conf", config_name="speech_to_text_finetune")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if hasattr(cfg, 'init_from_ptl_ckpt') and cfg.init_from_ptl_ckpt is not None:
raise NotImplementedError(
"Currently for simplicity of single script for all model types, we only support `init_from_nemo_model` and `init_from_pretrained_model`"
)
asr_model = get_base_model(cfg)
# Check vocabulary type and update if needed
asr_model = check_vocabulary(asr_model, cfg)
# Setup Data
asr_model = setup_dataloaders(asr_model, cfg)
# Setup Optimizer
asr_model.setup_optimization(cfg.model.optim)
# Setup SpecAug
if hasattr(cfg.model, 'spec_augment') and cfg.model.spec_augment is not None:
asr_model.spec_augment = ASRModel.from_config_dict(cfg.model.spec_augment)
trainer.fit(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/speech_to_text_finetune.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# ASR transcribe/inference with multi-GPU/multi-node support for large datasets
# It supports both tarred and non-tarred datasets
# Arguments
# model: path to a nemo/PTL checkpoint file or name of a pretrained model
# predict_ds: config of the dataset/dataloader
# output_path: path to store the predictions
# return_predictions: whether to return the predictions as output other than writing into the files
# use_cer: whether to calculate the error in terms of CER or use the default WER
#
# Results of each GPU/worker are written into a file named 'predictions_{rank}.json', and aggregated results of all workers are written into 'predictions_all.json'
Example for non-tarred datasets:
python transcribe_speech_parallel.py \
model=stt_en_conformer_ctc_large \
predict_ds.manifest_filepath=/dataset/manifest_file.json \
predict_ds.batch_size=16 \
output_path=/tmp/
Example for Hybrid-CTC/RNNT models with non-tarred datasets:
python transcribe_speech_parallel.py \
model=stt_en_fastconformer_hybrid_large \
decoder_type=ctc \
predict_ds.manifest_filepath=/dataset/manifest_file.json \
predict_ds.batch_size=16 \
output_path=/tmp/
Example for tarred datasets:
python transcribe_speech_parallel.py \
predict_ds.is_tarred=true \
predict_ds.manifest_filepath=/tarred_dataset/tarred_audio_manifest.json \
predict_ds.tarred_audio_filepaths=/tarred_dataset/audio__OP_0..127_CL_.tar \
...
By default the trainer uses all the GPUs available and default precision is FP32.
By setting the trainer config you may control these configs. For example to do the predictions with AMP on just two GPUs:
python transcribe_speech_parallel.py \
trainer.precision=16 \
trainer.devices=2 \
...
You may control the dataloader's config by setting the predict_ds:
python transcribe_speech_parallel.py \
predict_ds.num_workers=8 \
predict_ds.min_duration=2.0 \
predict_ds.sample_rate=16000 \
model=stt_en_conformer_ctc_small \
...
"""
import itertools
import json
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import pytorch_lightning as ptl
import torch
from omegaconf import MISSING, OmegaConf
from nemo.collections.asr.data.audio_to_text_dataset import ASRPredictionWriter
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import ASRModel, EncDecHybridRNNTCTCModel
from nemo.collections.asr.models.configs.asr_models_config import ASRDatasetConfig
from nemo.core.config import TrainerConfig, hydra_runner
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
@dataclass
class ParallelTranscriptionConfig:
model: Optional[str] = None # name
predict_ds: ASRDatasetConfig = ASRDatasetConfig(return_sample_id=True, num_workers=4)
output_path: str = MISSING
# when return_predictions is enabled, the prediction call would keep all the predictions in memory and return them when prediction is done
return_predictions: bool = False
use_cer: bool = False
# decoding strategy for RNNT models
rnnt_decoding: RNNTDecodingConfig = RNNTDecodingConfig()
# decoder type: ctc or rnnt, can be used to switch between CTC and RNNT decoder for Hybrid RNNT/CTC models
decoder_type: Optional[str] = None
# att_context_size can be set for cache-aware streaming models with multiple look-aheads
att_context_size: Optional[list] = None
trainer: TrainerConfig = TrainerConfig(devices=-1, accelerator="gpu", strategy="ddp")
def match_train_config(predict_ds, train_ds):
# It copies the important configurations from the train dataset of the model
# into the predict_ds to be used for prediction. It is needed to match the training configurations.
if train_ds is None:
return
predict_ds.sample_rate = train_ds.get("sample_rate", 16000)
cfg_name_list = [
"int_values",
"use_start_end_token",
"blank_index",
"unk_index",
"normalize",
"parser",
"eos_id",
"bos_id",
"pad_id",
]
if is_dataclass(predict_ds):
predict_ds = OmegaConf.structured(predict_ds)
for cfg_name in cfg_name_list:
if hasattr(train_ds, cfg_name):
setattr(predict_ds, cfg_name, getattr(train_ds, cfg_name))
return predict_ds
@hydra_runner(config_name="TranscriptionConfig", schema=ParallelTranscriptionConfig)
def main(cfg: ParallelTranscriptionConfig):
if cfg.model.endswith(".nemo"):
logging.info("Attempting to initialize from .nemo file")
model = ASRModel.restore_from(restore_path=cfg.model, map_location="cpu")
elif cfg.model.endswith(".ckpt"):
logging.info("Attempting to initialize from .ckpt file")
model = ASRModel.load_from_checkpoint(checkpoint_path=cfg.model, map_location="cpu")
else:
logging.info(
"Attempting to initialize from a pretrained model as the model name does not have the extension of .nemo or .ckpt"
)
model = ASRModel.from_pretrained(model_name=cfg.model, map_location="cpu")
if isinstance(model, EncDecHybridRNNTCTCModel) and cfg.decoder_type is not None:
model.change_decoding_strategy(decoder_type=cfg.decoder_type)
trainer = ptl.Trainer(**cfg.trainer)
cfg.predict_ds.return_sample_id = True
cfg.predict_ds = match_train_config(predict_ds=cfg.predict_ds, train_ds=model.cfg.train_ds)
data_loader = model._setup_dataloader_from_config(cfg.predict_ds)
os.makedirs(cfg.output_path, exist_ok=True)
# trainer.global_rank is not valid before predict() is called. Need this hack to find the correct global_rank.
global_rank = trainer.node_rank * trainer.num_devices + int(os.environ.get("LOCAL_RANK", 0))
output_file = os.path.join(cfg.output_path, f"predictions_{global_rank}.json")
predictor_writer = ASRPredictionWriter(dataset=data_loader.dataset, output_file=output_file)
trainer.callbacks.extend([predictor_writer])
predictions = trainer.predict(model=model, dataloaders=data_loader, return_predictions=cfg.return_predictions)
if predictions is not None:
predictions = list(itertools.chain.from_iterable(predictions))
samples_num = predictor_writer.close_output_file()
logging.info(
f"Prediction on rank {global_rank} is done for {samples_num} samples and results are stored in {output_file}."
)
if torch.distributed.is_initialized():
torch.distributed.barrier()
samples_num = 0
pred_text_list = []
text_list = []
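    # Rank 0 aggregates the per-rank prediction files into a single manifest and computes the overall WER/CER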
if is_global_rank_zero():
output_file = os.path.join(cfg.output_path, f"predictions_all.json")
logging.info(f"Prediction files are being aggregated in {output_file}.")
with open(output_file, 'w') as outf:
for rank in range(trainer.world_size):
input_file = os.path.join(cfg.output_path, f"predictions_{rank}.json")
with open(input_file, 'r') as inpf:
lines = inpf.readlines()
for line in lines:
item = json.loads(line)
pred_text_list.append(item["pred_text"])
text_list.append(item["text"])
outf.write(json.dumps(item) + "\n")
samples_num += 1
wer_cer = word_error_rate(hypotheses=pred_text_list, references=text_list, use_cer=cfg.use_cer)
logging.info(
f"Prediction is done for {samples_num} samples in total on all workers and results are aggregated in {output_file}."
)
logging.info("{} for all predictions is {:.4f}.".format("CER" if cfg.use_cer else "WER", wer_cer))
if __name__ == '__main__':
main()
| NeMo-main | examples/asr/transcribe_speech_parallel.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
from pathlib import Path
from subprocess import PIPE, Popen
from threading import Thread
from nemo.collections.common import tokenizers
from nemo.utils import logging
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="""Create token LM for input manifest and tokenizer.""",
)
parser.add_argument(
"--manifest", required=True, type=str, help="Comma separated list of manifest files",
)
parser.add_argument(
"--tokenizer_dir",
required=True,
type=str,
help="The directory path to the tokenizer vocabulary + additional metadata",
)
parser.add_argument(
"--tokenizer_type",
required=True,
type=str,
choices=["bpe", "wpe"],
help="The type of the tokenizer. Currently supports `bpe` and `wpe`",
)
parser.add_argument(
"--lm_builder",
default="chain-est-phone-lm",
type=str,
help=(
"The path or name of an LM builder. Supported builders: chain-est-phone-lm "
"and scripts/asr_language_modeling/ngram_lm/make_phone_lm.py"
),
)
parser.add_argument(
"--ngram_order", type=int, default=2, choices=[2, 3, 4, 5], help="Order of n-gram to use",
)
parser.add_argument(
"--output_file", required=True, type=str, help="The path to store the token LM",
)
parser.add_argument(
"--do_lowercase", action="store_true", help="Whether to apply lower case conversion on the text",
)
args = parser.parse_args()
is_chain_builder = Path(args.lm_builder).stem == "chain-est-phone-lm"
""" TOKENIZER SETUP """
logging.info(f"Loading {args.tokenizer_type} tokenizer from '{args.tokenizer_dir}' ...")
if args.tokenizer_type == "bpe":
# This is a BPE Tokenizer
model_path = os.path.join(args.tokenizer_dir, "tokenizer.model")
# Update special tokens
tokenizer = tokenizers.SentencePieceTokenizer(model_path=model_path)
else:
# This is a WPE Tokenizer
vocab_path = os.path.join(args.tokenizer_dir, "vocab.txt")
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name="bert-base-cased", vocab_file=vocab_path)
logging.info(f"Tokenizer {tokenizer.__class__.__name__} loaded with {tokenizer.vocab_size} tokens")
""" DATA PROCESSING """
if "," in args.manifest:
manifests = args.manifest.split(",")
else:
manifests = [args.manifest]
offset = 1 # tokens in token LM cannot be 0
tok_text_list = []
num_lines = 0
for manifest in manifests:
logging.info(f"Processing manifest : {manifest} ...")
with open(manifest, "r") as in_reader:
for line in in_reader:
item = json.loads(line)
text = item["text"]
if args.do_lowercase:
text = text.lower()
tok_text = " ".join([str(i + offset) for i in tokenizer.text_to_ids(text)])
if is_chain_builder:
tok_text = f"line_{num_lines} " + tok_text
tok_text_list.append(tok_text)
num_lines += 1
tok_texts = "\n".join(tok_text_list)
del tok_text_list
logging.info("Finished processing all manifests ! Number of sentences : {}".format(num_lines))
""" LM BUILDING """
logging.info(f"Calling {args.lm_builder} ...")
if is_chain_builder:
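        # Pipe the tokenized sentences (ark format) through the chain LM builder and fstprint to obtain a text-form FST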
pipe_args = [
args.lm_builder,
f"--ngram-order={args.ngram_order}",
f"--no-prune-ngram-order={args.ngram_order}",
"ark:-",
"-",
]
p1 = Popen(pipe_args, stdin=PIPE, stdout=PIPE, text=True)
p2 = Popen(["fstprint"], stdin=p1.stdout, stdout=PIPE, text=True)
p1.stdout.close()
p1.stdout = None
Thread(target=p1.communicate, args=[tok_texts]).start()
out, err = p2.communicate()
else:
pipe_args = [
args.lm_builder,
f"--ngram-order={args.ngram_order}",
f"--no-backoff-ngram-order={args.ngram_order}",
"--phone-disambig-symbol=-11",
]
p1 = Popen(pipe_args, stdout=PIPE, stdin=PIPE, text=True)
out, err = p1.communicate(tok_texts)
logging.info(f"LM is built, writing to {args.output_file} ...")
with open(args.output_file, "w", encoding="utf-8") as f:
f.write(out)
logging.info(f"Done writing to '{args.output_file}'.")
if __name__ == "__main__":
main()
| NeMo-main | examples/asr/experimental/k2/make_token_lm.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Based on examples/asr/transcribe_speech_parallel.py
# ASR alignment with multi-GPU/multi-node support for large datasets
# It supports both tarred and non-tarred datasets
# Arguments
# model: path to a nemo/PTL checkpoint file or name of a pretrained model
# predict_ds: config of the dataset/dataloader
# aligner_args: aligner config
# output_path: path to store the predictions
# model_stride: model downsampling factor, 8 for Citrinet models and 4 for Conformer models
#
# Results of each GPU/worker are written into a file named 'predictions_{rank}.json', and aggregated results of all workers are written into 'predictions_all.json'
Example for non-tarred datasets:
python align_speech_parallel.py \
model=stt_en_conformer_ctc_large \
predict_ds.manifest_filepath=/dataset/manifest_file.json \
predict_ds.batch_size=16 \
output_path=/tmp/
Example for tarred datasets:
python align_speech_parallel.py \
predict_ds.is_tarred=true \
predict_ds.manifest_filepath=/tarred_dataset/tarred_audio_manifest.json \
predict_ds.tarred_audio_filepaths=/tarred_dataset/audio__OP_0..127_CL_.tar \
...
By default the trainer uses all the GPUs available and default precision is FP32.
By setting the trainer config you may control these configs. For example to do the predictions with AMP on just two GPUs:
python align_speech_parallel.py \
trainer.precision=16 \
trainer.gpus=2 \
...
You may control the dataloader's config by setting the predict_ds:
python align_speech_parallel.py \
predict_ds.num_workers=8 \
predict_ds.min_duration=2.0 \
predict_ds.sample_rate=16000 \
model=stt_en_conformer_ctc_small \
...
You may control the aligner's config by setting the aligner_args:
aligner_args.alignment_type=argmax \
aligner_args.word_output=False \
aligner_args.cpu_decoding=True \
aligner_args.decode_batch_size=8 \
aligner_args.ctc_cfg.prob_suppress_index=-1 \
aligner_args.ctc_cfg.prob_suppress_value=0.5 \
aligner_args.rnnt_cfg.predictor_window_size=10 \
aligner_args.decoder_module_cfg.intersect_pruned=true \
aligner_args.decoder_module_cfg.intersect_conf.search_beam=40 \
...
"""
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import pytorch_lightning as ptl
import torch
from omegaconf import MISSING, OmegaConf
from nemo.collections.asr.data.audio_to_ctm_dataset import ASRCTMPredictionWriter
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.models.configs.aligner_config import K2AlignerWrapperModelConfig
from nemo.collections.asr.models.configs.asr_models_config import ASRDatasetConfig
from nemo.collections.asr.models.k2_aligner_model import AlignerWrapperModel
from nemo.core.config import TrainerConfig, hydra_runner
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
@dataclass
class ParallelAlignmentConfig:
model: Optional[str] = None # name
predict_ds: ASRDatasetConfig = ASRDatasetConfig(return_sample_id=True, num_workers=4)
aligner_args: K2AlignerWrapperModelConfig = K2AlignerWrapperModelConfig()
output_path: str = MISSING
model_stride: int = 8
trainer: TrainerConfig = TrainerConfig(gpus=-1, accelerator="ddp")
    # these arguments will be ignored
return_predictions: bool = False
use_cer: bool = False
def match_train_config(predict_ds, train_ds):
# It copies the important configurations from the train dataset of the model
# into the predict_ds to be used for prediction. It is needed to match the training configurations.
if train_ds is None:
return
predict_ds.sample_rate = train_ds.get("sample_rate", 16000)
cfg_name_list = [
"int_values",
"use_start_end_token",
"blank_index",
"unk_index",
"normalize",
"parser",
"eos_id",
"bos_id",
"pad_id",
]
if is_dataclass(predict_ds):
predict_ds = OmegaConf.structured(predict_ds)
for cfg_name in cfg_name_list:
if hasattr(train_ds, cfg_name):
setattr(predict_ds, cfg_name, getattr(train_ds, cfg_name))
return predict_ds
@hydra_runner(config_name="AlignmentConfig", schema=ParallelAlignmentConfig)
def main(cfg: ParallelAlignmentConfig):
if cfg.model.endswith(".nemo"):
logging.info("Attempting to initialize from .nemo file")
model = ASRModel.restore_from(restore_path=cfg.model, map_location="cpu")
elif cfg.model.endswith(".ckpt"):
logging.info("Attempting to initialize from .ckpt file")
model = ASRModel.load_from_checkpoint(checkpoint_path=cfg.model, map_location="cpu")
else:
logging.info(
"Attempting to initialize from a pretrained model as the model name does not have the extension of .nemo or .ckpt"
)
model = ASRModel.from_pretrained(model_name=cfg.model, map_location="cpu")
trainer = ptl.Trainer(**cfg.trainer)
cfg.predict_ds.return_sample_id = True
cfg.return_predictions = False
cfg.use_cer = False
cfg.predict_ds = match_train_config(predict_ds=cfg.predict_ds, train_ds=model._cfg.train_ds)
data_loader = model._setup_dataloader_from_config(cfg.predict_ds)
os.makedirs(cfg.output_path, exist_ok=True)
# trainer.global_rank is not valid before predict() is called. Need this hack to find the correct global_rank.
global_rank = trainer.node_rank * trainer.num_devices + int(os.environ.get("LOCAL_RANK", 0))
output_file = os.path.join(cfg.output_path, f"predictions_{global_rank}.json")
output_ctm_dir = os.path.join(cfg.output_path, "ctm")
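    # Frame-level alignments are converted to time stamps using the model stride times the preprocessor window stride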
predictor_writer = ASRCTMPredictionWriter(
dataset=data_loader.dataset,
output_file=output_file,
output_ctm_dir=output_ctm_dir,
time_per_frame=cfg.model_stride * model._cfg.preprocessor['window_stride'],
)
trainer.callbacks.extend([predictor_writer])
aligner_wrapper = AlignerWrapperModel(model=model, cfg=cfg.aligner_args)
trainer.predict(model=aligner_wrapper, dataloaders=data_loader, return_predictions=cfg.return_predictions)
samples_num = predictor_writer.close_output_file()
logging.info(
f"Prediction on rank {global_rank} is done for {samples_num} samples and results are stored in {output_file}."
)
if torch.distributed.is_initialized():
torch.distributed.barrier()
samples_num = 0
if is_global_rank_zero():
output_file = os.path.join(cfg.output_path, f"predictions_all.json")
logging.info(f"Prediction files are being aggregated in {output_file}.")
with open(output_file, 'tw', encoding="utf-8") as outf:
for rank in range(trainer.world_size):
input_file = os.path.join(cfg.output_path, f"predictions_{rank}.json")
with open(input_file, 'r', encoding="utf-8") as inpf:
lines = inpf.readlines()
samples_num += len(lines)
outf.writelines(lines)
logging.info(
f"Prediction is done for {samples_num} samples in total on all workers and results are aggregated in {output_file}."
)
if __name__ == '__main__':
main()
| NeMo-main | examples/asr/experimental/k2/align_speech_parallel.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
--manifest=<path to train manifest files, separated by commas>
OR
--data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_rnnt_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
trainer.devices=-1 \
trainer.accelerator="gpu" \
trainer.strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>" \
model.graph_module_cfg.criterion_type=ml \
model.graph_module_cfg.loss_type=rnnt \
model.graph_module_cfg.split_batch_size=0 \
model.graph_module_cfg.background_cfg.topo_type=minimal
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecK2RnntSeqModelBPE
from nemo.collections.asr.models.configs.k2_sequence_models_config import EncDecK2SeqModelConfig
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="experimental/k2/conf/conformer", config_name="conformer_transducer_bpe.yaml")
def main(cfg: EncDecK2SeqModelConfig):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecK2RnntSeqModelBPE(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
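    # Optionally run evaluation on the test set if one is configured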
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/experimental/k2/speech_to_text_rnnt_bpe.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
--manifest=<path to train manifest files, separated by commas>
OR
--data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# [FOR MMI LOSS ONLY] Building a token-level LM for the model training
```sh
python experimental/k2/make_token_lm.py \
--manifest=<comma separated list of manifest files> \
--tokenizer_dir=<path to directory of tokenizer (not full path to the vocab file!)> \
--tokenizer_type=<either `bpe` or `wpe`> \
--output_file=<path to store the token LM> \
--lm_builder=<NEMO_ROOT>/scripts/asr_language_modeling/ngram_lm/make_phone_lm.py \
--ngram_order=2 \
--do_lowercase
```
# Training the model
```sh
python speech_to_text_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either `bpe` or `wpe`> \
trainer.devices=-1 \
trainer.accelerator="gpu" \
trainer.strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>" \
model.graph_module_cfg.criterion_type=<either `ml` or `map`> \
model.graph_module_cfg.loss_type=<either `ctc` or `mmi`> \
model.graph_module_cfg.transcribe_training=False \
model.graph_module_cfg.split_batch_size=0 \
model.graph_module_cfg.background_cfg.topo_type=<`default` or `compact` or `shared_blank` or `minimal`> \
model.graph_module_cfg.background_cfg.topo_with_self_loops=True \
# If graph_module_cfg.criterion_type=`map`, you can also set the following parameters:
model.graph_module_cfg.background_cfg.token_lm=<path to the token LM> \
model.graph_module_cfg.background_cfg.intersect_pruned=False \
model.graph_module_cfg.background_cfg.boost_coeff=0.0
```
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models.configs.k2_sequence_models_config import EncDecK2SeqModelConfig
from nemo.collections.asr.models.k2_sequence_models import EncDecK2SeqModelBPE
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="experimental/k2/conf/citrinet", config_name="citrinet_mmi_1024.yaml")
def main(cfg: EncDecK2SeqModelConfig):
logging.info(f"Hydra config: {OmegaConf.to_yaml(cfg)}")
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecK2SeqModelBPE(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == "__main__":
main()
| NeMo-main | examples/asr/experimental/k2/speech_to_text_bpe.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is based on speech_to_text_eval.py and allows you to score the hypotheses
with sclite. A local installation from https://github.com/usnistgov/SCTK is required.
Hypotheses and references are first saved in trn format and are scored after applying a glm
file (if provided).
# Usage
python speech_to_text_sclite.py \
--asr_model="<Path to ASR Model>" \
--dataset="<Path to manifest file>" \
--out_dir="<Path to output dir, should be unique per model evaluated>" \
--sctk_dir="<Path to root directory where SCTK is installed>" \
--glm="<OPTIONAL: Path to glm file>" \
--batch_size=4
"""
import errno
import json
import os
import subprocess
from argparse import ArgumentParser
import torch
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
from nemo.utils import logging
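# Use the native AMP autocast context manager when it is available; otherwise the except branch
# below installs a no-op stand-in so that `with autocast():` still works where torch.cuda.amp is unavailable.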
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
def score_with_sctk(sctk_dir, ref_fname, hyp_fname, out_dir, glm=""):
sclite_path = os.path.join(sctk_dir, "bin", "sclite")
if not os.path.exists(sclite_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), sclite_path)
# apply glm
if os.path.exists(glm):
rfilter_path = os.path.join(sctk_dir, "bin", "rfilter1")
if not os.path.exists(rfilter_path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), rfilter_path)
hypglm = os.path.join(out_dir, os.path.basename(hyp_fname)) + ".glm"
rfilt_cmd = [rfilter_path] + [glm]
with open(hypglm, "w", encoding='utf-8') as hypf, open(hyp_fname, "r", encoding='utf-8') as hyp_in:
subprocess.run(rfilt_cmd, stdin=hyp_in, stdout=hypf)
refglm = os.path.join(out_dir, os.path.basename(ref_fname)) + ".glm"
with open(refglm, "w", encoding='utf-8') as reff, open(ref_fname, "r", encoding='utf-8') as ref_in:
subprocess.run(rfilt_cmd, stdin=ref_in, stdout=reff)
else:
refglm = ref_fname
hypglm = hyp_fname
_ = subprocess.check_output(f"{sclite_path} -h {hypglm} -r {refglm} -i wsj -o all", shell=True)
can_gpu = torch.cuda.is_available()
def get_utt_info(manifest_path):
info_list = []
with open(manifest_path, "r", encoding='utf-8') as utt_f:
for line in utt_f:
utt = json.loads(line)
info_list.append(utt)
return info_list
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=False, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--out_dir", type=str, required=True, help="Destination dir for output files")
parser.add_argument("--sctk_dir", type=str, required=False, default="", help="Path to sctk root dir")
parser.add_argument("--glm", type=str, required=False, default="", help="Path to glm file")
args = parser.parse_args()
torch.set_grad_enabled(False)
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir, exist_ok=True)
use_sctk = os.path.exists(args.sctk_dir)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model = ASRModel.restore_from(restore_path=args.asr_model, map_location='cpu')
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model = ASRModel.from_pretrained(model_name=args.asr_model, map_location='cpu')
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
manifest_data = read_manifest(args.dataset)
references = [data['text'] for data in manifest_data]
audio_filepaths = [data['audio_filepath'] for data in manifest_data]
with autocast():
hypotheses = asr_model.transcribe(audio_filepaths, batch_size=args.batch_size)
# if transcriptions form a tuple (from RNNT), extract just "best" hypothesis
if type(hypotheses) == tuple and len(hypotheses) == 2:
hypotheses = hypotheses[0]
info_list = get_utt_info(args.dataset)
hypfile = os.path.join(args.out_dir, "hyp.trn")
reffile = os.path.join(args.out_dir, "ref.trn")
with open(hypfile, "w") as hyp_f, open(reffile, "w") as ref_f:
for i in range(len(hypotheses)):
utt_id = os.path.splitext(os.path.basename(info_list[i]['audio_filepath']))[0]
# rfilter in sctk likes each transcript to have a space at the beginning
hyp_f.write(" " + hypotheses[i] + " (" + utt_id + ")" + "\n")
ref_f.write(" " + references[i] + " (" + utt_id + ")" + "\n")
if use_sctk:
score_with_sctk(args.sctk_dir, reffile, hypfile, args.out_dir, glm=args.glm)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/experimental/sclite/speech_to_text_sclite.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict
import pytorch_lightning as pl
from nemo.collections.asr.models import EncDecCTCModel, configs
from nemo.core.config import modelPT, optimizers, schedulers
from nemo.utils.exp_manager import exp_manager
"""
python speech_to_text_structured_v2.py
"""
# fmt: off
LABELS = [
" ", "a", "b", "c", "d", "e",
"f", "g", "h", "i", "j", "k",
"l", "m", "n", "o", "p", "q",
"r", "s", "t", "u", "v", "w",
"x", "y", "z", "'",
]
optim_cfg = optimizers.NovogradParams(
lr=0.01,
betas=(0.8, 0.5),
weight_decay=0.001
)
sched_cfg = schedulers.CosineAnnealingParams(
warmup_steps=None,
warmup_ratio=None,
min_lr=0.0,
)
# fmt: on
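# These optimizer and scheduler dataclasses are merged into `model.optim` / `model.optim.sched`
# by `builder.set_optim(...)` inside main() below.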
def main():
# NeMo Model config
cfg = modelPT.NemoConfig(name='Custom QuartzNet')
# Generate default asr model config
builder = configs.EncDecCTCModelConfigBuilder(name='quartznet_15x5')
# set model global values
builder.set_labels(LABELS)
builder.set_optim(cfg=optim_cfg, sched_cfg=sched_cfg)
model_cfg = builder.build()
# set the model config to the NeMo Model
cfg.model = model_cfg
# Update values
# MODEL UPDATES
# train ds
model_cfg.train_ds.manifest_filepath = ""
# validation ds
model_cfg.validation_ds.manifest_filepath = ""
# Trainer config
cfg.trainer.devices = 1
cfg.trainer.max_epochs = 5
# Exp Manager config
cfg.exp_manager.name = cfg.name
# Note usage of asdict
trainer = pl.Trainer(**asdict(cfg.trainer))
exp_manager(trainer, asdict(cfg.exp_manager))
asr_model = EncDecCTCModel(cfg=cfg.model, trainer=trainer)
trainer.fit(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/experimental/structured/speech_to_text_structured_v2.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict
import pytorch_lightning as pl
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.models import EncDecCTCModel, configs
from nemo.utils.exp_manager import exp_manager
"""
python speech_to_text_structured.py
"""
# Generate default asr model config
cfg = configs.EncDecCTCModelConfig()
# set global values
cfg.model.repeat = 5
cfg.model.separable = True
# fmt: off
LABELS = [
" ", "a", "b", "c", "d", "e",
"f", "g", "h", "i", "j", "k",
"l", "m", "n", "o", "p", "q",
"r", "s", "t", "u", "v", "w",
"x", "y", "z", "'",
]
# fmt: on
qn_15x5 = [
nemo_asr.modules.conv_asr.JasperEncoderConfig(
filters=256,
repeat=1,
kernel=[33],
stride=[2],
separable=cfg.model.separable,
dilation=[1],
dropout=cfg.model.dropout,
residual=False,
),
nemo_asr.modules.conv_asr.JasperEncoderConfig(
filters=256,
repeat=1,
kernel=[33],
stride=[1],
separable=cfg.model.separable,
dilation=[1],
dropout=cfg.model.dropout,
residual=True,
),
# ... repeat 14 more times
nemo_asr.modules.conv_asr.JasperEncoderConfig(
filters=1024, repeat=1, kernel=[1], stride=[1], dilation=[1], dropout=cfg.model.dropout, residual=False,
),
]
def main():
# Update values
# MODEL UPDATES
cfg.name = "Mini QuartzNet"
cfg.model.labels = LABELS
# train ds
cfg.model.train_ds.manifest_filepath = "<path to train dataset>"
cfg.model.train_ds.labels = LABELS
cfg.model.train_ds.sample_rate = cfg.model.sample_rate
# validation ds
cfg.model.validation_ds.manifest_filepath = "<path to test dataset>"
cfg.model.validation_ds.labels = LABELS
cfg.model.validation_ds.sample_rate = cfg.model.sample_rate
# del `test_ds` does not work!
# Refer to https://stackoverflow.com/questions/58119758/how-to-remove-dataclass-attributes
# Hydra/OmegaConf don't allow custom .asdict() methods either
# For now, explicitly set parameters
cfg.model.test_ds.sample_rate = cfg.model.sample_rate
cfg.model.test_ds.labels = cfg.model.labels
# preprocessor
cfg.model.preprocessor.sample_rate = cfg.model.sample_rate
# spec aug
cfg.model.spec_augment.rect_masks = 5
cfg.model.spec_augment.rect_freq = 50
cfg.model.spec_augment.rect_time = 120
# encoder
cfg.model.encoder.feat_in = cfg.model.preprocessor.features
cfg.model.encoder.activation = 'relu'
cfg.model.encoder.jasper = qn_15x5
# decoder
cfg.model.decoder.feat_in = qn_15x5[-1].filters
cfg.model.decoder.num_classes = len(LABELS)
cfg.model.decoder.vocabulary = LABELS
# optim
cfg.model.optim.name = 'novograd'
cfg.model.optim.lr = 0.01
# `betas` don't exist inside the base config,
# so they cannot be added as such!
# Same for `weight_decay`.
cfg.model.optim.betas = [0.8, 0.5]
cfg.model.optim.weight_decay = 0.001
# sched
# As parameters such as warmup_steps and warmup_ratio
# don't exist inside the shell config, these values are not added!
cfg.model.optim.sched.name = "CosineAnnealing"
cfg.model.optim.sched.warmup_steps = None
cfg.model.optim.sched.warmup_ratio = 0.01
# Trainer config
cfg.trainer.devices = 1
cfg.trainer.max_epochs = 5
# Exp Manager config
cfg.exp_manager.name = cfg.name
# Note usage of asdict
trainer = pl.Trainer(**asdict(cfg.trainer))
exp_manager(trainer, asdict(cfg.exp_manager))
asr_model = EncDecCTCModel(cfg=cfg.model, trainer=trainer)
trainer.fit(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/experimental/structured/speech_to_text_structured.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.asr.models import EncDecCTCModel, configs
from nemo.core.config import hydra_runner
from nemo.utils.config_utils import update_model_config
from nemo.utils.exp_manager import exp_manager
"""
python speech_to_text_hybrid.py \
--config-path="conf/quartznet" \
--config-name="quartznet_15x5" \
model.train_ds.manifest_filepath="/home/smajumdar/PycharmProjects/NeMo-som/examples/asr/an4/train_manifest.json" \
model.validation_ds.manifest_filepath="/home/smajumdar/PycharmProjects/NeMo-som/examples/asr/an4/test_manifest.json" \
trainer.devices=1
"""
@hydra_runner(config_path="conf/quartznet", config_name="quartznet_15x5")
def main(cfg):
# Generate default asr model config
asr_model_config = configs.EncDecCTCModelConfig()
# Merge hydra updates with model config
# `drop_missing_subconfig=True` is necessary here. Without it, while the data class will instantiate and be added
# to the config, it contains test_ds.sample_rate = MISSING and test_ds.labels = MISSING.
# This will raise an OmegaConf MissingMandatoryValue error when processing the dataloaders inside
# model_utils.resolve_test_dataloaders(model=self) (used for multi data loader support).
# In general, any operation that tries to use a DictConfig with MISSING in it will fail,
# other than explicit update operations to change MISSING to some actual value.
asr_model_config = update_model_config(asr_model_config, cfg, drop_missing_subconfigs=True)
# From here on out, it's a general OmegaConf DictConfig, directly usable by our code.
trainer = pl.Trainer(**asr_model_config.trainer)
exp_manager(trainer, asr_model_config.get("exp_manager", None))
asr_model = EncDecCTCModel(cfg=asr_model_config.model, trainer=trainer)
trainer.fit(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/experimental/structured/speech_to_text_hybrid.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Training hybrid ASR-TTS model using text-only data and/or audio-text pairs.
Provide ASR model config, add options related to TTS and text-only data.
```shell
python speech_to_text_bpe_with_text.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
++asr_model_type=<rnnt_bpe, ctc_bpe or hybrid_rnnt_ctc_bpe> \
++tts_model_path=<path to compatible tts model> \
++enhancer_model_path=<optional path to enhancer model> \
model.tokenizer.dir=<path to tokenizer> \
model.tokenizer.type="bpe" \
model.train_ds.manifest_filepath=<path(s) to manifest with audio-text pairs or null> \
++model.train_ds.text_data.manifest_filepath=<path(s) to manifests with train text> \
++model.train_ds.text_data.speakers_filepath=<path(s) to speakers list> \
++model.train_ds.text_data.min_words=1 \
++model.train_ds.text_data.max_words=45 \
++model.train_ds.text_data.tokenizer_workers=4 \
model.validation_ds.manifest_filepath=<path(s) to val/test manifest> \
model.train_ds.batch_size=<batch size> \
trainer.max_epochs=<num epochs> \
trainer.num_nodes=<number of nodes> \
trainer.accumulate_grad_batches=<grad accumulation> \
++trainer.precision=<precision> \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<name of project>" \
++exp_manager.wandb_logger_kwargs.resume=auto \
++exp_manager.wandb_logger_kwargs.id="<name of experiment>" \
exp_manager.resume_if_exists=true \
exp_manager.resume_ignore_no_checkpoint=true \
exp_manager.exp_dir=<experiment dir> \
exp_manager.name=<name of experiment>
```
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models.hybrid_asr_tts_models import ASRWithTTSModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="examples/asr/conf/conformer", config_name="conformer_transducer_bpe")
def main(cfg):
"""
Training hybrid ASR-TTS model using text-only data and/or audio-text pairs.
Provide ASR model config, add options related to TTS and text-only data.
"""
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
OmegaConf.resolve(cfg)
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = ASRWithTTSModel.from_asr_config(
asr_cfg=cfg.model,
asr_model_type=cfg.asr_model_type,
tts_model_path=cfg.tts_model_path,
enhancer_model_path=cfg.get("enhancer_model_path", None),
trainer=trainer,
)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_with_tts/speech_to_text_bpe_with_text.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Finetuning pretrained ASR model with text-only data (can be mixed with audio-text pairs)
```shell
python speech_to_text_bpe_with_text_finetune.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.asr_model_path=<path to ASR model> \
model.tts_model_path=<path to compatible TTS model> \
model.enhancer_model_path=<optional path to enhancer model> \
model.asr_model_fuse_bn=<true recommended if ConformerEncoder with BatchNorm, false otherwise> \
model.train_ds.manifest_filepath=<path to manifest with audio-text pairs or null> \
model.train_ds.text_data.manifest_filepath=<path(s) to manifest with train text> \
model.train_ds.text_data.speakers_filepath=<path(s) to speakers list> \
model.train_ds.text_data.tokenizer_workers=4 \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.train_ds.batch_size=<batch size> \
trainer.max_epochs=<num epochs> \
trainer.num_nodes=<number of nodes> \
trainer.accumulate_grad_batches=<grad accumulation> \
trainer.precision=<precision> \
model.optim.lr=1e-4 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<name of project>" \
++exp_manager.wandb_logger_kwargs.resume=auto \
++exp_manager.wandb_logger_kwargs.id="<name of experiment>" \
exp_manager.resume_if_exists=true \
exp_manager.resume_ignore_no_checkpoint=true \
exp_manager.exp_dir=<experiment dir> \
exp_manager.name=<name of experiment>
```
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models.hybrid_asr_tts_models import ASRWithTTSModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="examples/asr/asr_tts", config_name="hybrid_asr_tts")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
OmegaConf.resolve(cfg)
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = ASRWithTTSModel(cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
# validate before training to get baseline metrics
trainer.validate(asr_model)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_with_tts/speech_to_text_bpe_with_text_finetune.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script to perform buffered inference using RNNT models.
Buffered inference is the primary form of audio transcription when the audio segment is longer than 20-30 seconds.
This is especially useful for models such as Conformers, which have quadratic time and memory scaling with
audio duration.
The difference between streaming and buffered inference is the chunk size (or the latency of inference).
Buffered inference will use large chunk sizes (5-10 seconds) + some additional buffer for context.
Streaming inference will use small chunk sizes (0.1 to 0.25 seconds) + some additional buffer for context.
# Middle Token merge algorithm
python speech_to_text_buffered_infer_rnnt.py \
model_path=null \
pretrained_name=null \
audio_dir="<remove or path to folder of audio files>" \
dataset_manifest="<remove or path to manifest>" \
output_filename="<remove or specify output filename>" \
total_buffer_in_secs=4.0 \
chunk_len_in_secs=1.6 \
model_stride=4 \
batch_size=32 \
clean_groundtruth_text=True \
langid='en'
# Longest Common Subsequence (LCS) merge algorithm
python speech_to_text_buffered_infer_rnnt.py \
model_path=null \
pretrained_name=null \
audio_dir="<remove or path to folder of audio files>" \
dataset_manifest="<remove or path to manifest>" \
output_filename="<remove or specify output filename>" \
total_buffer_in_secs=4.0 \
chunk_len_in_secs=1.6 \
model_stride=4 \
batch_size=32 \
merge_algo="lcs" \
lcs_alignment_dir=<OPTIONAL: Some path to store the LCS alignments>
# NOTE:
You can use `DEBUG=1 python speech_to_text_buffered_infer_rnnt.py ...` to print out the
predictions of the model, and the ground-truth text if present in the manifest.
"""
import copy
import glob
import math
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf, open_dict
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.models import EncDecHybridRNNTCTCModel, EncDecRNNTModel
from nemo.collections.asr.parts.utils.eval_utils import cal_write_wer
from nemo.collections.asr.parts.utils.streaming_utils import (
BatchedFrameASRRNNT,
LongestCommonSubsequenceBatchedFrameASRRNNT,
)
from nemo.collections.asr.parts.utils.transcribe_utils import (
compute_output_filename,
get_buffered_pred_feat_rnnt,
setup_model,
write_transcription,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
can_gpu = torch.cuda.is_available()
@dataclass
class TranscriptionConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
# General configs
output_filename: Optional[str] = None
batch_size: int = 32
num_workers: int = 0
append_pred: bool = False # Sets mode of work: if True, adds new fields with transcriptions to an existing manifest instead of overwriting it.
pred_name_postfix: Optional[str] = None # Use this if the prediction field should carry another model name, rather than the standard one.
random_seed: Optional[int] = None # seed number going to be used in seed_everything()
# Set to True to output greedy timestamp information (only supported models)
compute_timestamps: bool = False
# Set to True to output language ID information
compute_langs: bool = False
# Chunked configs
chunk_len_in_secs: float = 1.6 # Chunk length in seconds
total_buffer_in_secs: float = 4.0 # Length of buffer (chunk + left and right padding) in seconds
model_stride: int = 8 # Model downsampling factor, 8 for Citrinet and FastConformer models and 4 for Conformer models.
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
audio_type: str = "wav"
# Recompute model transcription, even if the output folder exists with scores.
overwrite_transcripts: bool = True
# Decoding strategy for RNNT models
decoding: RNNTDecodingConfig = RNNTDecodingConfig()
# Decoding configs
max_steps_per_timestep: int = 5 # Maximum number of tokens decoded per acoustic timestep
stateful_decoding: bool = False # Whether to perform stateful decoding
# Merge algorithm for transducers
merge_algo: Optional[str] = 'middle' # choices=['middle', 'lcs'], choice of algorithm to apply during inference.
lcs_alignment_dir: Optional[str] = None # Path to a directory to store LCS algo alignments
# Config for word / character error rate calculation
calculate_wer: bool = True
clean_groundtruth_text: bool = False
langid: str = "en" # specify this for convert_num_to_words step in groundtruth cleaning
use_cer: bool = False
@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig)
def main(cfg: TranscriptionConfig) -> TranscriptionConfig:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
torch.set_grad_enabled(False)
for key in cfg:
cfg[key] = None if cfg[key] == 'None' else cfg[key]
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.random_seed:
pl.seed_everything(cfg.random_seed)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
filepaths = None
manifest = cfg.dataset_manifest
if cfg.audio_dir is not None:
filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True))
manifest = None # ignore dataset_manifest if audio_dir and dataset_manifest are both present
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
logging.info(f"Inference will be done on device : {device}")
asr_model, model_name = setup_model(cfg, map_location)
model_cfg = copy.deepcopy(asr_model._cfg)
OmegaConf.set_struct(model_cfg.preprocessor, False)
# some changes for streaming scenario
model_cfg.preprocessor.dither = 0.0
model_cfg.preprocessor.pad_to = 0
if model_cfg.preprocessor.normalize != "per_feature":
logging.error("Only EncDecRNNTBPEModel models trained with per_feature normalization are supported currently")
# Disable config overwriting
OmegaConf.set_struct(model_cfg.preprocessor, True)
# Compute output filename
cfg = compute_output_filename(cfg, model_name)
# if transcripts should not be overwritten, and already exists, skip re-transcription step and return
if not cfg.overwrite_transcripts and os.path.exists(cfg.output_filename):
logging.info(
f"Previous transcripts found at {cfg.output_filename}, and flag `overwrite_transcripts`"
f"is {cfg.overwrite_transcripts}. Returning without re-transcribing text."
)
return cfg
asr_model.freeze()
asr_model = asr_model.to(asr_model.device)
# Change Decoding Config
with open_dict(cfg.decoding):
if cfg.stateful_decoding:
cfg.decoding.strategy = "greedy"
else:
cfg.decoding.strategy = "greedy_batch"
cfg.decoding.preserve_alignments = True # required to compute the middle token for transducers.
cfg.decoding.fused_batch_size = -1 # temporarily stop fused batch during inference.
cfg.decoding.beam.return_best_hypothesis = True # return and write the best hypothesis only
# Setup decoding strategy
if hasattr(asr_model, 'change_decoding_strategy'):
if not isinstance(asr_model, EncDecRNNTModel) and not isinstance(asr_model, EncDecHybridRNNTCTCModel):
raise ValueError("The script supports rnnt model and hybrid model with rnnt decodng!")
else:
# rnnt model
if isinstance(asr_model, EncDecRNNTModel):
asr_model.change_decoding_strategy(cfg.decoding)
# hybrid ctc rnnt model with decoder_type = rnnt
if hasattr(asr_model, 'cur_decoder'):
asr_model.change_decoding_strategy(cfg.decoding, decoder_type='rnnt')
feature_stride = model_cfg.preprocessor['window_stride']
model_stride_in_secs = feature_stride * cfg.model_stride
total_buffer = cfg.total_buffer_in_secs
chunk_len = float(cfg.chunk_len_in_secs)
tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)
mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)
logging.info(f"tokens_per_chunk is {tokens_per_chunk}, mid_delay is {mid_delay}")
if cfg.merge_algo == 'middle':
frame_asr = BatchedFrameASRRNNT(
asr_model=asr_model,
frame_len=chunk_len,
total_buffer=cfg.total_buffer_in_secs,
batch_size=cfg.batch_size,
max_steps_per_timestep=cfg.max_steps_per_timestep,
stateful_decoding=cfg.stateful_decoding,
)
elif cfg.merge_algo == 'lcs':
frame_asr = LongestCommonSubsequenceBatchedFrameASRRNNT(
asr_model=asr_model,
frame_len=chunk_len,
total_buffer=cfg.total_buffer_in_secs,
batch_size=cfg.batch_size,
max_steps_per_timestep=cfg.max_steps_per_timestep,
stateful_decoding=cfg.stateful_decoding,
alignment_basepath=cfg.lcs_alignment_dir,
)
# Set the LCS algorithm delay.
frame_asr.lcs_delay = math.floor(((total_buffer - chunk_len)) / model_stride_in_secs)
else:
raise ValueError("Invalid choice of merge algorithm for transducer buffered inference.")
hyps = get_buffered_pred_feat_rnnt(
asr=frame_asr,
tokens_per_chunk=tokens_per_chunk,
delay=mid_delay,
model_stride_in_secs=model_stride_in_secs,
batch_size=cfg.batch_size,
manifest=manifest,
filepaths=filepaths,
)
output_filename, pred_text_attr_name = write_transcription(
hyps, cfg, model_name, filepaths=filepaths, compute_langs=False, compute_timestamps=False
)
logging.info(f"Finished writing predictions to {output_filename}!")
if cfg.calculate_wer:
output_manifest_w_wer, total_res, _ = cal_write_wer(
pred_manifest=output_filename,
pred_text_attr_name=pred_text_attr_name,
clean_groundtruth_text=cfg.clean_groundtruth_text,
langid=cfg.langid,
use_cer=cfg.use_cer,
output_filename=None,
)
if output_manifest_w_wer:
logging.info(f"Writing prediction and error rate of each sample to {output_manifest_w_wer}!")
logging.info(f"{total_res}")
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_chunked_inference/rnnt/speech_to_text_buffered_infer_rnnt.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script serves three goals:
(1) Demonstrate how to use NeMo Models outside of PytorchLightning
(2) Shows example of batch ASR inference
(3) Serves as CI test for pre-trained checkpoint
python speech_to_text_buffered_infer_ctc.py \
model_path=null \
pretrained_name=null \
audio_dir="<remove or path to folder of audio files>" \
dataset_manifest="<remove or path to manifest>" \
output_filename="<remove or specify output filename>" \
total_buffer_in_secs=4.0 \
chunk_len_in_secs=1.6 \
model_stride=4 \
batch_size=32 \
clean_groundtruth_text=True \
langid='en'
# NOTE:
You can use `DEBUG=1 python speech_to_text_buffered_infer_ctc.py ...` to print out the
predictions of the model, and the ground-truth text if present in the manifest.
"""
import contextlib
import copy
import glob
import math
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel, EncDecHybridRNNTCTCModel
from nemo.collections.asr.parts.utils.eval_utils import cal_write_wer
from nemo.collections.asr.parts.utils.streaming_utils import FrameBatchASR
from nemo.collections.asr.parts.utils.transcribe_utils import (
compute_output_filename,
get_buffered_pred_feat,
setup_model,
write_transcription,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
can_gpu = torch.cuda.is_available()
@dataclass
class TranscriptionConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
# General configs
output_filename: Optional[str] = None
batch_size: int = 32
num_workers: int = 0
append_pred: bool = False # Sets mode of work: if True, adds new fields with transcriptions to an existing manifest instead of overwriting it.
pred_name_postfix: Optional[str] = None # Use this if the prediction field should carry another model name, rather than the standard one.
random_seed: Optional[int] = None # seed number going to be used in seed_everything()
# Set to True to output greedy timestamp information (only supported models)
compute_timestamps: bool = False
# Set to True to output language ID information
compute_langs: bool = False
# Chunked configs
chunk_len_in_secs: float = 1.6 # Chunk length in seconds
total_buffer_in_secs: float = 4.0 # Length of buffer (chunk + left and right padding) in seconds
model_stride: int = 8 # Model downsampling factor, 8 for Citrinet and FastConformer models and 4 for Conformer models.
# Decoding strategy for CTC models
decoding: CTCDecodingConfig = CTCDecodingConfig()
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
amp: bool = False
audio_type: str = "wav"
# Recompute model transcription, even if the output folder exists with scores.
overwrite_transcripts: bool = True
# Config for word / character error rate calculation
calculate_wer: bool = True
clean_groundtruth_text: bool = False
langid: str = "en" # specify this for convert_num_to_words step in groundtruth cleaning
use_cer: bool = False
@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig)
def main(cfg: TranscriptionConfig) -> TranscriptionConfig:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
torch.set_grad_enabled(False)
for key in cfg:
cfg[key] = None if cfg[key] == 'None' else cfg[key]
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.random_seed:
pl.seed_everything(cfg.random_seed)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
filepaths = None
manifest = cfg.dataset_manifest
if cfg.audio_dir is not None:
filepaths = list(glob.glob(os.path.join(cfg.audio_dir, f"**/*.{cfg.audio_type}"), recursive=True))
manifest = None # ignore dataset_manifest if audio_dir and dataset_manifest are both present
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
logging.info(f"Inference will be done on device : {device}")
asr_model, model_name = setup_model(cfg, map_location)
model_cfg = copy.deepcopy(asr_model._cfg)
OmegaConf.set_struct(model_cfg.preprocessor, False)
# some changes for streaming scenario
model_cfg.preprocessor.dither = 0.0
model_cfg.preprocessor.pad_to = 0
if model_cfg.preprocessor.normalize != "per_feature":
logging.error("Only EncDecCTCModelBPE models trained with per_feature normalization are supported currently")
# Disable config overwriting
OmegaConf.set_struct(model_cfg.preprocessor, True)
# setup AMP (optional)
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
# Compute output filename
cfg = compute_output_filename(cfg, model_name)
# if transcripts should not be overwritten, and already exists, skip re-transcription step and return
if not cfg.overwrite_transcripts and os.path.exists(cfg.output_filename):
logging.info(
f"Previous transcripts found at {cfg.output_filename}, and flag `overwrite_transcripts`"
f"is {cfg.overwrite_transcripts}. Returning without re-transcribing text."
)
return cfg
# Setup decoding strategy
if hasattr(asr_model, 'change_decoding_strategy'):
if not isinstance(asr_model, EncDecCTCModel) and not isinstance(asr_model, EncDecHybridRNNTCTCModel):
raise ValueError("The script supports ctc model and hybrid model with ctc decodng!")
else:
if cfg.compute_langs:
raise ValueError("CTC models do not support `compute_langs` at the moment.")
if hasattr(
asr_model, 'cur_decoder'
): # hybrid model with ctc decoding or potential other models containing decoding switch feature
asr_model.change_decoding_strategy(cfg.decoding, decoder_type='ctc')
else: # ctc model
asr_model.change_decoding_strategy(cfg.decoding)
asr_model.eval()
asr_model = asr_model.to(asr_model.device)
feature_stride = model_cfg.preprocessor['window_stride']
model_stride_in_secs = feature_stride * cfg.model_stride
total_buffer = cfg.total_buffer_in_secs
chunk_len = float(cfg.chunk_len_in_secs)
tokens_per_chunk = math.ceil(chunk_len / model_stride_in_secs)
mid_delay = math.ceil((chunk_len + (total_buffer - chunk_len) / 2) / model_stride_in_secs)
logging.info(f"tokens_per_chunk is {tokens_per_chunk}, mid_delay is {mid_delay}")
frame_asr = FrameBatchASR(
asr_model=asr_model, frame_len=chunk_len, total_buffer=cfg.total_buffer_in_secs, batch_size=cfg.batch_size,
)
hyps = get_buffered_pred_feat(
frame_asr,
chunk_len,
tokens_per_chunk,
mid_delay,
model_cfg.preprocessor,
model_stride_in_secs,
asr_model.device,
manifest,
filepaths,
)
output_filename, pred_text_attr_name = write_transcription(
hyps, cfg, model_name, filepaths=filepaths, compute_langs=False, compute_timestamps=False
)
logging.info(f"Finished writing predictions to {output_filename}!")
if cfg.calculate_wer:
output_manifest_w_wer, total_res, _ = cal_write_wer(
pred_manifest=output_filename,
pred_text_attr_name=pred_text_attr_name,
clean_groundtruth_text=cfg.clean_groundtruth_text,
langid=cfg.langid,
use_cer=cfg.use_cer,
output_filename=None,
)
if output_manifest_w_wer:
logging.info(f"Writing prediction and error rate of each sample to {output_manifest_w_wer}!")
logging.info(f"{total_res}")
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_chunked_inference/ctc/speech_to_text_buffered_infer_ctc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The script trains a model that performs classification on each frame of the input audio.
The default config (i.e., marblenet_3x2x64_20ms.yaml) outputs 20ms frames.
## Training
```sh
python speech_to_label.py \
--config-path=<path to dir of configs e.g. "../conf/marblenet"> \
--config-name=<name of config without .yaml e.g. "marblenet_3x2x64_20ms"> \
model.train_ds.manifest_filepath="<path to train manifest>" \
model.validation_ds.manifest_filepath=["<path to val manifest>","<path to test manifest>"] \
trainer.devices=2 \
trainer.accelerator="gpu" \
strategy="ddp" \
trainer.max_epochs=200
```
The input manifest must be a manifest json file, where each line is a Python dictionary. The fields ["audio_filepath", "offset", "duration", "label"] are required. An example of a manifest file is:
```
{"audio_filepath": "/path/to/audio_file1", "offset": 0, "duration": 10000, "label": "0 1 0 0 1"}
{"audio_filepath": "/path/to/audio_file2", "offset": 0, "duration": 10000, "label": "0 0 0 1 1 1 1 0 0"}
```
For example, if you have a 1s audio file, you'll need to have 50 frame labels in the manifest entry like "0 0 0 0 1 1 0 1 .... 0 1".
However, shorter label strings are also supported for smaller file sizes. For example, you can prepare the `label` at 40ms frame resolution, and the model will properly repeat each label to cover the corresponding 20ms frames.
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models.classification_models import EncDecFrameClassificationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="../conf/marblenet", config_name="marblenet_3x2x64_20ms")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = EncDecFrameClassificationModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if model.prepare_test(trainer):
trainer.test(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/speech_classification/speech_to_frame_label.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script performs VAD on each 20ms frame of the input audio files.
Postprocessing is also performed to generate speech segments and store them as RTTM files.
Long audio files will be split into smaller chunks to avoid OOM issues, but the frames close
to the split points might have worse performance due to truncated context.
## Usage:
python frame_vad_infer.py \
--config-path="../conf/vad" --config-name="frame_vad_infer_postprocess" \
input_manifest=<Path of manifest file containing evaluation data. Audio files should have unique names> \
output_dir=<Path of output directory>
The manifest json file should have the following format (each line is a Python dictionary):
{"audio_filepath": "/path/to/audio_file1", "offset": 0, "duration": 10000}
{"audio_filepath": "/path/to/audio_file2", "offset": 0, "duration": 10000}
If you want to evaluate the model's AUROC and DER performance, you need to set `evaluate=True` in the config yaml,
and also provide groundtruth in either RTTM files or label strings:
{"audio_filepath": "/path/to/audio_file1", "offset": 0, "duration": 10000, "label": "0 1 0 0 0 1 1 1 0"}
or
{"audio_filepath": "/path/to/audio_file1", "offset": 0, "duration": 10000, "rttm_filepath": "/path/to/rttm_file1.rttm"}
"""
import os
from pathlib import Path
import torch
from nemo.collections.asr.parts.utils.manifest_utils import write_manifest
from nemo.collections.asr.parts.utils.vad_utils import (
frame_vad_eval_detection_error,
frame_vad_infer_load_manifest,
generate_overlap_vad_seq,
generate_vad_frame_pred,
generate_vad_segment_table,
init_frame_vad_model,
prepare_manifest,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
@hydra_runner(config_path="../conf/vad", config_name="frame_vad_infer_postprocess")
def main(cfg):
if not cfg.input_manifest:
raise ValueError("You must input the path of json file of evaluation data")
output_dir = cfg.output_dir if cfg.output_dir else "frame_vad_outputs"
if os.path.exists(output_dir):
logging.warning(
f"Output directory {output_dir} already exists, use this only if you're tuning post-processing params."
)
Path(output_dir).mkdir(parents=True, exist_ok=True)
cfg.frame_out_dir = os.path.join(output_dir, "frame_preds")
cfg.smoothing_out_dir = os.path.join(output_dir, "smoothing_preds")
cfg.rttm_out_dir = os.path.join(output_dir, "rttm_preds")
# each line of input_manifest should have a different audio_filepath and a unique name to simplify edge cases or conditions
logging.info(f"Loading manifest file {cfg.input_manifest}")
manifest_orig, key_labels_map, key_rttm_map = frame_vad_infer_load_manifest(cfg)
# Prepare manifest for streaming VAD
manifest_vad_input = cfg.input_manifest
if cfg.prepare_manifest.auto_split:
logging.info("Split long audio file to avoid CUDA memory issue")
logging.debug("Try smaller split_duration if you still have CUDA memory issue")
config = {
'input': manifest_vad_input,
'window_length_in_sec': cfg.vad.parameters.window_length_in_sec,
'split_duration': cfg.prepare_manifest.split_duration,
'num_workers': cfg.num_workers,
'prepared_manifest_vad_input': cfg.prepared_manifest_vad_input,
'out_dir': output_dir,
}
manifest_vad_input = prepare_manifest(config)
else:
logging.warning(
"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it."
)
torch.set_grad_enabled(False)
vad_model = init_frame_vad_model(cfg.vad.model_path)
# setup_test_data
vad_model.setup_test_data(
test_data_config={
'batch_size': 1,
'sample_rate': 16000,
'manifest_filepath': manifest_vad_input,
'labels': ['infer'],
'num_workers': cfg.num_workers,
'shuffle': False,
'normalize_audio_db': cfg.vad.parameters.normalize_audio_db,
}
)
vad_model = vad_model.to(device)
vad_model.eval()
if not os.path.exists(cfg.frame_out_dir):
logging.info(f"Frame predictions do not exist at {cfg.frame_out_dir}, generating frame prediction.")
os.mkdir(cfg.frame_out_dir)
extract_frame_preds = True
else:
logging.info(f"Frame predictions already exist at {cfg.frame_out_dir}, skipping frame prediction generation.")
extract_frame_preds = False
if extract_frame_preds:
logging.info("Generating frame-level prediction ")
pred_dir = generate_vad_frame_pred(
vad_model=vad_model,
window_length_in_sec=cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
manifest_vad_input=manifest_vad_input,
out_dir=cfg.frame_out_dir,
)
logging.info(f"Finish generating VAD frame level prediction. You can find the prediction in {pred_dir}")
else:
pred_dir = cfg.frame_out_dir
frame_length_in_sec = cfg.vad.parameters.shift_length_in_sec
# overlap smoothing filter
if cfg.vad.parameters.smoothing:
# Generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
# smoothing_method would be either in majority vote (median) or average (mean)
logging.info("Generating predictions with overlapping input segments")
smoothing_pred_dir = generate_overlap_vad_seq(
frame_pred_dir=pred_dir,
smoothing_method=cfg.vad.parameters.smoothing,
overlap=cfg.vad.parameters.overlap,
window_length_in_sec=cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
num_workers=cfg.num_workers,
out_dir=cfg.smoothing_out_dir,
)
logging.info(
f"Finish generating predictions with overlapping input segments with smoothing_method={cfg.vad.parameters.smoothing} and overlap={cfg.vad.parameters.overlap}"
)
pred_dir = smoothing_pred_dir
# postprocessing and generate speech segments
logging.info("Converting frame level prediction to RTTM files.")
rttm_out_dir = generate_vad_segment_table(
vad_pred_dir=pred_dir,
postprocessing_params=cfg.vad.parameters.postprocessing,
frame_length_in_sec=frame_length_in_sec,
num_workers=cfg.num_workers,
use_rttm=cfg.vad.use_rttm,
out_dir=cfg.rttm_out_dir,
)
logging.info(
f"Finish generating speech semgents table with postprocessing_params: {cfg.vad.parameters.postprocessing}"
)
logging.info("Writing VAD output to manifest")
key_pred_rttm_map = {}
manifest_new = []
for entry in manifest_orig:
key = Path(entry['audio_filepath']).stem
entry['rttm_filepath'] = Path(os.path.join(rttm_out_dir, key + ".rttm")).absolute().as_posix()
if not Path(entry['rttm_filepath']).is_file():
logging.warning(f"Not able to find {entry['rttm_filepath']} for {entry['audio_filepath']}")
entry['rttm_filepath'] = ""
manifest_new.append(entry)
key_pred_rttm_map[key] = entry['rttm_filepath']
if not cfg.out_manifest_filepath:
out_manifest_filepath = os.path.join(output_dir, "manifest_vad_output.json")
else:
out_manifest_filepath = cfg.out_manifest_filepath
write_manifest(out_manifest_filepath, manifest_new)
logging.info(f"Finished writing VAD output to manifest: {out_manifest_filepath}")
if cfg.get("evaluate", False):
logging.info("Evaluating VAD results")
auroc, report = frame_vad_eval_detection_error(
pred_dir=pred_dir,
key_labels_map=key_labels_map,
key_rttm_map=key_rttm_map,
key_pred_rttm_map=key_pred_rttm_map,
frame_length_in_sec=frame_length_in_sec,
)
DetER = report.iloc[[-1]][('detection error rate', '%')].item()
FA = report.iloc[[-1]][('false alarm', '%')].item()
MISS = report.iloc[[-1]][('miss', '%')].item()
logging.info(f"AUROC: {auroc:.4f}")
logging.info(f"DetER={DetER:0.4f}, False Alarm={FA:0.4f}, Miss={MISS:0.4f}")
logging.info(f"with params: {cfg.vad.parameters.postprocessing}")
logging.info("Done!")
if __name__ == "__main__":
main() # pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/speech_classification/frame_vad_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
During inference, we perform frame-level prediction by two approaches:
1) shift the window of length window_length_in_sec (e.g. 0.63s) by shift_length_in_sec (e.g. 10ms) to generate the frame and use the prediction of the window to represent the label for the frame;
[this script demonstrates how to do this approach]
2) generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
[get frame-level predictions with this script and use vad_overlap_posterior.py in NeMo/scripts/voice_activity_detection.
That script also shows how to convert frame-level predictions
to speech/no-speech segments in start and end times format.]
Image https://raw.githubusercontent.com/NVIDIA/NeMo/main/tutorials/asr/images/vad_post_overlap_diagram.png
will help you understand this method.
This script will also help you perform postprocessing and generate speech segments if needed
Usage:
python vad_infer.py --config-path="../conf/vad" --config-name="vad_inference_postprocessing.yaml" dataset=<Path of json file of evaluation data. Audio files should have unique names>
"""
import json
import os
import torch
from nemo.collections.asr.parts.utils.speaker_utils import write_rttm2manifest
from nemo.collections.asr.parts.utils.vad_utils import (
generate_overlap_vad_seq,
generate_vad_frame_pred,
generate_vad_segment_table,
init_vad_model,
prepare_manifest,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@hydra_runner(config_path="../conf/vad", config_name="vad_inference_postprocessing.yaml")
def main(cfg):
if not cfg.dataset:
raise ValueError("You must input the path of json file of evaluation data")
    # each line of the dataset should have a different audio_filepath and a unique name, to simplify edge cases and conditions
key_meta_map = {}
with open(cfg.dataset, 'r') as manifest:
for line in manifest.readlines():
audio_filepath = json.loads(line.strip())['audio_filepath']
uniq_audio_name = audio_filepath.split('/')[-1].rsplit('.', 1)[0]
if uniq_audio_name in key_meta_map:
raise ValueError("Please make sure each line is with different audio_filepath! ")
key_meta_map[uniq_audio_name] = {'audio_filepath': audio_filepath}
# Prepare manifest for streaming VAD
manifest_vad_input = cfg.dataset
if cfg.prepare_manifest.auto_split:
logging.info("Split long audio file to avoid CUDA memory issue")
logging.debug("Try smaller split_duration if you still have CUDA memory issue")
config = {
'input': manifest_vad_input,
'window_length_in_sec': cfg.vad.parameters.window_length_in_sec,
'split_duration': cfg.prepare_manifest.split_duration,
'num_workers': cfg.num_workers,
'prepared_manifest_vad_input': cfg.prepared_manifest_vad_input,
}
manifest_vad_input = prepare_manifest(config)
else:
logging.warning(
"If you encounter CUDA memory issue, try splitting manifest entry by split_duration to avoid it."
)
torch.set_grad_enabled(False)
vad_model = init_vad_model(cfg.vad.model_path)
# setup_test_data
vad_model.setup_test_data(
test_data_config={
'vad_stream': True,
'sample_rate': 16000,
'manifest_filepath': manifest_vad_input,
'labels': ['infer',],
'num_workers': cfg.num_workers,
'shuffle': False,
'window_length_in_sec': cfg.vad.parameters.window_length_in_sec,
'shift_length_in_sec': cfg.vad.parameters.shift_length_in_sec,
'trim_silence': False,
'normalize_audio': cfg.vad.parameters.normalize_audio,
}
)
vad_model = vad_model.to(device)
vad_model.eval()
if not os.path.exists(cfg.frame_out_dir):
os.mkdir(cfg.frame_out_dir)
else:
logging.warning(
"Note frame_out_dir exists. If new file has same name as file inside existing folder, it will append result to existing file and might cause mistakes for next steps."
)
logging.info("Generating frame level prediction ")
pred_dir = generate_vad_frame_pred(
vad_model=vad_model,
window_length_in_sec=cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
manifest_vad_input=manifest_vad_input,
out_dir=cfg.frame_out_dir,
)
logging.info(
f"Finish generating VAD frame level prediction with window_length_in_sec={cfg.vad.parameters.window_length_in_sec} and shift_length_in_sec={cfg.vad.parameters.shift_length_in_sec}"
)
frame_length_in_sec = cfg.vad.parameters.shift_length_in_sec
# overlap smoothing filter
if cfg.vad.parameters.smoothing:
# Generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
# smoothing_method would be either in majority vote (median) or average (mean)
logging.info("Generating predictions with overlapping input segments")
smoothing_pred_dir = generate_overlap_vad_seq(
frame_pred_dir=pred_dir,
smoothing_method=cfg.vad.parameters.smoothing,
overlap=cfg.vad.parameters.overlap,
window_length_in_sec=cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=cfg.vad.parameters.shift_length_in_sec,
num_workers=cfg.num_workers,
out_dir=cfg.smoothing_out_dir,
)
logging.info(
f"Finish generating predictions with overlapping input segments with smoothing_method={cfg.vad.parameters.smoothing} and overlap={cfg.vad.parameters.overlap}"
)
pred_dir = smoothing_pred_dir
frame_length_in_sec = 0.01
# postprocessing and generate speech segments
if cfg.gen_seg_table:
logging.info("Converting frame level prediction to speech/no-speech segment in start and end times format.")
table_out_dir = generate_vad_segment_table(
vad_pred_dir=pred_dir,
postprocessing_params=cfg.vad.parameters.postprocessing,
frame_length_in_sec=frame_length_in_sec,
num_workers=cfg.num_workers,
out_dir=cfg.table_out_dir,
)
logging.info(
f"Finish generating speech semgents table with postprocessing_params: {cfg.vad.parameters.postprocessing}"
)
if cfg.write_to_manifest:
for i in key_meta_map:
key_meta_map[i]['rttm_filepath'] = os.path.join(table_out_dir, i + ".txt")
if not cfg.out_manifest_filepath:
out_manifest_filepath = "vad_out.json"
else:
out_manifest_filepath = cfg.out_manifest_filepath
out_manifest_filepath = write_rttm2manifest(key_meta_map, out_manifest_filepath)
logging.info(f"Writing VAD output to manifest: {out_manifest_filepath}")
if __name__ == '__main__':
main()
| NeMo-main | examples/asr/speech_classification/vad_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Task 1: Speech Command Recognition
## Preparing the dataset
Use the `process_speech_commands_data.py` script under <NEMO_ROOT>/scripts/dataset_processing in order to prepare the dataset.
```sh
python <NEMO_ROOT>/scripts/dataset_processing/process_speech_commands_data.py \
--data_root=<absolute path to where the data should be stored> \
--data_version=<either 1 or 2, indicating version of the dataset> \
--class_split=<either "all" or "sub", indicates whether all 30/35 classes should be used, or the 10+2 split should be used> \
--rebalance \
--log
```
## Train to convergence
```sh
python speech_to_label.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<path to train manifest>" \
model.validation_ds.manifest_filepath=["<path to val manifest>","<path to test manifest>"] \
trainer.devices=2 \
trainer.accelerator="gpu" \
strategy="ddp" \
trainer.max_epochs=200 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="MatchboxNet-3x1x64-v1" \
exp_manager.wandb_logger_kwargs.project="MatchboxNet-v1" \
+trainer.precision=16 \
+trainer.amp_level=O1 # needed if using PyTorch < 1.6
```
# Task 2: Voice Activity Detection
## Preparing the dataset
Use the `process_vad_data.py` script under <NEMO_ROOT>/scripts/dataset_processing in order to prepare the dataset.
```sh
python process_vad_data.py \
--out_dir=<output path to where the generated manifest should be stored> \
--speech_data_root=<path where the speech data are stored> \
--background_data_root=<path where the background data are stored> \
    --rebalance_method=<'under', 'over' or 'fixed'> \
--log
(Optional --demo (for demonstration in tutorial). If you want to use your own background noise data, make sure to delete --demo)
```
## Train to convergence
```sh
python speech_to_label.py \
--config-path=<path to dir of configs e.g. "conf">
--config-name=<name of config without .yaml e.g. "matchboxnet_3x1x64_vad"> \
model.train_ds.manifest_filepath="<path to train manifest>" \
model.validation_ds.manifest_filepath=["<path to val manifest>","<path to test manifest>"] \
trainer.devices=2 \
trainer.accelerator="gpu" \
strategy="ddp" \
trainer.max_epochs=200 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="MatchboxNet-3x1x64-vad" \
exp_manager.wandb_logger_kwargs.project="MatchboxNet-vad" \
+trainer.precision=16 \
+trainer.amp_level=O1 # needed if using PyTorch < 1.6
```
# Task 3: Language Identification
## Preparing the dataset
Use the `filelist_to_manifest.py` script under <NEMO_ROOT>/scripts/speaker_tasks in order to prepare the dataset.
## Train to convergence
```sh
python speech_to_label.py \
--config-path=<path to dir of configs e.g. "../conf/lang_id">
--config-name=<name of config without .yaml e.g. "titanet_large"> \
model.train_ds.manifest_filepath="<path to train manifest>" \
model.validation_ds.manifest_filepath="<path to val manifest>" \
model.train_ds.augmentor.noise.manifest_path="<path to noise manifest>" \
model.train_ds.augmentor.impulse.manifest_path="<path to impulse manifest>" \
model.decoder.num_classes=<num of languages> \
trainer.devices=2 \
trainer.max_epochs=40 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="titanet" \
exp_manager.wandb_logger_kwargs.project="langid" \
+exp_manager.checkpoint_callback_params.monitor="val_acc_macro" \
+exp_manager.checkpoint_callback_params.mode="max" \
+trainer.precision=16 \
```
# Optional: Use tarred dataset to speed up data loading. Apply to both tasks.
## Prepare tarred dataset.
Prepare ONE manifest that contains all training data you would like to include. Validation should use a non-tarred dataset.
Note that tarred datasets may impact validation scores, because samples are dropped in order to have the same number of files per tarfile;
scores might be slightly off since some data is missing.
Use the `convert_to_tarred_audio_dataset.py` script under <NEMO_ROOT>/scripts/speech_recognition in order to prepare tarred audio dataset.
For details, please see TarredAudioToClassificationLabelDataset in <NEMO_ROOT>/nemo/collections/asr/data/audio_to_label.py
```sh
python speech_to_label.py \
--config-path=<path to dir of configs e.g. "conf">
--config-name=<name of config without .yaml e.g. "matchboxnet_3x1x64_vad"> \
model.train_ds.manifest_filepath=<path to train tarred_audio_manifest.json> \
model.train_ds.is_tarred=True \
model.train_ds.tarred_audio_filepaths=<path to train tarred audio dataset e.g. audio_{0..2}.tar> \
+model.train_ds.num_worker=<num_shards used generating tarred dataset> \
model.validation_ds.manifest_filepath=<path to validation audio_manifest.json>\
trainer.devices=2 \
trainer.accelerator="gpu" \
strategy="ddp" \ \
trainer.max_epochs=200 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="MatchboxNet-3x1x64-vad" \
exp_manager.wandb_logger_kwargs.project="MatchboxNet-vad" \
+trainer.precision=16 \
    +trainer.amp_level=O1 # needed if using PyTorch < 1.6
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/speech_classification/results.html#
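# Manifest format
Each line of the manifests above is a JSON object containing at least an audio path, a duration and a class label.
A representative line is shown below (values are placeholders; the exact label key, e.g. `command` for speech
commands or `label` for VAD and language ID, depends on the dataset-processing script used):
{"audio_filepath": "/data/speech_commands/yes/0a2b400e_nohash_0.wav", "duration": 1.0, "command": "yes"}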
"""
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecClassificationModel, EncDecSpeakerLabelModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="../conf/matchboxnet", config_name="matchboxnet_3x1x64_v1")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if 'titanet' in cfg.name.lower():
model = EncDecSpeakerLabelModel(cfg=cfg.model, trainer=trainer)
else:
model = EncDecClassificationModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(model)
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        torch.distributed.destroy_process_group()
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if trainer.is_global_zero:
trainer = pl.Trainer(devices=1, accelerator=cfg.trainer.accelerator, strategy=cfg.trainer.strategy)
if model.prepare_test(trainer):
trainer.test(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/speech_classification/speech_to_label.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Training the model
Basic run (on CPU for 50 epochs):
python examples/asr/asr_transducer/speech_to_text_hybrid_rnnt_ctc.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<path to manifest file>" \
model.validation_ds.manifest_filepath="<path to manifest file>" \
trainer.devices=1 \
trainer.accelerator='cpu' \
trainer.max_epochs=50
Add PyTorch Lightning Trainer arguments from CLI:
python speech_to_text_rnnt.py \
... \
+trainer.fast_dev_run=true
Hydra logs will be found in "$(./outputs/$(date +"%y-%m-%d")/$(date +"%H-%M-%S")/.hydra)"
PTL logs will be found in "$(./outputs/$(date +"%y-%m-%d")/$(date +"%H-%M-%S")/lightning_logs)"
Override some args of optimizer:
python speech_to_text_hybrid_rnnt_ctc.py \
--config-path="../conf/conformer/hybrid_transducer_ctc/conformer_hybrid_transducer_ctc" \
--config-name="config_rnnt" \
model.train_ds.manifest_filepath="./an4/train_manifest.json" \
model.validation_ds.manifest_filepath="./an4/test_manifest.json" \
trainer.devices=2 \
model.aux_ctc.ctc_loss_weight=0.3 \
trainer.precision=16 \
trainer.max_epochs=2 \
model.optim.betas=[0.8,0.5] \
model.optim.weight_decay=0.0001
Override optimizer entirely
python speech_to_text_hybrid_rnnt_ctc.py \
--config-path="../conf/conformer/hybrid_transducer_ctc/conformer_hybrid_transducer_ctc" \
--config-name="config_rnnt" \
model.train_ds.manifest_filepath="./an4/train_manifest.json" \
model.validation_ds.manifest_filepath="./an4/test_manifest.json" \
model.aux_ctc.ctc_loss_weight=0.3 \
trainer.devices=2 \
trainer.precision=16 \
trainer.max_epochs=2 \
model.optim.name=adamw \
model.optim.lr=0.001 \
~model.optim.args \
+model.optim.args.betas=[0.8,0.5]\
+model.optim.args.weight_decay=0.0005
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
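# Using a trained hybrid model
A rough sketch of loading a trained checkpoint in Python and selecting which decoder is used for decoding
(the path is a placeholder; `change_decoding_strategy(decoder_type=...)` is the same hybrid-model call used by
the cache-aware streaming example in this repository):
    from nemo.collections.asr.models import EncDecHybridRNNTCTCModel
    model = EncDecHybridRNNTCTCModel.restore_from("/path/to/trained_hybrid.nemo")
    model.change_decoding_strategy(decoder_type="ctc")   # decode with the auxiliary CTC head
    model.change_decoding_strategy(decoder_type="rnnt")  # or switch back to the RNNT head (the default)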
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecHybridRNNTCTCModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="../conf/conformer/hybrid_transducer_ctc/", config_name="conformer_hybrid_transducer_ctc")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecHybridRNNTCTCModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_hybrid_transducer_ctc/speech_to_text_hybrid_rnnt_ctc_char.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
        --manifest=<path to train manifest files, separated by commas>
        OR
        --data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_hybrid_rnnt_ctc_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
model.aux_ctc.ctc_loss_weight=0.3 \
trainer.devices=-1 \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
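Note: `model.tokenizer.dir` must point to the tokenizer directory produced by `process_asr_text_tokenizer.py`,
not to an individual vocabulary file. The exact contents depend on the tokenizer type; a typical SentencePiece
(`spe`) layout looks roughly like the following (names are illustrative):
```sh
tokenizer_spe_unigram_v1024/
    tokenizer.model
    tokenizer.vocab
    vocab.txt
```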
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecHybridRNNTCTCBPEModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(
config_path="../conf/conformer/hybrid_transducer_ctc/", config_name="conformer_hybrid_transducer_ctc_bpe"
)
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecHybridRNNTCTCBPEModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_hybrid_transducer_ctc/speech_to_text_hybrid_rnnt_ctc_bpe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A script to convert a Nemo ASR Hybrid model file (.nemo) to a Nemo ASR CTC or RNNT model file (.nemo)
This allows you to train a RNNT-CTC Hybrid model, but then convert it into a pure CTC or pure RNNT model for use
in NeMo. The resulting .nemo file will be a pure CTC or RNNT model, and can be used like any other .nemo model
including in nemo2riva.
Usage: python convert_nemo_asr_hybrid_to_ctc.py -i /path/to/hybrid.nemo -o /path/to/saved_ctc_model.nemo -t ctc|rnnt
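After conversion, the saved file behaves like any other NeMo ASR model; a rough usage sketch
(paths are placeholders) is:
    from nemo.collections.asr.models import ASRModel
    model = ASRModel.restore_from("/path/to/saved_ctc_model.nemo")
    print(model.transcribe(["/path/to/audio.wav"]))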
"""
import argparse
import os
from copy import deepcopy
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.models import (
ASRModel,
EncDecCTCModel,
EncDecCTCModelBPE,
EncDecRNNTBPEModel,
EncDecRNNTModel,
)
from nemo.utils import logging
def extract_model_ctc(args, hybrid_model):
"""
A function which converts a hybrid model to a pure ctc model.
Args:
args (argparse): the args collection from ArgumentParser created by running this script
hybrid_model (ASRModel): the loaded hybrid RNNT-CTC Nemo model
"""
BPE = False
ctc_class = EncDecCTCModel
if 'tokenizer' in hybrid_model.cfg.keys():
BPE = True
ctc_class = EncDecCTCModelBPE
hybrid_model_cfg = OmegaConf.to_container(hybrid_model.cfg)
new_cfg = deepcopy(hybrid_model_cfg)
new_cfg['ctc_reduction'] = hybrid_model_cfg['aux_ctc']['ctc_reduction']
new_cfg['decoder'] = hybrid_model_cfg['aux_ctc']['decoder']
del new_cfg['compute_eval_loss']
del new_cfg['model_defaults']
del new_cfg['joint']
del new_cfg['decoding']
del new_cfg['aux_ctc']
del new_cfg['loss']
if BPE and 'labels' in new_cfg:
del new_cfg['labels']
elif (not BPE) and 'tokenizer' in new_cfg:
del new_cfg['tokenizer']
del new_cfg['target']
del new_cfg['nemo_version']
new_cfg_oc = OmegaConf.create(new_cfg)
# we call restore_from with strict=False because the .nemo file we're restoring from is a hybrid model, which will have named
# tensors in the state_dict that do not exist in the pure CTC model class, which would result in an exception with strict=True
ctc_model = ctc_class.restore_from(
args.input, map_location=torch.device('cpu'), override_config_path=new_cfg_oc, strict=False
)
assert all(
[
torch.allclose(hybrid_model.state_dict()[x], ctc_model.state_dict()[x])
for x in hybrid_model.state_dict().keys()
if x.split('.')[0] in ['preprocessor', 'encoder']
]
), "Encoder and preprocessor state dicts don't match!"
ctc_model.decoder.load_state_dict(hybrid_model.ctc_decoder.state_dict())
assert all(
[
torch.allclose(hybrid_model.ctc_decoder.state_dict()[x], ctc_model.decoder.state_dict()[x])
for x in hybrid_model.ctc_decoder.state_dict().keys()
]
), "Decoder state_dict load failed!"
assert isinstance(ctc_model, ctc_class), "Extracted CTC model is of the wrong expected class!"
return ctc_model
def extract_model_rnnt(args, hybrid_model):
"""
A function which converts a hybrid model to a pure rnnt model.
Args:
args (argparse): the args collection from ArgumentParser created by running this script
hybrid_model (ASRModel): the loaded hybrid RNNT-CTC Nemo model
"""
BPE = False
rnnt_class = EncDecRNNTModel
if 'tokenizer' in hybrid_model.cfg.keys():
BPE = True
rnnt_class = EncDecRNNTBPEModel
hybrid_model_cfg = OmegaConf.to_container(hybrid_model.cfg)
new_cfg = deepcopy(hybrid_model_cfg)
del new_cfg['aux_ctc']
if BPE and 'labels' in new_cfg:
del new_cfg['labels']
elif (not BPE) and 'tokenizer' in new_cfg:
del new_cfg['tokenizer']
del new_cfg['target']
del new_cfg['nemo_version']
new_cfg_oc = OmegaConf.create(new_cfg)
# we call restore_from with strict=False because the .nemo file we're restoring from is a hybrid model, which will have named
# tensors in the state_dict that do not exist in the pure RNNT model class, which would result in an exception with strict=True
rnnt_model = rnnt_class.restore_from(
args.input, map_location=torch.device('cpu'), override_config_path=new_cfg_oc, strict=False
)
assert all(
[
torch.allclose(hybrid_model.state_dict()[x], rnnt_model.state_dict()[x])
for x in hybrid_model.state_dict().keys()
if x.split('.')[0] in ['preprocessor', 'encoder', 'decoder', 'joint']
]
), "State dict values mismatch, something went wrong!"
assert isinstance(rnnt_model, rnnt_class), "Extracted RNNT model is of the wrong expected class!"
return rnnt_model
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input', required=True, type=str, help='path to Nemo Hybrid model .nemo file')
parser.add_argument('-o', '--output', required=True, type=str, help='path and name of output .nemo file')
parser.add_argument(
'-t',
'--model_type',
required=False,
type=str,
default='ctc',
choices=['ctc', 'rnnt'],
help='whether to output a ctc or rnnt model from the hybrid',
)
args = parser.parse_args()
if not os.path.exists(args.input):
logging.critical(f'Input file [ {args.input} ] does not exist or cannot be found. Aborting.')
exit(255)
hybrid_model = ASRModel.restore_from(args.input, map_location=torch.device('cpu'))
if args.model_type == 'ctc':
output_model = extract_model_ctc(args, hybrid_model)
elif args.model_type == 'rnnt':
output_model = extract_model_rnnt(args, hybrid_model)
else:
logging.critical(
f"the model_type arg must be one of 'ctc' or 'rnnt', received unknown value: '{args.model_type}'. Aborting."
)
exit(255)
output_model.save_to(args.output)
logging.info(f'Converted {args.model_type.upper()} model was successfully saved to {args.output}')
| NeMo-main | examples/asr/asr_hybrid_transducer_ctc/helpers/convert_nemo_asr_hybrid_to_ctc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models.ssl_models import SpeechEncDecSelfSupervisedModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
# Example of unsupervised pre-training of a model
```sh
python speech_pre_training.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
trainer.devices=-1 \
trainer.accelerator="gpu" \
strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
model.optim.sched.warmup_steps=2000
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Namex of project>"
```
For documentation on fine-tuning, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
When doing supervised fine-tuning from an unsupervised pre-trained encoder, set the flag init_strict to False.
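An illustrative downstream fine-tuning command (a sketch only; the downstream script, config and override
names depend on your task, but `init_from_nemo_model` is the generic mechanism consumed by
`maybe_init_from_pretrained_checkpoint`):
```sh
python ../asr_ctc/speech_to_text_ctc_bpe.py \
    # (pick the config matching the pre-trained encoder) \
    model.train_ds.manifest_filepath=<path to labeled train manifest> \
    model.validation_ds.manifest_filepath=<path to labeled val manifest> \
    +init_from_nemo_model=<path to the .nemo checkpoint produced by this script>
```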
"""
@hydra_runner(config_path="../conf/ssl/citrinet/", config_name="citrinet_ssl_1024")
def main(cfg):
logging.info(f"Hydra config: {OmegaConf.to_yaml(cfg)}")
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = SpeechEncDecSelfSupervisedModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if __name__ == "__main__":
main()
| NeMo-main | examples/asr/speech_pretraining/speech_pre_training.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to simulate cache-aware streaming for ASR models. The ASR model to be used with this script needs to be trained in streaming mode. Currently only Conformer models support this streaming mode.
You may find examples of streaming models under 'NeMo/examples/asr/conf/conformer/streaming/'.
It works on either a manifest of audio files or a single audio file. It can perform streaming for a single stream (audio) or perform the evaluation in multi-stream mode (batch_size>1).
The manifest file must conform to standard ASR definition - containing `audio_filepath` and `text` as the ground truth.
# Usage
## To evaluate a model in cache-aware streaming mode on a single audio file:
python speech_to_text_streaming_infer.py \
--asr_model=asr_model.nemo \
--audio_file=audio_file.wav \
--compare_vs_offline \
--use_amp \
--debug_mode
## To evaluate a model in cache-aware streaming mode on a manifest file:
python speech_to_text_streaming_infer.py \
--asr_model=asr_model.nemo \
--manifest_file=manifest_file.json \
--batch_size=16 \
--compare_vs_offline \
--use_amp \
--debug_mode
You may drop the '--debug_mode' and '--compare_vs_offline' to speedup the streaming evaluation.
If compare_vs_offline is not used, then significantly larger batch_size can be used.
Setting `--pad_and_drop_preencoded` would perform the caching for all steps including the first step.
It may result in slightly different outputs from the sub-sampling module compared to offline mode for some techniques like striding and sw_striding.
Enabling it would make it easier to export the model to ONNX.
## Hybrid ASR models
For Hybrid ASR models which have two decoders, you may select the decoder by --set_decoder DECODER_TYPE, where DECODER_TYPE can be "ctc" or "rnnt".
If decoder is not set, then the default decoder would be used which is the RNNT decoder for Hybrid ASR models.
## Multi-lookahead models
For models which support multiple lookaheads, the default is the first one in the list of model.encoder.att_context_size. To change it, you may use --att_context_size, for example --att_context_size [70,1].
## Evaluate a model trained with full context for offline mode
You may try the cache-aware streaming with a model trained with full context in offline mode.
But the accuracy would not be very good with small chunks, as there is an inconsistency between how the model was trained and how the streaming inference is done.
The accuracy of the model on the borders of chunks would be affected the most.
To use a model trained with full context, you need to pass the chunk_size and shift_size arguments.
If shift_size is not passed, chunk_size would be used as the shift_size too.
Also argument online_normalization should be enabled to simulate a realistic streaming.
The following command would simulate cache-aware streaming on a pretrained model from NGC with chunk_size of 100, shift_size of 50 and 2 left chunks as left context.
The chunk_size of 100 would be 100*4*10=4000ms for a model with 4x downsampling and 10ms shift in feature extraction.
python speech_to_text_streaming_infer.py \
--asr_model=stt_en_conformer_ctc_large \
--chunk_size=100 \
--shift_size=50 \
--left_chunks=2 \
--online_normalization \
--manifest_file=manifest_file.json \
--batch_size=16 \
--compare_vs_offline \
--use_amp \
--debug_mode
"""
import contextlib
import json
import os
import time
from argparse import ArgumentParser
import torch
from omegaconf import open_dict
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.collections.asr.parts.utils.streaming_utils import CacheAwareStreamingAudioBuffer
from nemo.utils import logging
def extract_transcriptions(hyps):
"""
The transcribed_texts returned by CTC and RNNT models are different.
This method would extract and return the text section of the hypothesis.
"""
if isinstance(hyps[0], Hypothesis):
transcriptions = []
for hyp in hyps:
transcriptions.append(hyp.text)
else:
transcriptions = hyps
return transcriptions
def calc_drop_extra_pre_encoded(asr_model, step_num, pad_and_drop_preencoded):
# for the first step there is no need to drop any tokens after the downsampling as no caching is being used
if step_num == 0 and not pad_and_drop_preencoded:
return 0
else:
return asr_model.encoder.streaming_cfg.drop_extra_pre_encoded
def perform_streaming(
asr_model, streaming_buffer, compare_vs_offline=False, debug_mode=False, pad_and_drop_preencoded=False
):
batch_size = len(streaming_buffer.streams_length)
if compare_vs_offline:
        # would pass the whole audio at once through the model like offline mode in order to compare the results with the streaming mode
# the output of the model in the offline and streaming mode should be exactly the same
with torch.inference_mode():
with autocast():
processed_signal, processed_signal_length = streaming_buffer.get_all_audios()
with torch.no_grad():
(
pred_out_offline,
transcribed_texts,
cache_last_channel_next,
cache_last_time_next,
cache_last_channel_len,
best_hyp,
) = asr_model.conformer_stream_step(
processed_signal=processed_signal,
processed_signal_length=processed_signal_length,
return_transcription=True,
)
final_offline_tran = extract_transcriptions(transcribed_texts)
logging.info(f" Final offline transcriptions: {final_offline_tran}")
else:
final_offline_tran = None
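    # initialize the streaming caches for all streams in the batch: cache_last_channel holds the cached frames
    # for the self-attention layers, cache_last_time holds the state of the causal convolutions, and
    # cache_last_channel_len tracks the valid cache length per stream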
cache_last_channel, cache_last_time, cache_last_channel_len = asr_model.encoder.get_initial_cache_state(
batch_size=batch_size
)
previous_hypotheses = None
streaming_buffer_iter = iter(streaming_buffer)
pred_out_stream = None
for step_num, (chunk_audio, chunk_lengths) in enumerate(streaming_buffer_iter):
with torch.inference_mode():
with autocast():
# keep_all_outputs needs to be True for the last step of streaming when model is trained with att_context_style=regular
# otherwise the last outputs would get dropped
with torch.no_grad():
(
pred_out_stream,
transcribed_texts,
cache_last_channel,
cache_last_time,
cache_last_channel_len,
previous_hypotheses,
) = asr_model.conformer_stream_step(
processed_signal=chunk_audio,
processed_signal_length=chunk_lengths,
cache_last_channel=cache_last_channel,
cache_last_time=cache_last_time,
cache_last_channel_len=cache_last_channel_len,
keep_all_outputs=streaming_buffer.is_buffer_empty(),
previous_hypotheses=previous_hypotheses,
previous_pred_out=pred_out_stream,
drop_extra_pre_encoded=calc_drop_extra_pre_encoded(
asr_model, step_num, pad_and_drop_preencoded
),
return_transcription=True,
)
if debug_mode:
logging.info(f"Streaming transcriptions: {extract_transcriptions(transcribed_texts)}")
final_streaming_tran = extract_transcriptions(transcribed_texts)
logging.info(f"Final streaming transcriptions: {final_streaming_tran}")
if compare_vs_offline:
# calculates and report the differences between the predictions of the model in offline mode vs streaming mode
# Normally they should be exactly the same predictions for streaming models
pred_out_stream_cat = torch.cat(pred_out_stream)
pred_out_offline_cat = torch.cat(pred_out_offline)
if pred_out_stream_cat.size() == pred_out_offline_cat.size():
diff_num = torch.sum(pred_out_stream_cat != pred_out_offline_cat).cpu().numpy()
logging.info(
f"Found {diff_num} differences in the outputs of the model in streaming mode vs offline mode."
)
else:
logging.info(
f"The shape of the outputs of the model in streaming mode ({pred_out_stream_cat.size()}) is different from offline mode ({pred_out_offline_cat.size()})."
)
return final_streaming_tran, final_offline_tran
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, required=True, help="Path to an ASR model .nemo file or name of a pretrained model.",
)
parser.add_argument(
"--device", type=str, help="The device to load the model onto and perform the streaming", default="cuda"
)
parser.add_argument("--audio_file", type=str, help="Path to an audio file to perform streaming", default=None)
parser.add_argument(
"--manifest_file",
type=str,
help="Path to a manifest file containing audio files to perform streaming",
default=None,
)
parser.add_argument("--use_amp", action="store_true", help="Whether to use AMP")
parser.add_argument("--debug_mode", action="store_true", help="Whether to print more detail in the output.")
parser.add_argument(
"--compare_vs_offline",
action="store_true",
help="Whether to compare the output of the model with the offline mode.",
)
parser.add_argument(
"--batch_size",
type=int,
default=32,
help="The batch size to be used to perform streaming in batch mode with multiple streams",
)
parser.add_argument(
"--chunk_size",
type=int,
default=-1,
help="The chunk_size to be used for models trained with full context and offline models",
)
parser.add_argument(
"--shift_size",
type=int,
default=-1,
help="The shift_size to be used for models trained with full context and offline models",
)
parser.add_argument(
"--left_chunks",
type=int,
default=2,
help="The number of left chunks to be used as left context via caching for offline models",
)
parser.add_argument(
"--online_normalization",
default=False,
action='store_true',
help="Perform normalization on the run per chunk.",
)
parser.add_argument(
"--output_path", type=str, help="path to output file when manifest is used as input", default=None
)
parser.add_argument(
"--pad_and_drop_preencoded",
action="store_true",
help="Enables padding the audio input and then dropping the extra steps after the pre-encoding for all the steps including the the first step. It may make the outputs of the downsampling slightly different from offline mode for some techniques like striding or sw_striding.",
)
parser.add_argument(
"--set_decoder",
choices=["ctc", "rnnt"],
default=None,
help="Selects the decoder for Hybrid ASR models which has both the CTC and RNNT decoder. Supported decoders are ['ctc', 'rnnt']",
)
parser.add_argument(
"--att_context_size",
type=str,
default=None,
help="Sets the att_context_size for the models which support multiple lookaheads",
)
args = parser.parse_args()
if (args.audio_file is None and args.manifest_file is None) or (
args.audio_file is not None and args.manifest_file is not None
):
raise ValueError("One of the audio_file and manifest_file should be non-empty!")
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model = nemo_asr.models.ASRModel.restore_from(restore_path=args.asr_model)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model = nemo_asr.models.ASRModel.from_pretrained(model_name=args.asr_model)
logging.info(asr_model.encoder.streaming_cfg)
if args.set_decoder is not None:
if hasattr(asr_model, "cur_decoder"):
asr_model.change_decoding_strategy(decoder_type=args.set_decoder)
else:
raise ValueError("Decoder cannot get changed for non-Hybrid ASR models.")
if args.att_context_size is not None:
if hasattr(asr_model.encoder, "set_default_att_context_size"):
asr_model.encoder.set_default_att_context_size(att_context_size=json.loads(args.att_context_size))
else:
raise ValueError("Model does not support multiple lookaheads.")
global autocast
if (
args.use_amp
and torch.cuda.is_available()
and hasattr(torch.cuda, 'amp')
and hasattr(torch.cuda.amp, 'autocast')
):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
# configure the decoding config
decoding_cfg = asr_model.cfg.decoding
with open_dict(decoding_cfg):
decoding_cfg.strategy = "greedy"
decoding_cfg.preserve_alignments = False
if hasattr(asr_model, 'joint'): # if an RNNT model
decoding_cfg.greedy.max_symbols = 10
decoding_cfg.fused_batch_size = -1
asr_model.change_decoding_strategy(decoding_cfg)
asr_model = asr_model.to(args.device)
asr_model.eval()
# chunk_size is set automatically for models trained for streaming. For models trained for offline mode with full context, we need to pass the chunk_size explicitly.
if args.chunk_size > 0:
if args.shift_size < 0:
shift_size = args.chunk_size
else:
shift_size = args.shift_size
asr_model.encoder.setup_streaming_params(
chunk_size=args.chunk_size, left_chunks=args.left_chunks, shift_size=shift_size
)
# In streaming, offline normalization is not feasible as we don't have access to the whole audio at the beginning
# When online_normalization is enabled, the normalization of the input features (mel-spectrograms) are done per step
# It is suggested to train the streaming models without any normalization in the input features.
if args.online_normalization:
if asr_model.cfg.preprocessor.normalize not in ["per_feature", "all_feature"]:
logging.warning(
"online_normalization is enabled but the model has no normalization in the feature extration part, so it is ignored."
)
online_normalization = False
else:
online_normalization = True
else:
online_normalization = False
streaming_buffer = CacheAwareStreamingAudioBuffer(
model=asr_model,
online_normalization=online_normalization,
pad_and_drop_preencoded=args.pad_and_drop_preencoded,
)
if args.audio_file is not None:
# stream a single audio file
processed_signal, processed_signal_length, stream_id = streaming_buffer.append_audio_file(
args.audio_file, stream_id=-1
)
perform_streaming(
asr_model=asr_model,
streaming_buffer=streaming_buffer,
compare_vs_offline=args.compare_vs_offline,
pad_and_drop_preencoded=args.pad_and_drop_preencoded,
)
else:
# stream audio files in a manifest file in batched mode
samples = []
all_streaming_tran = []
all_offline_tran = []
all_refs_text = []
with open(args.manifest_file, 'r') as f:
for line in f:
item = json.loads(line)
samples.append(item)
logging.info(f"Loaded {len(samples)} from the manifest at {args.manifest_file}.")
start_time = time.time()
for sample_idx, sample in enumerate(samples):
processed_signal, processed_signal_length, stream_id = streaming_buffer.append_audio_file(
sample['audio_filepath'], stream_id=-1
)
if "text" in sample:
all_refs_text.append(sample["text"])
logging.info(f'Added this sample to the buffer: {sample["audio_filepath"]}')
if (sample_idx + 1) % args.batch_size == 0 or sample_idx == len(samples) - 1:
logging.info(f"Starting to stream samples {sample_idx - len(streaming_buffer) + 1} to {sample_idx}...")
streaming_tran, offline_tran = perform_streaming(
asr_model=asr_model,
streaming_buffer=streaming_buffer,
compare_vs_offline=args.compare_vs_offline,
debug_mode=args.debug_mode,
pad_and_drop_preencoded=args.pad_and_drop_preencoded,
)
all_streaming_tran.extend(streaming_tran)
if args.compare_vs_offline:
all_offline_tran.extend(offline_tran)
streaming_buffer.reset_buffer()
if args.compare_vs_offline and len(all_refs_text) == len(all_offline_tran):
offline_wer = word_error_rate(hypotheses=all_offline_tran, references=all_refs_text)
logging.info(f"WER% of offline mode: {round(offline_wer * 100, 2)}")
if len(all_refs_text) == len(all_streaming_tran):
streaming_wer = word_error_rate(hypotheses=all_streaming_tran, references=all_refs_text)
logging.info(f"WER% of streaming mode: {round(streaming_wer*100, 2)}")
end_time = time.time()
logging.info(f"The whole streaming process took: {round(end_time - start_time, 2)}s")
# stores the results including the transcriptions of the streaming inference in a json file
if args.output_path is not None and len(all_refs_text) == len(all_streaming_tran):
fname = (
"streaming_out_"
+ os.path.splitext(os.path.basename(args.asr_model))[0]
+ "_"
                + os.path.splitext(os.path.basename(args.manifest_file))[0]
+ ".json"
)
hyp_json = os.path.join(args.output_path, fname)
os.makedirs(args.output_path, exist_ok=True)
with open(hyp_json, "w") as out_f:
for i, hyp in enumerate(all_streaming_tran):
record = {
"pred_text": hyp,
"text": all_refs_text[i],
"wer": round(word_error_rate(hypotheses=[hyp], references=[all_refs_text[i]]) * 100, 2),
}
out_f.write(json.dumps(record) + '\n')
if __name__ == '__main__':
main()
| NeMo-main | examples/asr/asr_cache_aware_streaming/speech_to_text_cache_aware_streaming_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for post training quantization of ASR models
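An illustrative invocation (flags correspond to the argument parser below; paths and thresholds are placeholders):
    python speech_to_text_quant_infer.py \
        --asr_model=<path to .nemo file or name of a pretrained model> \
        --dataset=<path to evaluation manifest> \
        --wer_tolerance=0.10 \
        --sensitivity \
        --onnx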
"""
import collections
from argparse import ArgumentParser
from pprint import pprint
import torch
from omegaconf import open_dict
from nemo.collections.asr.metrics.wer import WER, CTCDecoding, CTCDecodingConfig, word_error_rate
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import quant_modules
except ImportError:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
can_gpu = torch.cuda.is_available()
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=True, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--wer_target", type=float, default=None, help="used by test")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument("--wer_tolerance", type=float, default=1.0, help="used by test")
parser.add_argument(
"--dont_normalize_text",
default=False,
action='store_false',
help="Turn off trasnscript normalization. Recommended for non-English.",
)
parser.add_argument(
"--use_cer", default=False, action='store_true', help="Use Character Error Rate as the evaluation metric"
)
parser.add_argument('--sensitivity', action="store_true", help="Perform sensitivity analysis")
parser.add_argument('--onnx', action="store_true", help="Export to ONNX")
parser.add_argument('--quant-disable-keyword', type=str, nargs='+', help='disable quantizers by keyword')
args = parser.parse_args()
torch.set_grad_enabled(False)
quant_modules.initialize()
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model_cfg = EncDecCTCModel.restore_from(restore_path=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model, override_config_path=asr_model_cfg)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model_cfg = EncDecCTCModel.from_pretrained(model_name=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, override_config_path=asr_model_cfg)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': args.dont_normalize_text,
}
)
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
if args.quant_disable_keyword:
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
for keyword in args.quant_disable_keyword:
if keyword in name:
logging.warning(F"Disable {name}")
module.disable()
labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])
decoding_cfg = CTCDecodingConfig()
char_decoding = CTCDecoding(decoding_cfg, vocabulary=labels_map)
wer = WER(char_decoding, use_cer=args.use_cer)
wer_quant = evaluate(asr_model, labels_map, wer)
logging.info(f'Got WER of {wer_quant}. Tolerance was {args.wer_tolerance}')
if args.sensitivity:
if wer_quant < args.wer_tolerance:
logging.info("Tolerance is already met. Skip sensitivity analyasis.")
return
quant_layer_names = []
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
module.disable()
layer_name = name.replace("._input_quantizer", "").replace("._weight_quantizer", "")
if layer_name not in quant_layer_names:
quant_layer_names.append(layer_name)
logging.info(F"{len(quant_layer_names)} quantized layers found.")
# Build sensitivity profile
quant_layer_sensitivity = {}
for i, quant_layer in enumerate(quant_layer_names):
logging.info(F"Enable {quant_layer}")
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer) and quant_layer in name:
module.enable()
logging.info(F"{name:40}: {module}")
# Eval the model
wer_value = evaluate(asr_model, labels_map, wer)
logging.info(F"WER: {wer_value}")
quant_layer_sensitivity[quant_layer] = args.wer_tolerance - wer_value
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer) and quant_layer in name:
module.disable()
logging.info(F"{name:40}: {module}")
# Skip most sensitive layers until WER target is met
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
module.enable()
quant_layer_sensitivity = collections.OrderedDict(sorted(quant_layer_sensitivity.items(), key=lambda x: x[1]))
pprint(quant_layer_sensitivity)
skipped_layers = []
for quant_layer, _ in quant_layer_sensitivity.items():
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if quant_layer in name:
logging.info(F"Disable {name}")
if not quant_layer in skipped_layers:
skipped_layers.append(quant_layer)
module.disable()
wer_value = evaluate(asr_model, labels_map, wer)
if wer_value <= args.wer_tolerance:
logging.info(
F"WER tolerance {args.wer_tolerance} is met by skipping {len(skipped_layers)} sensitive layers."
)
print(skipped_layers)
export_onnx(args, asr_model)
return
raise ValueError(f"WER tolerance {args.wer_tolerance} can not be met with any layer quantized!")
export_onnx(args, asr_model)
def export_onnx(args, asr_model):
if args.onnx:
if args.asr_model.endswith("nemo"):
onnx_name = args.asr_model.replace(".nemo", ".onnx")
else:
onnx_name = args.asr_model
logging.info(F"Export to {onnx_name}")
quant_nn.TensorQuantizer.use_fb_fake_quant = True
asr_model.export(onnx_name, onnx_opset_version=13)
quant_nn.TensorQuantizer.use_fb_fake_quant = False
def evaluate(asr_model, labels_map, wer):
# Eval the model
hypotheses = []
references = []
for test_batch in asr_model.test_dataloader():
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
with autocast():
log_probs, encoded_len, greedy_predictions = asr_model(
input_signal=test_batch[0], input_signal_length=test_batch[1]
)
hypotheses += wer.decoding.ctc_decoder_predictions_tensor(greedy_predictions)[0]
for batch_ind in range(greedy_predictions.shape[0]):
seq_len = test_batch[3][batch_ind].cpu().detach().numpy()
seq_ids = test_batch[2][batch_ind].cpu().detach().numpy()
reference = ''.join([labels_map[c] for c in seq_ids[0:seq_len]])
references.append(reference)
del test_batch
wer_value = word_error_rate(hypotheses=hypotheses, references=references, use_cer=wer.use_cer)
return wer_value
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/quantization/speech_to_text_quant_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for running inference with ASR models using TensorRT
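An illustrative invocation (flags correspond to the argument parser below; paths are placeholders, and the
ONNX file is typically the one exported by speech_to_text_quant_infer.py with --onnx):
    python speech_to_text_quant_infer_trt.py \
        --asr_model=<path to .nemo file or name of a pretrained model> \
        --asr_onnx=<path to the exported ONNX file> \
        --dataset=<path to evaluation manifest> \
        --batch_size=4 \
        --qat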
"""
import os
from argparse import ArgumentParser
import numpy as np
import pycuda.driver as cuda
import tensorrt as trt
import torch
from omegaconf import open_dict
from nemo.collections.asr.metrics.wer import WER, CTCDecoding, CTCDecodingConfig, word_error_rate
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
# Use autoprimaryctx if available (pycuda >= 2021.1) to
# prevent issues with other modules that rely on the primary
# device context.
try:
import pycuda.autoprimaryctx
except ModuleNotFoundError:
import pycuda.autoinit
TRT_LOGGER = trt.Logger()
can_gpu = torch.cuda.is_available()
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=True, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument(
"--asr_onnx",
type=str,
default="./QuartzNet15x5Base-En-max-32.onnx",
help="Pass: 'QuartzNet15x5Base-En-max-32.onnx'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=4)
parser.add_argument(
"--dont_normalize_text",
default=False,
action='store_false',
help="Turn off trasnscript normalization. Recommended for non-English.",
)
parser.add_argument(
"--use_cer", default=False, action='store_true', help="Use Character Error Rate as the evaluation metric"
)
parser.add_argument('--qat', action="store_true", help="Use onnx file exported from QAT tools")
args = parser.parse_args()
torch.set_grad_enabled(False)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model_cfg = EncDecCTCModel.restore_from(restore_path=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model, override_config_path=asr_model_cfg)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model_cfg = EncDecCTCModel.from_pretrained(model_name=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, override_config_path=asr_model_cfg)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': args.dont_normalize_text,
}
)
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])
decoding_cfg = CTCDecodingConfig()
char_decoding = CTCDecoding(decoding_cfg, vocabulary=labels_map)
wer = WER(char_decoding, use_cer=args.use_cer)
wer_result = evaluate(asr_model, args.asr_onnx, labels_map, wer, args.qat)
logging.info(f'Got WER of {wer_result}.')
def get_min_max_input_shape(asr_model):
max_shape = (1, 64, 1)
min_shape = (64, 64, 99999)
for test_batch in asr_model.test_dataloader():
test_batch = [x.cuda() for x in test_batch]
processed_signal, processed_signal_length = asr_model.preprocessor(
input_signal=test_batch[0], length=test_batch[1]
)
shape = processed_signal.cpu().numpy().shape
if shape[0] > max_shape[0]:
max_shape = (shape[0], *max_shape[1:])
if shape[0] < min_shape[0]:
min_shape = (shape[0], *min_shape[1:])
if shape[2] > max_shape[2]:
max_shape = (*max_shape[0:2], shape[2])
if shape[2] < min_shape[2]:
min_shape = (*min_shape[0:2], shape[2])
return min_shape, max_shape
def build_trt_engine(asr_model, onnx_path, qat):
trt_engine_path = "{}.trt".format(onnx_path)
if os.path.exists(trt_engine_path):
return trt_engine_path
min_input_shape, max_input_shape = get_min_max_input_shape(asr_model)
workspace_size = 512
with trt.Builder(TRT_LOGGER) as builder:
network_flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
if qat:
network_flags |= 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_PRECISION)
with builder.create_network(flags=network_flags) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser, builder.create_builder_config() as builder_config:
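            # Parse the ONNX graph and attach a dynamic-shape optimization profile on the
            # "audio_signal" input so one engine can serve variable batch and time sizes.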
parser.parse_from_file(onnx_path)
builder_config.max_workspace_size = workspace_size * (1024 * 1024)
if qat:
builder_config.set_flag(trt.BuilderFlag.INT8)
profile = builder.create_optimization_profile()
profile.set_shape("audio_signal", min=min_input_shape, opt=max_input_shape, max=max_input_shape)
builder_config.add_optimization_profile(profile)
engine = builder.build_engine(network, builder_config)
serialized_engine = engine.serialize()
with open(trt_engine_path, "wb") as fout:
fout.write(serialized_engine)
return trt_engine_path
def trt_inference(stream, trt_ctx, d_input, d_output, input_signal, input_signal_length):
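    # Run one inference pass through the TensorRT engine: copy the features to the device,
    # execute asynchronously, copy the logits back, and greedy-decode them with argmax.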
print("infer with shape: {}".format(input_signal.shape))
trt_ctx.set_binding_shape(0, input_signal.shape)
assert trt_ctx.all_binding_shapes_specified
h_output = cuda.pagelocked_empty(tuple(trt_ctx.get_binding_shape(1)), dtype=np.float32)
h_input_signal = cuda.register_host_memory(np.ascontiguousarray(input_signal.cpu().numpy().ravel()))
cuda.memcpy_htod_async(d_input, h_input_signal, stream)
trt_ctx.execute_async_v2(bindings=[int(d_input), int(d_output)], stream_handle=stream.handle)
cuda.memcpy_dtoh_async(h_output, d_output, stream)
stream.synchronize()
greedy_predictions = torch.tensor(h_output).argmax(dim=-1, keepdim=False)
return greedy_predictions
def evaluate(asr_model, asr_onnx, labels_map, wer, qat):
# Eval the model
hypotheses = []
references = []
stream = cuda.Stream()
vocabulary_size = len(labels_map) + 1
engine_file_path = build_trt_engine(asr_model, asr_onnx, qat)
with open(engine_file_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
trt_engine = runtime.deserialize_cuda_engine(f.read())
trt_ctx = trt_engine.create_execution_context()
profile_shape = trt_engine.get_profile_shape(profile_index=0, binding=0)
print("profile shape min:{}, opt:{}, max:{}".format(profile_shape[0], profile_shape[1], profile_shape[2]))
max_input_shape = profile_shape[2]
input_nbytes = trt.volume(max_input_shape) * trt.float32.itemsize
d_input = cuda.mem_alloc(input_nbytes)
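        # Size the output buffer for the worst case: the QuartzNet encoder roughly halves the
        # time dimension, and the vocabulary includes the CTC blank appended to the labels.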
max_output_shape = [max_input_shape[0], vocabulary_size, (max_input_shape[-1] + 1) // 2]
output_nbytes = trt.volume(max_output_shape) * trt.float32.itemsize
d_output = cuda.mem_alloc(output_nbytes)
for test_batch in asr_model.test_dataloader():
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
processed_signal, processed_signal_length = asr_model.preprocessor(
input_signal=test_batch[0], length=test_batch[1]
)
greedy_predictions = trt_inference(
stream,
trt_ctx,
d_input,
d_output,
input_signal=processed_signal,
input_signal_length=processed_signal_length,
)
hypotheses += wer.decoding.ctc_decoder_predictions_tensor(greedy_predictions)[0]
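            # Rebuild reference transcripts from the token ids and lengths in the batch
            # (each test batch is (audio, audio_len, token_ids, token_lens)).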
for batch_ind in range(greedy_predictions.shape[0]):
seq_len = test_batch[3][batch_ind].cpu().detach().numpy()
seq_ids = test_batch[2][batch_ind].cpu().detach().numpy()
reference = ''.join([labels_map[c] for c in seq_ids[0:seq_len]])
references.append(reference)
del test_batch
wer_value = word_error_rate(hypotheses=hypotheses, references=references, use_cer=wer.use_cer)
return wer_value
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/quantization/speech_to_text_quant_infer_trt.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for calibrating a pretrained ASR model for quantization
"""
from argparse import ArgumentParser
import torch
from omegaconf import open_dict
from nemo.collections.asr.models import EncDecCTCModel
from nemo.utils import logging
try:
from pytorch_quantization import calib
from pytorch_quantization import nn as quant_nn
from pytorch_quantization import quant_modules
from pytorch_quantization.tensor_quant import QuantDescriptor
except ImportError:
raise ImportError(
"pytorch-quantization is not installed. Install from "
"https://github.com/NVIDIA/TensorRT/tree/master/tools/pytorch-quantization."
)
try:
from torch.cuda.amp import autocast
except ImportError:
from contextlib import contextmanager
@contextmanager
def autocast(enabled=None):
yield
can_gpu = torch.cuda.is_available()
def main():
parser = ArgumentParser()
parser.add_argument(
"--asr_model", type=str, default="QuartzNet15x5Base-En", required=True, help="Pass: 'QuartzNet15x5Base-En'",
)
parser.add_argument("--dataset", type=str, required=True, help="path to evaluation data")
parser.add_argument("--batch_size", type=int, default=256)
    parser.add_argument(
        "--dont_normalize_text",
        default=True,
        action='store_false',
        help="Turn off transcript normalization. Recommended for non-English.",
    )
parser.add_argument('--num_calib_batch', default=1, type=int, help="Number of batches for calibration.")
parser.add_argument('--calibrator', type=str, choices=["max", "histogram"], default="max")
parser.add_argument('--percentile', nargs='+', type=float, default=[99.9, 99.99, 99.999, 99.9999])
parser.add_argument("--amp", action="store_true", help="Use AMP in calibration.")
parser.set_defaults(amp=False)
args = parser.parse_args()
torch.set_grad_enabled(False)
# Initialize quantization
quant_desc_input = QuantDescriptor(calib_method=args.calibrator)
quant_nn.QuantConv2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantConvTranspose2d.set_default_quant_desc_input(quant_desc_input)
quant_nn.QuantLinear.set_default_quant_desc_input(quant_desc_input)
if args.asr_model.endswith('.nemo'):
logging.info(f"Using local ASR model from {args.asr_model}")
asr_model_cfg = EncDecCTCModel.restore_from(restore_path=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.restore_from(restore_path=args.asr_model, override_config_path=asr_model_cfg)
else:
logging.info(f"Using NGC cloud ASR model {args.asr_model}")
asr_model_cfg = EncDecCTCModel.from_pretrained(model_name=args.asr_model, return_config=True)
with open_dict(asr_model_cfg):
asr_model_cfg.encoder.quantize = True
asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model, override_config_path=asr_model_cfg)
asr_model.setup_test_data(
test_data_config={
'sample_rate': 16000,
'manifest_filepath': args.dataset,
'labels': asr_model.decoder.vocabulary,
'batch_size': args.batch_size,
'normalize_transcripts': args.dont_normalize_text,
'shuffle': True,
}
)
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
if can_gpu:
asr_model = asr_model.cuda()
asr_model.eval()
# Enable calibrators
for name, module in asr_model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
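    # Feed a limited number of batches through the model so the enabled calibrators can
    # collect activation statistics; amax values are computed from them afterwards.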
for i, test_batch in enumerate(asr_model.test_dataloader()):
if can_gpu:
test_batch = [x.cuda() for x in test_batch]
if args.amp:
with autocast():
_ = asr_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
else:
_ = asr_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
if i >= args.num_calib_batch:
break
# Save calibrated model(s)
model_name = args.asr_model.replace(".nemo", "") if args.asr_model.endswith(".nemo") else args.asr_model
    if args.calibrator != "histogram":
compute_amax(asr_model, method="max")
asr_model.save_to(F"{model_name}-max-{args.num_calib_batch*args.batch_size}.nemo")
else:
for percentile in args.percentile:
print(F"{percentile} percentile calibration")
compute_amax(asr_model, method="percentile")
asr_model.save_to(F"{model_name}-percentile-{percentile}-{args.num_calib_batch*args.batch_size}.nemo")
for method in ["mse", "entropy"]:
print(F"{method} calibration")
compute_amax(asr_model, method=method)
asr_model.save_to(F"{model_name}-{method}-{args.num_calib_batch*args.batch_size}.nemo")
def compute_amax(model, **kwargs):
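    # Turn the collected calibration statistics into amax values on every quantizer:
    # MaxCalibrator needs no arguments, histogram-based calibrators accept kwargs such as method.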
for name, module in model.named_modules():
if isinstance(module, quant_nn.TensorQuantizer):
if module._calibrator is not None:
if isinstance(module._calibrator, calib.MaxCalibrator):
module.load_calib_amax()
else:
module.load_calib_amax(**kwargs)
print(F"{name:40}: {module}")
if can_gpu:
model.cuda()
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/quantization/speech_to_text_calibrate.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
        --manifest=<path to train manifest files, separated by commas>
OR
        --data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_ctc_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
trainer.devices=-1 \
trainer.accelerator="gpu" \
trainer.strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
    model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="../conf/citrinet/", config_name="config_bpe")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecCTCModelBPE(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/asr/asr_ctc/speech_to_text_ctc_bpe.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Training the model
Basic run (on CPU for 50 epochs):
python examples/asr/asr_ctc/speech_to_text_ctc.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<path to manifest file>" \
model.validation_ds.manifest_filepath="<path to manifest file>" \
trainer.devices=1 \
trainer.accelerator='cpu' \
trainer.max_epochs=50
Add PyTorch Lightning Trainer arguments from CLI:
python speech_to_text_ctc.py \
... \
+trainer.fast_dev_run=true
Hydra logs will be found in "$(./outputs/$(date +"%y-%m-%d")/$(date +"%H-%M-%S")/.hydra)"
PTL logs will be found in "$(./outputs/$(date +"%y-%m-%d")/$(date +"%H-%M-%S")/lightning_logs)"
Override some args of optimizer:
python speech_to_text_ctc.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="./an4/train_manifest.json" \
model.validation_ds.manifest_filepath="./an4/test_manifest.json" \
trainer.devices=2 \
trainer.max_epochs=2 \
model.optim.args.betas=[0.8,0.5] \
model.optim.args.weight_decay=0.0001
Override optimizer entirely
python speech_to_text_ctc.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="./an4/train_manifest.json" \
model.validation_ds.manifest_filepath="./an4/test_manifest.json" \
trainer.devices=2 \
trainer.max_epochs=2 \
model.optim.name=adamw \
model.optim.lr=0.001 \
~model.optim.args \
+model.optim.args.betas=[0.8,0.5]\
+model.optim.args.weight_decay=0.0005
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecCTCModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="../conf", config_name="config")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecCTCModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_ctc/speech_to_text_ctc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import json
import os
from dataclasses import dataclass, is_dataclass
from typing import List, Optional, Union
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.modules.conformer_encoder import ConformerChangeConfig
from nemo.collections.asr.parts.utils.transcribe_utils import compute_output_filename, prepare_audio_data, setup_model
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
Translate audio files on a single CPU/GPU. Useful for translating moderate amounts of audio data.
# Arguments
model_path: path to .nemo ST checkpoint
pretrained_name: name of pretrained ST model (from NGC registry)
audio_dir: path to directory with audio files
dataset_manifest: path to dataset JSON manifest file (in NeMo format)
output_filename: Output filename where the translations will be written
batch_size: batch size during inference
cuda: Optional int to enable or disable execution of model on certain CUDA device.
allow_mps: Bool to allow using MPS (Apple Silicon M-series GPU) device if available
amp: Bool to decide if Automatic Mixed Precision should be used during inference
audio_type: Str filetype of the audio. Supported = wav, flac, mp3
overwrite_translations: Bool which when set allows repeated translations to overwrite previous results.
# Usage
ST model can be specified by either "model_path" or "pretrained_name".
Data for translation can be defined with either "audio_dir" or "dataset_manifest".
Results are returned in a JSON manifest file.
python translate_speech.py \
model_path=null \
pretrained_name=null \
audio_dir="<remove or path to folder of audio files>" \
dataset_manifest="<remove or path to manifest>" \
output_filename="<remove or specify output filename>" \
batch_size=32 \
cuda=0 \
amp=True \
"""
@dataclass
class ModelChangeConfig:
# Sub-config for changes specific to the Conformer Encoder
conformer: ConformerChangeConfig = ConformerChangeConfig()
@dataclass
class TranslationConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
audio_dir: Optional[str] = None # Path to a directory which contains audio files
dataset_manifest: Optional[str] = None # Path to dataset's JSON manifest
audio_key: str = 'audio_filepath' # Used to override the default audio key in dataset_manifest
eval_config_yaml: Optional[str] = None # Path to a yaml file of config of evaluation
# General configs
output_filename: Optional[str] = None
batch_size: int = 32
random_seed: Optional[int] = None # seed number going to be used in seed_everything()
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
allow_mps: bool = False # allow to select MPS device (Apple Silicon M-series GPU)
amp: bool = False
audio_type: str = "wav"
# Recompute model translation, even if the output folder exists with scores.
overwrite_translations: bool = True
# can be set to True to return list of translations instead of the config
# if True, will also skip writing anything to the output file
return_translations: bool = False
@hydra_runner(config_name="TranslationConfig", schema=TranslationConfig)
def main(cfg: TranslationConfig) -> Union[TranslationConfig, List[str]]:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
for key in cfg:
cfg[key] = None if cfg[key] == 'None' else cfg[key]
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.random_seed:
pl.seed_everything(cfg.random_seed)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
if cfg.audio_dir is None and cfg.dataset_manifest is None:
raise ValueError("Both cfg.audio_dir and cfg.dataset_manifest cannot be None!")
    # Load augmentor from an external yaml file which contains eval info; this could be extended to other features such as VAD, P&C
augmentor = None
if cfg.eval_config_yaml:
eval_config = OmegaConf.load(cfg.eval_config_yaml)
augmentor = eval_config.test_ds.get("augmentor")
logging.info(f"Will apply on-the-fly augmentation on samples during translation: {augmentor} ")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
map_location = torch.device('cuda:0')
elif cfg.allow_mps and hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
logging.warning(
"MPS device (Apple Silicon M-series GPU) support is experimental."
" Env variable `PYTORCH_ENABLE_MPS_FALLBACK=1` should be set in most cases to avoid failures."
)
device = [0]
accelerator = 'mps'
map_location = torch.device('mps')
else:
device = 1
accelerator = 'cpu'
map_location = torch.device('cpu')
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device(f'cuda:{cfg.cuda}')
logging.info(f"Inference will be done on device: {map_location}")
asr_model, model_name = setup_model(cfg, map_location)
trainer = pl.Trainer(devices=device, accelerator=accelerator)
asr_model.set_trainer(trainer)
asr_model = asr_model.eval()
# collect additional translation information
return_hypotheses = False
    # prepare audio filepaths and decide whether it's partial audio
filepaths, partial_audio = prepare_audio_data(cfg)
# setup AMP (optional)
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
else:
@contextlib.contextmanager
def autocast():
yield
# Compute output filename
cfg = compute_output_filename(cfg, model_name)
# if translations should not be overwritten, and already exists, skip re-translation step and return
if not cfg.return_translations and not cfg.overwrite_translations and os.path.exists(cfg.output_filename):
logging.info(
f"Previous translations found at {cfg.output_filename}, and flag `overwrite_translations`"
f"is {cfg.overwrite_translations}. Returning without re-translating text."
)
return cfg
# translate audio
with autocast():
with torch.no_grad():
translations = asr_model.translate(
paths2audio_files=filepaths, batch_size=cfg.batch_size, return_hypotheses=return_hypotheses,
)
logging.info(f"Finished translating {len(filepaths)} files !")
logging.info(f"Writing translations into file: {cfg.output_filename}")
if cfg.return_translations:
return translations
# write audio translations
with open(cfg.output_filename, 'w', encoding='utf-8', newline='\n') as f:
for filepath, translation in zip(filepaths, translations):
item = {'audio_filepath': filepath, 'pred_translation': translation}
f.write(json.dumps(item, ensure_ascii=False) + "\n")
logging.info(f"Finished writing predictions to {cfg.output_filename}!")
return cfg
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/speech_translation/translate_speech.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Training the model
```sh
python speech_to_text_transformer.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.audio.tarred_audio_filepaths=<path to tar files with audio> \
model.train_ds.audio_manifest_filepath=<path to audio data manifest> \
model.validation_ds.manifest_filepath=<path to validation manifest> \
model.test_ds.manifest_filepath=<path to test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.model_path=<path to speech tokenizer model> \
model.tokenizer.type=<either bpe, wpe, or yttm> \
    trainer.devices=-1 \
    trainer.accelerator="gpu" \
    trainer.strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
    model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecTransfModelBPE
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="../conf/speech_translation/", config_name="fast-conformer_transformer")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecTransfModelBPE(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/asr/speech_translation/speech_to_text_transformer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Training the model
Basic run (on CPU for 50 epochs):
python examples/asr/asr_transducer/speech_to_text_rnnt.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<path to manifest file>" \
model.validation_ds.manifest_filepath="<path to manifest file>" \
trainer.devices=1 \
trainer.accelerator='cpu' \
trainer.max_epochs=50
Add PyTorch Lightning Trainer arguments from CLI:
python speech_to_text_rnnt.py \
... \
+trainer.fast_dev_run=true
Hydra logs will be found in "$(./outputs/$(date +"%y-%m-%d")/$(date +"%H-%M-%S")/.hydra)"
PTL logs will be found in "$(./outputs/$(date +"%y-%m-%d")/$(date +"%H-%M-%S")/lightning_logs)"
Override some args of optimizer:
python speech_to_text_rnnt.py \
--config-path="experimental/contextnet_rnnt" \
--config-name="config_rnnt" \
model.train_ds.manifest_filepath="./an4/train_manifest.json" \
model.validation_ds.manifest_filepath="./an4/test_manifest.json" \
trainer.devices=2 \
trainer.precision=16 \
trainer.max_epochs=2 \
model.optim.betas=[0.8,0.5] \
model.optim.weight_decay=0.0001
Override optimizer entirely
python speech_to_text_rnnt.py \
--config-path="experimental/contextnet_rnnt" \
--config-name="config_rnnt" \
model.train_ds.manifest_filepath="./an4/train_manifest.json" \
model.validation_ds.manifest_filepath="./an4/test_manifest.json" \
trainer.devices=2 \
trainer.precision=16 \
trainer.max_epochs=2 \
model.optim.name=adamw \
model.optim.lr=0.001 \
~model.optim.args \
+model.optim.args.betas=[0.8,0.5]\
+model.optim.args.weight_decay=0.0005
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecRNNTModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="experimental/contextnet_rnnt", config_name="config_rnnt")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecRNNTModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_transducer/speech_to_text_rnnt.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Preparing the Tokenizer for the dataset
Use the `process_asr_text_tokenizer.py` script under <NEMO_ROOT>/scripts/tokenizers/ in order to prepare the tokenizer.
```sh
python <NEMO_ROOT>/scripts/tokenizers/process_asr_text_tokenizer.py \
        --manifest=<path to train manifest files, separated by commas>
OR
        --data_file=<path to text data, separated by commas> \
--data_root="<output directory>" \
--vocab_size=<number of tokens in vocabulary> \
--tokenizer=<"spe" or "wpe"> \
--no_lower_case \
--spe_type=<"unigram", "bpe", "char" or "word"> \
--spe_character_coverage=1.0 \
--log
```
# Training the model
```sh
python speech_to_text_rnnt_bpe.py \
# (Optional: --config-path=<path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath=<path to train manifest> \
model.validation_ds.manifest_filepath=<path to val/test manifest> \
model.tokenizer.dir=<path to directory of tokenizer (not full path to the vocab file!)> \
model.tokenizer.type=<either bpe or wpe> \
trainer.devices=-1 \
trainer.accelerator="gpu" \
trainer.strategy="ddp" \
trainer.max_epochs=100 \
model.optim.name="adamw" \
model.optim.lr=0.001 \
model.optim.betas=[0.9,0.999] \
model.optim.weight_decay=0.0001 \
    model.optim.sched.warmup_steps=2000 \
exp_manager.create_wandb_logger=True \
exp_manager.wandb_logger_kwargs.name="<Name of experiment>" \
exp_manager.wandb_logger_kwargs.project="<Name of project>"
```
# Fine-tune a model
For documentation on fine-tuning this model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.asr.models import EncDecRNNTBPEModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="experimental/contextnet_rnnt", config_name="config_rnnt_bpe")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
asr_model = EncDecRNNTBPEModel(cfg=cfg.model, trainer=trainer)
# Initialize the weights of the model from another model, if provided via config
asr_model.maybe_init_from_pretrained_checkpoint(cfg)
trainer.fit(asr_model)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.manifest_filepath is not None:
if asr_model.prepare_test(trainer):
trainer.test(asr_model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_transducer/speech_to_text_rnnt_bpe.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file provides the ASR+VAD inference pipeline, with the option to perform only ASR or VAD alone.
There are two types of input: the first is a manifest passed to `manifest_filepath`,
and the second is a directory of audio files passed to `audio_dir` together with `audio_type`.
The input manifest must be a JSON manifest file, where each line is a dictionary in JSON format. The fields ["audio_filepath", "offset", "duration", "text"] are required. An example of a manifest file is:
```
{"audio_filepath": "/path/to/audio_file1", "offset": 0, "duration": 10000, "text": "a b c d e"}
{"audio_filepath": "/path/to/audio_file2", "offset": 0, "duration": 10000, "text": "f g h i j"}
```
To run the code with ASR+VAD default settings:
```bash
python speech_to_text_with_vad.py \
manifest_filepath=/PATH/TO/MANIFEST.json \
vad_model=vad_multilingual_frame_marblenet\
asr_model=stt_en_conformer_ctc_large \
vad_config=../conf/vad/frame_vad_inference_postprocess.yaml
```
To use only ASR and disable VAD, set `vad_model=None` and `use_rttm=False`.
To use only VAD, set `asr_model=None` and specify both `vad_model` and `vad_config`.
To enable profiling, set `profiling=True`, but this will significantly slow down the program.
To use or disable feature masking/dropping based on RTTM files, set `use_rttm` to `True` or `False`.
There are two ways to use RTTM files: either mask the features (`rttm_mode=mask`) or drop the features (`rttm_mode=drop`).
For audio with long non-speech gaps between speech segments, dropping frames is recommended.
To normalize feature before masking, set `normalize=pre_norm`,
and set `normalize=post_norm` for masking before normalization.
To use a specific value for feature masking, set `feat_mask_val` to the desired value.
Default is `feat_mask_val=None`, where -16.635 will be used for `post_norm` and 0 will be used for `pre_norm`.
See more options in the `InferenceConfig` class.
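
For example, an illustrative invocation (paths are placeholders) that drops non-speech frames based on the RTTM output:
```bash
python speech_to_text_with_vad.py \
    manifest_filepath=/PATH/TO/MANIFEST.json \
    vad_model=vad_multilingual_frame_marblenet \
    asr_model=stt_en_conformer_ctc_large \
    vad_config=../conf/vad/frame_vad_inference_postprocess.yaml \
    use_rttm=True \
    rttm_mode=drop \
    normalize=pre_norm
```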
"""
import contextlib
import json
import os
import time
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import Callable, Optional
import torch
import yaml
from omegaconf import DictConfig, OmegaConf
from torch.profiler import ProfilerActivity, profile, record_function
from tqdm import tqdm
from nemo.collections.asr.data import feature_to_text_dataset
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig, word_error_rate
from nemo.collections.asr.models import ASRModel, EncDecClassificationModel
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
from nemo.collections.asr.parts.utils.vad_utils import (
generate_overlap_vad_seq,
generate_vad_segment_table,
get_vad_stream_status,
init_frame_vad_model,
init_vad_model,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
try:
from torch.cuda.amp import autocast
except ImportError:
@contextlib.contextmanager
def autocast(enabled=None):
yield
@dataclass
class InferenceConfig:
# Required configs
asr_model: Optional[str] = None # Path to a .nemo file or a pretrained NeMo model on NGC
vad_model: Optional[str] = None # Path to a .nemo file or a pretrained NeMo model on NGC
vad_config: Optional[str] = None # Path to a yaml file containing VAD post-processing configs
manifest_filepath: Optional[str] = None # Path to dataset's JSON manifest
audio_dir: Optional[str] = None # Path to a directory containing audio files, use this if no manifest is provided
use_rttm: bool = True # whether to use RTTM
rttm_mode: str = "mask" # how to use RTTM files, choices=[`mask`, `drop`]
feat_mask_val: Optional[float] = None # value used to mask features based on RTTM, set None to use defaults
normalize: Optional[
str
] = "post_norm" # whether and where to normalize audio feature, choices=[None, `pre_norm`, `post_norm`]
normalize_type: str = "per_feature" # how to determine mean and std used for normalization
normalize_audio_db: Optional[float] = None # set to normalize RMS DB of audio before extracting audio features
profiling: bool = False # whether to enable pytorch profiling
# General configs
batch_size: int = 1 # batch size for ASR. Feature extraction and VAD only support single sample per batch.
num_workers: int = 8
sample_rate: int = 16000
frame_unit_time_secs: float = 0.01 # unit time per frame in seconds, equal to `window_stride` in ASR configs, typically 10ms.
audio_type: str = "wav"
# Output settings, no need to change
output_dir: Optional[str] = None # will be automatically set by the program
output_filename: Optional[str] = None # will be automatically set by the program
pred_name_postfix: Optional[str] = None # If you need to use another model name, other than the standard one.
# Set to True to output language ID information
compute_langs: bool = False
# Decoding strategy for CTC models
ctc_decoding: CTCDecodingConfig = CTCDecodingConfig()
# Decoding strategy for RNNT models
rnnt_decoding: RNNTDecodingConfig = RNNTDecodingConfig(fused_batch_size=-1)
# VAD model type
vad_type: str = "frame" # which type of VAD to use, choices=[`frame`, `segment`]
@hydra_runner(config_name="InferenceConfig", schema=InferenceConfig)
def main(cfg):
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.output_dir is None:
cfg.output_dir = "./outputs"
output_dir = Path(cfg.output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
    # setup profiling; note that profiling will significantly increase the total runtime
if cfg.profiling:
logging.info("Profiling enabled")
profile_fn = profile
record_fn = record_function
else:
logging.info("Profiling disabled")
@contextlib.contextmanager
def profile_fn(*args, **kwargs):
yield
@contextlib.contextmanager
def record_fn(*args, **kwargs):
yield
input_manifest_file = prepare_inference_manifest(cfg)
if cfg.manifest_filepath is None:
cfg.manifest_filepath = str(input_manifest_file)
with profile_fn(
activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True, profile_memory=True
) as prof:
input_manifest_file = extract_audio_features(input_manifest_file, cfg, record_fn)
if cfg.vad_model is not None:
logging.info(f"Running VAD with model: {cfg.vad_model}")
input_manifest_file = run_vad_inference(input_manifest_file, cfg, record_fn)
if cfg.asr_model is not None:
logging.info(f"Running ASR with model: {cfg.asr_model}")
run_asr_inference(input_manifest_file, cfg, record_fn)
if cfg.profiling:
print("--------------------------------------------------------------------\n")
print(prof.key_averages().table(sort_by="cuda_time_total", row_limit=15))
print("--------------------------------------------------------------------\n")
logging.info("Done.")
def prepare_inference_manifest(cfg: DictConfig) -> str:
if cfg.audio_dir is not None and cfg.manifest_filepath is None:
manifest_data = []
for audio_file in Path(cfg.audio_dir).glob(f"**/*.{cfg.audio_type}"):
item = {"audio_filepath": str(audio_file.absolute()), "duration": 1000000, "offset": 0}
manifest_data.append(item)
parent_dir = Path(cfg.audio_dir)
else:
manifest_data = read_manifest(cfg.manifest_filepath)
parent_dir = Path(cfg.manifest_filepath).parent
new_manifest_data = []
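    # Resolve relative audio paths against the manifest (or audio) directory when possible,
    # fall back to user home expansion otherwise, and tag each item for inference.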
for item in manifest_data:
audio_file = Path(item["audio_filepath"])
if len(str(audio_file)) < 255 and not audio_file.is_file() and not audio_file.is_absolute():
new_audio_file = parent_dir / audio_file
if new_audio_file.is_file():
item["audio_filepath"] = str(new_audio_file.absolute())
else:
item["audio_filepath"] = os.path.expanduser(str(audio_file))
else:
item["audio_filepath"] = os.path.expanduser(str(audio_file))
item["label"] = "infer"
item["text"] = "-"
new_manifest_data.append(item)
new_manifest_filepath = str(Path(cfg.output_dir) / Path("temp_manifest_input.json"))
write_manifest(new_manifest_filepath, new_manifest_data)
return new_manifest_filepath
def extract_audio_features(manifest_filepath: str, cfg: DictConfig, record_fn: Callable) -> str:
file_list = []
manifest_data = []
out_dir = Path(cfg.output_dir) / Path("features")
new_manifest_filepath = str(Path(cfg.output_dir) / Path("temp_manifest_input_feature.json"))
if Path(new_manifest_filepath).is_file():
logging.info("Features already exist in output_dir, skipping feature extraction.")
return new_manifest_filepath
has_feat = False
with open(manifest_filepath, 'r', encoding='utf-8') as fin:
for line in fin.readlines():
item = json.loads(line.strip())
manifest_data.append(item)
file_list.append(Path(item['audio_filepath']).stem)
if "feature_file" in item:
has_feat = True
if has_feat:
logging.info("Features already exist in manifest, skipping feature extraction.")
return manifest_filepath
out_dir.mkdir(parents=True, exist_ok=True)
torch.set_grad_enabled(False)
if cfg.vad_model:
vad_model = init_frame_vad_model(cfg.vad_model)
else:
vad_model = EncDecClassificationModel.from_pretrained("vad_multilingual_marblenet")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vad_model = vad_model.to(device)
vad_model.eval()
vad_model.setup_test_data(
test_data_config={
'batch_size': 1,
'vad_stream': False,
'sample_rate': cfg.sample_rate,
'manifest_filepath': manifest_filepath,
'labels': ['infer',],
'num_workers': cfg.num_workers,
'shuffle': False,
'normalize_audio_db': cfg.normalize_audio_db,
}
)
logging.info(f"Extracting features on {len(file_list)} audio files...")
with record_fn("feat_extract_loop"):
for i, test_batch in enumerate(tqdm(vad_model.test_dataloader(), total=len(vad_model.test_dataloader()))):
test_batch = [x.to(vad_model.device) for x in test_batch]
with autocast():
with record_fn("feat_extract_infer"):
processed_signal, processed_signal_length = vad_model.preprocessor(
input_signal=test_batch[0], length=test_batch[1],
)
with record_fn("feat_extract_other"):
processed_signal = processed_signal.squeeze(0)[:, :processed_signal_length]
processed_signal = processed_signal.cpu()
outpath = os.path.join(out_dir, file_list[i] + ".pt")
outpath = str(Path(outpath).absolute())
torch.save(processed_signal, outpath)
manifest_data[i]["feature_file"] = outpath
del test_batch
logging.info(f"Features saved at: {out_dir}")
write_manifest(new_manifest_filepath, manifest_data)
return new_manifest_filepath
def run_vad_inference(manifest_filepath: str, cfg: DictConfig, record_fn: Callable) -> str:
logging.info("Start VAD inference pipeline...")
if cfg.vad_type == "segment":
vad_model = init_vad_model(cfg.vad_model)
elif cfg.vad_type == "frame":
vad_model = init_frame_vad_model(cfg.vad_model)
else:
raise ValueError(f"Unknown VAD type: {cfg.vad_type}, supported types: ['segment', 'frame']")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vad_model = vad_model.to(device)
vad_model.eval()
vad_yaml = Path(cfg.vad_config)
if not vad_yaml.is_file():
raise ValueError(f"VAD config file not found: {cfg.vad_config}")
with vad_yaml.open("r") as fp:
vad_cfg = yaml.safe_load(fp)
vad_cfg = DictConfig(vad_cfg)
test_data_config = {
'vad_stream': True,
'manifest_filepath': manifest_filepath,
'labels': ['infer',],
'num_workers': cfg.num_workers,
'shuffle': False,
'window_length_in_sec': vad_cfg.vad.parameters.window_length_in_sec,
'shift_length_in_sec': vad_cfg.vad.parameters.shift_length_in_sec,
}
vad_model.setup_test_data(test_data_config=test_data_config, use_feat=True)
pred_dir = Path(cfg.output_dir) / Path("vad_frame_pred")
if pred_dir.is_dir():
logging.info(f"VAD frame-level prediction already exists: {pred_dir}, skipped")
else:
logging.info("Generating VAD frame-level prediction")
pred_dir.mkdir(parents=True)
t0 = time.time()
pred_dir = generate_vad_frame_pred(
vad_model=vad_model,
window_length_in_sec=vad_cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=vad_cfg.vad.parameters.shift_length_in_sec,
manifest_vad_input=manifest_filepath,
out_dir=str(pred_dir),
use_feat=True,
record_fn=record_fn,
)
t1 = time.time()
logging.info(f"Time elapsed: {t1 - t0: .2f} seconds")
logging.info(
f"Finished generating VAD frame level prediction with window_length_in_sec={vad_cfg.vad.parameters.window_length_in_sec} and shift_length_in_sec={vad_cfg.vad.parameters.shift_length_in_sec}"
)
frame_length_in_sec = vad_cfg.vad.parameters.shift_length_in_sec
# overlap smoothing filter
if vad_cfg.vad.parameters.smoothing:
# Generate predictions with overlapping input segments. Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
        # smoothing_method can be either majority vote (median) or average (mean)
logging.info("Generating predictions with overlapping input segments")
t0 = time.time()
smoothing_pred_dir = generate_overlap_vad_seq(
frame_pred_dir=pred_dir,
smoothing_method=vad_cfg.vad.parameters.smoothing,
overlap=vad_cfg.vad.parameters.overlap,
window_length_in_sec=vad_cfg.vad.parameters.window_length_in_sec,
shift_length_in_sec=vad_cfg.vad.parameters.shift_length_in_sec,
num_workers=cfg.num_workers,
out_dir=vad_cfg.smoothing_out_dir,
)
        logging.info(
            f"Finished generating predictions with overlapping input segments, using smoothing_method={vad_cfg.vad.parameters.smoothing} and overlap={vad_cfg.vad.parameters.overlap}"
        )
t1 = time.time()
logging.info(f"Time elapsed: {t1 - t0: .2f} seconds")
pred_dir = smoothing_pred_dir
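        # After overlap smoothing, predictions are emitted at a fixed 10 ms frame resolution,
        # hence the hard-coded value below.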
frame_length_in_sec = 0.01
# Turn frame-wise prediction into speech intervals
logging.info(f"Generating segment tables with postprocessing params: {vad_cfg.vad.parameters.postprocessing}")
segment_dir_name = "vad_rttm"
for key, val in vad_cfg.vad.parameters.postprocessing.items():
segment_dir_name = segment_dir_name + "-" + str(key) + str(val)
segment_dir = Path(cfg.output_dir) / Path(segment_dir_name)
if segment_dir.is_dir():
logging.info(f"VAD speech segments already exists: {segment_dir}, skipped")
else:
segment_dir.mkdir(parents=True)
t0 = time.time()
segment_dir = generate_vad_segment_table(
vad_pred_dir=pred_dir,
postprocessing_params=vad_cfg.vad.parameters.postprocessing,
frame_length_in_sec=frame_length_in_sec,
num_workers=cfg.num_workers,
out_dir=segment_dir,
use_rttm=True,
)
t1 = time.time()
logging.info(f"Time elapsed: {t1 - t0: .2f} seconds")
logging.info("Finished generating RTTM files from VAD predictions.")
rttm_map = {}
for filepath in Path(segment_dir).glob("*.rttm"):
rttm_map[filepath.stem] = str(filepath.absolute())
manifest_data = read_manifest(manifest_filepath)
for i in range(len(manifest_data)):
key = Path(manifest_data[i]["audio_filepath"]).stem
manifest_data[i]["rttm_file"] = rttm_map[key]
new_manifest_filepath = str(Path(cfg.output_dir) / Path(f"temp_manifest_{segment_dir_name}.json"))
write_manifest(new_manifest_filepath, manifest_data)
return new_manifest_filepath
def generate_vad_frame_pred(
vad_model: EncDecClassificationModel,
window_length_in_sec: float,
shift_length_in_sec: float,
manifest_vad_input: str,
out_dir: str,
use_feat: bool = False,
record_fn: Callable = None,
) -> str:
"""
Generate VAD frame level prediction and write to out_dir
"""
time_unit = int(window_length_in_sec / shift_length_in_sec)
trunc = int(time_unit / 2)
trunc_l = time_unit - trunc
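    # For long audio split into overlapping windows, each chunk's prediction is truncated by
    # `trunc`/`trunc_l` frames according to its position ('start'/'next'/'end'), so that every
    # frame is written exactly once to the per-file output.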
all_len = 0
data = []
with open(manifest_vad_input, 'r', encoding='utf-8') as fin:
for line in fin.readlines():
file = json.loads(line)['audio_filepath'].split("/")[-1]
data.append(file.split(".wav")[0])
logging.info(f"Inference on {len(data)} audio files/json lines!")
status = get_vad_stream_status(data)
with record_fn("vad_infer_loop"):
for i, test_batch in enumerate(tqdm(vad_model.test_dataloader(), total=len(vad_model.test_dataloader()))):
test_batch = [x.to(vad_model.device) for x in test_batch]
with autocast():
with record_fn("vad_infer_model"):
if use_feat:
log_probs = vad_model(processed_signal=test_batch[0], processed_signal_length=test_batch[1])
else:
log_probs = vad_model(input_signal=test_batch[0], input_signal_length=test_batch[1])
with record_fn("vad_infer_other"):
probs = torch.softmax(log_probs, dim=-1)
if len(probs.shape) == 3:
# squeeze the batch dimension, since batch size is 1
probs = probs.squeeze(0) # [1,T,C] -> [T,C]
pred = probs[:, 1]
if window_length_in_sec == 0:
to_save = pred
elif status[i] == 'start':
to_save = pred[:-trunc]
elif status[i] == 'next':
to_save = pred[trunc:-trunc_l]
elif status[i] == 'end':
to_save = pred[trunc_l:]
else:
to_save = pred
to_save = to_save.cpu().tolist()
all_len += len(to_save)
outpath = os.path.join(out_dir, data[i] + ".frame")
with open(outpath, "a", encoding='utf-8') as fout:
for p in to_save:
fout.write(f'{p:0.4f}\n')
del test_batch
if status[i] == 'end' or status[i] == 'single':
all_len = 0
return out_dir
def init_asr_model(model_path: str) -> ASRModel:
if model_path.endswith('.nemo'):
logging.info(f"Using local ASR model from {model_path}")
asr_model = ASRModel.restore_from(restore_path=model_path)
elif model_path.endswith('.ckpt'):
asr_model = ASRModel.load_from_checkpoint(checkpoint_path=model_path)
else:
logging.info(f"Using NGC ASR model {model_path}")
asr_model = ASRModel.from_pretrained(model_name=model_path)
return asr_model
def run_asr_inference(manifest_filepath, cfg, record_fn) -> str:
logging.info("Start ASR inference pipeline...")
asr_model = init_asr_model(cfg.asr_model)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
asr_model = asr_model.to(device)
asr_model.eval()
# Setup decoding strategy
decode_function = None
decoder_type = cfg.get("decoder_type", None)
if not hasattr(asr_model, 'change_decoding_strategy'):
raise ValueError(f"ASR model {cfg.asr_model} does not support decoding strategy.")
if decoder_type is not None: # Hybrid model
if decoder_type == 'rnnt':
cfg.rnnt_decoding.fused_batch_size = -1
cfg.rnnt_decoding.compute_langs = cfg.compute_langs
asr_model.change_decoding_strategy(cfg.rnnt_decoding, decoder_type=decoder_type)
decode_function = asr_model.decoding.rnnt_decoder_predictions_tensor
elif decoder_type == 'ctc':
asr_model.change_decoding_strategy(cfg.ctc_decoding, decoder_type=decoder_type)
decode_function = asr_model.decoding.ctc_decoder_predictions_tensor
else:
raise ValueError(
f"Unknown decoder type for hybrid model: {decoder_type}, supported types: ['rnnt', 'ctc']"
)
elif hasattr(asr_model, 'joint'): # RNNT model
cfg.rnnt_decoding.fused_batch_size = -1
cfg.rnnt_decoding.compute_langs = cfg.compute_langs
asr_model.change_decoding_strategy(cfg.rnnt_decoding)
decode_function = asr_model.decoding.rnnt_decoder_predictions_tensor
else:
asr_model.change_decoding_strategy(cfg.ctc_decoding)
decode_function = asr_model.decoding.ctc_decoder_predictions_tensor
# Compute output filename
if cfg.output_filename is None:
# create default output filename
if cfg.pred_name_postfix is not None:
cfg.output_filename = cfg.manifest_filepath.replace('.json', f'_{cfg.pred_name_postfix}.json')
else:
tag = f"{cfg.normalize}_{cfg.normalize_type}"
if cfg.use_rttm:
vad_tag = Path(manifest_filepath).stem
vad_tag = vad_tag[len("temp_manifest_vad_rttm_") :]
if cfg.rttm_mode == "mask":
tag += f"-mask{cfg.feat_mask_val}-{vad_tag}"
else:
tag += f"-dropframe-{vad_tag}"
cfg.output_filename = cfg.manifest_filepath.replace('.json', f'-{Path(cfg.asr_model).stem}-{tag}.json')
cfg.output_filename = Path(cfg.output_dir) / Path(cfg.output_filename).name
logging.info("Setting up dataloader for ASR...")
data_config = {
"manifest_filepath": manifest_filepath,
"normalize": cfg.normalize,
"normalize_type": cfg.normalize_type,
"use_rttm": cfg.use_rttm,
"rttm_mode": cfg.rttm_mode,
"feat_mask_val": cfg.feat_mask_val,
"frame_unit_time_secs": cfg.frame_unit_time_secs,
}
logging.info(f"use_rttm = {cfg.use_rttm}, rttm_mode = {cfg.rttm_mode}, feat_mask_val = {cfg.feat_mask_val}")
if hasattr(asr_model, "tokenizer"):
dataset = feature_to_text_dataset.get_bpe_dataset(config=data_config, tokenizer=asr_model.tokenizer)
else:
data_config["labels"] = asr_model.decoder.vocabulary
dataset = feature_to_text_dataset.get_char_dataset(config=data_config)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=cfg.batch_size,
collate_fn=dataset._collate_fn,
drop_last=False,
shuffle=False,
num_workers=cfg.get('num_workers', 0),
pin_memory=cfg.get('pin_memory', False),
)
logging.info("Start transcribing...")
hypotheses = []
all_hypotheses = []
t0 = time.time()
with autocast():
with torch.no_grad():
with record_fn("asr_infer_loop"):
for test_batch in tqdm(dataloader, desc="Transcribing"):
with record_fn("asr_infer_model"):
outputs = asr_model.forward(
processed_signal=test_batch[0].to(device),
processed_signal_length=test_batch[1].to(device),
)
with record_fn("asr_infer_other"):
logits, logits_len = outputs[0], outputs[1]
current_hypotheses, all_hyp = decode_function(logits, logits_len, return_hypotheses=False,)
if isinstance(current_hypotheses, tuple) and len(current_hypotheses) == 2:
current_hypotheses = current_hypotheses[0] # handle RNNT output
hypotheses += current_hypotheses
if all_hyp is not None:
all_hypotheses += all_hyp
else:
all_hypotheses += current_hypotheses
del logits
del test_batch
t1 = time.time()
logging.info(f"Time elapsed: {t1 - t0: .2f} seconds")
logging.info("Finished transcribing.")
# Save output to manifest
input_manifest_data = read_manifest(manifest_filepath)
manifest_data = read_manifest(cfg.manifest_filepath)
if "text" not in manifest_data[0]:
has_groundtruth = False
else:
has_groundtruth = True
groundtruth = []
for i in range(len(manifest_data)):
if has_groundtruth:
groundtruth.append(manifest_data[i]["text"])
manifest_data[i]["pred_text"] = hypotheses[i]
manifest_data[i]["feature_file"] = input_manifest_data[i]["feature_file"]
if "rttm_file" in input_manifest_data[i]:
manifest_data[i]["feature_file"] = input_manifest_data[i]["feature_file"]
write_manifest(cfg.output_filename, manifest_data)
if not has_groundtruth:
hypotheses = " ".join(hypotheses)
words = hypotheses.split()
chars = "".join(words)
logging.info("-----------------------------------------")
logging.info(f"Number of generated characters={len(chars)}")
logging.info(f"Number of generated words={len(words)}")
logging.info("-----------------------------------------")
else:
wer_score = word_error_rate(hypotheses=hypotheses, references=groundtruth)
cer_score = word_error_rate(hypotheses=hypotheses, references=groundtruth, use_cer=True)
logging.info("-----------------------------------------")
logging.info(f"WER={wer_score:.4f}, CER={cer_score:.4f}")
logging.info("-----------------------------------------")
logging.info(f"ASR output saved at {cfg.output_filename}")
return cfg.output_filename
if __name__ == "__main__":
main()
| NeMo-main | examples/asr/asr_vad/speech_to_text_with_vad.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import tempfile
from argparse import ArgumentParser
import torch
from tqdm import tqdm
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.parts.submodules.rnnt_greedy_decoding import ONNXGreedyBatchedRNNTInfer
from nemo.utils import logging
"""
Script to compare the outputs of a NeMo Pytorch based RNNT Model and its ONNX exported representation.
# Compare a NeMo and ONNX model
python infer_transducer_onnx.py \
--nemo_model="<path to a .nemo file>" \
OR
--pretrained_model="<name of a pretrained model>" \
--onnx_encoder="<path to onnx encoder file>" \
--onnx_decoder="<path to onnx decoder-joint file>" \
--dataset_manifest="<Either pass a manifest file path here>" \
--audio_dir="<Or pass a directory containing preprocessed monochannel audio files>" \
--max_symbold_per_step=5 \
--batch_size=32 \
--log
# Export and compare a NeMo and ONNX model
python infer_transducer_onnx.py \
--nemo_model="<path to a .nemo file>" \
OR
--pretrained_model="<name of a pretrained model>" \
--export \
--dataset_manifest="<Either pass a manifest file path here>" \
--audio_dir="<Or pass a directory containing preprocessed monochannel audio files>" \
--max_symbold_per_step=5 \
--batch_size=32 \
--log
"""
def parse_arguments():
parser = ArgumentParser()
parser.add_argument(
"--nemo_model", type=str, default=None, required=False, help="Path to .nemo file",
)
parser.add_argument(
'--pretrained_model', type=str, default=None, required=False, help='Name of a pretrained NeMo file'
)
parser.add_argument('--onnx_encoder', type=str, default=None, required=False, help="Path to onnx encoder model")
parser.add_argument(
'--onnx_decoder', type=str, default=None, required=False, help="Path to onnx decoder + joint model"
)
parser.add_argument('--threshold', type=float, default=0.01, required=False)
parser.add_argument('--dataset_manifest', type=str, default=None, required=False, help='Path to dataset manifest')
parser.add_argument('--audio_dir', type=str, default=None, required=False, help='Path to directory of audio files')
parser.add_argument('--audio_type', type=str, default='wav', help='File format of audio')
parser.add_argument('--export', action='store_true', help="Whether to export the model into onnx prior to eval")
parser.add_argument('--max_symbold_per_step', type=int, default=5, required=False, help='Number of decoding steps')
parser.add_argument('--batch_size', type=int, default=32, help='Batchsize')
parser.add_argument('--log', action='store_true', help='Log the predictions between pytorch and onnx')
args = parser.parse_args()
return args
def assert_args(args):
if args.nemo_model is None and args.pretrained_model is None:
raise ValueError(
"`nemo_model` or `pretrained_model` must be passed ! It is required for decoding the RNNT tokens "
"and ensuring predictions match between Torch and ONNX."
)
if args.nemo_model is not None and args.pretrained_model is not None:
raise ValueError(
"`nemo_model` and `pretrained_model` cannot both be passed ! Only one can be passed to this script."
)
if args.export and (args.onnx_encoder is not None or args.onnx_decoder is not None):
raise ValueError("If `export` is set, then `onnx_encoder` and `onnx_decoder` arguments must be None")
if args.audio_dir is None and args.dataset_manifest is None:
raise ValueError("Both `dataset_manifest` and `audio_dir` cannot be None!")
if args.audio_dir is not None and args.dataset_manifest is not None:
raise ValueError("Submit either `dataset_manifest` or `audio_dir`.")
if int(args.max_symbold_per_step) < 1:
raise ValueError("`max_symbold_per_step` must be an integer > 0")
def export_model_if_required(args, nemo_model):
if args.export:
nemo_model.export("temp_rnnt.onnx")
args.onnx_encoder = "encoder-temp_rnnt.onnx"
args.onnx_decoder = "decoder_joint-temp_rnnt.onnx"
def resolve_audio_filepaths(args):
# get audio filenames
if args.audio_dir is not None:
        filepaths = list(glob.glob(os.path.join(args.audio_dir, f"*.{args.audio_type}")))
else:
# get filenames from manifest
filepaths = []
with open(args.dataset_manifest, 'r', encoding='utf-8') as f:
for line in f:
item = json.loads(line)
filepaths.append(item['audio_filepath'])
logging.info(f"\nTranscribing {len(filepaths)} files...\n")
return filepaths
def main():
args = parse_arguments()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Instantiate pytorch model
if args.nemo_model is not None:
nemo_model = args.nemo_model
nemo_model = ASRModel.restore_from(nemo_model, map_location=device) # type: ASRModel
nemo_model.freeze()
elif args.pretrained_model is not None:
nemo_model = args.pretrained_model
nemo_model = ASRModel.from_pretrained(nemo_model, map_location=device) # type: ASRModel
nemo_model.freeze()
else:
raise ValueError("Please pass either `nemo_model` or `pretrained_model` !")
if torch.cuda.is_available():
nemo_model = nemo_model.to('cuda')
export_model_if_required(args, nemo_model)
# Instantiate RNNT Decoding loop
encoder_model = args.onnx_encoder
decoder_model = args.onnx_decoder
max_symbols_per_step = args.max_symbold_per_step
decoding = ONNXGreedyBatchedRNNTInfer(encoder_model, decoder_model, max_symbols_per_step)
audio_filepath = resolve_audio_filepaths(args)
# Evaluate Pytorch Model (CPU/GPU)
actual_transcripts = nemo_model.transcribe(audio_filepath, batch_size=args.batch_size)[0]
# Evaluate ONNX model
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in audio_filepath:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': 'nothing'}
fp.write(json.dumps(entry) + '\n')
config = {'paths2audio_files': audio_filepath, 'batch_size': args.batch_size, 'temp_dir': tmpdir}
nemo_model.preprocessor.featurizer.dither = 0.0
nemo_model.preprocessor.featurizer.pad_to = 0
temporary_datalayer = nemo_model._setup_transcribe_dataloader(config)
all_hypothesis = []
for test_batch in tqdm(temporary_datalayer, desc="ONNX Transcribing"):
input_signal, input_signal_length = test_batch[0], test_batch[1]
input_signal = input_signal.to(device)
input_signal_length = input_signal_length.to(device)
# Acoustic features
processed_audio, processed_audio_len = nemo_model.preprocessor(
input_signal=input_signal, length=input_signal_length
)
# RNNT Decoding loop
hypotheses = decoding(audio_signal=processed_audio, length=processed_audio_len)
# Process hypothesis (map char/subword token ids to text)
hypotheses = nemo_model.decoding.decode_hypothesis(hypotheses) # type: List[str]
# Extract text from the hypothesis
texts = [h.text for h in hypotheses]
all_hypothesis += texts
del processed_audio, processed_audio_len
del test_batch
if args.log:
for pt_transcript, onnx_transcript in zip(actual_transcripts, all_hypothesis):
print(f"Pytorch Transcripts : {pt_transcript}")
print(f"ONNX Transcripts : {onnx_transcript}")
print()
    # Measure error rate between onnx and pytorch transcripts
pt_onnx_cer = word_error_rate(all_hypothesis, actual_transcripts, use_cer=True)
assert pt_onnx_cer < args.threshold, "Threshold violation !"
print("Character error rate between Pytorch and ONNX :", pt_onnx_cer)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/export/transducer/infer_transducer_onnx.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import tempfile
from argparse import ArgumentParser
import torch
from omegaconf import OmegaConf
from tqdm import tqdm
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.parts.submodules.rnnt_greedy_decoding import TorchscriptGreedyBatchedRNNTInfer
from nemo.utils import logging
"""
Script to compare the outputs of a NeMo Pytorch based RNNT Model and its Torchscript exported representation.
# Compare a NeMo and Torchscript model
python infer_transducer_ts.py \
--nemo_model="<path to a .nemo file>" \
OR
--pretrained_model="<name of a pretrained model>" \
--ts_encoder="<path to ts encoder file>" \
--ts_decoder="<path to ts decoder-joint file>" \
--ts_cfg="<path to a export ts model's config file>" \
--dataset_manifest="<Either pass a manifest file path here>" \
--audio_dir="<Or pass a directory containing preprocessed monochannel audio files>" \
--max_symbold_per_step=5 \
--batch_size=32 \
--log
# Export and compare a NeMo and Torchscript model
python infer_transducer_ts.py \
--nemo_model="<path to a .nemo file>" \
OR
--pretrained_model="<name of a pretrained model>" \
--export \
--dataset_manifest="<Either pass a manifest file path here>" \
--audio_dir="<Or pass a directory containing preprocessed monochannel audio files>" \
--max_symbold_per_step=5 \
--batch_size=32 \
--log
"""
def parse_arguments():
parser = ArgumentParser()
parser.add_argument(
"--nemo_model", type=str, default=None, required=False, help="Path to .nemo file",
)
parser.add_argument(
'--pretrained_model', type=str, default=None, required=False, help='Name of a pretrained NeMo file'
)
parser.add_argument('--ts_encoder', type=str, default=None, required=False, help="Path to ts encoder model")
parser.add_argument(
'--ts_decoder', type=str, default=None, required=False, help="Path to ts decoder + joint model"
)
parser.add_argument(
'--ts_cfg', type=str, default=None, required=False, help='Path to the yaml config of the exported model'
)
parser.add_argument('--threshold', type=float, default=0.01, required=False)
parser.add_argument('--dataset_manifest', type=str, default=None, required=False, help='Path to dataset manifest')
parser.add_argument('--audio_dir', type=str, default=None, required=False, help='Path to directory of audio files')
parser.add_argument('--audio_type', type=str, default='wav', help='File format of audio')
parser.add_argument(
'--export', action='store_true', help="Whether to export the model into torchscript prior to eval"
)
parser.add_argument('--max_symbold_per_step', type=int, default=5, required=False, help='Number of decoding steps')
parser.add_argument('--batch_size', type=int, default=32, help='Batchsize')
parser.add_argument('--log', action='store_true', help='Log the predictions between pytorch and torchscript')
args = parser.parse_args()
return args
def assert_args(args):
if args.nemo_model is None and args.pretrained_model is None:
raise ValueError(
"`nemo_model` or `pretrained_model` must be passed ! It is required for decoding the RNNT tokens "
"and ensuring predictions match between Torch and Torchscript."
)
if args.nemo_model is not None and args.pretrained_model is not None:
raise ValueError(
"`nemo_model` and `pretrained_model` cannot both be passed ! Only one can be passed to this script."
)
if args.ts_cfg is None:
raise ValueError(
"Must provide the yaml config of the exported model. You can obtain it by loading the "
"nemo model and then using OmegaConf.save(model.cfg, 'cfg.yaml')"
)
if args.export and (args.ts_encoder is not None or args.ts_decoder is not None):
raise ValueError("If `export` is set, then `ts_encoder` and `ts_decoder` arguments must be None")
if args.audio_dir is None and args.dataset_manifest is None:
raise ValueError("Both `dataset_manifest` and `audio_dir` cannot be None!")
if args.audio_dir is not None and args.dataset_manifest is not None:
raise ValueError("Submit either `dataset_manifest` or `audio_dir`.")
if int(args.max_symbold_per_step) < 1:
raise ValueError("`max_symbold_per_step` must be an integer > 0")
def export_model_if_required(args, nemo_model):
if args.export:
nemo_model.export(output="temp_rnnt.ts", check_trace=True)
OmegaConf.save(nemo_model.cfg, "ts_cfg.yaml")
args.ts_encoder = "encoder-temp_rnnt.ts"
args.ts_decoder = "decoder_joint-temp_rnnt.ts"
args.ts_cfg = "ts_cfg.yaml"
def resolve_audio_filepaths(args):
# get audio filenames
if args.audio_dir is not None:
        filepaths = list(glob.glob(os.path.join(args.audio_dir, f"*.{args.audio_type}")))
else:
# get filenames from manifest
filepaths = []
with open(args.dataset_manifest, 'r', encoding='utf-8') as f:
for line in f:
item = json.loads(line)
filepaths.append(item['audio_filepath'])
logging.info(f"\nTranscribing {len(filepaths)} files...\n")
return filepaths
def main():
args = parse_arguments()
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Instantiate pytorch model
if args.nemo_model is not None:
nemo_model = args.nemo_model
nemo_model = ASRModel.restore_from(nemo_model, map_location=device) # type: ASRModel
nemo_model.freeze()
elif args.pretrained_model is not None:
nemo_model = args.pretrained_model
nemo_model = ASRModel.from_pretrained(nemo_model, map_location=device) # type: ASRModel
nemo_model.freeze()
else:
raise ValueError("Please pass either `nemo_model` or `pretrained_model` !")
if torch.cuda.is_available():
nemo_model = nemo_model.to('cuda')
export_model_if_required(args, nemo_model)
# Instantiate RNNT Decoding loop
encoder_model = args.ts_encoder
decoder_model = args.ts_decoder
ts_cfg = OmegaConf.load(args.ts_cfg)
max_symbols_per_step = args.max_symbold_per_step
decoding = TorchscriptGreedyBatchedRNNTInfer(encoder_model, decoder_model, ts_cfg, device, max_symbols_per_step)
audio_filepath = resolve_audio_filepaths(args)
# Evaluate Pytorch Model (CPU/GPU)
actual_transcripts = nemo_model.transcribe(audio_filepath, batch_size=args.batch_size)[0]
# Evaluate Torchscript model
with tempfile.TemporaryDirectory() as tmpdir:
with open(os.path.join(tmpdir, 'manifest.json'), 'w', encoding='utf-8') as fp:
for audio_file in audio_filepath:
entry = {'audio_filepath': audio_file, 'duration': 100000, 'text': 'nothing'}
fp.write(json.dumps(entry) + '\n')
config = {'paths2audio_files': audio_filepath, 'batch_size': args.batch_size, 'temp_dir': tmpdir}
nemo_model.preprocessor.featurizer.dither = 0.0
nemo_model.preprocessor.featurizer.pad_to = 0
temporary_datalayer = nemo_model._setup_transcribe_dataloader(config)
all_hypothesis = []
for test_batch in tqdm(temporary_datalayer, desc="Torchscript Transcribing"):
input_signal, input_signal_length = test_batch[0], test_batch[1]
input_signal = input_signal.to(device)
input_signal_length = input_signal_length.to(device)
# Acoustic features
processed_audio, processed_audio_len = nemo_model.preprocessor(
input_signal=input_signal, length=input_signal_length
)
# RNNT Decoding loop
hypotheses = decoding(audio_signal=processed_audio, length=processed_audio_len)
# Process hypothesis (map char/subword token ids to text)
hypotheses = nemo_model.decoding.decode_hypothesis(hypotheses) # type: List[str]
# Extract text from the hypothesis
texts = [h.text for h in hypotheses]
all_hypothesis += texts
del processed_audio, processed_audio_len
del test_batch
if args.log:
for pt_transcript, ts_transcript in zip(actual_transcripts, all_hypothesis):
print(f"Pytorch Transcripts : {pt_transcript}")
print(f"Torchscript Transcripts : {ts_transcript}")
print()
    # Measure error rate between torchscript and pytorch transcripts
pt_ts_cer = word_error_rate(all_hypothesis, actual_transcripts, use_cer=True)
assert pt_ts_cer < args.threshold, "Threshold violation !"
print("Character error rate between Pytorch and Torchscript :", pt_ts_cer)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/export/transducer/infer_transducer_ts.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Adapting the model
python train_asr_adapter.py \
--config-path="../conf/asr_adapters" \
--config-name="asr_adaptation.yaml" \
model.pretrained_model=null \
model.nemo_model=null \
model.adapter.adapter_name=<Unique adapter name> \
model.adapter.adapter_type="<linear, tiny_attn, or others from config sub-sections of `adapter`>" \
model.adapter.adapter_module_name=<null, or str module. Type: encoder, decoder, joint, or multiple with + between them> \
model.adapter.linear.in_features=<dimension of the layer outputs of the model> \
model.adapter.linear.dim=32 \
model.adapter.linear.dropout=0.0 \
model.train_ds.manifest_filepath=<Path to manifest> \
model.train_ds.batch_size=16 \
model.validation_ds.manifest_filepath=<Path to manifest> \
model.validation_ds.batch_size=16 \
model.optim.lr=0.001 \
model.optim.weight_decay=0.0 \
model.optim.sched.warmup_steps=100 \
trainer.max_steps=300 \
trainer.devices=1 \
trainer.precision=32 \
exp_manager.exp_dir=<Some directory for experiment manager>
# Hyper Parameter Search
python train_asr_adapter.py \
--config-path="../conf/asr_adapters" \
--config-name="asr_adaptation_hp.yaml" \
-m \
model.pretrained_model=null \
model.nemo_model=null \
model.adapter.adapter_name=<Unique adapter name> \
model.adapter.adapter_type="<linear, tiny_attn, or others from config sub-sections of `adapter`>" \
model.adapter.adapter_module_name=<null, or str module. Type: encoder, decoder, joint, or multiple with + between them> \
model.adapter.linear.in_features=<dimension of the layer outputs of the model> \
model.train_ds.manifest_filepath=<Path to manifest> \
model.train_ds.batch_size=16 \
model.validation_ds.manifest_filepath=<Path to manifest> \
model.validation_ds.batch_size=16 \
exp_manager.exp_dir="<some directory>" \
exp_manager.create_wandb_logger=true \
exp_manager.wandb_logger_kwargs.project="<Project Name>" \
++delete_ckpt_after_train=True
# Fine-tune a model
While adaptation is very efficient for low-resource datasets, it imposes several restrictions -
- The vocabulary of the new dataset must be supported by the pre-existing vocabulary or tokenizer.
If tokens exist outside this scope, the adapter will have to learn UNK tokens (or fail entirely
for character based models).
- As a consequence of the above, the language of the new dataset must be the same as the original model.
There is ongoing research to enable more sophisticated adapters for other languages.
When adapters cannot be readily used due to the above limitations, fine-tuning may be a better alternative.
For documentation on fine-tuning a model, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/configs.html#fine-tuning-configurations
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html
"""
import os
from dataclasses import is_dataclass
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf, open_dict
from nemo.collections.asr.models import ASRModel
from nemo.core import adapter_mixins
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import clean_exp_ckpt, exp_manager
def update_model_config_to_support_adapter(model_cfg, current_cfg):
with open_dict(model_cfg):
# Override prediction logging in config
model_cfg.log_prediction = current_cfg.model.get('log_prediction', False)
# Update encoder adapter compatible config
adapter_metadata = adapter_mixins.get_registered_adapter(model_cfg.encoder._target_)
if adapter_metadata is not None:
model_cfg.encoder._target_ = adapter_metadata.adapter_class_path
def update_model_cfg(original_cfg, new_cfg):
with open_dict(original_cfg), open_dict(new_cfg):
# force inject some keys into the config
whitelist_keys = ['num_workers', 'pin_memory']
for wkey in whitelist_keys:
if wkey in new_cfg:
original_cfg[wkey] = new_cfg[wkey]
print(f"Injecting white listed key `{wkey}` into config")
# drop keys which don't exist in old config and are not whitelisted
new_keys = list(new_cfg.keys())
for key in new_keys:
if key not in original_cfg:
new_cfg.pop(key)
print("Removing unavailable key from config :", key)
new_cfg = OmegaConf.merge(original_cfg, new_cfg)
return new_cfg
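# Illustration of the merge above with made-up values: if the restored train_ds config is
# {'manifest_filepath': 'old.json', 'batch_size': 32} and the override passes
# {'manifest_filepath': 'new.json', 'pin_memory': True, 'unknown_key': 1}, then
# 'pin_memory' is injected (whitelisted), 'unknown_key' is dropped because the original
# config has no such key, and the merge yields
# {'manifest_filepath': 'new.json', 'batch_size': 32, 'pin_memory': True}.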
def add_global_adapter_cfg(model, global_adapter_cfg):
# Convert to DictConfig from dict or Dataclass
if is_dataclass(global_adapter_cfg):
global_adapter_cfg = OmegaConf.structured(global_adapter_cfg)
if not isinstance(global_adapter_cfg, DictConfig):
global_adapter_cfg = DictConfig(global_adapter_cfg)
# Update the model.cfg with information about the new adapter global cfg
with open_dict(global_adapter_cfg), open_dict(model.cfg):
if 'adapters' not in model.cfg:
model.cfg.adapters = OmegaConf.create({})
# Add the global config for adapters to the model's internal config
model.cfg.adapters[model.adapter_global_cfg_key] = global_adapter_cfg
# Update all adapter modules (that already exist) with this global adapter config
model.update_adapter_cfg(model.cfg.adapters)
@hydra_runner(config_path="../conf/asr_adapters", config_name="asr_adaptation.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if cfg.model.pretrained_model is None and cfg.model.nemo_model is None:
raise ValueError("Either set `cfg.model.nemo_model` or `cfg.model.pretrained_model`")
if cfg.model.pretrained_model is not None and cfg.model.nemo_model is not None:
raise ValueError("Cannot set both `cfg.model.nemo_model` and `cfg.model.pretrained_model`. Select one only.")
trainer = pl.Trainer(**cfg.trainer)
exp_log_dir = exp_manager(trainer, cfg.get("exp_manager", None))
if cfg.model.pretrained_model is not None:
model_cfg = ASRModel.from_pretrained(cfg.model.pretrained_model, return_config=True)
update_model_config_to_support_adapter(model_cfg, cfg)
model = ASRModel.from_pretrained(cfg.model.pretrained_model, override_config_path=model_cfg, trainer=trainer)
else:
model_cfg = ASRModel.restore_from(cfg.model.nemo_model, return_config=True)
update_model_config_to_support_adapter(model_cfg, cfg)
model = ASRModel.restore_from(cfg.model.nemo_model, override_config_path=model_cfg, trainer=trainer)
# Setup model for finetuning (train and validation only)
cfg.model.train_ds = update_model_cfg(model.cfg.train_ds, cfg.model.train_ds)
model.setup_training_data(cfg.model.train_ds)
if 'validation_ds' in cfg.model:
cfg.model.validation_ds = update_model_cfg(model.cfg.validation_ds, cfg.model.validation_ds)
model.setup_multiple_validation_data(cfg.model.validation_ds)
# Setup optimizer
model.setup_optimization(cfg.model.optim)
# Setup spec augmentation
if 'spec_augment' in cfg.model:
model.spec_augmentation = model.from_config_dict(cfg.model.spec_augment)
else:
model.spec_augmentation = None
del model.cfg.spec_augment
# Setup adapters
with open_dict(cfg.model.adapter):
        # Extract the name of the adapter (must be given for training)
adapter_name = cfg.model.adapter.pop("adapter_name")
adapter_type = cfg.model.adapter.pop("adapter_type")
adapter_module_name = cfg.model.adapter.pop("adapter_module_name", None)
adapter_state_dict_name = cfg.model.adapter.pop("adapter_state_dict_name", None)
# Resolve the config of the specified `adapter_type`
if adapter_type not in cfg.model.adapter.keys():
raise ValueError(
f"Adapter type ({adapter_type}) config could not be found. Adapter setup config - \n"
f"{OmegaConf.to_yaml(cfg.model.adapter)}"
)
adapter_type_cfg = cfg.model.adapter[adapter_type]
print(f"Found `{adapter_type}` config :\n" f"{OmegaConf.to_yaml(adapter_type_cfg)}")
# Augment adapter name with module name, if not provided by user
if adapter_module_name is not None and ':' not in adapter_name:
adapter_name = f'{adapter_module_name}:{adapter_name}'
# Extract the global adapter config, if provided
adapter_global_cfg = cfg.model.adapter.pop(model.adapter_global_cfg_key, None)
if adapter_global_cfg is not None:
add_global_adapter_cfg(model, adapter_global_cfg)
model.add_adapter(adapter_name, cfg=adapter_type_cfg)
assert model.is_adapter_available()
# Disable all other adapters, enable just the current adapter.
model.set_enabled_adapters(enabled=False) # disable all adapters prior to training
model.set_enabled_adapters(adapter_name, enabled=True) # enable just one adapter by name
# First, Freeze all the weights of the model (not just encoder, everything)
model.freeze()
# Activate dropout() and other modules that depend on train mode.
model = model.train()
# Then, Unfreeze just the adapter weights that were enabled above (no part of encoder/decoder/joint/etc)
model.unfreeze_enabled_adapters()
# Update model config prior to training
model.cfg = model.cfg
# Finally, train model
trainer.fit(model)
# Save the adapter state dict
if adapter_state_dict_name is not None:
state_path = exp_log_dir if exp_log_dir is not None else os.getcwd()
ckpt_path = os.path.join(state_path, "checkpoints")
if os.path.exists(ckpt_path):
state_path = ckpt_path
state_path = os.path.join(state_path, adapter_state_dict_name)
        # Save the adapter modules in a separate file
model.save_adapters(str(state_path))
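        # The file written above holds only the adapter weights, not the full model. A minimal
        # sketch of how they could be restored later (assumes `load_adapters`, the counterpart
        # of the `save_adapters` call used above, is available on the restored model):
        #   restored = ASRModel.restore_from(cfg.model.nemo_model, override_config_path=model_cfg)
        #   restored.load_adapters(str(state_path))
        #   restored.set_enabled_adapters(adapter_name, enabled=True)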
if 'delete_ckpt_after_train' in cfg:
delete_ckpt_after_train = cfg.delete_ckpt_after_train
if delete_ckpt_after_train:
# Remove PTL ckpt file, and potentially also remove .nemo file to conserve storage space.
clean_exp_ckpt(exp_log_dir, remove_ckpt=True, remove_nemo=False)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_adapters/train_asr_adapter.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to analyze the results of the experiments from a CSV file.
Basic Usage:
To perform analysis on the adapters experiment results::
python scoring_and_analysis.py \
--csv <path to cleaned result csv file> \
--dataset_type_column <column name in csv with the dataset types>
To perform analysis on the finetuning experiment results::
$ python scoring_and_analysis.py \
--csv <path to csv> \
--dataset_type_column <column name in csv with the dataset types> \
-ft
Advanced Usage:
    The script by default shows only the best hyperparameters for each criterion.
    To see a ranking of all the hyperparameters for each criterion, in order to visualize
    how the results were selected, use the `--show_analysis` flag. Moreover, instead of
displaying only the best hyperparameters, you can use the `--topk` flag to show the
top *k* hyperparameters::
$ python scoring_and_analysis.py \
--csv <path to csv> \
--dataset_type_column <dataset_group_column_name> \
--show_analysis \
--topk 3
Instead of doing the analysis over all possible combinations of all the hyperparameters,
you can restrict the search space only to a subset of experiments. This can be achieved
by the `-uargs` and the `-cargs` flag for the unconstrained and the constrained
experiments respectively::
$ python scoring_and_analysis.py \
--csv <path to csv> \
--dataset_type_column <dataset_group_column_name> \
-cargs 'Adapter Position' encoder \
-cargs 'Adapter Dropout' 0.5 \
-uargs 'Train Steps' 5000
"""
import argparse
from typing import Tuple
import numpy as np
import pandas as pd
# CHANGE: Specify the column names and their attributes to consider for the selection
# of the best results
UNCONSTRAINED_EXP_KEY = {'name': 'WER: Test', 'attribute': min}
CONSTRAINED_EXP_KEY = {'name': 'Score', 'attribute': max}
# CHANGE: Hyperparameters of the best run to display in the output
ADAPTER_HYPERPARAMTER_COLUMNS = ['Adapter Dimensions', 'Adapter Dropout', 'Stochastic Depth', 'Train Steps']
FINETUNING_HYPERPARAMETER_COLUMNS = ['Train Steps', 'Learning Rate']
# CHANGE: Column name for the test set WER on the new domain
TEST_WER_COLUMN = 'WER: Test'
# CHANGE: Column name for the test set WER on the original domain
ORIGINAL_TEST_WER_COLUMN = 'WER: Librispeech Test Other'
# CHANGE: Based on the experiment type, get the column name for categorizing the results
EXP_CATEGORY_KEY = {'adapters': 'Adapter Position', 'finetuning': 'Frozen Module'}
# CHANGE: Maximum absolute WER degradation allowed in the original domain
MAX_DEGRADATION_PERCENTAGE = 3
# CHANGE: Baseline WER in the original domain
BASELINE_ORIGINAL_WER = 5.118
# CHANGE: Baseline WER in the domain to be adapted
# The keys of this dictionary should cover all values of the `dataset_type_column`
BASELINE_ADAPTED_WER = {
'irish_english_male': 20.690,
'midlands_english_female': 9.612,
'midlands_english_male': 11.253,
'northern_english_female': 11.108,
'northern_english_male': 10.180,
'scottish_english_female': 12.309,
'scottish_english_male': 11.942,
'southern_english_female': 9.701,
'southern_english_male': 10.215,
'welsh_english_female': 8.514,
'welsh_english_male': 11.463,
}
def calculate_original_scale(original_wer):
wer_do = abs(original_wer - BASELINE_ORIGINAL_WER)
return (MAX_DEGRADATION_PERCENTAGE - min(MAX_DEGRADATION_PERCENTAGE, wer_do)) / MAX_DEGRADATION_PERCENTAGE
def calculate_adapt_werr(adapted_wer, group):
return max(BASELINE_ADAPTED_WER[group] - adapted_wer, 0) / BASELINE_ADAPTED_WER[group]
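# Worked example of the composite score (hypothetical WER numbers, not taken from any run):
# an adapted model that reaches 15.0 WER on 'irish_english_male' (baseline 20.690) while
# drifting to 6.0 WER on the original domain (baseline 5.118, budget of 3 absolute points):
#   calculate_original_scale(6.0)                    = (3 - min(3, 0.882)) / 3   ~= 0.7060
#   calculate_adapt_werr(15.0, 'irish_english_male') = 5.690 / 20.690            ~= 0.2750
#   Score = 0.7060 * 0.2750                                                      ~= 0.1942
# A run that degrades the original domain by 3 or more absolute WER points scores 0,
# no matter how much it improves on the adapted domain.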
def parse_results(filepath: str, dataset_type_col: str, exp_type: str) -> pd.DataFrame:
"""Calculate the scoring metric for each experiment
Args:
filepath: Path to the csv file containing the results
dataset_type_col: Name of the column containing the dataset types
exp_type: Type of experiments in the csv file
Returns:
        Dataframe of all the experiments with scores
"""
global UNCONSTRAINED_EXP_KEY, TEST_WER_COLUMN
df = pd.read_csv(filepath)
df.drop(columns=['Model', 'Model Size'], errors='ignore', inplace=True) # Drop columns if exists
if exp_type == 'finetuning':
df['Frozen Module'] = df['Frozen Module'].replace('-', 'null')
if 'Score' not in df:
# Calculate the selection scoring metric
df['Original Scale'] = df.apply(lambda x: calculate_original_scale(x[ORIGINAL_TEST_WER_COLUMN]), axis=1)
df['Adapt WERR'] = df.apply(lambda x: calculate_adapt_werr(x[TEST_WER_COLUMN], x[dataset_type_col]), axis=1)
df['Score'] = df['Original Scale'] * df['Adapt WERR']
# Round off the values to 4 decimal places
df = df.round({'Original Scale': 4, 'Adapt WERR': 4, 'Score': 4})
# Save the updated csv with scores
df.to_csv(filepath, index=False)
return df
def display_analysis_table(df_analysis: pd.DataFrame, key_info: dict):
"""Display the analysis table used to select the best hyperparameter configuration
Args:
df_analysis: Dataframe of the analysis table
key_info: Dictionary containing the name of the column and the attribute to use for analysis
"""
# Calculate each column length for the table
column_lengths = {x: max(len(x), df_analysis[x].map(str).apply(len).max()) for x in df_analysis.columns}
print(' | '.join([f'{x:^{column_lengths[x]}}' for x in df_analysis.columns]))
print('-' * sum([column_lengths[x] + 3 for x in df_analysis.columns]))
for idx in range(len(df_analysis)):
row_str = []
for column in df_analysis.columns:
row_str.append(f'{df_analysis.iloc[idx][column]:^{column_lengths[column]}}')
print(' | '.join(row_str))
def display_results(df_all: pd.DataFrame, category: str, best_config: pd.Series, dataset_type_col: str, exp_type: str):
"""Display the Test and the Librispeech Test Other WER for the best configuration.
Args:
df_all: Dataframe of all the experiments
category: Adapter position or frozen module in case of finetuning
best_config: Best hyperparameter configurations
dataset_type_col: Name of the column containing the dataset types
exp_type: Type of experiments in the dataframe
"""
test_wer_values, ls_test_other_wer_values = [], []
print(f'{dataset_type_col:^25} | {TEST_WER_COLUMN:<20} | {ORIGINAL_TEST_WER_COLUMN:<20}')
print('-' * 70)
for dtype in df_all[dataset_type_col].unique():
df_filtered = df_all[(df_all[dataset_type_col] == dtype) & (df_all[EXP_CATEGORY_KEY[exp_type]] == category)]
for col in ADAPTER_HYPERPARAMTER_COLUMNS if exp_type == 'adapters' else FINETUNING_HYPERPARAMETER_COLUMNS:
df_filtered = df_filtered[df_filtered[col] == best_config[col]]
if len(df_filtered) == 0:
continue
if len(df_filtered) > 1:
            raise ValueError(f'More than one row found for dtype: {dtype} and category: {category}')
dtype_data = df_filtered.iloc[0]
test_wer_values.append(dtype_data[TEST_WER_COLUMN])
ls_test_other_wer_values.append(dtype_data[ORIGINAL_TEST_WER_COLUMN])
print(
f'{dtype_data[dataset_type_col]:^25} | {dtype_data[TEST_WER_COLUMN]:^20} | {dtype_data[ORIGINAL_TEST_WER_COLUMN]:^20}'
)
print('-' * 70)
print(f'{"Average":^25} | {np.mean(test_wer_values):^20} | {np.mean(ls_test_other_wer_values):^20}')
print('\n')
def get_best_config(
df_exp: pd.DataFrame, dataset_type_col: str, key_info: dict, topk: int, show_analysis: bool, exp_type: str,
):
"""Get the best hyperparameter configuration for a given subset of experiments.
Args:
df_exp: Dataframe of all experiments
dataset_type_col: Name of the column containing the dataset types
key_info: Dictionary containing the name of the column and the attribute to use for analysis
topk: Number of top-k results to display
show_analysis: Whether to display the analysis table
exp_type: Type of experiments in the dataframe
"""
# Columns to consider for hyperparameter combinations
hyperparamter_cols = ADAPTER_HYPERPARAMTER_COLUMNS if exp_type == 'adapters' else FINETUNING_HYPERPARAMETER_COLUMNS
# Columns to display in the analysis table
analysis_columns = list(set([key_info['name'], TEST_WER_COLUMN, ORIGINAL_TEST_WER_COLUMN]))
df_analyze = df_exp.drop(
columns=[
x
for x in df_exp.columns
if x not in set(hyperparamter_cols + [EXP_CATEGORY_KEY[exp_type]] + analysis_columns)
]
)
for category in df_exp[EXP_CATEGORY_KEY[exp_type]].unique():
# Group all hyperparameter configurations and do mean across all speakers
df_category_mean = (
df_analyze[df_analyze[EXP_CATEGORY_KEY[exp_type]] == category]
.groupby(hyperparamter_cols, as_index=False)[analysis_columns]
.mean()
)
# Sort the values by the key in order to get the top-k results
df_category_mean.sort_values(
by=key_info['name'], ascending=True if key_info['attribute'].__qualname__ == 'min' else False, inplace=True
)
print('=' * len(category))
print(category.upper())
print('=' * len(category) + '\n')
if show_analysis:
display_analysis_table(df_category_mean, key_info)
print('\n')
for idx in range(min(topk, len(df_category_mean))):
print('-----')
print(f'Top-{idx + 1}')
print('-----')
df_category_best = df_category_mean.iloc[idx]
            print('\nHyperparameters')
print('---------------\n')
for hyperparamter in hyperparamter_cols + [key_info['name']]:
print(f'{hyperparamter:<20}: {df_category_best[hyperparamter]}')
print()
print('\nResults')
print('-------\n')
display_results(df_exp, category, df_category_best, dataset_type_col, exp_type)
def analyze_results(
df_exp: pd.DataFrame,
fixed_hyperparameters: list,
title: str,
dataset_type_col: str,
key_info: dict,
topk: int,
show_analysis: bool,
exp_type: str,
):
"""Perform analysis on a given subset of experiments
Args:
df_exp: Dataframe of all experiments
fixed_hyperparameters: List of pair of hyperparamters and their values to fix in the analysis
title: Title of the analysis (for logging)
dataset_type_col: Name of the column containing the dataset types
key_info: Dictionary containing the name of the column and the attribute to use for analysis
topk: Number of top-k results to display
show_analysis: Whether to display the analysis table
exp_type: Type of experiments in the dataframe
"""
# Filter experiments based on the fixed hyperparameters
for hyperparameter_name, hyperparameter_value in fixed_hyperparameters:
df_exp = df_exp[df_exp[hyperparameter_name] == hyperparameter_value]
# Perform analysis
print('+' * len(title))
print(title)
print('+' * len(title) + '\n')
get_best_config(df_exp, dataset_type_col, key_info, topk, show_analysis, exp_type)
print()
def __validate_arg_type(arg):
"""Validate the type of the command line argument value."""
dtype = float if '.' in arg else int
try:
return dtype(arg)
except ValueError:
return arg
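# A few illustrative conversions of the helper above (derived from its logic rather than
# any documented contract):
#   __validate_arg_type('0.5')     -> 0.5        (float, because the string contains '.')
#   __validate_arg_type('5000')    -> 5000       (int)
#   __validate_arg_type('encoder') -> 'encoder'  (falls back to the raw string)
#   __validate_arg_type('1e-3')    -> '1e-3'     (no '.', int() fails, so it stays a string)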
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--csv', required=True, help='Path to the cleaned results CSV file')
parser.add_argument(
'-dtype',
'--dataset_type_column',
required=True,
help='Name of the column containing the dataset type. Example: For SLR83 it is "Group", for GSC it is "Dataset Size"',
)
parser.add_argument(
'-cargs',
'--constrained_args',
nargs=2,
action='append',
default=[],
type=__validate_arg_type,
help='Hyperparameters to fix for the constrained experiments',
)
parser.add_argument(
'-uargs',
'--unconstrained_args',
nargs=2,
action='append',
default=[],
type=__validate_arg_type,
help='Hyperparameters to fix for the unconstrained experiments',
)
parser.add_argument('-k', '--topk', type=int, default=1, help='Number of top-k results to display')
parser.add_argument(
'-ft', '--finetuning', action='store_true', help='True if the CSV contains Finetuning experiments'
)
parser.add_argument(
'-s', '--show_analysis', action='store_true', help='Show the key values of all the dataset types'
)
args = parser.parse_args()
# Get the experiment type
exp_type = 'finetuning' if args.finetuning else 'adapters'
# Parse CSV file
df = parse_results(args.csv, args.dataset_type_column, exp_type)
# Perform analysis - Constrained Adaptation
analyze_results(
df,
args.constrained_args,
'Constrained Experiment Results',
args.dataset_type_column,
CONSTRAINED_EXP_KEY,
args.topk,
args.show_analysis,
exp_type,
)
# Perform analysis - Unconstrained Adaptation
analyze_results(
df,
args.unconstrained_args,
'Unconstrained Experiment Results',
args.dataset_type_column,
UNCONSTRAINED_EXP_KEY,
args.topk,
args.show_analysis,
exp_type,
)
| NeMo-main | examples/asr/asr_adapters/scoring_and_analysis.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Evaluate an adapted model
python eval_asr_adapter.py \
--config-path="../conf/asr_adapters" \
--config-name="asr_adaptation.yaml" \
model.pretrained_model=null \
model.nemo_model=null \
model.adapter.adapter_name=<name of the adapter to evaluate> \
model.test_ds.manifest_filepath="<Path to validation/test manifest>" \
model.test_ds.batch_size=16 \
model.train_ds.manifest_filepath=null \
model.validation_ds.manifest_filepath=null \
model.adapter.in_features=null \
trainer.devices=1 \
trainer.precision=32
# Pretrained Models
For documentation on existing pretrained models, please visit -
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/results.html
"""
import pytorch_lightning as pl
from omegaconf import OmegaConf, open_dict
from nemo.collections.asr.models import ASRModel
from nemo.core import adapter_mixins
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
def update_encoder_config_to_support_adapter(model_cfg):
with open_dict(model_cfg):
adapter_metadata = adapter_mixins.get_registered_adapter(model_cfg.encoder._target_)
if adapter_metadata is not None:
model_cfg.encoder._target_ = adapter_metadata.adapter_class_path
def update_model_cfg(original_cfg, new_cfg):
with open_dict(new_cfg):
        # drop keys which don't exist in the old config
new_keys = list(new_cfg.keys())
for key in new_keys:
if key not in original_cfg:
new_cfg.pop(key)
print("Removing unavailable key from config :", key)
new_cfg = OmegaConf.merge(original_cfg, new_cfg)
return new_cfg
@hydra_runner(config_path="../conf/asr_adapters", config_name="asr_adaptation.yaml")
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if cfg.model.pretrained_model is None and cfg.model.nemo_model is None:
raise ValueError("Either set `cfg.model.nemo_model` or `cfg.model.pretrained_model`")
if cfg.model.pretrained_model is not None and cfg.model.nemo_model is not None:
raise ValueError("Cannot set `cfg.model.nemo_model` and `cfg.model.pretrained_model`. Select one only.")
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if cfg.model.pretrained_model is not None:
model_cfg = ASRModel.from_pretrained(cfg.model.pretrained_model, return_config=True)
update_encoder_config_to_support_adapter(model_cfg)
model = ASRModel.from_pretrained(cfg.model.pretrained_model, override_config_path=model_cfg, trainer=trainer)
else:
model_cfg = ASRModel.restore_from(cfg.model.nemo_model, return_config=True)
update_encoder_config_to_support_adapter(model_cfg)
model = ASRModel.restore_from(cfg.model.nemo_model, override_config_path=model_cfg, trainer=trainer)
# Setup model for finetuning (train and validation only)
cfg.model.test_ds = update_model_cfg(model.cfg.test_ds, cfg.model.test_ds)
# Call the dataloaders and optimizer + scheduler
model.setup_multiple_test_data(cfg.model.test_ds)
# Setup adapters
with open_dict(cfg.model.adapter):
adapter_name = cfg.model.adapter.pop("adapter_name", None)
# Disable all other adapters, enable just the current adapter.
model.set_enabled_adapters(enabled=False) # disable all adapters prior to training
if adapter_name is not None:
model.set_enabled_adapters(adapter_name, enabled=True) # enable just one adapter by name if provided
# First, Freeze all the weights of the model (not just encoder, everything)
model.freeze()
# Finally, train model
trainer.test(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/asr/asr_adapters/eval_asr_adapter.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import FastPitchModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="fastpitch_align_v1.05")
def main(cfg):
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = FastPitchModel(cfg=cfg.model, trainer=trainer)
lr_logger = pl.callbacks.LearningRateMonitor()
epoch_time_logger = LogEpochTimeCallback()
trainer.callbacks.extend([lr_logger, epoch_time_logger])
trainer.fit(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/fastpitch.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import AlignerModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path='conf', config_name='aligner')
def main(cfg):
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get('exp_manager', None))
model = AlignerModel(cfg=cfg.model, trainer=trainer)
trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()]) # noqa
trainer.fit(model)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/aligner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models.radtts import RadTTSModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
def freeze(model):
for p in model.parameters():
p.requires_grad = False
def unfreeze(model):
for p in model.parameters():
p.requires_grad = True
def prepare_model_weights(model, unfreeze_modules):
if unfreeze_modules != 'all':
model.freeze() # freeze everything
logging.info("module freezed, about to unfreeze modules to be trained")
if 'dur' in unfreeze_modules and hasattr(model.model, 'dur_pred_layer'):
logging.info("Training duration prediction")
unfreeze(model.model.dur_pred_layer)
if 'f0' in unfreeze_modules and hasattr(model.model, 'f0_pred_module'):
logging.info("Training F0 prediction")
unfreeze(model.model.f0_pred_module)
if 'energy' in unfreeze_modules and hasattr(model.model, 'energy_pred_module'):
logging.info("Training energy prediction")
unfreeze(model.model.energy_pred_module)
if 'vpred' in unfreeze_modules and hasattr(model.model, 'v_pred_module'):
logging.info("Training voiced prediction")
unfreeze(model.model.v_pred_module)
            if hasattr(model.model, 'v_embeddings'):
logging.info("Training voiced embeddings")
unfreeze(model.model.v_embeddings)
if 'unvbias' in unfreeze_modules and hasattr(model.model, 'unvoiced_bias_module'):
logging.info("Training unvoiced bias")
unfreeze(model.model.unvoiced_bias_module)
else:
logging.info("Training everything")
@hydra_runner(config_path="conf", config_name="rad-tts_dec")
def main(cfg):
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get('exp_manager', None))
model = RadTTSModel(cfg=cfg.model, trainer=trainer).cuda()
if cfg.model.load_from_checkpoint:
model.maybe_init_from_pretrained_checkpoint(cfg=cfg.model)
prepare_model_weights(model, cfg.model.trainerConfig.unfreeze_modules)
lr_logger = pl.callbacks.LearningRateMonitor()
epoch_time_logger = LogEpochTimeCallback()
trainer.callbacks.extend([lr_logger, epoch_time_logger])
trainer.fit(model.cuda())
if __name__ == '__main__':
main()
| NeMo-main | examples/tts/radtts.py |