python_code | repo_name | file_path
---|---|---|
stringlengths 0-679k | stringlengths 9-41 | stringlengths 6-149
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018 The Google AI Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Most of the code here has been copied from:
# https://github.com/google-research/albert/blob/master/create_pretraining_data.py
# with some modifications.
import collections
import os
import subprocess
import time
from typing import Any
import numpy as np
import torch
from omegaconf import OmegaConf
from omegaconf.dictconfig import DictConfig
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
get_train_valid_test_split_,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import deallocate_indexed_dataset_memory
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import make_dataset as make_indexed_dataset
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import make_indexed_dataset_compatibility
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.lm_adapted_t5_dataset import T5LMAdaptedDataset
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
DSET_TYPE_BERT = 'standard_bert'
DSET_TYPE_ICT = 'ict'
DSET_TYPE_T5 = 't5'
DSET_TYPE_T5_LM = 't5_prefix_lm'
DSET_TYPE_BART = 'bart'
DSET_TYPE_UL2 = 'ul2'
DSET_TYPES = [DSET_TYPE_BERT, DSET_TYPE_ICT, DSET_TYPE_T5, DSET_TYPE_T5_LM, DSET_TYPE_BART, DSET_TYPE_UL2]
def compile_helper():
"""Compile helper function ar runtime. Make sure this
is invoked on a single process."""
path = os.path.abspath(os.path.dirname(__file__))
ret = subprocess.run(['make', '-C', path])
if ret.returncode != 0:
logging.error("Making C++ dataset helpers module failed, exiting.")
import sys
sys.exit(1)
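# Illustrative usage note (added comment): the helpers are typically compiled once on
# the global rank-0 process before any rank imports them, mirroring the pattern used
# further down in this file:
#   if is_global_rank_zero():
#       compile_helper()
#   from nemo.collections.nlp.data.language_modeling.megatron import helpers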
def get_a_and_b_segments(sample, np_rng):
"""Divide sample into a and b segments."""
# Number of sentences in the sample.
n_sentences = len(sample)
# Make sure we always have two sentences.
assert n_sentences > 1, 'make sure each sample has at least two sentences.'
# First part:
# `a_end` is how many sentences go into the `A`.
a_end = 1
if n_sentences >= 3:
# Note that randint in numpy excludes the upper bound.
a_end = np_rng.randint(1, n_sentences)
tokens_a = []
for j in range(a_end):
tokens_a.extend(sample[j])
# Second part:
tokens_b = []
for j in range(a_end, n_sentences):
tokens_b.extend(sample[j])
# Random next:
is_next_random = False
if np_rng.random() < 0.5:
is_next_random = True
tokens_a, tokens_b = tokens_b, tokens_a
return tokens_a, tokens_b, is_next_random
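# Illustrative example (added comment): for a seeded RandomState and a three-sentence
# sample such as
#   sample = [[10, 11], [12, 13], [14, 15]]
# a_end could be sampled as 2, giving tokens_a == [10, 11, 12, 13] and
# tokens_b == [14, 15]; with probability 0.5 the two segments are swapped and
# is_next_random is returned as True.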
def truncate_segments(tokens_a, tokens_b, len_a, len_b, max_num_tokens, np_rng):
"""Truncates a pair of sequences to a maximum sequence length."""
# print(len_a, len_b, max_num_tokens)
assert len_a > 0
if len_a + len_b <= max_num_tokens:
return False
while len_a + len_b > max_num_tokens:
if len_a > len_b:
len_a -= 1
tokens = tokens_a
else:
len_b -= 1
tokens = tokens_b
if np_rng.random() < 0.5:
del tokens[0]
else:
tokens.pop()
return True
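# Illustrative example (added comment): with len_a=5, len_b=4 and max_num_tokens=7,
# the loop drops one token from segment A (the longer one) and then one from segment B,
# each removed from a randomly chosen end, and the function returns True to signal
# that truncation occurred.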
def create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id):
"""Merge segments A and B, add [CLS] and [SEP] and build tokentypes."""
tokens = []
tokentypes = []
# [CLS].
tokens.append(cls_id)
tokentypes.append(0)
# Segment A.
for token in tokens_a:
tokens.append(token)
tokentypes.append(0)
# [SEP].
tokens.append(sep_id)
tokentypes.append(0)
# Segment B.
for token in tokens_b:
tokens.append(token)
tokentypes.append(1)
if tokens_b:
# [SEP].
tokens.append(sep_id)
tokentypes.append(1)
return tokens, tokentypes
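# Illustrative example (added comment), assuming cls_id=101 and sep_id=102:
#   tokens_a = [7, 8], tokens_b = [9]
#   tokens     -> [101, 7, 8, 102, 9, 102]
#   tokentypes -> [0,   0, 0, 0,   1, 1]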
MaskedLmInstance = collections.namedtuple("MaskedLmInstance", ["index", "label"])
def is_start_piece(piece):
"""Check if the current word piece is the starting piece. (BERT)"""
# When a word has been split into
# WordPieces, the first token does not have any marker and any subsequent
# tokens are prefixed with ##. So whenever we see the ## token, we
# append it to the previous set of word indexes.
return not piece.startswith("##")
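# Illustrative example (added comment): a word such as "unaffable" tokenized by
# WordPiece into ["un", "##aff", "##able"] has only "un" as a start piece; the
# "##"-prefixed pieces get appended to the previous word's candidate indexes in the
# masking routine below.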
def create_masked_lm_predictions(
tokens,
vocab_id_list,
vocab_id_to_token_dict,
masked_lm_prob,
cls_id,
sep_id,
mask_id,
max_predictions_per_seq,
np_rng,
max_ngram_size=3,
mean_ngram_size=None,
whole_word_masking=True,
favor_long_ngrams=False,
permutation=False,
geometric_dist=False,
masking_style="bert",
tokenizer_type="wordpiece",
skip_masking_id=None,
):
"""Creates the predictions for the masked LM objective.
Note: Tokens here are vocab ids and not text tokens."""
if not geometric_dist and mean_ngram_size is not None:
raise ValueError(f"Mean ngram size is only supported for geometric distribution.")
cand_indexes = []
# Note(mingdachen): We create a list for recording if the piece is
# the starting piece of current token, where 1 means true, so that
# on-the-fly whole word masking is possible.
token_boundary = [0] * len(tokens)
skip_mask_idx = None # Store the index of token that cannot be masked.
for (i, token) in enumerate(tokens):
if token == skip_masking_id:
skip_mask_idx = i
if token == cls_id or token == sep_id:
token_boundary[i] = 1
continue
# Whole Word Masking means that we mask all of the wordpieces
# corresponding to an original word.
#
# Note that Whole Word Masking does *not* change the training code
# at all -- we still predict each WordPiece independently, softmaxed
# over the entire vocabulary.
if whole_word_masking and len(cand_indexes) >= 1 and not is_start_piece(vocab_id_to_token_dict[token]):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
if is_start_piece(vocab_id_to_token_dict[token]):
token_boundary[i] = 1
output_tokens = list(tokens)
masked_lm_positions = []
masked_lm_labels = []
if masked_lm_prob == 0:
return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary)
num_to_predict = min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob))))
if masking_style != "bert":
num_to_predict = max(1, num_to_predict)
if num_to_predict < 1:
logging.warning(
f'Number of tokens is {len(tokens)} and mask probability is {masked_lm_prob}; none of the tokens will be masked.'
)
ngrams = np.arange(1, max_ngram_size + 1, dtype=np.int64)
if not geometric_dist:
# Note(mingdachen):
# By default, we set the probabilities to favor shorter ngram sequences.
pvals = 1.0 / np.arange(1, max_ngram_size + 1)
pvals /= pvals.sum(keepdims=True)
if favor_long_ngrams:
pvals = pvals[::-1]
ngram_indexes = []
for idx in range(len(cand_indexes)):
ngram_index = {}
for n in ngrams:
# Skip this ngram if it contains the index of token that should not be masked.
# TODO: (sandeepsub) Generalize this to be a list of tokens that cannot be masked.
if skip_mask_idx is not None and skip_mask_idx >= idx and skip_mask_idx <= idx + n:
continue
ngram_index[n] = cand_indexes[idx : idx + n]
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
(masked_lms, masked_spans) = ([], [])
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip the current piece if it is already covered by LM masking or previous ngrams.
for index_set in cand_index_set[1]:
for index in index_set:
if index in covered_indexes:
continue
if not geometric_dist:
# Not all ngrams are available because of skip_masking_id that prevents a certain ID from being masked.
available_ngrams = list(cand_index_set.keys())
# n - 1 because pvals is 0-indexed and available ngrams are 1-indexed.
pvals_current = np.array([pvals[n - 1] for n in available_ngrams])
n = np_rng.choice(available_ngrams, p=pvals_current / pvals_current.sum(keepdims=True),)
else:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
# The expectation of a geometric distribution is E[X] = 1 / p
p = 1 / mean_ngram_size if mean_ngram_size is not None else 0.2
n = min(np_rng.geometric(p), max_ngram_size)
# n may not be in the candidate index set because of skip_masking_id.
# we try to find the nearest one in the candidate index set.
if n not in cand_index_set:
n = _truncate_to_nearest(cand_index_set, n)
index_set = sum(cand_index_set[n], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n == 0:
break
if n - 1 in cand_index_set:
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_token = None
if masking_style == "bert":
# 80% of the time, replace with [MASK]
if np_rng.random() < 0.8:
masked_token = mask_id
else:
# 10% of the time, keep original
if np_rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_id_list[np_rng.randint(0, len(vocab_id_list))]
elif masking_style == "t5":
masked_token = mask_id
elif masking_style == "bart":
masked_token = mask_id
else:
raise ValueError("invalid value of masking style")
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_spans.append(MaskedLmInstance(index=index_set, label=[tokens[index] for index in index_set]))
assert len(masked_lms) <= num_to_predict
np_rng.shuffle(ngram_indexes)
select_indexes = set()
if permutation:
if skip_masking_id is not None:
raise ValueError(f"permutation=True is not supported when skip_masking_id is not None.")
for cand_index_set in ngram_indexes:
if len(select_indexes) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip the current piece if it is already covered by LM masking or previous ngrams.
for index_set in cand_index_set[0]:
for index in index_set:
if index in covered_indexes or index in select_indexes:
continue
n = np.random.choice(
ngrams[: len(cand_index_set)],
p=pvals[: len(cand_index_set)] / pvals[: len(cand_index_set)].sum(keepdims=True),
)
index_set = sum(cand_index_set[n - 1], [])
n -= 1
while len(select_indexes) + len(index_set) > num_to_predict:
if n == 0:
break
index_set = sum(cand_index_set[n - 1], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(select_indexes) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes or index in select_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
select_indexes.add(index)
assert len(select_indexes) <= num_to_predict
select_indexes = sorted(select_indexes)
permute_indexes = list(select_indexes)
np_rng.shuffle(permute_indexes)
orig_token = list(output_tokens)
for src_i, tgt_i in zip(select_indexes, permute_indexes):
output_tokens[src_i] = orig_token[tgt_i]
masked_lms.append(MaskedLmInstance(index=src_i, label=orig_token[src_i]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
# Sort the spans by the index of their first token.
masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels, token_boundary, masked_spans)
def _truncate_to_nearest(cand_index_set, n):
"""Return the available ngram size in `cand_index_set` closest to `n`."""
min_dist = 9999
nearest = n
for key in cand_index_set:
if abs(key - n) < min_dist:
min_dist = abs(key - n)
nearest = key
return nearest
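# Illustrative example (added comment): with available ngram sizes
# cand_index_set.keys() == {2, 3, 5} and a sampled n of 7, the nearest available
# size, 5, is returned.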
def create_extreme_masked_lm_predictions(
tokens,
masked_lm_prob,
mask_id,
max_predictions_per_seq,
np_rng,
max_ngram_size=10,
min_ngram_size=2,
mean_ngram_size=5,
span_length_distribution=LengthDistribution.uniform,
skip_masking_id=None,
):
"""Creates the predictions for the extreme span-masking UL2 objective.
Note: Tokens here are vocab ids and not text tokens."""
output_tokens = list(tokens)
masked_lm_positions = []
masked_lm_labels = []
num_to_predict = int(min(max_predictions_per_seq, max(1, int(round(len(tokens) * masked_lm_prob)))))
# If the number of tokens to predict is less than the min ngram size, clamp the min ngram size down to it.
min_ngram_size = int(min(num_to_predict, min_ngram_size))
ngrams = np.arange(min_ngram_size, max_ngram_size + 1, dtype=np.int64)
if span_length_distribution == LengthDistribution.uniform:
pvals = np.array([1.0 / (max_ngram_size - min_ngram_size + 1)] * (max_ngram_size - min_ngram_size + 1))
ngram_indexes = []
if skip_masking_id is not None:
skip_mask_idx = None
for idx in range(len(tokens)):
if tokens[idx] == skip_masking_id:
skip_mask_idx = idx
break
else:
skip_mask_idx = None
cand_indexes = [[i] for i in range(len(tokens))]
for idx in range(len(cand_indexes)):
ngram_index = {}
for n in ngrams:
# Skip this ngram if it contains the index of token that should not be masked.
# TODO: (sandeepsub) Generalize this to be a list of tokens that cannot be masked.
if skip_mask_idx is not None and skip_mask_idx >= idx and skip_mask_idx <= idx + n:
continue
ngram_index[n] = cand_indexes[idx : idx + n]
ngram_indexes.append(ngram_index)
np_rng.shuffle(ngram_indexes)
(masked_lms, masked_spans) = ([], [])
covered_indexes = set()
for cand_index_set in ngram_indexes:
if len(masked_lms) >= num_to_predict:
break
if not cand_index_set:
continue
# Note(mingdachen):
# Skip the current piece if it is already covered by LM masking or previous ngrams.
for index_set in cand_index_set[min_ngram_size]:
for index in index_set:
if index in covered_indexes:
continue
if span_length_distribution == LengthDistribution.uniform:
available_ngrams = list(cand_index_set.keys())
# pvals is 0-indexed while the available ngram sizes start at min_ngram_size.
pvals_current = np.array([pvals[n - min_ngram_size] for n in available_ngrams])
n = np_rng.choice(available_ngrams, p=pvals_current / pvals_current.sum(keepdims=True),)
elif span_length_distribution == LengthDistribution.geometric:
# Sampling "n" from the geometric distribution and clipping it to
# the max_ngrams. Using p=0.2 default from the SpanBERT paper
# https://arxiv.org/pdf/1907.10529.pdf (Sec 3.1)
# The expectation of a geometric distribution is E[X] = 1 / p
p = 1 / mean_ngram_size if mean_ngram_size is not None else 0.2
n = min(np_rng.geometric(p), max_ngram_size)
# n may not be in the candidate index set because of skip_masking_id.
# we try to find the nearest one in the candidate index set.
if n not in cand_index_set:
n = _truncate_to_nearest(cand_index_set, n)
n = int(np.clip(n, min_ngram_size, max_ngram_size))
elif span_length_distribution == LengthDistribution.truncated_normal:
# Sampling "n" from a truncated normal distribution.
mu = mean_ngram_size if mean_ngram_size is not None else (max_ngram_size - min_ngram_size) // 2
n = int(np.clip(np_rng.normal(loc=mu, scale=np.sqrt(mu)), min_ngram_size, max_ngram_size))
if n not in cand_index_set:
n = _truncate_to_nearest(cand_index_set, n)
n = int(np.clip(n, min_ngram_size, max_ngram_size))
index_set = sum(cand_index_set[n], [])
n -= 1
# Note(mingdachen):
# Repeatedly looking for a candidate that does not exceed the
# maximum number of predictions by trying shorter ngrams.
while len(masked_lms) + len(index_set) > num_to_predict:
if n < min_ngram_size:
break
if n in cand_index_set:
index_set = sum(cand_index_set[n], [])
n -= 1
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
output_tokens[index] = mask_id
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_spans.append(MaskedLmInstance(index=index_set, label=[tokens[index] for index in index_set]))
assert len(masked_lms) <= num_to_predict
np_rng.shuffle(ngram_indexes)
masked_lms = sorted(masked_lms, key=lambda x: x.index)
# Sort the spans by the index of their first token.
masked_spans = sorted(masked_spans, key=lambda x: x.index[0])
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels, masked_spans)
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, dtype=np.int64)
# Labels and loss mask.
labels = [-1] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
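# Illustrative example (added comment), assuming pad_id=0 and max_seq_length=8:
#   tokens = [101, 7, 103, 102], tokentypes = [0, 0, 0, 0]
#   masked_positions = [2], masked_labels = [8]
#   tokens_np       -> [101, 7, 103, 102, 0, 0, 0, 0]
#   padding_mask_np -> [1, 1, 1, 1, 0, 0, 0, 0]
#   labels_np       -> [-1, -1, 8, -1, -1, -1, -1, -1]
#   loss_mask_np    -> [0, 0, 1, 0, 0, 0, 0, 0]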
def get_dataset(
indexed_dataset,
start_index,
end_index,
cfg,
trainer,
num_samples,
masked_lm_prob,
short_seq_prob,
binary_head,
max_seq_length_dec,
dataset_type='standard_bert',
tokenizer=None,
max_ngram_size=3,
mean_ngram_size=None,
geometric_dist=True,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
delete_mask_prob=0, # This flag is used in BART only, and will not have effect on T5/BERT
respect_document_boundaries=True,
**kwargs,
):
if dataset_type not in DSET_TYPES:
raise ValueError("Invalid dataset_type: ", dataset_type)
# from nemo.collections.nlp.data.language_modeling.megatron.ict_dataset import ICTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bart_dataset import BARTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import BertDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
from nemo.collections.nlp.data.language_modeling.megatron.ul2_dataset import UL2Dataset
if dataset_type == DSET_TYPE_ICT:
raise NotImplementedError("ICT dataset is not implemented yet.")
'''
dataset = ICTDataset(
block_dataset=indexed_dataset,
title_dataset=title_dataset,
query_in_block_prob=args.query_in_block_prob,
use_one_sent_docs=args.use_one_sent_docs,
binary_head=binary_head,
**kwargs,
)
'''
elif dataset_type == DSET_TYPE_T5:
assert tokenizer is not None, "Tokenizer is required for T5 dataset"
logging.info("Instatiating T5 Dataset ...")
documents = np.arange(start=start_index, stop=end_index, step=1, dtype=np.int32)
dataset = T5Dataset(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
max_seq_length_dec=max_seq_length_dec,
short_seq_prob=short_seq_prob,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
documents=documents,
respect_document_boundaries=respect_document_boundaries,
**kwargs,
)
elif dataset_type == DSET_TYPE_BERT:
logging.info("Instatiating BERT Dataset ...")
dataset = BertDataset(
cfg=cfg,
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
binary_head=binary_head,
tokenizer=tokenizer,
**kwargs,
)
elif dataset_type == DSET_TYPE_T5_LM:
documents = np.arange(start=start_index, stop=end_index, step=1, dtype=np.int32)
logging.info("Instatiating T5 Prefix-LM Dataset ...")
dataset = T5LMAdaptedDataset(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
documents=documents,
indexed_dataset=indexed_dataset,
num_samples=num_samples,
max_seq_length_encoder=kwargs["max_seq_length"],
max_seq_length_decoder=max_seq_length_dec,
**kwargs,
)
elif dataset_type == DSET_TYPE_BART:
assert tokenizer is not None, "Tokenizer is required for BART dataset"
documents = np.arange(start=start_index, stop=end_index, step=1, dtype=np.int32)
logging.info("Instatiating BART Dataset ...")
dataset = BARTDataset(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
delete_mask_prob=delete_mask_prob,
documents=documents,
respect_document_boundaries=respect_document_boundaries,
**kwargs,
)
elif dataset_type == DSET_TYPE_UL2:
assert tokenizer is not None, "Tokenizer is required for UL2 dataset"
documents = np.arange(start=start_index, stop=end_index, step=1, dtype=np.int32)
logging.info("Instatiating UL2 Dataset ...")
extreme_ngram_span_length_distribution = cfg.data.get(
"extreme_ngram_span_length_distribution", "truncated_normal"
)
ngram_span_length_distribution = cfg.data.get("ngram_span_length_distribution", "geometric")
if extreme_ngram_span_length_distribution == "truncated_normal":
extreme_ngram_span_length_distribution = LengthDistribution.truncated_normal
elif extreme_ngram_span_length_distribution == "uniform":
extreme_ngram_span_length_distribution = LengthDistribution.uniform
elif extreme_ngram_span_length_distribution == "geometric":
extreme_ngram_span_length_distribution = LengthDistribution.geometric
if ngram_span_length_distribution == "truncated_normal":
ngram_span_length_distribution = LengthDistribution.truncated_normal
elif ngram_span_length_distribution == "uniform":
ngram_span_length_distribution = LengthDistribution.uniform
elif ngram_span_length_distribution == "geometric":
ngram_span_length_distribution = LengthDistribution.geometric
dataset = UL2Dataset(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
indexed_dataset=indexed_dataset,
masked_lm_prob=masked_lm_prob,
max_seq_length_dec=max_seq_length_dec,
short_seq_prob=short_seq_prob,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
ngram_span_length_distribution=ngram_span_length_distribution,
extreme_ngram_span_length_distribution=extreme_ngram_span_length_distribution,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
extreme_masked_lm_prob=cfg.data.get("extreme_masked_lm_prob", 0.5),
extreme_max_ngram_size=cfg.data.get("extreme_max_ngram_size", 128),
extreme_mean_ngram_size=cfg.data.get("extreme_mean_ngram_size", 64),
extreme_min_ngram_size=cfg.data.get("extreme_min_ngram_size", 32),
prefix_lm_pivot_mean=cfg.data.get("prefix_lm_pivot_mean", 0.25),
respect_document_boundaries=respect_document_boundaries,
documents=documents,
**kwargs,
)
else:
raise NotImplementedError(f"Dataset type {dataset_type} not fully implemented.")
return dataset
def build_dataset(
cfg,
trainer,
data_prefix,
data_impl,
num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
name,
dataset_type,
tokenizer,
max_ngram_size,
mean_ngram_size,
geometric_dist,
permutation,
whole_word_masking,
favor_long_ngrams,
delete_mask_prob,
respect_document_boundaries,
data_impl_kwargs,
):
def _build_dataset(current_data_prefix, current_num_samples):
indexed_dataset = get_indexed_dataset_(
current_data_prefix, data_impl, skip_warmup, data_impl_kwargs=data_impl_kwargs
)
total_num_of_documents = indexed_dataset.sizes.shape[0]
# Print stats about the splits.
logging.info(' > dataset split:')
logging.info(' Total number of {} documents: {}'.format(name, total_num_of_documents))
if hasattr(indexed_dataset, 'get_doc_idx'):
doc_idx_ptr = indexed_dataset.get_doc_idx()
indexed_dataset.set_doc_idx(doc_idx_ptr[0:total_num_of_documents])
kwargs = dict(
name=name,
data_prefix=current_data_prefix,
num_epochs=None,
max_num_samples=int(current_num_samples),
max_seq_length=max_seq_length,
seed=seed,
)
dataset = get_dataset(
indexed_dataset,
0,
total_num_of_documents,
cfg,
trainer,
current_num_samples,
masked_lm_prob,
short_seq_prob,
binary_head,
max_seq_length_dec,
dataset_type,
tokenizer,
max_ngram_size,
mean_ngram_size,
geometric_dist,
permutation,
whole_word_masking,
favor_long_ngrams,
delete_mask_prob,
respect_document_boundaries,
**kwargs,
)
# Set the original pointer so dataset remains the main dataset.
if hasattr(indexed_dataset, 'set_doc_idx'):
indexed_dataset.set_doc_idx(doc_idx_ptr)
# Checks.
assert indexed_dataset.doc_idx[0] == 0
assert indexed_dataset.doc_idx.shape[0] == (total_num_of_documents + 1)
return dataset
if len(data_prefix) == 1:
return _build_dataset(data_prefix[0], num_samples)
else:
output = get_datasets_weights_and_num_samples(data_prefix, num_samples)
prefixes, weights, datasets_num_samples = output
datasets = []
for i in range(len(prefixes)):
dataset = _build_dataset(prefixes[i], datasets_num_samples[i])
datasets.append(dataset)
return BlendableDataset(datasets, weights, num_samples)
def build_train_valid_test_datasets(
cfg,
trainer,
data_prefix,
data_impl,
splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head=False,
max_seq_length_dec=None,
dataset_type='standard_bert',
tokenizer=None,
max_ngram_size=3,
mean_ngram_size=None,
geometric_dist=True,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
delete_mask_prob=0,
respect_document_boundaries=True,
data_impl_kwargs={},
):
# for VSC and text memmap we need to provide a tokenizer, if not given
if data_impl in ["text_mmap", "csv_mmap"]:
if "tokenizer" not in data_impl_kwargs:
if isinstance(data_impl_kwargs, DictConfig):
data_impl_kwargs = OmegaConf.to_object(data_impl_kwargs)
else:
# prevent updating the default
data_impl_kwargs = data_impl_kwargs.copy()
data_impl_kwargs["tokenizer"] = tokenizer
if not respect_document_boundaries and data_impl_kwargs != {}:
raise ValueError(
"respect_document_boundaries=False is not compatible with text_memmap and csv_memmap (data_impl_kwargs != {})"
)
if data_impl in ["mock"]:
logging.info(f'Initializing mock dataset, type {dataset_type}, for train, validate, and test')
if len(data_prefix) != 0:
# Files from this location will not be read; mock data will be generated instead.
logging.warning(f"Requested data_impl={data_impl}, so ignoring data_prefix setting: {data_prefix}")
if dataset_type == DSET_TYPE_T5:
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import MockT5Dataset
if tokenizer is None:
# Tokenizer is used to infer vocabulary size for mock data.
raise ValueError("Tokenizer is required for a mock T5 dataset")
train_ds = MockT5Dataset(
cfg,
tokenizer,
"train",
int(train_valid_test_num_samples[0]),
max_seq_length,
max_seq_length_dec,
seed,
)
valid_ds = MockT5Dataset(
cfg,
tokenizer,
"valid",
int(train_valid_test_num_samples[1]),
max_seq_length,
max_seq_length_dec,
seed,
)
test_ds = MockT5Dataset(
cfg, tokenizer, "test", int(train_valid_test_num_samples[2]), max_seq_length, max_seq_length_dec, seed,
)
return train_ds, valid_ds, test_ds
else:
raise NotImplementedError(f"Mock dataset is not implemented for requested type: {dataset_type}")
if isinstance(data_prefix, DictConfig):
assert (
data_prefix.get('train') is not None
and data_prefix.get('test') is not None
and data_prefix.get('validation') is not None
), f"Data prefix dictionary should have train, test and validation keys. data_prefix currently has only {data_prefix.keys()}"
if cfg.data.splits_string is not None:
logging.warning(cfg.data.splits_string + " ignored since data prefix is of type dictionary.")
train_ds = build_dataset(
cfg,
trainer,
data_prefix["train"],
data_impl,
int(train_valid_test_num_samples[0]),
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
"train",
dataset_type=dataset_type,
tokenizer=tokenizer,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
delete_mask_prob=delete_mask_prob,
respect_document_boundaries=respect_document_boundaries,
data_impl_kwargs=data_impl_kwargs,
)
validation_ds = build_dataset(
cfg,
trainer,
data_prefix["validation"],
data_impl,
int(train_valid_test_num_samples[1]),
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
"valid",
dataset_type=dataset_type,
tokenizer=tokenizer,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
delete_mask_prob=delete_mask_prob,
respect_document_boundaries=respect_document_boundaries,
data_impl_kwargs=data_impl_kwargs,
)
test_ds = build_dataset(
cfg,
trainer,
data_prefix["test"],
data_impl,
int(train_valid_test_num_samples[2]),
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
"test",
dataset_type=dataset_type,
tokenizer=tokenizer,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
delete_mask_prob=delete_mask_prob,
respect_document_boundaries=respect_document_boundaries,
data_impl_kwargs=data_impl_kwargs,
)
return train_ds, validation_ds, test_ds
else:
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix[0],
data_impl,
splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
dataset_type=dataset_type,
tokenizer=tokenizer,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
delete_mask_prob=delete_mask_prob,
respect_document_boundaries=respect_document_boundaries,
data_impl_kwargs=data_impl_kwargs,
)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
train_n, valid_n, test_n = map(sum, zip(*datasets_train_valid_test_num_samples))
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
cfg,
trainer,
prefixes[i],
data_impl,
splits_string,
datasets_train_valid_test_num_samples[i],
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
dataset_type=dataset_type,
tokenizer=tokenizer,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
delete_mask_prob=delete_mask_prob,
respect_document_boundaries=respect_document_boundaries,
data_impl_kwargs=data_impl_kwargs,
)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights, train_n)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_n)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights, test_n)
return (blending_train_dataset, blending_valid_dataset, blending_test_dataset)
def _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix,
data_impl,
splits_string,
train_valid_test_num_samples,
max_seq_length,
masked_lm_prob,
short_seq_prob,
seed,
skip_warmup,
binary_head,
max_seq_length_dec,
dataset_type='standard_bert',
tokenizer=None,
max_ngram_size=3,
mean_ngram_size=None,
geometric_dist=True,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
delete_mask_prob=0, # This flag is used in BART only, and will not have effect on T5/BERT
respect_document_boundaries=True,
data_impl_kwargs={},
):
# Indexed dataset.
indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup, data_impl_kwargs=data_impl_kwargs)
# if dataset_type == DSET_TYPE_ICT:
# title_dataset = get_indexed_dataset_(args.titles_data_path, data_impl, skip_warmup)
# Get start and end indices of train/valid/test into doc-idx
# Note that doc-idx is designed to be num-docs + 1 so we can
# easily iterate over it.
total_num_of_documents = indexed_dataset.doc_idx.shape[0] - 1
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
logging.info(' > dataset split:')
def print_split_stats(name, index):
logging.info(' {}:'.format(name))
logging.info(
' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])
)
start_index = indexed_dataset.doc_idx[splits[index]]
end_index = indexed_dataset.doc_idx[splits[index + 1]]
logging.info(
' sentence indices in [{}, {}) total of {} '
'sentences'.format(start_index, end_index, end_index - start_index)
)
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
# from nemo.collections.nlp.data.language_modeling.megatron.ict_dataset import ICTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bart_dataset import BARTDataset
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import BertDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
from nemo.collections.nlp.data.language_modeling.megatron.ul2_dataset import UL2Dataset
dataset = None
if splits[index + 1] > splits[index]:
# Get the pointer to the original doc-idx so we can set it later.
if hasattr(indexed_dataset, 'get_doc_idx'):
doc_idx_ptr = indexed_dataset.get_doc_idx()
# Slice the doc-idx
start_index = splits[index]
# Add +1 so we can index into the dataset to get the upper bound.
end_index = splits[index + 1] + 1
# New doc_idx view.
if hasattr(indexed_dataset, 'set_doc_idx'):
indexed_dataset.set_doc_idx(doc_idx_ptr[start_index:end_index])
# Build the dataset accordingly.
kwargs = dict(
name=name,
data_prefix=data_prefix,
num_epochs=None,
max_num_samples=int(train_valid_test_num_samples[index]),
max_seq_length=max_seq_length,
seed=seed,
)
dataset = get_dataset(
indexed_dataset,
splits[index],
splits[index + 1],
cfg,
trainer,
int(train_valid_test_num_samples[index]),
masked_lm_prob,
short_seq_prob,
binary_head,
max_seq_length_dec,
dataset_type,
tokenizer,
max_ngram_size,
mean_ngram_size,
geometric_dist,
permutation,
whole_word_masking,
favor_long_ngrams,
delete_mask_prob,
respect_document_boundaries,
**kwargs,
)
# Set the original pointer so dataset remains the main dataset.
if hasattr(indexed_dataset, 'set_doc_idx'):
indexed_dataset.set_doc_idx(doc_idx_ptr)
# Checks.
if getattr(indexed_dataset, 'doc_idx', None) is not None:
assert indexed_dataset.doc_idx[0] == 0
assert indexed_dataset.doc_idx.shape[0] == (total_num_of_documents + 1)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup, data_impl_kwargs={}):
logging.info(' > building dataset index ...')
start_time = time.time()
indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup, impl_kwargs=data_impl_kwargs)
if data_impl in ['text_mmap', 'csv_mmap']:
# make csv/text memmap compatible with Megatron sampling
make_indexed_dataset_compatibility(indexed_dataset)
assert indexed_dataset.sizes.shape[0] == indexed_dataset.doc_idx[-1]
logging.info(' > finished creating indexed dataset in {:4f} ' 'seconds'.format(time.time() - start_time))
logging.info(' > indexed dataset stats:')
logging.info(' number of documents: {}'.format(indexed_dataset.doc_idx.shape[0] - 1))
logging.info(' number of sentences: {}'.format(indexed_dataset.sizes.shape[0]))
return indexed_dataset
def get_samples_mapping(
indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
short_seq_prob,
seed,
name,
binary_head,
index_mapping_dir: str = None,
samples_mapping: Any = None,
):
"""Get a list that maps a sample index to a starting sentence index, end sentence index, and length"""
if not num_epochs:
if not max_num_samples:
raise ValueError("Need to specify either max_num_samples " "or num_epochs")
num_epochs = np.iinfo(np.int32).max - 1
if not max_num_samples:
max_num_samples = np.iinfo(np.int64).max - 1
# Filename of the index mapping
if index_mapping_dir is not None:
indexmap_filename = os.path.join(index_mapping_dir, os.path.basename(data_prefix))
else:
indexmap_filename = data_prefix
indexmap_filename += '_{}_indexmap'.format(name)
if num_epochs != (np.iinfo(np.int32).max - 1):
indexmap_filename += '_{}ep'.format(num_epochs)
if max_num_samples != (np.iinfo(np.int64).max - 1):
indexmap_filename += '_{}mns'.format(max_num_samples)
indexmap_filename += '_{}msl'.format(max_seq_length)
indexmap_filename += '_{:0.2f}ssp'.format(short_seq_prob)
indexmap_filename += '_{}s'.format(seed)
indexmap_filename += '.npy'
# Build the indexed mapping if not exist and not provided externally.
if samples_mapping is None and torch.distributed.get_rank() == 0 and not os.path.isfile(indexmap_filename):
# Fake index mapping if missing
if (getattr(indexed_dataset, 'doc_idx', None) is None) and (getattr(indexed_dataset, 'sizes', None) is None):
make_indexed_dataset_compatibility(indexed_dataset)
print(
' > WARNING: could not find index map file {}, building '
'the indices on rank 0 ...'.format(indexmap_filename)
)
# Make sure the types match the helpers input types.
assert indexed_dataset.doc_idx.dtype == np.int64
assert indexed_dataset.sizes.dtype == np.int32
# Build samples mapping
verbose = torch.distributed.get_rank() == 0
start_time = time.time()
logging.info(' > building samples index mapping for {} ...'.format(name))
# First compile and then import.
try:
if is_global_rank_zero():
compile_helper()
from nemo.collections.nlp.data.language_modeling.megatron import helpers
except ImportError:
raise ImportError(
f'Could not compile megatron dataset C++ helper functions and therefore cannot import helpers python file.'
)
samples_mapping = helpers.build_mapping(
indexed_dataset.doc_idx,
indexed_dataset.sizes,
num_epochs,
max_num_samples,
max_seq_length,
short_seq_prob,
seed,
verbose,
2 if binary_head else 1,
)
logging.info(' > done building samples index mapping')
np.save(indexmap_filename, samples_mapping, allow_pickle=True)
logging.info(' > saved the index mapping in {}'.format(indexmap_filename))
# Make sure all the ranks have built the mapping
logging.info(
' > elapsed time to build and save samples mapping ' '(seconds): {:4f}'.format(time.time() - start_time)
)
torch.distributed.barrier()
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=parallel_state.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=parallel_state.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size()
// torch.distributed.get_world_size(group=parallel_state.get_tensor_model_parallel_group())
)
# Load indexed dataset if not given externally.
if samples_mapping is None:
logging.info(' > loading indexed mapping from {}'.format(indexmap_filename))
start_time = time.time()
samples_mapping = np.load(indexmap_filename, allow_pickle=True, mmap_mode='r')
logging.info(' loaded indexed file in {:3.3f} seconds'.format(time.time() - start_time))
logging.info(' total number of samples: {}'.format(samples_mapping.shape[0]))
# Deallocate temporary numpy arrays that were created for `get_samples_mapping()` when needed
if hasattr(indexed_dataset, 'doc_idx') and hasattr(indexed_dataset, 'sizes'):
deallocate_indexed_dataset_memory(indexed_dataset)
return samples_mapping
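# Illustrative note (added comment): each row of the returned samples_mapping is a
# [start_sentence_index, end_sentence_index, sequence_length] triple, so a caller
# typically unpacks it as
#   start_idx, end_idx, seq_length = samples_mapping[sample_idx]
# and slices those sentences out of indexed_dataset.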
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/dataset_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""UL2 Style dataset from https://arxiv.org/abs/2205.05131"""
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import create_extreme_masked_lm_predictions
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.lm_adapted_t5_dataset import T5LMAdaptedDataset
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
class UL2Dataset(T5Dataset):
""" UL2 Dataset from https://arxiv.org/abs/2205.05131.
Consists of three different objectives:
1. Short span masking with small probabilities (e.g., T5). Typically a max ngram size of 5 with 0.15 mask prob.
2. Extreme span masking with either large probabilities or large ngram sizes or both.
3. Prefix-LM as in T5 or LM-adapted T5 (prompt-tuning paper).
"""
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
max_seq_length_dec,
seed,
masked_lm_prob=0.15,
extreme_masked_lm_prob=0.5,
short_seq_prob=0.0,
min_ngram_size=2,
max_ngram_size=10,
mean_ngram_size=3,
extreme_max_ngram_size=128,
extreme_min_ngram_size=32,
extreme_mean_ngram_size=64,
prefix_lm_pivot_mean=0.25, # This is represented as a percentage of the total length.
ngram_span_length_distribution=LengthDistribution.geometric,
extreme_ngram_span_length_distribution=LengthDistribution.truncated_normal,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
respect_document_boundaries=True,
documents=None,
):
super().__init__(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
name=name,
indexed_dataset=indexed_dataset,
data_prefix=data_prefix,
num_epochs=num_epochs,
max_num_samples=max_num_samples,
max_seq_length=max_seq_length - 1, # -1 to account for the added mask type token
max_seq_length_dec=max_seq_length_dec,
seed=seed,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
max_ngram_size=max_ngram_size,
mean_ngram_size=None, # TODO: Determine if we want to actually pass mean ngram as an override to max here.
geometric_dist=ngram_span_length_distribution == LengthDistribution.geometric,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
respect_document_boundaries=respect_document_boundaries,
documents=documents,
)
self.mean_ngram_size = mean_ngram_size
self.min_ngram_size = min_ngram_size
self.extreme_masked_lm_prob = extreme_masked_lm_prob
self.extreme_min_ngram_size = extreme_min_ngram_size
self.extreme_max_ngram_size = extreme_max_ngram_size
self.extreme_mean_ngram_size = extreme_mean_ngram_size
self.ngram_span_length_distribution = ngram_span_length_distribution
self.extreme_ngram_span_length_distribution = extreme_ngram_span_length_distribution
self.prefix_lm_pivot_mean = prefix_lm_pivot_mean
@classmethod
def get_r_masking_training_sample(
cls,
sample,
tokenizer,
np_rng,
target_seq_length: int,
max_seq_length: int,
max_seq_length_dec: int,
masked_lm_prob: float,
vocab_id_list: list,
vocab_id_to_token_dict: dict,
max_ngram_size: int,
mean_ngram_size: int,
whole_word_masking: bool,
favor_long_ngrams: bool,
permutation: bool,
geometric_dist: bool,
tokenizer_type: str,
sentinel_tokens: list,
skip_masking_id: int,
):
# Call T5's build training sample for regular short span masking.
sample = T5Dataset.build_training_sample(
sample=sample,
target_seq_length=target_seq_length,
np_rng=np_rng,
max_seq_length=max_seq_length,
max_seq_length_dec=max_seq_length_dec,
masked_lm_prob=masked_lm_prob,
vocab_id_list=vocab_id_list,
vocab_id_to_token_dict=vocab_id_to_token_dict,
cls_id=tokenizer.cls_id,
sep_id=tokenizer.sep_id,
mask_id=tokenizer.mask_id,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
permutation=permutation,
geometric_dist=geometric_dist,
tokenizer_type=tokenizer_type,
sentinel_tokens=sentinel_tokens,
bos_id=tokenizer.bos_id,
eos_id=tokenizer.eos_id,
pad_id=tokenizer.pad_id,
skip_masking_id=skip_masking_id,
)
sample = UL2Dataset._prepend_mask_type_token(tokenizer, sample, '<extra_id_r>')
return sample
@classmethod
def get_s_masking_training_sample(
cls,
sample,
np_rng,
max_seq_length_encoder: int,
max_seq_length_decoder: int,
tokenizer: TokenizerSpec,
prefix_lm_pivot_mean: float,
pivot_distribution: LengthDistribution,
add_eos: bool = False,
):
sample = [token for sentence in sample for token in sentence]
sample = T5LMAdaptedDataset.get_prefix_lm_sample(
sample=sample,
max_seq_length_encoder=max_seq_length_encoder,
max_seq_length_decoder=max_seq_length_decoder, # We don't use max_seq_length_decoder here since we typically want to use long decoder sequences for better LM performance and we can do +1 because we don't need to add the UL2 token here.
np_rng=np_rng,
tokenizer=tokenizer,
pivot_mean=prefix_lm_pivot_mean,
pivot_distribution=pivot_distribution,
add_eos=add_eos,
)
sample = UL2Dataset._prepend_mask_type_token(tokenizer, sample, '<extra_id_s>')
return sample
@classmethod
def get_x_masking_training_sample(
cls,
sample,
tokenizer,
np_rng,
target_seq_length: int,
max_seq_length: int,
max_seq_length_dec: int,
masked_lm_prob: float,
extreme_masked_lm_prob: float,
max_ngram_size: int,
min_ngram_size: int,
mean_ngram_size: int,
extreme_max_ngram_size: int,
extreme_min_ngram_size: int,
extreme_mean_ngram_size: int,
extreme_ngram_span_length_distribution: LengthDistribution,
sentinel_tokens: list,
skip_masking_id: int,
):
sample = UL2Dataset.build_extreme_masking_training_sample(
sample=sample,
target_seq_length=target_seq_length,
np_rng=np_rng,
max_seq_length=max_seq_length,
max_seq_length_dec=max_seq_length_dec,
masked_lm_prob=masked_lm_prob,
extreme_masked_lm_prob=extreme_masked_lm_prob,
mask_id=tokenizer.mask_id,
max_ngram_size=max_ngram_size,
min_ngram_size=min_ngram_size,
extreme_max_ngram_size=extreme_max_ngram_size,
extreme_mean_ngram_size=extreme_mean_ngram_size,
extreme_min_ngram_size=extreme_min_ngram_size,
extreme_ngram_span_length_distribution=extreme_ngram_span_length_distribution,
mean_ngram_size=mean_ngram_size,
sentinel_tokens=sentinel_tokens,
bos_id=tokenizer.bos_id,
eos_id=tokenizer.eos_id,
pad_id=tokenizer.pad_id,
skip_masking_id=skip_masking_id,
)
sample = UL2Dataset._prepend_mask_type_token(tokenizer, sample, '<extra_id_x>')
return sample
def __getitem__(self, idx):
sample, seq_length = self._get_sample(idx)
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
np_rng = np.random.RandomState(seed=(self.seed + idx))
masking_type = np_rng.randint(0, 3) # 0: short span masking, 1: extreme masking, 2: prefix-LM
if masking_type == 0:
# Call T5's build training sample for regular short span masking.
return UL2Dataset.get_r_masking_training_sample(
sample=sample,
tokenizer=self.tokenizer,
np_rng=np_rng,
target_seq_length=seq_length,
max_seq_length=self.max_seq_length,
max_seq_length_dec=self.max_seq_length_dec,
masked_lm_prob=self.masked_lm_prob,
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
max_ngram_size=self.max_ngram_size,
mean_ngram_size=self.mean_ngram_size,
whole_word_masking=self.whole_word_masking,
favor_long_ngrams=self.favor_long_ngrams,
permutation=self.permutation,
geometric_dist=self.geometric_dist,
tokenizer_type=self.tokenizer_type,
sentinel_tokens=self.sentinel_tokens,
skip_masking_id=None,
)
elif masking_type == 1:
return UL2Dataset.get_x_masking_training_sample(
sample=sample,
tokenizer=self.tokenizer,
np_rng=np_rng,
target_seq_length=seq_length,
max_seq_length=self.max_seq_length,
max_seq_length_dec=self.max_seq_length_dec,
masked_lm_prob=self.masked_lm_prob,
extreme_masked_lm_prob=self.extreme_masked_lm_prob,
max_ngram_size=self.max_ngram_size,
min_ngram_size=self.min_ngram_size,
mean_ngram_size=self.mean_ngram_size,
extreme_max_ngram_size=self.extreme_max_ngram_size,
extreme_min_ngram_size=self.extreme_min_ngram_size,
extreme_mean_ngram_size=self.extreme_mean_ngram_size,
extreme_ngram_span_length_distribution=self.extreme_ngram_span_length_distribution,
sentinel_tokens=self.sentinel_tokens,
skip_masking_id=None,
)
elif masking_type == 2:
return UL2Dataset.get_s_masking_training_sample(
sample=sample,
np_rng=np_rng,
max_seq_length_encoder=self.max_seq_length,
max_seq_length_decoder=self.max_seq_length_dec,
tokenizer=self.tokenizer,
prefix_lm_pivot_mean=self.prefix_lm_pivot_mean,
pivot_distribution=self.extreme_ngram_span_length_distribution,
)
@classmethod
def _prepend_mask_type_token(cls, tokenizer, sample, token):
token_id = tokenizer.text_to_ids(token)
assert len(token_id) == 1, token
token_id = token_id[0]
text_enc = np.concatenate([[token_id], sample['text_enc']])
sample['text_enc'] = text_enc
if 'enc_mask' in sample:
sample['enc_mask'] = np.concatenate([[1], sample['enc_mask']])
return sample
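# Illustrative example (added comment): assuming '<extra_id_r>' maps to a single
# vocabulary id, say 32001, a sample with text_enc == [5, 6, 7] becomes
# text_enc == [32001, 5, 6, 7], and enc_mask (if present) gains a leading 1 so the
# new mask-type token is attended to.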
@classmethod
def build_extreme_masking_training_sample(
cls,
sample,
target_seq_length,
np_rng,
max_seq_length,
max_seq_length_dec,
masked_lm_prob,
extreme_masked_lm_prob,
mask_id,
max_ngram_size,
min_ngram_size,
mean_ngram_size,
extreme_max_ngram_size,
extreme_mean_ngram_size,
extreme_min_ngram_size,
extreme_ngram_span_length_distribution,
sentinel_tokens,
bos_id,
eos_id,
pad_id,
skip_masking_id=None,
):
"""Build training sample.
Arguments:
sample: A list of sentences in which each sentence is a list token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
np_rng: Random number generator. Note that this rng state should be
numpy and not python since python randint is inclusive for
the upper bound whereas the numpy one is exclusive.
bos_id: start of decoder example id
eos_id: end of generation id
sentinel_tokens: unique value to be substituted for every replaced span
tokenizer_type: wordpiece (BERT-style) or sentencepiece tokenizer. Used for whole word masking logic.
max_ngram_size: maximum size of ngrams to be masked.
mean_ngram_size: mean size of ngrams to be masked (only used if geometric_dist=True).
geometric_dist: Uses a geometric distribution to sample ngram size.
permutation: Permutes the ngrams.
whole_word_masking: Always masks entire words instead of individual sub-word tokens.
favor_long_ngrams: Favor longer ngrams over shorter ones.
skip_masking_id: id of the token that will never be masked.
"""
assert target_seq_length <= max_seq_length
# flatten sentences into one list
tokens = [token for sentence in sample for token in sentence]
# Truncate to `target_sequence_length`.
max_num_tokens = target_seq_length
tokens = tokens[:max_num_tokens]
# Determine if we have a lot of masking or little masking. There are three cases:
# 1. Small masking prob, large spans.
# 2. Large masking prob, small spans.
# 3. Large masking prob, large spans.
task_type = np_rng.randint(0, 3)
if task_type == 0:
# Large spans, small masking prob
max_ngram_size, mean_ngram_size, min_ngram_size, masked_lm_prob = (
extreme_max_ngram_size,
extreme_mean_ngram_size,
extreme_min_ngram_size,
masked_lm_prob,
)
elif task_type == 1:
# Small spans, large masking prob
max_ngram_size, mean_ngram_size, min_ngram_size, masked_lm_prob = (
max_ngram_size,
mean_ngram_size,
min_ngram_size,
extreme_masked_lm_prob,
)
else:
# Large spans, large masking prob
max_ngram_size, mean_ngram_size, min_ngram_size, masked_lm_prob = (
extreme_max_ngram_size,
extreme_mean_ngram_size,
extreme_min_ngram_size,
extreme_masked_lm_prob,
)
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
lm_pred = create_extreme_masked_lm_predictions(
tokens=tokens,
masked_lm_prob=masked_lm_prob,
mask_id=mask_id,
max_predictions_per_seq=max_predictions_per_seq,
np_rng=np_rng,
max_ngram_size=max_ngram_size,
min_ngram_size=min_ngram_size,
mean_ngram_size=mean_ngram_size,
span_length_distribution=extreme_ngram_span_length_distribution,
skip_masking_id=skip_masking_id,
)
if masked_lm_prob == 0:
(output_tokens, masked_positions, masked_labels) = lm_pred
masked_spans = None
else:
(output_tokens, masked_positions, masked_labels, masked_spans) = lm_pred
# Padding.
tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, loss_mask = T5Dataset.pad_and_convert_to_numpy(
output_tokens=output_tokens,
masked_positions=masked_positions,
masked_labels=masked_labels,
masked_spans=masked_spans,
sentinel_tokens=sentinel_tokens,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
max_seq_length=max_seq_length,
max_seq_length_dec=max_seq_length_dec,
)
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return train_sample
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/ul2_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
class LengthDistribution(enum.Enum):
uniform = 1
geometric = 2
truncated_normal = 3
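# Illustrative usage (added comment): config strings such as "geometric" are mapped
# onto this enum by the dataset builders (dataset_utils.py does it with explicit
# if/elif branches); an equivalent compact lookup would be
#   dist = LengthDistribution["geometric"]
#   assert dist is LengthDistribution.geometric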
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/length_distribution_type.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
def get_datasets_weights_and_num_samples(data_prefix, num_samples):
# The data prefix should be in the format of:
# weight-1, data-prefix-1, weight-2, data-prefix-2, ..
assert len(data_prefix) % 2 == 0
num_datasets = len(data_prefix) // 2
weights = [0] * num_datasets
prefixes = [0] * num_datasets
for i in range(num_datasets):
weights[i] = float(data_prefix[2 * i])
prefixes[i] = (data_prefix[2 * i + 1]).strip()
# Normalize weights
weight_sum = 0.0
for weight in weights:
weight_sum += weight
assert weight_sum > 0.0
weights = [weight / weight_sum for weight in weights]
# Add 0.5% (the 1.005 factor) so that in case the blending dataset does
# not uniformly distribute the number of samples, we still have
# samples left to feed to the network.
# TODO: check data leakage between train/val/test?
datasets_train_valid_test_num_samples = []
for weight in weights:
# Comes here when we have separate train, test and validation datasets.
if isinstance(num_samples, int):
datasets_train_valid_test_num_samples.append(int(math.ceil(num_samples * weight * 1.005)))
else:
datasets_train_valid_test_num_samples.append([int(math.ceil(val * weight * 1.005)) for val in num_samples])
return prefixes, weights, datasets_train_valid_test_num_samples
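# A minimal illustrative sketch of the helper above; the dataset paths and sample counts
# below are made-up values, not defaults from any shipped config.
def _example_get_datasets_weights_and_num_samples():
    prefixes, weights, per_dataset = get_datasets_weights_and_num_samples(
        data_prefix=['0.3', '/data/wiki_text_document', '0.7', '/data/books_text_document'],
        num_samples=[1000, 100, 100],  # train / validation / test
    )
    # prefixes    -> ['/data/wiki_text_document', '/data/books_text_document']
    # weights     -> [0.3, 0.7]
    # per_dataset -> [[302, 31, 31], [704, 71, 71]]  (ceil(n * weight * 1.005) per split)
    return prefixes, weights, per_dataset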
def get_train_valid_test_split_(splits_string, size):
""" Get dataset splits from comma or '/' separated string list."""
splits = []
if splits_string.find(',') != -1:
splits = [float(s) for s in splits_string.split(',')]
elif splits_string.find('/') != -1:
splits = [float(s) for s in splits_string.split('/')]
else:
splits = [float(splits_string)]
if len(splits) != 3:
raise ValueError(f"Invalid splits string: {splits_string}. Expected 3 comma- or '/'-separated values.")
splits_sum = sum(splits)
assert splits_sum > 0.0
splits = [split / splits_sum for split in splits]
splits_index = [0]
for index, split in enumerate(splits):
splits_index.append(splits_index[index] + int(round(split * float(size))))
diff = splits_index[-1] - size
for index in range(1, len(splits_index)):
splits_index[index] -= diff
assert len(splits_index) == 4
assert splits_index[-1] == size
return splits_index
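# Worked example for the splitter above (values chosen only for illustration):
# a "98,1,1" split of 1000 samples yields boundary indices [0, 980, 990, 1000],
# i.e. train covers [0, 980), validation [980, 990) and test [990, 1000).
def _example_get_train_valid_test_split():
    return get_train_valid_test_split_('98,1,1', 1000)  # -> [0, 980, 990, 1000]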
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/base_dataset_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T5 Style dataset."""
import collections
import os
import numpy as np
import torch
from nemo.collections.common.tokenizers import SentencePieceTokenizer, YouTokenToMeTokenizer
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import (
create_masked_lm_predictions,
get_samples_mapping,
)
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import _build_index_mappings
from nemo.core import Dataset
class T5Dataset(Dataset):
# account for added tokens
MAX_SEQ_LENGTH_DELTA = 2
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
max_seq_length_dec,
seed,
masked_lm_prob=0.15,
short_seq_prob=0.1,
max_ngram_size=10,
mean_ngram_size=None,
geometric_dist=True,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
respect_document_boundaries=True,
documents=None,
):
super().__init__()
# Params to store.
self.name = name
self.seed = seed
self.masked_lm_prob = masked_lm_prob
self.max_seq_length = max_seq_length
self.max_seq_length_dec = max_seq_length_dec
self.short_seq_prob = short_seq_prob
self.max_ngram_size = max_ngram_size
self.mean_ngram_size = mean_ngram_size
self.geometric_dist = geometric_dist
self.permutation = permutation
self.whole_word_masking = whole_word_masking
self.favor_long_ngrams = favor_long_ngrams
self.respect_document_boundaries = respect_document_boundaries
# Dataset.
self.indexed_dataset = indexed_dataset
# save index mappings to a configurable dir
self.index_mapping_dir = cfg.data.get('index_mapping_dir', None)
# create index_mapping_dir on rank 0
if torch.distributed.is_available() and torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.index_mapping_dir is not None and not os.path.isdir(self.index_mapping_dir):
os.makedirs(self.index_mapping_dir)
torch.distributed.barrier()
# Build the samples mapping.
if not respect_document_boundaries:
# Build index mappings.
assert documents is not None
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
name=self.name,
data_prefix=data_prefix,
documents=documents,
sizes=self.indexed_dataset.sizes,
num_samples=max_num_samples,
seq_length=self.max_seq_length - self.MAX_SEQ_LENGTH_DELTA,
seed=self.seed,
index_mapping_dir=self.index_mapping_dir,
)
else:
self.samples_mapping = get_samples_mapping(
indexed_dataset=self.indexed_dataset,
data_prefix=data_prefix,
num_epochs=num_epochs,
max_num_samples=max_num_samples,
max_seq_length=self.max_seq_length - self.MAX_SEQ_LENGTH_DELTA, # account for added tokens
short_seq_prob=self.short_seq_prob,
seed=self.seed,
name=self.name,
binary_head=False,
index_mapping_dir=self.index_mapping_dir,
)
self.tokenizer = tokenizer
self.tokenizer_type = T5Dataset._determine_tokenizer_type(tokenizer, whole_word_masking=whole_word_masking)
self.cls_id = tokenizer.cls_id
self.sep_id = tokenizer.sep_id
self.mask_id = tokenizer.mask_id
self.pad_id = tokenizer.pad_id
self.bos_id = tokenizer.bos_id
self.eos_id = tokenizer.eos_id
self.vocab_id_list = self.tokenizer.vocab
self.vocab_id_to_token_dict = {idx: token for idx, token in enumerate(self.vocab_id_list)}
self._build()
def _build(self):
"""
Class-specific build method to be overridden by child classes.
"""
self.sentinel_tokens = self.tokenizer.additional_special_tokens_ids
assert len(self.sentinel_tokens) > 0
def __len__(self):
if self.respect_document_boundaries:
return self.samples_mapping.shape[0]
else:
return self.sample_idx.shape[0] - 1
def _get_sample(self, idx):
if self.respect_document_boundaries:
start_index, end_index, seq_length = self.samples_mapping[idx]
sample = []
for index in range(start_index, end_index):
sample.append(self.indexed_dataset[index])
else:
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(
self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1
)
else:
# Otherwise, get the rest of the initial document.
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
# And finally add the relevant portion of last document.
sample_list.append(self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1))
sample = np.concatenate(sample_list)
sample = sample.astype(np.int64)
seq_length = len(sample)
sample = [sample]
return sample, seq_length
def __getitem__(self, idx):
sample, seq_length = self._get_sample(idx)
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
np_rng = np.random.RandomState(seed=(self.seed + idx))
training_sample = T5Dataset.build_training_sample(
sample=sample,
target_seq_length=seq_length,
np_rng=np_rng,
max_seq_length=self.max_seq_length,
max_seq_length_dec=self.max_seq_length_dec,
masked_lm_prob=self.masked_lm_prob,
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
cls_id=self.cls_id,
sep_id=self.sep_id,
mask_id=self.mask_id,
max_ngram_size=self.max_ngram_size,
mean_ngram_size=self.mean_ngram_size,
whole_word_masking=self.whole_word_masking,
favor_long_ngrams=self.favor_long_ngrams,
permutation=self.permutation,
geometric_dist=self.geometric_dist,
tokenizer_type=self.tokenizer_type,
sentinel_tokens=self.sentinel_tokens,
bos_id=self.bos_id,
eos_id=self.eos_id,
pad_id=self.pad_id,
)
return training_sample
@classmethod
def _determine_tokenizer_type(cls, tokenizer, whole_word_masking=False):
tokenizer_type = 'wordpiece' # TODO: better checks for tokenizer types. How do we do this for HF tokenizers that are not BERT?
if isinstance(tokenizer, YouTokenToMeTokenizer):
raise ValueError(f"YTTM does not support special tokens and cannot be used with T5 datasets.")
if isinstance(tokenizer, SentencePieceTokenizer):
if not tokenizer.legacy:
raise ValueError("Sentencepiece Tokenizer must have legacy = False to add special tokens.")
tokenizer_type = 'sentencepiece'
if whole_word_masking:
raise ValueError(
"Whole word masking is only supported with wordpiece (BERT-style) tokenizers, not with sentencepiece tokenizers. Please set it to False."
)
return tokenizer_type
@classmethod
def build_training_sample(
cls,
sample,
target_seq_length,
np_rng,
max_seq_length,
max_seq_length_dec,
masked_lm_prob,
vocab_id_list,
vocab_id_to_token_dict,
cls_id,
sep_id,
mask_id,
max_ngram_size,
whole_word_masking,
favor_long_ngrams,
permutation,
mean_ngram_size,
geometric_dist,
tokenizer_type,
sentinel_tokens,
bos_id,
eos_id,
pad_id,
skip_masking_id=None,
):
"""Build training sample.
Arguments:
sample: A list of sentences in which each sentence is a list of token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
np_rng: Random number generator. Note that this rng state should be
numpy and not python since python randint is inclusive for
the upper bound whereas the numpy one is exclusive.
bos_id: start of decoder example id
eos_id: end of generation id
sentinel_tokens: unique value to be substituted for every replaced span
tokenizer_type: wordpiece (BERT-style) or sentencepiece tokenizer. Used for whole word masking logic.
max_ngram_size: maximum size of ngrams to be masked.
mean_ngram_size: mean size of ngrams to be masked (only used if geometric_dist=True).
geometric_dist: Uses a geometric distribution to sample ngram size.
permutation: Permutes the ngrams.
whole_word_masking: Always masks entire words instead of individual sub-word tokens.
favor_long_ngrams: Favor longer ngrams over shorter ones.
skip_masking_id: An id that will not be masked. TODO: Add support for a list of IDs.
"""
assert target_seq_length <= max_seq_length
# flatten sentences into one list
tokens = [token for sentence in sample for token in sentence]
# Truncate to `target_seq_length`.
max_num_tokens = target_seq_length
tokens = tokens[:max_num_tokens]
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
lm_pred = create_masked_lm_predictions(
tokens=tokens,
vocab_id_list=vocab_id_list,
vocab_id_to_token_dict=vocab_id_to_token_dict,
masked_lm_prob=masked_lm_prob,
cls_id=cls_id,
sep_id=sep_id,
mask_id=mask_id,
max_predictions_per_seq=max_predictions_per_seq,
np_rng=np_rng,
max_ngram_size=max_ngram_size,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
mean_ngram_size=mean_ngram_size,
permutation=permutation,
geometric_dist=geometric_dist,
masking_style="t5",
tokenizer_type=tokenizer_type,
skip_masking_id=skip_masking_id,
)
if masked_lm_prob == 0:
(output_tokens, masked_positions, masked_labels, _) = lm_pred
masked_spans = None
else:
(output_tokens, masked_positions, masked_labels, _, masked_spans) = lm_pred
# Padding.
tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, loss_mask = T5Dataset.pad_and_convert_to_numpy(
output_tokens=output_tokens,
masked_positions=masked_positions,
masked_labels=masked_labels,
masked_spans=masked_spans,
sentinel_tokens=sentinel_tokens,
bos_id=bos_id,
eos_id=eos_id,
pad_id=pad_id,
max_seq_length=max_seq_length,
max_seq_length_dec=max_seq_length_dec,
)
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return train_sample
@classmethod
def pad_and_convert_to_numpy(
cls,
output_tokens,
masked_positions,
masked_labels,
sentinel_tokens,
bos_id,
eos_id,
pad_id,
max_seq_length,
max_seq_length_dec,
masked_spans=None,
):
"""Pad sequences and convert them to numpy."""
sentinel_tokens = collections.deque(sentinel_tokens)
t5_input = []
(t5_decoder_in, t5_decoder_out) = ([bos_id], [])
(start_index, end_index) = (0, None)
if masked_spans is not None:
for span in masked_spans:
flag = sentinel_tokens.popleft()
# Append the same tokens in decoder input and output
t5_decoder_in.append(flag)
t5_decoder_in.extend(span.label)
t5_decoder_out.append(flag)
t5_decoder_out.extend(span.label)
end_index = span.index[0]
t5_input.extend(output_tokens[start_index:end_index])
t5_input.append(flag)
# the next start index is the token after the last span token
start_index = span.index[-1] + 1
# Add <eos> token to the t5_decoder_out
t5_decoder_out.append(eos_id)
# Add the remaining tokens to the t5 input
t5_input.extend(output_tokens[start_index:])
# assert (len(t5_input) - len(masked_spans)) + \
# (len(t5_decoder_in) - (len(masked_spans) + 1)) == len(tokens)
# Some checks.
# Encoder-side padding mask.
num_tokens = len(t5_input)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0, padding_length
assert len(masked_positions) == len(masked_labels)
# Tokens..
filler = [pad_id] * padding_length
tokens_enc = np.array(t5_input + filler, dtype=np.int64)
# Decoder-side padding mask.
num_tokens_dec = len(t5_decoder_in)
padding_length_dec = max_seq_length_dec - num_tokens_dec
assert padding_length_dec >= 0, (padding_length_dec, max_seq_length_dec, num_tokens_dec)
filler_dec = [pad_id] * padding_length_dec
tokens_dec_in = np.array(t5_decoder_in + filler_dec, dtype=np.int64)
# Create attention masks
enc_mask = (tokens_enc != pad_id).astype(np.int64)
dec_mask = (tokens_dec_in != pad_id).astype(np.int64)
# Labels mask.
labels = t5_decoder_out + ([pad_id] * padding_length_dec)
labels = np.array(labels, dtype=np.int64)
# Loss mask
loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec)
loss_mask = np.array(loss_mask, dtype=np.int64)
return tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, loss_mask
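# A minimal sketch of the encoding performed by pad_and_convert_to_numpy. The `_Span`
# namedtuple is only a stand-in assumption for the span objects produced by
# create_masked_lm_predictions (anything exposing `.index` and `.label` works), and all
# token / sentinel / special ids below are made up for illustration.
def _example_t5_span_encoding():
    _Span = collections.namedtuple('_Span', ['index', 'label'])
    enc, dec_in, labels, enc_mask, dec_mask, loss_mask = T5Dataset.pad_and_convert_to_numpy(
        output_tokens=[11, 12, 99, 14, 99, 16],  # 99 marks a masked position
        masked_positions=[2, 4],
        masked_labels=[13, 15],
        masked_spans=[_Span(index=[2], label=[13]), _Span(index=[4], label=[15])],
        sentinel_tokens=[1001, 1002],
        bos_id=0,
        eos_id=2,
        pad_id=3,
        max_seq_length=10,
        max_seq_length_dec=8,
    )
    # enc    -> [11, 12, 1001, 14, 1002, 16, 3, 3, 3, 3]  (each span collapsed to a sentinel)
    # dec_in -> [0, 1001, 13, 1002, 15, 3, 3, 3]          (bos, then sentinel + original span tokens)
    # labels -> [1001, 13, 1002, 15, 2, 3, 3, 3]          (shifted by one, ending with eos)
    return enc, dec_in, labels, enc_mask, dec_mask, loss_mask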
class MockT5Dataset(Dataset):
def __init__(
self, cfg, tokenizer, name, num_samples, max_seq_length, max_seq_length_dec, seed,
):
super().__init__()
self.name = name
self.max_seq_length = max_seq_length
self.max_seq_length_dec = max_seq_length_dec
self.vocab_size = tokenizer.vocab_size
self.length = num_samples
self.seed = seed
def __len__(self):
return self.length
def _get_sample(self, idx):
np_gen = np.random.default_rng(seed=(self.seed + idx))
sample = np_gen.integers(self.vocab_size, size=[self.max_seq_length], dtype=np.int64)
return [sample], self.max_seq_length
def __getitem__(self, idx):
# Generate output values randomly with the expected size and datatype
np_gen = np.random.default_rng(seed=(self.seed + idx))
tokens_enc = np_gen.integers(self.vocab_size, size=[self.max_seq_length], dtype=np.int64)
tokens_dec_in = np_gen.integers(self.vocab_size, size=[self.max_seq_length_dec], dtype=np.int64)
labels = np_gen.integers(self.vocab_size, size=[self.max_seq_length_dec], dtype=np.int64)
enc_mask = np.ones(shape=[self.max_seq_length], dtype=np.int64)
dec_mask = np.ones(shape=[self.max_seq_length_dec], dtype=np.int64)
loss_mask = np.ones(shape=[self.max_seq_length_dec], dtype=np.int64)
training_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return training_sample
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/t5_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import torch
from tqdm.auto import tqdm
from nemo.collections.nlp.data.language_modeling.megatron.base_prompt_learning_dataset import BasePromptLearningDataset
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import T5Sentinel
from nemo.collections.nlp.modules.common import VirtualPromptSource
from nemo.collections.nlp.modules.common.megatron.utils import build_position_ids
from nemo.utils import logging
__all__ = ['T5PromptLearningDataset']
class T5PromptLearningDataset(BasePromptLearningDataset):
"""
The dataset class for prompt-tuning or p-tuning pretrained T5 models.
"""
def __init__(
self,
datasets,
tokenizer,
virtual_prompt_source: VirtualPromptSource,
task_templates: dict,
pseudo_tokens,
pad_token_id: str,
max_seq_length: int,
min_seq_length: int = 1,
add_bos: bool = False,
add_eos: bool = True,
for_train: bool = True,
decoder_starts_with_pad: bool = False,
add_eos_to_decoder_output: bool = True,
add_sentinel_to_input: bool = True,
ul2_prompt_token: str = None,
):
# These attributes need to be set before calling super().__init__() because the parent class calls `load_data()`, which requires them.
self.decoder_starts_with_pad = decoder_starts_with_pad
self.add_eos_to_decoder_output = add_eos_to_decoder_output
self.add_sentinel_to_input = add_sentinel_to_input
self.ul2_prompt_token = ul2_prompt_token
super().__init__(
datasets=datasets,
tokenizer=tokenizer,
virtual_prompt_source=virtual_prompt_source,
task_templates=task_templates,
pseudo_tokens=pseudo_tokens,
pad_token_id=pad_token_id,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
add_bos=add_bos,
add_eos=add_eos,
for_train=for_train,
)
def load_data(self, dataset):
"""
Loads a dataset by filling in the task templates specified in the config file
with the information from each training/inference example. Converts all input
text into token ids. Also replaces the <|VIRTUAL_PROMPT_#|> placeholders in
the task templates with the actual virtual prompt token ids.
params:
dataset: A list of json strings or dictionary objects, each
containing the information needed for a training example
"""
skipped = 0
for json_line in tqdm(dataset):
# Read example dict or load the information for a single example from .json file
if type(json_line) == dict:
doc = json_line
else:
doc = json.loads(json_line)
taskname = doc["taskname"]
prompt_template = self.task_templates[taskname]["prompt_template"]
prompt_template_fields = self.task_templates[taskname]["prompt_template_fields"]
total_virtual_tokens = self.task_templates[taskname]["total_virtual_tokens"]
virtual_token_splits = self.task_templates[taskname]["virtual_token_splits"]
truncation_field = self.task_templates[taskname]['truncate_field']
answer_field = self.task_templates[taskname]["answer_field"]
input_example = prompt_template
self._input_sanity_checks(
total_virtual_tokens=total_virtual_tokens,
virtual_token_splits=virtual_token_splits,
prompt_template=prompt_template,
prompt_template_fields=prompt_template_fields,
truncation_field=truncation_field,
answer_field=answer_field,
doc=doc,
)
# Format the input example according to the template
input_example = self._insert_text_in_template(input_example, prompt_template_fields, doc, answer_field)
input_example = self._insert_virtual_token_placeholders(input_example, virtual_token_splits)
# a trick to align with the data format in t5 pretraining
input_ids = self.tokenizer.text_to_ids(input_example)
if self.add_sentinel_to_input:
input_ids = input_ids + self.tokenizer.text_to_ids(T5Sentinel.FIRST.value)
# Add BOS/EOS to the input of encoder if desired, adds EOS by default
if self.ul2_prompt_token is not None:
ul2_prompt_token_id = self.tokenizer.text_to_ids(self.ul2_prompt_token)
assert len(ul2_prompt_token_id) == 1
input_ids = ul2_prompt_token_id + input_ids
if self.add_bos:
input_ids = [self.tokenizer.bos_id] + input_ids
if self.add_eos:
input_ids = input_ids + [self.tokenizer.eos_id]
# Try to truncate input text to fit into the max sequence length
if len(input_ids) > self.max_seq_length:
input_ids = self._truncate_input(truncation_field, input_ids, taskname, doc, total_virtual_tokens)
# get answer ids
if answer_field in doc.keys(): # training and validation
answer_text = doc[answer_field]
if self.decoder_starts_with_pad:
answer_text_ids = [self.tokenizer.pad_id]
else:
answer_text_ids = [self.tokenizer.bos_id]
# a trick to align with the data format in t5 pretraining
if self.add_sentinel_to_input:
answer_text_ids += self.tokenizer.text_to_ids(T5Sentinel.FIRST.value)
answer_text_ids += self.tokenizer.text_to_ids(answer_text)
if self.add_eos_to_decoder_output:
answer_text_ids += [self.tokenizer.eos_id]
else:
answer_text_ids += self.tokenizer.text_to_ids(T5Sentinel.END.value)
# Skip example if the final length doesn't fit length requirements even after truncation
if self.min_seq_length <= len(input_ids) <= self.max_seq_length:
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
taskname_id = self.tokenizer.text_to_ids(taskname)
elif (
self.virtual_prompt_source == VirtualPromptSource.NO_PROMPT
): # TODO (@adithyare) this class and GPTPromptLearningDataset should be merged.
taskname_id = -1
else:
raise ValueError("Invalid virtual prompt source specified")
dec_input = None
dec_labels = None
if answer_field in doc.keys(): # training and validation
dec_input = answer_text_ids[:-1]
dec_labels = answer_text_ids[1:]
self.examples.append((taskname_id, input_ids, dec_input, dec_labels))
else:
skipped += 1
logging.info(f'Skipped {skipped} sentences, sequence length too short or too long even after truncation')
def _insert_text_in_template(self, input_example, prompt_template_fields, doc, answer_field):
""" Format the input example according to the template """
for field in prompt_template_fields:
# discard the last one, {label} / {answer}
# Or if some fields from the template aren't present, e.g. {answer} during inference
# just remove that field from the template, leaving the space blank
if field == answer_field or field not in doc.keys():
input_example = input_example.replace('{' + field + '}', "")
else:
field_text = doc[field]
input_example = input_example.replace('{' + field + '}', field_text)
return input_example.strip(" ")
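# Illustrative trace of the template filling above (the template, field names and text are
# made-up examples, not values taken from any shipped task config):
#   prompt_template = "<|VIRTUAL_PROMPT_0|> Review: {review} sentiment: {label}"
#   doc             = {"taskname": "sentiment", "review": "great movie", "label": "positive"}
#   answer_field    = "label"
# The answer field (and any template field missing from `doc`) is replaced with an empty
# string, so the encoder-side text becomes:
#   "<|VIRTUAL_PROMPT_0|> Review: great movie sentiment:"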
def collate_fn(self, batch):
""" Prepares enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids for global batch """
taskname_ids, enc_input, dec_input, dec_labels = zip(*batch)
taskname_ids = self.pad_taskname_ids(taskname_ids)
enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask = self.pad_batch_and_build_loss_mask(
enc_input, dec_input, dec_labels
)
position_ids = build_position_ids(enc_input).contiguous()
return enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask, position_ids, taskname_ids
def pad_batch_and_build_loss_mask(self, enc_input, dec_input, dec_labels):
""" Pad enc_input, dec_input, labels in batch to max batch length while building loss_mask, enc_mask, and dec_mask """
# have labels (during training and validation)
if dec_input[0] and dec_labels[0]:
max_dec_input_length = max([len(item) for item in dec_input]) if dec_input[0] else 0
max_label_length = max([len(item) for item in dec_labels]) if dec_labels[0] else 0
loss_mask = [([1] * (len(item))) + ([0] * (max_label_length - len(item))) for item in dec_labels]
dec_input = [item + [self.tokenizer.pad_id] * (max_dec_input_length - len(item)) for item in dec_input]
labels = [item + [self.tokenizer.pad_id] * (max_label_length - len(item)) for item in dec_labels]
dec_input = torch.LongTensor(dec_input).contiguous()
labels = torch.LongTensor(labels).contiguous()
loss_mask = torch.LongTensor(loss_mask).contiguous()
dec_mask = (dec_input != self.tokenizer.pad_id).long().contiguous()
# during inference
else:
dec_input, labels, loss_mask, dec_mask = None, None, None, None
# for all training, validation, and inference
max_enc_query_length = max([len(item) for item in enc_input]) if enc_input[0] else 0
enc_input = [item + [self.tokenizer.pad_id] * (max_enc_query_length - len(item)) for item in enc_input]
enc_input = torch.LongTensor(enc_input).contiguous()
enc_mask = (enc_input != self.tokenizer.pad_id).long().contiguous()
return enc_input, dec_input, labels, loss_mask, enc_mask, dec_mask
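# Illustrative trace of the batch padding above (ids are made up; <pad> stands for tokenizer.pad_id):
#   enc_input  = [[5, 6, 7], [5, 6]]  -> [[5, 6, 7], [5, 6, <pad>]],   enc_mask = [[1, 1, 1], [1, 1, 0]]
#   dec_input  = [[0, 8], [0]]        -> [[0, 8], [0, <pad>]],         dec_mask = [[1, 1], [1, 0]]
#   dec_labels = [[8, 9], [8]]        -> labels [[8, 9], [8, <pad>]],  loss_mask = [[1, 1], [1, 0]]
# During inference (no decoder labels) only enc_input and enc_mask are built; the rest are None.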
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/t5_prompt_learning_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Most of the code here has been copied from:
# fairseq/fairseq/data/indexed_dataset.py
# with some modifications:
# Removed IndexedRawTextDataset since it relied on Fairseq dictionary
# other slight modifications to remove fairseq dependencies
# Added document index to index file and made it accessible.
# An empty sentence no longer separates documents.
import os
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
import numpy as np
import torch
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
MMapRetrievalIndexedDataset,
MMapRetrievalIndexedDatasetBuilder,
)
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import CSVMemMapDataset, TextMemMapDataset
from nemo.utils import logging
def __best_fitting_dtype(vocab_size=None):
if vocab_size is not None and vocab_size < 65500:
return np.uint16
else:
return np.int32
def get_available_dataset_impl():
return ['lazy', 'cached', 'mmap', "retmmap"]
def infer_dataset_impl(path):
if IndexedDataset.exists(path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
if magic == IndexedDataset._HDR_MAGIC:
return 'cached'
elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]:
return 'mmap'
elif magic == MMapRetrievalIndexedDataset.Index._HDR_MAGIC[:8]:
return 'retmmap'
else:
return None
else:
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
return None
def make_builder(out_file, impl, vocab_size=None, chunk_size=64, pad_id=0, retrieval_db=False, stride=64):
if impl == 'mmap':
return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size))
elif impl == 'retmmap':
return MMapRetrievalIndexedDatasetBuilder(
out_file,
chunk_size=chunk_size,
pad_id=pad_id,
retrieval_db=retrieval_db,
dtype=__best_fitting_dtype(vocab_size),
stride=stride,
)
else:
return IndexedDatasetBuilder(out_file)
def make_indexed_dataset_compatibility(ds):
"""Make any dataset compatible with IndexedDataset for Megatron samples mapping."""
if (getattr(ds, 'doc_idx', None) is not None) or (getattr(ds, 'sizes', None) is not None):
raise AttributeError("Dataset already has doc_idx or sizes attributes.")
ds.doc_idx = np.arange(len(ds) + 1, dtype=np.int64)
ds.sizes = np.ones(len(ds), dtype=np.int32)
return ds
def deallocate_indexed_dataset_memory(indexed_dataset):
"""Deallocate memory of an IndexedDataset."""
if isinstance(indexed_dataset, MMapIndexedDataset):
# for MMapIndexedDataset we cannot release any memory of sizes
indexed_dataset._index._doc_idx = None
else:
indexed_dataset.sizes = None
indexed_dataset.doc_idx = None
def make_dataset(path, impl, skip_warmup=False, impl_kwargs={}, delay_data_mmap=False):
# first handle text memap
if impl == 'text_mmap':
return TextMemMapDataset(path, **impl_kwargs)
elif impl == 'csv_mmap':
return CSVMemMapDataset(path, **impl_kwargs)
# now handle bin memap
if not IndexedDataset.exists(path):
print(f"Dataset does not exist: {path}")
print("Path should be a basename that both .idx and .bin can be appended to get full filenames.")
return None
if impl == 'infer':
impl = infer_dataset_impl(path)
if impl == 'lazy' and IndexedDataset.exists(path):
return IndexedDataset(path)
elif impl == 'cached' and IndexedDataset.exists(path):
return IndexedCachedDataset(path)
elif impl == 'mmap' and MMapIndexedDataset.exists(path):
return MMapIndexedDataset(path, skip_warmup, delay_data_mmap)
elif impl == 'retmmap':
return MMapRetrievalIndexedDataset(path, skip_warmup)
raise ValueError(f"Unknown dataset implementation: {impl}")
def dataset_exists(path, impl):
if impl == 'mmap':
return MMapIndexedDataset.exists(path)
elif impl == 'retmmap':
return MMapRetrievalIndexedDataset.exists(path)
else:
return IndexedDataset.exists(path)
def read_longs(f, n):
a = np.empty(n, dtype=np.int64)
f.readinto(a)
return a
def write_longs(f, a):
f.write(np.array(a, dtype=np.int64))
dtypes = {1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float64, 7: np.double, 8: np.uint16}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def create_doc_idx(sizes):
doc_idx = [0]
for i, s in enumerate(sizes):
if s == 0:
doc_idx.append(i + 1)
return doc_idx
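# Worked example for the helper above: an entry of size 0 marks the end of a document, so
#   create_doc_idx([3, 4, 0, 5, 2, 0]) == [0, 3, 6]
# i.e. documents start at sentence indices 0 and 3 (the zero-sized entries act as separators).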
class IndexedDataset(torch.utils.data.Dataset):
"""Loader for IndexedDataset"""
_HDR_MAGIC = b'TNTIDX\x00\x00'
def __init__(self, path):
super().__init__()
self.path = path
self.data_file = None
self.read_index(path)
def read_index(self, path):
with open(index_file_path(path), 'rb') as f:
magic = f.read(8)
assert magic == self._HDR_MAGIC, (
'Index file doesn\'t match expected format. ' 'Make sure that --dataset-impl is configured properly.'
)
version = f.read(8)
assert struct.unpack('<Q', version) == (1,)
code, self.element_size = struct.unpack('<QQ', f.read(16))
self.dtype = dtypes[code]
self._len, self.s = struct.unpack('<QQ', f.read(16))
self.doc_count = struct.unpack('<Q', f.read(8))
self.dim_offsets = read_longs(f, self._len + 1)
self.data_offsets = read_longs(f, self._len + 1)
self.sizes = read_longs(f, self.s)
self.doc_idx = read_longs(f, self.doc_count)
def read_data(self, path):
self.data_file = open(data_file_path(path), 'rb', buffering=0)
def check_index(self, i):
if i < 0 or i >= self._len:
raise IndexError('index out of range')
def __del__(self):
if self.data_file:
self.data_file.close()
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if not self.data_file:
self.read_data(self.path)
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
return a
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
sizes = self.sizes[self.dim_offsets[start] : self.dim_offsets[stop]]
size = sum(sizes)
a = np.empty(size, dtype=self.dtype)
self.data_file.seek(self.data_offsets[start] * self.element_size)
self.data_file.readinto(a)
offsets = list(accumulate(sizes))
sents = np.split(a, offsets[:-1])
return sents
def __len__(self):
return self._len
def num_tokens(self, index):
return self.sizes[index]
def size(self, index):
return self.sizes[index]
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
@property
def supports_prefetch(self):
return False # avoid prefetching to save memory
class IndexedCachedDataset(IndexedDataset):
def __init__(self, path):
super().__init__(path)
self.cache = None
self.cache_index = {}
@property
def supports_prefetch(self):
return True
def prefetch(self, indices):
if all(i in self.cache_index for i in indices):
return
if not self.data_file:
self.read_data(self.path)
indices = sorted(set(indices))
total_size = 0
for i in indices:
total_size += self.data_offsets[i + 1] - self.data_offsets[i]
self.cache = np.empty(total_size, dtype=self.dtype)
ptx = 0
self.cache_index.clear()
for i in indices:
self.cache_index[i] = ptx
size = self.data_offsets[i + 1] - self.data_offsets[i]
a = self.cache[ptx : ptx + size]
self.data_file.seek(self.data_offsets[i] * self.element_size)
self.data_file.readinto(a)
ptx += size
if self.data_file:
# close and delete data file after prefetch so we can pickle
self.data_file.close()
self.data_file = None
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
i = idx
self.check_index(i)
tensor_size = self.sizes[self.dim_offsets[i] : self.dim_offsets[i + 1]]
a = np.empty(tensor_size, dtype=self.dtype)
ptx = self.cache_index[i]
np.copyto(a, self.cache[ptx : ptx + a.size])
return a
elif isinstance(idx, slice):
# Hack just to make this work, can optimize later if necessary
sents = []
for i in range(*idx.indices(len(self))):
sents.append(self[i])
return sents
class IndexedDatasetBuilder(object):
element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float64: 8, np.double: 8}
def __init__(self, out_file, dtype=np.int32):
self.out_file = open(out_file, 'wb')
self.dtype = dtype
self.data_offsets = [0]
self.dim_offsets = [0]
self.sizes = []
self.element_size = self.element_sizes[self.dtype]
self.doc_idx = [0]
def add_item(self, tensor):
bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype))
self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size)
for s in tensor.size():
self.sizes.append(s)
self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size()))
def end_document(self):
self.doc_idx.append(len(self.sizes))
def merge_file_(self, another_file):
index = IndexedDataset(another_file)
assert index.dtype == self.dtype
begin = self.data_offsets[-1]
for offset in index.data_offsets[1:]:
self.data_offsets.append(begin + offset)
self.sizes.extend(index.sizes)
begin = self.dim_offsets[-1]
for dim_offset in index.dim_offsets[1:]:
self.dim_offsets.append(begin + dim_offset)
with open(data_file_path(another_file), 'rb') as f:
while True:
data = f.read(1024)
if data:
self.out_file.write(data)
else:
break
def finalize(self, index_file):
self.out_file.close()
index = open(index_file, 'wb')
index.write(b'TNTIDX\x00\x00')
index.write(struct.pack('<Q', 1))
index.write(struct.pack('<QQ', code(self.dtype), self.element_size))
index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes)))
index.write(struct.pack('<Q', len(self.doc_idx)))
write_longs(index, self.dim_offsets)
write_longs(index, self.data_offsets)
write_longs(index, self.sizes)
write_longs(index, self.doc_idx)
index.close()
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class MMapIndexedDataset(torch.utils.data.Dataset):
class Index(object):
_HDR_MAGIC = b'MMIDIDX\x00\x00'
@classmethod
def writer(cls, path, dtype):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<B', code(dtype)))
return self
@staticmethod
def _get_pointers(sizes):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
return pointers
def write(self, sizes, doc_idx):
pointers = self._get_pointers(sizes)
self._file.write(struct.pack('<Q', len(sizes)))
self._file.write(struct.pack('<Q', len(doc_idx)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
doc_idx = np.array(doc_idx, dtype=np.int64)
self._file.write(doc_idx.tobytes(order='C'))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=False):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
(dtype_code,) = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
self._doc_count = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
logging.info(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
logging.info(" reading sizes...")
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
logging.info(" reading pointers...")
self._pointers = np.frombuffer(
self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes
)
logging.info(" reading document index...")
self._doc_idx = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._doc_count,
offset=offset + self._sizes.nbytes + self._pointers.nbytes,
)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
return self._dtype
@property
def sizes(self):
return self._sizes
@property
def doc_idx(self):
return self._doc_idx
@lru_cache(maxsize=8)
def __getitem__(self, i):
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=False, delay_data_mmap=False):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._delay_data_mmap = delay_data_mmap
self._skip_warmup = skip_warmup
self._do_init(path, skip_warmup, delay_data_mmap)
def __getstate__(self):
return self._path
def __setstate__(self, state):
self._do_init(state)
def _do_init(self, path, skip_warmup=True, delay_data_mmap=False):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not delay_data_mmap:
self._create_data_mmap(skip_warmup)
else:
logging.info(" skip creating data numpy buffer of mmap...")
self._bin_buffer_mmap = None
self._bin_buffer = None
def _create_data_mmap(self, skip_warmup):
if not skip_warmup:
logging.info(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
logging.info(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
logging.info(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
if self._bin_buffer_mmap is not None:
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
if isinstance(idx, int):
ptr, size = self._index[idx]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
sizes = self._index._sizes[idx]
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
sents = np.split(np_array, offsets[:-1])
return sents
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)
return np_array
def create_data_mmap(self):
self._create_data_mmap(self._skip_warmup)
@property
def sizes(self):
return self._index.sizes
@property
def doc_idx(self):
return self._index.doc_idx
def get_doc_idx(self):
return self._index._doc_idx
def set_doc_idx(self, doc_idx_):
self._index._doc_idx = doc_idx_
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
class MMapIndexedDatasetBuilder(object):
def __init__(self, out_file, dtype=np.int64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self._sizes = []
self._doc_idx = [0]
def add_item(self, tensor):
np_array = np.array(tensor.numpy(), dtype=self._dtype)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(np_array.size)
def end_document(self):
self._doc_idx.append(len(self._sizes))
def merge_file_(self, another_file):
# Concatenate index
index = MMapIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
self._data_file.close()
with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index:
index.write(self._sizes, self._doc_idx)
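# A minimal, hypothetical sketch of writing and re-loading an mmap-backed dataset with the
# builder above. The /tmp paths are made up, and the small integer lists stand in for
# tokenized documents.
def _example_build_and_load_mmap_dataset():
    builder = MMapIndexedDatasetBuilder('/tmp/example_text_document.bin', dtype=np.int32)
    for doc in ([1, 2, 3, 4], [5, 6, 7]):
        builder.add_item(torch.IntTensor(doc))
        builder.end_document()
    builder.finalize('/tmp/example_text_document.idx')
    ds = MMapIndexedDataset('/tmp/example_text_document')
    return ds[0], ds.doc_idx  # -> array([1, 2, 3, 4]), array([0, 1, 2])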
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/indexed_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BART Style dataset."""
import numpy as np
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import (
create_masked_lm_predictions,
get_samples_mapping,
)
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
class BARTDataset(T5Dataset):
# account for added tokens
MAX_SEQ_LENGTH_DELTA = 2
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
max_seq_length,
seed,
masked_lm_prob=0.15,
short_seq_prob=0.1,
max_ngram_size=10,
mean_ngram_size=None,
geometric_dist=True,
permutation=False,
whole_word_masking=True,
favor_long_ngrams=False,
delete_mask_prob=0,
respect_document_boundaries=True,
documents=None,
):
super().__init__(
cfg=cfg,
trainer=trainer,
tokenizer=tokenizer,
name=name,
indexed_dataset=indexed_dataset,
data_prefix=data_prefix,
num_epochs=num_epochs,
max_num_samples=max_num_samples,
max_seq_length=max_seq_length,
max_seq_length_dec=None,
seed=seed,
masked_lm_prob=masked_lm_prob,
short_seq_prob=short_seq_prob,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
geometric_dist=geometric_dist,
permutation=permutation,
whole_word_masking=whole_word_masking,
favor_long_ngrams=favor_long_ngrams,
respect_document_boundaries=respect_document_boundaries,
documents=documents,
)
# Params to store.
self.delete_mask_prob = delete_mask_prob
def _build(self):
"""
Class-specific build method; BART needs no sentinel tokens, so there is nothing to set up here.
"""
pass
def __getitem__(self, idx):
np_rng = np.random.RandomState(seed=(self.seed + idx))
sample, seq_length = self._get_sample(idx)
# flatten sentences into one list
tokens = [token for sentence in sample for token in sentence]
# Truncate to `target_sequence_length`.
max_num_tokens = seq_length
tokens = tokens[:max_num_tokens]
# Masking.
max_predictions_per_seq = self.masked_lm_prob * max_num_tokens
lm_pred = create_masked_lm_predictions(
tokens=tokens,
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
masked_lm_prob=self.masked_lm_prob,
cls_id=self.cls_id,
sep_id=self.sep_id,
mask_id=self.mask_id,
max_predictions_per_seq=max_predictions_per_seq,
np_rng=np_rng,
max_ngram_size=self.max_ngram_size,
whole_word_masking=self.whole_word_masking,
favor_long_ngrams=self.favor_long_ngrams,
mean_ngram_size=self.mean_ngram_size,
permutation=self.permutation,
geometric_dist=self.geometric_dist,
masking_style="t5",
tokenizer_type=self.tokenizer_type,
)
if self.masked_lm_prob == 0:
(output_tokens, masked_positions, masked_labels, _) = lm_pred
masked_spans = None
else:
(output_tokens, masked_positions, masked_labels, _, masked_spans) = lm_pred
# Padding.
tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, loss_mask = self.pad_and_convert_to_numpy(
tokens=tokens,
output_tokens=output_tokens,
masked_positions=masked_positions,
masked_labels=masked_labels,
masked_spans=masked_spans,
np_rng=np_rng,
)
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return train_sample
def pad_and_convert_to_numpy(
self, tokens, output_tokens, masked_positions, masked_labels, masked_spans=None, np_rng=None,
):
"""Pad sequences and convert them to numpy."""
bart_decoder_in = [self.bos_id] + tokens
bart_decoder_out = tokens + [self.eos_id]
if masked_spans is not None:
# construct bart input by collapsing multiple <mask> into one, and delete randomly
bart_input = []
(start_index, end_index) = (0, None)
for span in masked_spans:
end_index = span.index[0]
bart_input.extend(output_tokens[start_index:end_index])
# delete mask with probability delete_mask_prob
if np_rng.rand() >= self.delete_mask_prob:
bart_input.append(self.mask_id)
# the next start index is the token after the last span token
start_index = span.index[-1] + 1
# Add the remaining tokens to the BART input
bart_input.extend(output_tokens[start_index:])
else:
bart_input = output_tokens
# Some checks.
# Encoder-side padding mask.
num_tokens = len(bart_input)
padding_length = self.max_seq_length - num_tokens
assert padding_length >= 0
assert len(masked_positions) == len(masked_labels)
# Tokens..
filler = [self.pad_id] * padding_length
tokens_enc = np.array(bart_input + filler, dtype=np.int64)
# Decoder-side padding mask.
num_tokens_dec = len(bart_decoder_in)
padding_length_dec = self.max_seq_length - num_tokens_dec
assert padding_length_dec >= 0
filler_dec = [self.pad_id] * padding_length_dec
tokens_dec_in = np.array(bart_decoder_in + filler_dec, dtype=np.int64)
# Create attention masks
enc_mask = (tokens_enc != self.pad_id).astype(np.int64)
dec_mask = (tokens_dec_in != self.pad_id).astype(np.int64)
# Labels mask.
labels = bart_decoder_out + ([-1] * padding_length_dec)
labels = np.array(labels, dtype=np.int64)
# Loss mask
loss_mask = ([1] * num_tokens_dec) + ([0] * padding_length_dec)
loss_mask = np.array(loss_mask, dtype=np.int64)
return tokens_enc, tokens_dec_in, labels, enc_mask, dec_mask, loss_mask
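# Illustrative trace of the BART-style noising above (token ids are made up; <m> stands for mask_id):
#   tokens        = [11, 12, 13, 14, 15]
#   output_tokens = [11, <m>, <m>, 14, 15] with a single masked span over positions 1-2
# The span is collapsed to one <m> in the encoder input (or dropped entirely with probability
# `delete_mask_prob`), while the decoder reconstructs the original sequence:
#   bart_input       = [11, <m>, 14, 15]
#   bart_decoder_in  = [bos, 11, 12, 13, 14, 15]
#   bart_decoder_out = [11, 12, 13, 14, 15, eos]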
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/bart_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import exists, join
import pytest
class TestDataDir:
@pytest.mark.unit
def test_test_data_dir(self, test_data_dir):
"""Just a dummy tests showing how to use the test_data_dir fixture."""
# test_data_dir contains the absolute path to nemo -> tests/.data
assert exists(test_data_dir)
assert exists(join(test_data_dir, "test_data.tar.gz"))
| NeMo-main | tests/test_data_dir.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os.path
import shutil
import tarfile
import tempfile
import urllib.request
from os import mkdir
from os.path import dirname, exists, getsize, join
from pathlib import Path
from shutil import rmtree
from typing import Tuple
import pytest
from tests.fixtures.tts import *
# Those variables probably should go to main NeMo configuration file (config.yaml).
__TEST_DATA_FILENAME = "test_data.tar.gz"
__TEST_DATA_URL = "https://github.com/NVIDIA/NeMo/releases/download/v1.0.0rc1/"
__TEST_DATA_SUBDIR = ".data"
def pytest_addoption(parser):
"""
Additional command-line arguments passed to pytest.
For now:
--cpu: use CPU during testing (DEFAULT: GPU)
--use_local_test_data: use local test data/skip downloading from URL/GitHub (DEFAULT: False)
"""
parser.addoption(
'--cpu', action='store_true', help="pass that argument to use CPU during testing (DEFAULT: False = GPU)"
)
parser.addoption(
'--use_local_test_data',
action='store_true',
help="pass that argument to use local test data/skip downloading from URL/GitHub (DEFAULT: False)",
)
parser.addoption(
'--with_downloads',
action='store_true',
help="pass this argument to active tests which download models from the cloud.",
)
parser.addoption(
'--relax_numba_compat',
action='store_false',
help="numba compatibility checks will be relaxed to just availability of cuda, "
"without cuda compatibility matrix check",
)
parser.addoption(
"--nightly",
action="store_true",
help="pass this argument to activate tests which have been marked as nightly for nightly quality assurance.",
)
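# Example invocations using the options registered above (any combination is valid):
#   pytest tests/ --cpu                    # run on CPU instead of GPU
#   pytest tests/ --with_downloads         # enable tests that download models from the cloud
#   pytest tests/ --nightly                # enable nightly QA tests
#   pytest tests/ --use_local_test_data    # reuse a local tests/.data/test_data.tar.gz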
@pytest.fixture
def device(request):
""" Simple fixture returning string denoting the device [CPU | GPU] """
if request.config.getoption("--cpu"):
return "CPU"
else:
return "GPU"
@pytest.fixture(autouse=True)
def run_only_on_device_fixture(request, device):
if request.node.get_closest_marker('run_only_on'):
if request.node.get_closest_marker('run_only_on').args[0] != device:
pytest.skip('skipped on this device: {}'.format(device))
@pytest.fixture(autouse=True)
def downloads_weights(request, device):
if request.node.get_closest_marker('with_downloads'):
if not request.config.getoption("--with_downloads"):
pytest.skip(
'To run this test, pass --with_downloads option. It will download (and cache) models from cloud.'
)
@pytest.fixture(autouse=True)
def run_nightly_test_for_qa(request, device):
if request.node.get_closest_marker('nightly'):
if not request.config.getoption("--nightly"):
pytest.skip(
'To run this test, pass --nightly option. It will run any tests marked with "nightly". Currently, these tests are mostly used for QA.'
)
@pytest.fixture(autouse=True)
def cleanup_local_folder():
# Asserts in fixtures are not recommended, but I'd rather stop users from deleting expensive training runs
assert not Path("./lightning_logs").exists()
assert not Path("./NeMo_experiments").exists()
assert not Path("./nemo_experiments").exists()
yield
if Path("./lightning_logs").exists():
rmtree('./lightning_logs', ignore_errors=True)
if Path("./NeMo_experiments").exists():
rmtree('./NeMo_experiments', ignore_errors=True)
if Path("./nemo_experiments").exists():
rmtree('./nemo_experiments', ignore_errors=True)
@pytest.fixture(scope="session")
def test_data_dir():
"""
Fixture returns test_data_dir.
Use the highest fixture scope `session` to allow other fixtures with any other scope to use it.
"""
# Test dir.
test_data_dir_ = join(dirname(__file__), __TEST_DATA_SUBDIR)
return test_data_dir_
def extract_data_from_tar(test_dir, test_data_archive, url=None, local_data=False):
# Remove .data folder.
if exists(test_dir):
if not local_data:
rmtree(test_dir)
else:
with tempfile.TemporaryDirectory() as temp_dir:
print("Copying local tarfile to temporary storage..")
shutil.copy2(test_data_archive, temp_dir)
print("Deleting test dir to cleanup old data")
rmtree(test_dir)
mkdir(test_dir)
print("Restoring local tarfile to test dir")
shutil.copy2(os.path.join(temp_dir, os.path.basename(test_data_archive)), test_data_archive)
# Create one .data folder.
if not exists(test_dir):
mkdir(test_dir)
# Download (if required)
if url is not None and not local_data:
urllib.request.urlretrieve(url, test_data_archive)
# Extract tar
print("Extracting the `{}` test archive, please wait...".format(test_data_archive))
tar = tarfile.open(test_data_archive)
tar.extractall(path=test_dir)
tar.close()
@pytest.fixture(scope="session")
def k2_is_appropriate() -> Tuple[bool, str]:
try:
from nemo.core.utils.k2_guard import k2 # noqa: E402
return True, "k2 is appropriate."
except Exception as e:
logging.exception(e, exc_info=True)
return False, "k2 is not available or does not meet the requirements."
@pytest.fixture(scope="session")
def k2_cuda_is_enabled(k2_is_appropriate) -> Tuple[bool, str]:
if not k2_is_appropriate[0]:
return k2_is_appropriate
import torch # noqa: E402
from nemo.core.utils.k2_guard import k2 # noqa: E402
if torch.cuda.is_available() and k2.with_cuda:
return True, "k2 supports CUDA."
elif torch.cuda.is_available():
return False, "k2 does not support CUDA. Consider using a k2 build with CUDA support."
else:
return False, "k2 needs CUDA to be available in torch."
def pytest_configure(config):
"""
Initial configuration of conftest.
The function checks if test_data.tar.gz is present in tests/.data.
If so, it compares its size with the archive hosted on GitHub.
If the file is absent or the sizes differ, the function downloads the archive from GitHub and unpacks it.
"""
config.addinivalue_line(
"markers", "run_only_on(device): runs the test only on a given device [CPU | GPU]",
)
config.addinivalue_line(
"markers", "with_downloads: runs the test using data present in tests/.data",
)
config.addinivalue_line(
"markers", "nightly: runs the nightly test for QA.",
)
# Test dir and archive filepath.
test_dir = join(dirname(__file__), __TEST_DATA_SUBDIR)
test_data_archive = join(dirname(__file__), __TEST_DATA_SUBDIR, __TEST_DATA_FILENAME)
# Get size of local test_data archive.
try:
test_data_local_size = getsize(test_data_archive)
except:
# File does not exist.
test_data_local_size = -1
if config.option.use_local_test_data:
if test_data_local_size == -1:
pytest.exit("Test data `{}` is not present in the system".format(test_data_archive))
else:
print(
"Using the local `{}` test archive ({}B) found in the `{}` folder.".format(
__TEST_DATA_FILENAME, test_data_local_size, test_dir
)
)
# Get size of remote test_data archive.
url = None
if not config.option.use_local_test_data:
try:
url = __TEST_DATA_URL + __TEST_DATA_FILENAME
u = urllib.request.urlopen(url)
except:
# Couldn't access remote archive.
if test_data_local_size == -1:
pytest.exit("Test data not present in the system and cannot access the '{}' URL".format(url))
else:
print(
"Cannot access the '{}' URL, using the test data ({}B) found in the `{}` folder.".format(
url, test_data_local_size, test_dir
)
)
return
# Get metadata.
meta = u.info()
test_data_remote_size = int(meta["Content-Length"])
# Compare sizes.
if test_data_local_size != test_data_remote_size:
print(
"Downloading the `{}` test archive from `{}`, please wait...".format(
__TEST_DATA_FILENAME, __TEST_DATA_URL
)
)
extract_data_from_tar(test_dir, test_data_archive, url=url, local_data=config.option.use_local_test_data)
else:
print(
"A valid `{}` test archive ({}B) found in the `{}` folder.".format(
__TEST_DATA_FILENAME, test_data_local_size, test_dir
)
)
else:
# untar local test data
extract_data_from_tar(test_dir, test_data_archive, local_data=config.option.use_local_test_data)
if config.option.relax_numba_compat is not None:
from nemo.core.utils import numba_utils
print("Setting numba compat :", config.option.relax_numba_compat)
numba_utils.set_numba_compat_strictness(strict=config.option.relax_numba_compat)
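# Example invocations (a sketch; assumes the --use_local_test_data and --relax_numba_compat
# options consumed above are registered via pytest_addoption elsewhere in this conftest):
#
#   pytest tests/                          # compare/download test_data.tar.gz as needed
#   pytest tests/ --use_local_test_data    # reuse the archive already present in tests/.data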
| NeMo-main | tests/conftest.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | tests/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A script to check that copyright headers exists"""
import argparse
import re
import sys
from datetime import datetime
from pathlib import Path
EXCLUSIONS = ["scripts/get_commonvoice_data.py"]
def get_top_comments(_data):
# Get all lines where comments should exist
lines_to_extract = []
for i, line in enumerate(_data):
# If empty line, skip
if line in ["", "\n", "", "\r", "\r\n"]:
continue
# If it is a comment line, we should get it
if line.startswith("#"):
lines_to_extract.append(i)
# Assume all copyright headers occur before any import statements not enclosed in a comment block
elif "import" in line:
break
comments = []
for line in lines_to_extract:
comments.append(_data[line])
return comments
def main():
parser = argparse.ArgumentParser(description="Usage for copyright header insertion script")
parser.add_argument(
'--dir',
help='Path to source files to add copyright header to. Will recurse through subdirectories',
required=True,
type=str,
)
args = parser.parse_args()
current_year = int(datetime.today().year)
starting_year = 2020
python_header_path = "tests/py_cprheader.txt"
with open(python_header_path, 'r', encoding='utf-8') as original:
pyheader = original.read().split("\n")
pyheader_lines = len(pyheader)
problematic_files = []
for filename in Path(args.dir).rglob('*.py'):
if str(filename) in EXCLUSIONS:
continue
with open(str(filename), 'r', encoding='utf-8') as original:
data = original.readlines()
data = get_top_comments(data)
if len(data) < pyheader_lines:
print(f"{filename} has less header lines than the copyright template")
problematic_files.append(filename)
continue
found = False
for i, line in enumerate(data):
if re.search(re.compile("Copyright.*NVIDIA.*", re.IGNORECASE), line):
# if re.search(re.compile("Copyright.*", re.IGNORECASE), line):
found = True
# Check 1st line manually
year_good = False
for year in range(starting_year, current_year + 1):
year_line = pyheader[0].format(CURRENT_YEAR=year)
if year_line in data[i]:
year_good = True
break
year_line_aff = year_line.split(".")
year_line_aff = year_line_aff[0] + " & AFFILIATES." + year_line_aff[1]
if year_line_aff in data[i]:
year_good = True
break
if not year_good:
problematic_files.append(filename)
print(f"{filename} had an error with the year")
break
while "opyright" in data[i]:
i += 1
for j in range(1, pyheader_lines):
if pyheader[j] not in data[i + j - 1]:
problematic_files.append(filename)
print(f"{filename} missed the line: {pyheader[j]}")
break
if found:
break
if not found:
print(f"{filename} did not match the regex: `Copyright.*NVIDIA.*`")
problematic_files.append(filename)
if len(problematic_files) > 0:
print("check_copyright_headers.py found the following files that might not have a copyright header:")
for _file in problematic_files:
print(_file)
sys.exit(1)
if __name__ == '__main__':
main()
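# Example invocation (a sketch, run from the repository root; --dir is the only required
# argument and any source directory can be passed):
#
#   python tests/check_copyright_header.py --dir nemo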
| NeMo-main | tests/check_copyright_header.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
WARNING: Running this test will download ALL pre-trained NeMo models.
It consumes significant bandwidth and disk space.
"""
import nemo.collections.asr as nemo_asr
import nemo.collections.nlp as nemo_nlp
import nemo.collections.tts as nemo_tts
def testclass_downloads(cls, refresh_cache, model_names=None):
for model_info in cls.list_available_models():
model = cls.from_pretrained(model_name=model_info.pretrained_model_name, refresh_cache=refresh_cache)
assert isinstance(model, cls)
if model_names is not None:
assert set(model_names) == set([m.pretrained_model_name for m in cls.list_available_models()])
for refresh_cache in [True, False]:
# Test ASR collection
testclass_downloads(
nemo_asr.models.EncDecCTCModel,
refresh_cache,
[
'QuartzNet15x5Base-En',
'QuartzNet15x5Base-Zh',
'QuartzNet5x5LS-En',
'QuartzNet15x5NR-En',
'Jasper10x5Dr-En',
],
)
testclass_downloads(nemo_asr.models.EncDecCTCModelBPE, refresh_cache, ['ContextNet-192-WPE-1024-8x-Stride'])
testclass_downloads(
nemo_asr.models.EncDecClassificationModel,
refresh_cache,
[
'MatchboxNet-3x1x64-v1',
'MatchboxNet-3x2x64-v1',
'MatchboxNet-3x1x64-v2',
'MatchboxNet-3x1x64-v2',
'MatchboxNet-3x1x64-v2-subset-task',
'MatchboxNet-3x2x64-v2-subset-task',
'MatchboxNet-VAD-3x2',
],
)
testclass_downloads(
nemo_asr.models.EncDecSpeakerLabelModel,
refresh_cache,
[
'speakerrecognition_speakernet',
'speakerverification_speakernet',
'speakerdiarization_speakernet',
'ecapa_tdnn',
],
)
# Test NLP collection
testclass_downloads(nemo_nlp.models.TokenClassificationModel, refresh_cache, ['NERModel'])
testclass_downloads(
nemo_nlp.models.PunctuationCapitalizationModel,
refresh_cache,
['Punctuation_Capitalization_with_BERT', 'Punctuation_Capitalization_with_DistilBERT'],
)
testclass_downloads(
nemo_nlp.models.QAModel,
refresh_cache,
[
'BERTBaseUncasedSQuADv1.1',
'BERTBaseUncasedSQuADv2.0',
'BERTLargeUncasedSQuADv1.1',
'BERTLargeUncasedSQuADv2.0',
],
)
# testclass_downloads(nemo_nlp.models.IntentSlotClassificationModel, refresh_cache, ['Joint_Intent_Slot_Assistant'])
# Test TTS collection
testclass_downloads(nemo_tts.models.Tacotron2Model, refresh_cache, ['Tacotron2-22050Hz'])
testclass_downloads(nemo_tts.models.WaveGlowModel, refresh_cache, ['WaveGlow-22050Hz'])
testclass_downloads(nemo_tts.models.SqueezeWaveModel, refresh_cache, ['SqueezeWave-22050Hz'])
testclass_downloads(nemo_tts.models.GlowTTSModel, refresh_cache, ['GlowTTS-22050Hz'])
print("############ THAT'S ALL FOLKS! ############")
| NeMo-main | tests/manualtest_model_downloads.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any
import pytest
import pytorch_lightning as ptl
from pytorch_lightning.callbacks.early_stopping import EarlyStopping
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.utils import config_utils
from nemo.utils.exp_manager import EarlyStoppingParams
@pytest.fixture()
def cls():
class DummyClass:
def __init__(self, a, b=5, c: int = 0, d: 'ABC' = None):
pass
return DummyClass
class TestConfigUtils:
@pytest.mark.unit
def test_all_args_exist(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
d: Any = None
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_all_args_dont_exist(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass)
signatures_match, cls_subset, dataclass_subset = result
assert not signatures_match
assert len(cls_subset) > 0
assert len(dataclass_subset) == 0
@pytest.mark.unit
def test_extra_args_exist(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
d: Any = None
e: float = 0.0
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass)
signatures_match, cls_subset, dataclass_subset = result
assert not signatures_match
assert len(cls_subset) == 0
assert len(dataclass_subset) > 0
@pytest.mark.unit
def test_extra_args_exist_but_is_ignored(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
d: Any = None
e: float = 0.0 # Assume ignored
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass, ignore_args=['e'])
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_args_exist_but_is_remapped(self, cls):
@dataclass
class DummyDataClass:
a: int = -1
b: int = 5
c: int = 0
e: Any = None # Assume remapped
result = config_utils.assert_dataclass_signature_match(cls, DummyDataClass, remap_args={'e': 'd'})
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_ptl_config(self):
PTL_DEPRECATED = []
result = config_utils.assert_dataclass_signature_match(ptl.Trainer, TrainerConfig, PTL_DEPRECATED)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_early_stopping_config(self,):
result = config_utils.assert_dataclass_signature_match(EarlyStopping, EarlyStoppingParams)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
| NeMo-main | tests/core/test_config_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | tests/core/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import numpy as np
import pytest
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.models import EncDecCTCModel
try:
from eff.cookbooks import NeMoCookbook
_EFF_PRESENT_ = True
except ImportError:
_EFF_PRESENT_ = False
# A decorator marking the EFF requirement.
requires_eff = pytest.mark.skipif(not _EFF_PRESENT_, reason="Export File Format library required to run test")
@pytest.fixture()
def asr_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 1024,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
'params': {
'feat_in': 1024,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
},
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
model_instance = EncDecCTCModel(cfg=modelConfig)
return model_instance
class TestFileIO:
@pytest.mark.unit
def test_to_from_config_file(self, asr_model):
"""" Test makes sure that the second instance created with the same configuration (BUT NOT checkpoint)
has different weights. """
with tempfile.NamedTemporaryFile() as fp:
yaml_filename = fp.name
asr_model.to_config_file(path2yaml_file=yaml_filename)
next_instance = EncDecCTCModel.from_config_file(path2yaml_file=yaml_filename)
assert isinstance(next_instance, EncDecCTCModel)
assert len(next_instance.decoder.vocabulary) == 28
assert asr_model.num_weights == next_instance.num_weights
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = next_instance.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert not np.array_equal(w1, w2)
@pytest.mark.unit
def test_save_restore_from_nemo_file(self, asr_model):
"""" Test makes sure that the second instance created from the same configuration AND checkpoint
has the same weights. """
with tempfile.NamedTemporaryFile() as fp:
filename = fp.name
# Save model (with random artifact).
with tempfile.NamedTemporaryFile() as artifact:
asr_model.register_artifact(config_path="abc", src=artifact.name)
asr_model.save_to(save_path=filename)
# Restore the model.
asr_model2 = EncDecCTCModel.restore_from(restore_path=filename)
assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)
assert asr_model.num_weights == asr_model2.num_weights
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert np.array_equal(w1, w2)
@requires_eff
@pytest.mark.unit
def test_eff_save_restore_from_nemo_file_encrypted(self, asr_model):
"""" Test makes sure that after encrypted save-restore the model has the same weights. """
with tempfile.NamedTemporaryFile() as fp:
filename = fp.name
# Set key - use checkpoint encryption.
NeMoCookbook.set_encryption_key("test_key")
# Save model (with random artifact).
with tempfile.NamedTemporaryFile() as artifact:
asr_model.register_artifact(config_path="abc", src=artifact.name)
asr_model.save_to(save_path=filename)
# Try to restore the encrypted archive (weights) without the encryption key.
NeMoCookbook.set_encryption_key(None)
with pytest.raises(PermissionError):
# Restore the model.
asr_model2 = EncDecCTCModel.restore_from(restore_path=filename)
# Restore the model.
NeMoCookbook.set_encryption_key("test_key")
asr_model3 = EncDecCTCModel.restore_from(restore_path=filename)
# Reset encryption so it won't mess up with other save/restore.
NeMoCookbook.set_encryption_key(None)
assert asr_model.num_weights == asr_model3.num_weights
@pytest.mark.unit
def test_save_restore_from_nemo_file_with_override(self, asr_model, tmpdir):
"""" Test makes sure that the second instance created from the same configuration AND checkpoint
has the same weights.
Args:
tmpdir: fixture providing a temporary directory unique to the test invocation.
"""
# Name of the archive in tmp folder.
filename = os.path.join(tmpdir, "eff.nemo")
# Get path where the command is executed - the artifacts will be "retrieved" there.
# (original .nemo behavior)
cwd = os.getcwd()
with tempfile.NamedTemporaryFile(mode='a+') as conf_fp:
# Create a "random artifact".
with tempfile.NamedTemporaryFile(mode="w", delete=False) as artifact:
artifact.write("magic content 42")
# Remember the filename of the artifact.
_, artifact_filename = os.path.split(artifact.name)
# Add artifact to model.
asr_model.register_artifact(config_path="abc", src=artifact.name)
# Save model (with "random artifact").
asr_model.save_to(save_path=filename)
# Modify config slightly
cfg = asr_model.cfg
cfg.encoder.activation = 'swish'
yaml_cfg = OmegaConf.to_yaml(cfg)
conf_fp.write(yaml_cfg)
conf_fp.seek(0)
# Restore the model.
asr_model2 = EncDecCTCModel.restore_from(restore_path=filename, override_config_path=conf_fp.name)
assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)
assert asr_model.num_weights == asr_model2.num_weights
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert np.array_equal(w1, w2)
assert asr_model2.cfg.encoder.activation == 'swish'
@pytest.mark.unit
def test_save_model_level_pt_ckpt(self, asr_model):
with tempfile.TemporaryDirectory() as ckpt_dir:
nemo_file = os.path.join(ckpt_dir, 'asr.nemo')
asr_model.save_to(nemo_file)
# Save model level PT checkpoint
asr_model.extract_state_dict_from(nemo_file, ckpt_dir)
ckpt_path = os.path.join(ckpt_dir, 'model_weights.ckpt')
assert os.path.exists(ckpt_path)
# Restore the model.
asr_model2 = EncDecCTCModel.restore_from(restore_path=nemo_file)
assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)
assert asr_model.num_weights == asr_model2.num_weights
# Change weights values
asr_model2.encoder.encoder[0].mconv[0].conv.weight.data += 1.0
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert not np.array_equal(w1, w2)
# Restore from checkpoint
asr_model2.load_state_dict(torch.load(ckpt_path))
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert np.array_equal(w1, w2)
@pytest.mark.unit
def test_save_module_level_pt_ckpt(self, asr_model):
with tempfile.TemporaryDirectory() as ckpt_dir:
nemo_file = os.path.join(ckpt_dir, 'asr.nemo')
asr_model.save_to(nemo_file)
# Save model level PT checkpoint
asr_model.extract_state_dict_from(nemo_file, ckpt_dir, split_by_module=True)
encoder_path = os.path.join(ckpt_dir, 'encoder.ckpt')
decoder_path = os.path.join(ckpt_dir, 'decoder.ckpt')
preprocessor_path = os.path.join(ckpt_dir, 'preprocessor.ckpt')
assert os.path.exists(encoder_path)
assert os.path.exists(decoder_path)
assert os.path.exists(preprocessor_path)
# Restore the model.
asr_model2 = EncDecCTCModel.restore_from(restore_path=nemo_file)
assert len(asr_model.decoder.vocabulary) == len(asr_model2.decoder.vocabulary)
assert asr_model.num_weights == asr_model2.num_weights
# Change weights values
asr_model2.encoder.encoder[0].mconv[0].conv.weight.data += 1.0
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert not np.array_equal(w1, w2)
# Restore from checkpoint
asr_model2.encoder.load_state_dict(torch.load(encoder_path))
w1 = asr_model.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
w2 = asr_model2.encoder.encoder[0].mconv[0].conv.weight.data.detach().cpu().numpy()
assert np.array_equal(w1, w2)
| NeMo-main | tests/core/test_fileio.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filecmp
import os
import shutil
import tempfile
from typing import Dict, Optional, Set, Union
import pytest
import torch
from huggingface_hub.hf_api import ModelFilter
from omegaconf import DictConfig, OmegaConf, open_dict
from nemo.collections.asr.models import EncDecCTCModel, EncDecCTCModelBPE
from nemo.collections.nlp.models import PunctuationCapitalizationModel
from nemo.core.classes import ModelPT
from nemo.core.connectors import save_restore_connector
from nemo.utils.app_state import AppState
from nemo.utils.exceptions import NeMoBaseException
def classpath(cls):
return f'{cls.__module__}.{cls.__name__}'
def get_dir_size(path='.'):
total = 0
with os.scandir(path) as it:
for entry in it:
if entry.is_file():
total += entry.stat().st_size
elif entry.is_dir():
total += get_dir_size(entry.path)
return total
def get_size(path='.'):
if os.path.isfile(path):
return os.path.getsize(path)
elif os.path.isdir(path):
return get_dir_size(path)
def getattr2(object, attr):
if not '.' in attr:
return getattr(object, attr)
else:
arr = attr.split('.')
return getattr2(getattr(object, arr[0]), '.'.join(arr[1:]))
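# Brief illustration of the dotted-attribute helper above (object and attribute names are
# hypothetical):
#
#   getattr2(model, "decoder._feat_in")
#   # is equivalent to
#   getattr(getattr(model, "decoder"), "_feat_in")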
class MockModel(ModelPT):
def __init__(self, cfg, trainer=None):
super(MockModel, self).__init__(cfg=cfg, trainer=trainer)
self.w = torch.nn.Linear(10, 1)
# mock temp file
if 'temp_file' in self.cfg and self.cfg.temp_file is not None:
self.setup_data_from_file(self.cfg.temp_file)
else:
self.temp_file = None
self.temp_data = None
def setup_data_from_file(self, temp_file):
"""
Load data from temp_file to `self.temp_data`
Allows testing a change of resource after instantiation
"""
with open_dict(self.cfg):
self.cfg.temp_file = temp_file
self.temp_file = self.register_artifact('temp_file', self.cfg.temp_file)
with open(self.temp_file, 'r', encoding='utf-8') as f:
self.temp_data = f.readlines()
def change_stub_number(self, new_number: int):
"""
Change the stub number in the config; useful for testing nested models,
since a child can mutate its config independently
"""
self.cfg.stub_number = new_number
def forward(self, x):
y = self.w(x)
return y, self.cfg.temp_file
def setup_training_data(self, train_data_config: Union[DictConfig, Dict]):
self._train_dl = None
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
self._validation_dl = None
def setup_test_data(self, test_data_config: Union[DictConfig, Dict]):
self._test_dl = None
@classmethod
def list_available_models(cls):
return []
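# Minimal sketch of constructing a MockModel from the mock config factory defined further
# below (_mock_model_config); this mirrors how the TestSaveRestore tests instantiate it:
#
#   cfg = _mock_model_config()
#   model = MockModel(cfg=cfg.model, trainer=None).to('cpu')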
class MockModelWithChildren(MockModel):
"""
Mock Model, can contain 2 children (other NeMo models)
"""
def __init__(self, cfg, trainer=None):
super().__init__(cfg=cfg, trainer=trainer)
# variant 1 for creating nested NeMo model:
# load model directly from config
# variant 2 for creating nested NeMo model:
# - initialize child model from .nemo checkpoint, subconfig will be automatically saved
# - after saving model will be restored directly from subconfig (attribute `config_field` of self.cfg)
# child 1
self.child1_model: Optional[MockModel] # annotate type for IDE autocompletion and type checking
if cfg.get("child1_model") is not None:
self.register_nemo_submodule(
"child1_model", config_field="child1_model", model=MockModel(self.cfg.child1_model),
)
elif cfg.get("child1_model_path") is not None:
self.register_nemo_submodule(
"child1_model", config_field="child1_model", model=MockModel.restore_from(self.cfg.child1_model_path),
)
else:
self.child1_model = None
# child 2
# can have sub-children
self.child2_model: Optional[MockModelWithChildren] # annotate type for IDE autocompletion and type checking
if cfg.get("child2_model") is not None:
self.register_nemo_submodule(
"child2_model", config_field="child2_model", model=MockModelWithChildren(self.cfg.child2_model),
)
elif cfg.get("child2_model_path") is not None:
self.register_nemo_submodule(
"child2_model",
config_field="child2_model",
model=MockModelWithChildren.restore_from(self.cfg.child2_model_path),
)
else:
self.child2_model = None
class MockModelWithChildEncDecCTCBPE(MockModel):
"""
Mock Model, will contain an EncDecCTCModelBPE model as a child
Useful for testing nested models with children initialized from pretrained NeMo models
"""
def __init__(self, cfg, trainer=None):
super().__init__(cfg=cfg, trainer=trainer)
# variant 3 for creating nested NeMo model:
# - initialize child model from pretrained NeMo model, subconfig will be automatically saved
# - after saving model will be restored directly from subconfig (attribute `config_field` of self.cfg)
self.ctc_model: EncDecCTCModelBPE # annotate type for IDE autocompletion and type checking
if cfg.get("ctc_model", None) is not None:
self.register_nemo_submodule(
"ctc_model", config_field="ctc_model", model=EncDecCTCModelBPE(self.cfg.ctc_model),
)
else:
# model is mandatory
assert cfg.get("ctc_model_pretrained", None) is not None
self.register_nemo_submodule(
"ctc_model",
config_field="ctc_model",
model=EncDecCTCModelBPE.from_pretrained(self.cfg.ctc_model_pretrained),
)
class MockModelWithChildCustomConfigPath(MockModel):
"""
Mock Model, can contain 1 child
Path in config is not equal to name of the attribute
Config is stored in `child1_model_config`
Child model is stored in `child1_model` attribute
NB: This is not recommended unless necessary, but here we test that it works.
"""
def __init__(self, cfg, trainer=None):
super().__init__(cfg=cfg, trainer=trainer)
self.child1_model: Optional[MockModel] # annotate type for IDE autocompletion and type checking
if cfg.get("child1_model_config") is not None:
self.register_nemo_submodule(
"child1_model", config_field="child1_model_config", model=MockModel(self.cfg.child1_model_config),
)
else:
self.child1_model = None
class MockModelIncorrectWithNemoArtifact(MockModel):
"""
Incorrect model that tries to use .nemo model checkpoint as an artifact
Expected to fail, since it is not supported
"""
def __init__(self, cfg, trainer=None):
super().__init__(cfg=cfg, trainer=trainer)
assert cfg.get("child_model_path") is not None
# this will fail, since .nemo model checkpoint is not supported as an artifact
child_model_path = self.register_artifact("child_model_path", cfg.child_model_path)
self.child_model = ModelPT.restore_from(child_model_path)
def _mock_model_config():
conf = {'temp_file': None, 'target': classpath(MockModel), 'stub_number': 1}
conf = OmegaConf.create({'model': conf})
OmegaConf.set_struct(conf, True)
return conf
def _mock_model_with_children_config(
child1_model_path: Optional[str] = None,
child2_model_path: Optional[str] = None,
child2_model_cfg: Optional[DictConfig] = None,
) -> DictConfig:
"""
Child 1 is always constructed from a .nemo model checkpoint (optional)
Child 2 can be constructed directly from a subconfig (optional) or from a .nemo model checkpoint (optional)
"""
conf = {
'temp_file': None,
'target': classpath(MockModelWithChildren),
'child1_model': None,
'child1_model_path': child1_model_path,
'child2_model': child2_model_cfg,
'child2_model_path': child2_model_path,
'stub_number': 1,
}
conf = OmegaConf.create({'model': conf})
OmegaConf.set_struct(conf, True)
return conf
def _mock_model_with_child_encdecctcbpe_config(pretrained_model_name: str) -> DictConfig:
conf = {'temp_file': None, 'ctc_model_pretrained': pretrained_model_name, 'stub_number': 1}
conf = OmegaConf.create({'model': conf})
OmegaConf.set_struct(conf, True)
return conf
def _mock_model_with_child_custom_config_path_config():
conf = {
'temp_file': None,
'child1_model_config': _mock_model_config().model,
'target': classpath(MockModelWithChildCustomConfigPath),
'stub_number': 1,
}
conf = OmegaConf.create({'model': conf})
OmegaConf.set_struct(conf, True)
return conf
def _mock_model_incorrect_with_nemo_artifact_config(child_model_path: str):
conf = {'temp_file': None, 'child_model_path': child_model_path, 'stub_number': 1}
conf = OmegaConf.create({'model': conf})
OmegaConf.set_struct(conf, True)
return conf
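# Sketch: assembling a parent config whose children come from previously saved .nemo
# checkpoints (the /tmp paths are hypothetical placeholders; the tests below create real ones):
#
#   cfg = _mock_model_with_children_config(
#       child1_model_path="/tmp/child1.nemo", child2_model_path="/tmp/child2.nemo"
#   )
#   parent = MockModelWithChildren(cfg.model)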
class TestSaveRestore:
def __test_restore_elsewhere(
self,
model: ModelPT,
attr_for_eq_check: Set[str] = None,
override_config_path: Optional[Union[str, DictConfig]] = None,
map_location: Optional[Union[torch.device, str]] = None,
strict: bool = False,
return_config: bool = False,
):
"""Test's logic:
1. Save model into temporary folder (save_folder)
2. Copy .nemo file from save_folder to restore_folder
3. Delete save_folder
4. Attempt to restore from .nemo file in restore_folder and compare to original instance
"""
# Create a new temporary directory
with tempfile.TemporaryDirectory() as restore_folder:
with tempfile.TemporaryDirectory() as save_folder:
save_folder_path = save_folder
# Where model will be saved
model_save_path = os.path.join(save_folder, f"{model.__class__.__name__}.nemo")
model.save_to(save_path=model_save_path)
# Where model will be restored from
model_restore_path = os.path.join(restore_folder, f"{model.__class__.__name__}.nemo")
shutil.copy(model_save_path, model_restore_path)
# at this point save_folder should not exist
assert save_folder_path is not None and not os.path.exists(save_folder_path)
assert not os.path.exists(model_save_path)
assert os.path.exists(model_restore_path)
# attempt to restore
model_copy = model.__class__.restore_from(
restore_path=model_restore_path,
map_location=map_location,
strict=strict,
return_config=return_config,
override_config_path=override_config_path,
)
if return_config:
return model_copy
assert model.num_weights == model_copy.num_weights
if attr_for_eq_check is not None and len(attr_for_eq_check) > 0:
for attr in attr_for_eq_check:
assert getattr2(model, attr) == getattr2(model_copy, attr)
return model_copy
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_EncDecCTCModel(self):
# TODO: Switch to using named configs because here we don't really care about weights
qn = EncDecCTCModel.from_pretrained(model_name="QuartzNet15x5Base-En")
self.__test_restore_elsewhere(model=qn, attr_for_eq_check=set(["decoder._feat_in", "decoder._num_classes"]))
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_EncDecCTCModelBPE(self):
# TODO: Switch to using named configs because here we don't really care about weights
cn = EncDecCTCModelBPE.from_pretrained(model_name="stt_en_citrinet_256")
self.__test_restore_elsewhere(model=cn, attr_for_eq_check=set(["decoder._feat_in", "decoder._num_classes"]))
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_EncDecCTCModelBPE_v2(self):
# TODO: Switch to using named configs because here we don't really care about weights
cn = EncDecCTCModelBPE.from_pretrained(model_name="stt_en_conformer_ctc_small")
self.__test_restore_elsewhere(model=cn, attr_for_eq_check=set(["decoder._feat_in", "decoder._num_classes"]))
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_EncDecCTCModelBPE_v3(self):
# TODO: Switch to using named configs because here we don't really care about weights
cn = EncDecCTCModelBPE.from_pretrained(model_name="stt_en_squeezeformer_ctc_xsmall_ls")
self.__test_restore_elsewhere(model=cn, attr_for_eq_check=set(["decoder._feat_in", "decoder._num_classes"]))
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_EncDecCTCModelBPE_HF(self):
# TODO: Switch to using named configs because here we don't really care about weights
# Specifically use ModelPT instead of EncDecCTCModelBPE in order to test target class resolution.
cn = ModelPT.from_pretrained(model_name="nvidia/stt_en_citrinet_256_ls")
self.__test_restore_elsewhere(model=cn, attr_for_eq_check=set(["decoder._feat_in", "decoder._num_classes"]))
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_PunctuationCapitalization(self):
# TODO: Switch to using named configs because here we don't really care about weights
pn = PunctuationCapitalizationModel.from_pretrained(model_name='punctuation_en_distilbert')
self.__test_restore_elsewhere(
model=pn, attr_for_eq_check=set(["punct_classifier.log_softmax", "punct_classifier.log_softmax"])
)
@pytest.mark.unit
def test_mock_save_to_restore_from(self):
with tempfile.NamedTemporaryFile('w') as empty_file:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
# Create model
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
assert model.temp_file == empty_file.name
# Save test
model_copy = self.__test_restore_elsewhere(model, map_location='cpu')
# Restore test
diff = model.w.weight - model_copy.w.weight
# because of caching - cache gets prepended
assert os.path.basename(model_copy.temp_file).endswith(os.path.basename(model.temp_file))
assert diff.mean() <= 1e-9
# assert os.path.basename(model.temp_file) == model_copy.temp_file
assert model_copy.temp_data == ["*****\n"]
@pytest.mark.unit
def test_mock_restore_from_config_only(self):
with tempfile.NamedTemporaryFile('w') as empty_file:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config
cfg = _mock_model_config()
cfg.model.temp_file = os.path.abspath(empty_file.name)
# Inject arbitrary config arguments (after creating model)
with open_dict(cfg.model):
cfg.model.xyz = "abc"
# Create model
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
assert model.temp_file == empty_file.name
model_copy = self.__test_restore_elsewhere(model, map_location='cpu', return_config=False)
# because of caching - cache gets prepended
assert os.path.basename(model_copy.temp_file).endswith(os.path.basename(model.temp_file))
# assert filecmp.cmp(model.temp_file, model_copy._cfg.temp_file)
assert model.cfg.xyz == model_copy.cfg.xyz
@pytest.mark.unit
def test_mock_restore_from_config_override_with_OmegaConf(self):
with tempfile.NamedTemporaryFile('w') as empty_file:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
# Create model
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
assert model.temp_file == empty_file.name
# Inject arbitrary config arguments (after creating model)
with open_dict(cfg.model):
cfg.model.xyz = "abc"
# Save test (with overridden config as OmegaConf object)
model_copy = self.__test_restore_elsewhere(model, map_location='cpu', override_config_path=cfg)
# Restore test
diff = model.w.weight - model_copy.w.weight
assert diff.mean() <= 1e-9
assert model_copy.temp_data == ["*****\n"]
# Test that new config has arbitrary content
assert model_copy.cfg.xyz == "abc"
@pytest.mark.unit
def test_mock_restore_from_config_override_with_yaml(self):
with tempfile.NamedTemporaryFile('w') as empty_file, tempfile.NamedTemporaryFile('w') as config_file:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
# Create model
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
assert model.temp_file == empty_file.name
# Inject arbitrary config arguments (after creating model)
with open_dict(cfg.model):
cfg.model.xyz = "abc"
# Write new config into file
OmegaConf.save(cfg, config_file)
# Save test (with overridden config as OmegaConf object)
model_copy = self.__test_restore_elsewhere(
model, map_location='cpu', override_config_path=config_file.name
)
# Restore test
diff = model.w.weight - model_copy.w.weight
assert diff.mean() <= 1e-9
assert filecmp.cmp(model.temp_file, model_copy.temp_file)
assert model_copy.temp_data == ["*****\n"]
# Test that new config has arbitrary content
assert model_copy.cfg.xyz == "abc"
@pytest.mark.unit
def test_mock_save_to_restore_from_with_target_class(self):
with tempfile.NamedTemporaryFile('w') as empty_file:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
# Create model
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu') # type: MockModel
assert model.temp_file == empty_file.name
# Save file using MockModel
with tempfile.TemporaryDirectory() as save_folder:
save_path = os.path.join(save_folder, "temp.nemo")
model.save_to(save_path)
# Restore test (using ModelPT as restorer)
# This forces the target class = MockModel to be used as resolver
model_copy = ModelPT.restore_from(save_path, map_location='cpu')
# because of caching - cache gets prepended
assert os.path.basename(model_copy.temp_file).endswith(os.path.basename(model.temp_file))
# assert filecmp.cmp(model.temp_file, model_copy.temp_file)
# Restore test
diff = model.w.weight - model_copy.w.weight
assert diff.mean() <= 1e-9
assert isinstance(model_copy, MockModel)
assert model_copy.temp_data == ["*****\n"]
@pytest.mark.unit
def test_mock_save_to_restore_from_multiple_models(self):
with tempfile.NamedTemporaryFile('w') as empty_file, tempfile.NamedTemporaryFile('w') as empty_file2:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
empty_file2.writelines(["+++++\n"])
empty_file2.flush()
# Update configs + create models
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
cfg2 = _mock_model_config()
cfg2.model.temp_file = empty_file2.name
# Create models
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
model2 = MockModel(cfg=cfg2.model, trainer=None)
model2 = model2.to('cpu')
assert model.temp_file == empty_file.name
assert model2.temp_file == empty_file2.name
# Save test
model_copy = self.__test_restore_elsewhere(model, map_location='cpu')
model2_copy = self.__test_restore_elsewhere(model2, map_location='cpu')
# Restore test
assert model_copy.temp_data == ["*****\n"]
assert model2_copy.temp_data == ["+++++\n"]
@pytest.mark.unit
def test_mock_save_to_restore_from_multiple_models_inverted_order(self):
with tempfile.NamedTemporaryFile('w') as empty_file, tempfile.NamedTemporaryFile('w') as empty_file2:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
empty_file2.writelines(["+++++\n"])
empty_file2.flush()
# Update configs + create models
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
cfg2 = _mock_model_config()
cfg2.model.temp_file = empty_file2.name
# Create models
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
model2 = MockModel(cfg=cfg2.model, trainer=None)
model2 = model2.to('cpu')
assert model.temp_file == empty_file.name
assert model2.temp_file == empty_file2.name
# Save test (inverted order)
model2_copy = self.__test_restore_elsewhere(model2, map_location='cpu')
model_copy = self.__test_restore_elsewhere(model, map_location='cpu')
# Restore test
assert model_copy.temp_data == ["*****\n"]
assert model2_copy.temp_data == ["+++++\n"]
@pytest.mark.unit
def test_mock_save_to_restore_chained(self):
with tempfile.NamedTemporaryFile('w') as empty_file, tempfile.NamedTemporaryFile('w') as empty_file2:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config + create model
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
# Create models
model = MockModel(cfg=cfg.model, trainer=None)
model = model.to('cpu')
assert model.temp_file == empty_file.name
def save_copy(model, save_folder, restore_folder):
# Where model will be saved
model_save_path = os.path.join(save_folder, f"{model.__class__.__name__}.nemo")
model.save_to(save_path=model_save_path)
# Where model will be restored from
model_restore_path = os.path.join(restore_folder, f"{model.__class__.__name__}.nemo")
shutil.copy(model_save_path, model_restore_path)
return model_restore_path
# Save test
with tempfile.TemporaryDirectory() as level4:
with tempfile.TemporaryDirectory() as level3:
with tempfile.TemporaryDirectory() as level2:
with tempfile.TemporaryDirectory() as level1:
path = save_copy(model, level1, level2)
model_copy2 = model.__class__.restore_from(path)
path = save_copy(model_copy2, level2, level3)
model_copy3 = model.__class__.restore_from(path)
path = save_copy(model_copy3, level3, level4)
model_copy = model.__class__.restore_from(path)
# Restore test
assert model_copy.temp_data == ["*****\n"]
# AppState test
appstate = AppState()
metadata = appstate.get_model_metadata_from_guid(model_copy.model_guid)
assert metadata.guid != model.model_guid
assert metadata.restoration_path == path
@pytest.mark.unit
def test_mock_save_to_multiple_times(self):
with tempfile.NamedTemporaryFile('w') as empty_file, tempfile.TemporaryDirectory() as tmpdir:
# Write some data
empty_file.writelines(["*****\n"])
empty_file.flush()
# Update config
cfg = _mock_model_config()
cfg.model.temp_file = empty_file.name
# Create model
model = MockModel(cfg=cfg.model, trainer=None) # type: MockModel
model = model.to('cpu')
assert model.temp_file == empty_file.name
# Save test
model.save_to(os.path.join(tmpdir, 'save_0.nemo'))
model.save_to(os.path.join(tmpdir, 'save_1.nemo'))
model.save_to(os.path.join(tmpdir, 'save_2.nemo'))
@pytest.mark.unit
def test_multiple_model_save_restore_connector(self):
class MySaveRestoreConnector(save_restore_connector.SaveRestoreConnector):
def save_to(self, model, save_path: str):
save_path = save_path.replace(".nemo", "_XYZ.nemo")
super(MySaveRestoreConnector, self).save_to(model, save_path)
with tempfile.TemporaryDirectory() as tmpdir:
# Update config
cfg = _mock_model_config()
# Create model
model = MockModel(cfg=cfg.model, trainer=None)
model_with_custom_connector = MockModel(cfg=cfg.model, trainer=None)
model_with_custom_connector._save_restore_connector = MySaveRestoreConnector()
model_with_custom_connector.save_to(os.path.join(tmpdir, 'save_custom.nemo'))
assert os.path.exists(os.path.join(tmpdir, 'save_custom_XYZ.nemo'))
assert isinstance(model._save_restore_connector, save_restore_connector.SaveRestoreConnector)
assert isinstance(model_with_custom_connector._save_restore_connector, MySaveRestoreConnector)
assert type(MockModel._save_restore_connector) == save_restore_connector.SaveRestoreConnector
@pytest.mark.unit
def test_restore_from_save_restore_connector(self):
class MySaveRestoreConnector(save_restore_connector.SaveRestoreConnector):
def save_to(self, model, save_path: str):
save_path = save_path.replace(".nemo", "_XYZ.nemo")
super().save_to(model, save_path)
class MockModelV2(MockModel):
pass
with tempfile.TemporaryDirectory() as tmpdir:
# Update config
cfg = _mock_model_config()
# Create model
save_path = os.path.join(tmpdir, 'save_custom.nemo')
model_with_custom_connector = MockModel(cfg=cfg.model, trainer=None)
model_with_custom_connector._save_restore_connector = MySaveRestoreConnector()
model_with_custom_connector.save_to(save_path)
assert os.path.exists(os.path.join(tmpdir, 'save_custom_XYZ.nemo'))
restored_model = MockModelV2.restore_from(
save_path.replace(".nemo", "_XYZ.nemo"), save_restore_connector=MySaveRestoreConnector()
)
assert type(restored_model) == MockModelV2
assert type(restored_model._save_restore_connector) == MySaveRestoreConnector
@pytest.mark.unit
def test_restore_from_save_restore_connector_return_config(self):
class MySaveRestoreConnector(save_restore_connector.SaveRestoreConnector):
def save_to(self, model, save_path: str):
save_path = save_path.replace(".nemo", "_XYZ.nemo")
super().save_to(model, save_path)
class MockModelV2(MockModel):
pass
with tempfile.TemporaryDirectory() as tmpdir:
# Update config
cfg = _mock_model_config()
# Create model
save_path = os.path.join(tmpdir, 'save_custom.nemo')
model_with_custom_connector = MockModel(cfg=cfg.model, trainer=None)
model_with_custom_connector._save_restore_connector = MySaveRestoreConnector()
model_with_custom_connector.save_to(save_path)
assert os.path.exists(os.path.join(tmpdir, 'save_custom_XYZ.nemo'))
restored_model_cfg = MockModelV2.restore_from(
save_path.replace(".nemo", "_XYZ.nemo"),
save_restore_connector=MySaveRestoreConnector(),
return_config=True,
)
assert isinstance(restored_model_cfg, DictConfig)
assert model_with_custom_connector.cfg == restored_model_cfg
@pytest.mark.unit
def test_restore_from_save_restore_connector_return_config_partial_tar_extraction(self):
class MySaveRestoreConnector(save_restore_connector.SaveRestoreConnector):
def save_to(self, model, save_path: str):
save_path = save_path.replace(".nemo", "_XYZ.nemo")
super().save_to(model, save_path)
class MockModelV2(MockModel):
pass
with tempfile.TemporaryDirectory() as tmpdir:
# Update config
cfg = _mock_model_config()
# Create model
save_path = os.path.join(tmpdir, 'save_custom.nemo')
model_with_custom_connector = MockModel(cfg=cfg.model, trainer=None)
model_with_custom_connector._save_restore_connector = MySaveRestoreConnector()
model_with_custom_connector.save_to(save_path)
true_save_path = os.path.join(tmpdir, 'save_custom_XYZ.nemo')
assert os.path.exists(true_save_path)
my_connector = MySaveRestoreConnector()
with tempfile.TemporaryDirectory() as config_tmpdir:
my_connector._unpack_nemo_file(true_save_path, out_folder=config_tmpdir, extract_config_only=True)
current_files = list(os.listdir(config_tmpdir))
assert len(current_files) == 1 # only config file should have been extracted, no pytorch params
config_filepath = current_files[0]
assert config_filepath.endswith(".yaml")
@pytest.mark.unit
def test_mock_model_model_collision(self):
# The usual pipeline is working just fine.
cfg = _mock_model_config()
model = MockModel(cfg=cfg.model, trainer=None) # type: MockModel
model = model.to('cpu')
# Let's create a custom config with a 'model.model' node.
cfg = _mock_model_config()
OmegaConf.set_struct(cfg, False)
cfg.model.model = 'aaa'
OmegaConf.set_struct(cfg, True)
# Failing due to collision.
with pytest.raises(ValueError, match="Creating model config node is forbidden"):
model = MockModel(cfg=cfg.model, trainer=None) # type: MockModel
model = model.to('cpu')
@pytest.mark.unit
@pytest.mark.parametrize("change_child_number", [False, True])
@pytest.mark.parametrize("child2_model_from_path", [False, True])
def test_mock_model_nested(self, change_child_number: bool, child2_model_from_path: bool):
"""
Test model with 2 children
Model and each child can be saved/restored separately
Model is constructed using saved child models (.nemo checkpoints)
Args:
change_child_number: if change_child_number is True, child model changes its config
without notifying parent model, and saved parent model should handle this correctly.
child2_model_from_path: if child2_model_from_path is True, child2 model is restored from .nemo checkpoint,
otherwise constructed directly from config. Child1 model always loaded from checkpoint.
"""
# children - models without sub-children
cfg_child1 = _mock_model_config()
cfg_child2 = _mock_model_with_children_config() # no children
# Create models
child1 = MockModel(cfg=cfg_child1.model, trainer=None)
child1 = child1.to('cpu')
with tempfile.TemporaryDirectory() as tmpdir_parent:
parent_path = os.path.join(tmpdir_parent, "parent.nemo")
with tempfile.TemporaryDirectory() as tmpdir_child:
# save children
child1_path = os.path.join(tmpdir_child, 'child1.nemo')
child1.save_to(child1_path)
if child2_model_from_path:
child2 = MockModelWithChildren(cfg=cfg_child2.model, trainer=None)
child2 = child2.to('cpu')
child2_path = os.path.join(tmpdir_child, 'child2.nemo')
child2.save_to(child2_path)
# create model with children using saved .nemo model checkpoints
cfg_parent = _mock_model_with_children_config(
child1_model_path=child1_path, child2_model_path=child2_path
)
else:
# child 2 model will be directly constructed from subconfig
cfg_parent = _mock_model_with_children_config(
child1_model_path=child1_path, child2_model_path=None, child2_model_cfg=cfg_child2.get("model")
)
parent = MockModelWithChildren(cfg_parent.model)
if change_child_number:
parent.child2_model.change_stub_number(10)
parent.save_to(parent_path)
# restore, separate children checkpoints are not available here (tmpdir_child destroyed)
parent = ModelPT.restore_from(parent_path)
# check model is transparent, child models can be accessed and can be saved/restored separately
_ = self.__test_restore_elsewhere(parent.child1_model, map_location='cpu')
child2 = self.__test_restore_elsewhere(parent.child2_model, map_location='cpu')
if change_child_number:
assert child2.cfg.stub_number == 10
# check model itself can be saved/restored
parent = self.__test_restore_elsewhere(parent, map_location='cpu')
if change_child_number:
assert parent.child2_model.cfg.stub_number == 10
@pytest.mark.unit
@pytest.mark.parametrize("change_child_resource", [False, True])
@pytest.mark.parametrize("child2_model_from_path", [False, True])
def test_mock_model_nested_with_resources(self, change_child_resource: bool, child2_model_from_path: bool):
"""
Test nested model with 2 children: model and each child can be saved/restored separately
child models and parent model itself contain resources
Args:
change_child_resource: if change_child_resource is True,
child model resources are changed after instantiation parent model.
child2_model_from_path: if child2_model_from_path is True, child2 model is restored from .nemo checkpoint,
otherwise constructed directly from config. Child1 model always loaded from checkpoint.
"""
with tempfile.NamedTemporaryFile('w') as file_child1, tempfile.NamedTemporaryFile(
'w'
) as file_child2, tempfile.NamedTemporaryFile('w') as file_child2_other, tempfile.NamedTemporaryFile(
'w'
) as file_parent:
# write text data, use these files as resources
parent_data = ["*****\n"]
child1_data = ["+++++\n"]
child2_data = ["-----\n"]
child2_data_other = [".....\n"]
file_parent.writelines(parent_data)
file_parent.flush()
file_child1.writelines(child1_data)
file_child1.flush()
file_child2.writelines(child2_data)
file_child2.flush()
file_child2_other.writelines(child2_data_other)
file_child2_other.flush()
# construct child models with resources
# create configs
cfg_child1 = _mock_model_config()
cfg_child1.model.temp_file = file_child1.name
cfg_child2 = _mock_model_with_children_config() # no sub-children
cfg_child2.model.temp_file = file_child2.name
# create child models
child1 = MockModel(cfg=cfg_child1.model, trainer=None)
child1 = child1.to('cpu')
with tempfile.TemporaryDirectory() as tmpdir_parent:
parent_path = os.path.join(tmpdir_parent, "parent.nemo")
with tempfile.TemporaryDirectory() as tmpdir_child:
# save children
child1_path = os.path.join(tmpdir_child, 'child1.nemo')
child1.save_to(child1_path)
if child2_model_from_path:
child2 = MockModelWithChildren(cfg=cfg_child2.model, trainer=None)
child2 = child2.to('cpu')
child2_path = os.path.join(tmpdir_child, 'child2.nemo')
child2.save_to(child2_path)
# create model with children using saved .nemo model checkpoints
cfg_parent = _mock_model_with_children_config(
child1_model_path=child1_path, child2_model_path=child2_path
)
else:
# child 2 model will be directly constructed from subconfig
cfg_parent = _mock_model_with_children_config(
child1_model_path=child1_path,
child2_model_path=None,
child2_model_cfg=cfg_child2.get("model"),
)
cfg_parent.model.temp_file = file_parent.name # add resource
parent = MockModelWithChildren(cfg_parent.model)
if change_child_resource:
parent.child2_model.setup_data_from_file(file_child2_other.name)
parent.save_to(parent_path)
# restore, separate children checkpoints are not available here (tmpdir_child destroyed)
parent = ModelPT.restore_from(parent_path)
# check model is transparent, child models can be accessed and can be saved/restored separately
child1 = self.__test_restore_elsewhere(parent.child1_model, map_location='cpu')
child2 = self.__test_restore_elsewhere(parent.child2_model, map_location='cpu')
# test parent save/restore
parent = self.__test_restore_elsewhere(parent, map_location='cpu')
# test resources
# check separately restored child models
assert child1.temp_data == child1_data
if change_child_resource:
assert child2.temp_data == child2_data_other
else:
assert child2.temp_data == child2_data
# test parent model + child models
assert parent.temp_data == parent_data
assert parent.child1_model.temp_data == child1_data
if change_child_resource:
assert parent.child2_model.temp_data == child2_data_other
else:
assert parent.child2_model.temp_data == child2_data
@pytest.mark.unit
def test_mock_model_nested_with_resources_multiple_passes(self):
"""
Test nested model with 2 children: multiple save-restore passes
child models and parent model itself contain resources
"""
with tempfile.NamedTemporaryFile('w') as file_child1, tempfile.NamedTemporaryFile(
'w'
) as file_child2, tempfile.NamedTemporaryFile('w') as file_child2_other, tempfile.NamedTemporaryFile(
'w'
) as file_parent:
# write text data, use these files as resources
parent_data = ["*****\n"]
child1_data = ["+++++\n"]
child2_data = ["-----\n"]
child2_data_other = [".....\n"]
file_parent.writelines(parent_data)
file_parent.flush()
file_child1.writelines(child1_data)
file_child1.flush()
file_child2.writelines(child2_data)
file_child2.flush()
file_child2_other.writelines(child2_data_other)
file_child2_other.flush()
# construct child models with resources
# create configs
cfg_child1 = _mock_model_config()
cfg_child1.model.temp_file = file_child1.name
cfg_child2 = _mock_model_with_children_config() # no sub-children
cfg_child2.model.temp_file = file_child2.name
# create child models
child1 = MockModel(cfg=cfg_child1.model, trainer=None)
child1 = child1.to('cpu')
child2 = MockModelWithChildren(cfg=cfg_child2.model, trainer=None)
child2 = child2.to('cpu')
with tempfile.TemporaryDirectory() as tmpdir_parent1, tempfile.TemporaryDirectory() as tmpdir_parent2, tempfile.TemporaryDirectory() as tmpdir_parent3, tempfile.TemporaryDirectory() as tmpdir_parent4:
parent_path1 = os.path.join(tmpdir_parent1, "parent.nemo")
parent_path2 = os.path.join(tmpdir_parent2, "parent.nemo")
with tempfile.TemporaryDirectory() as tmpdir_child:
# save children
child1_path = os.path.join(tmpdir_child, 'child1.nemo')
child1.save_to(child1_path)
child2_path = os.path.join(tmpdir_child, 'child2.nemo')
child2.save_to(child2_path)
# create model with children using saved "nemo" checkpoints
cfg_parent = _mock_model_with_children_config(
child1_model_path=child1_path, child2_model_path=child2_path
)
cfg_parent.model.temp_file = file_parent.name # add resource
parent = MockModelWithChildren(cfg_parent.model)
# save-restore first pass
# save to different locations
parent.save_to(parent_path1)
parent.save_to(parent_path2)
# restore, separate children checkpoints are not available here (tmpdir_child destroyed)
parent1 = ModelPT.restore_from(parent_path1)
parent2 = ModelPT.restore_from(parent_path2)
# check resources
for parent in (parent1, parent2):
assert parent.temp_data == parent_data
assert parent.child1_model.temp_data == child1_data
assert parent.child2_model.temp_data == child2_data
del parent2 # use parent1 for second pass
# save-restore second pass
parent_path3 = os.path.join(tmpdir_parent3, "parent.nemo")
parent_path4 = os.path.join(tmpdir_parent4, "parent.nemo")
parent1.save_to(parent_path3)
parent1.save_to(parent_path4)
parent3 = ModelPT.restore_from(parent_path3)
parent4 = ModelPT.restore_from(parent_path4)
# check resources
for parent in (parent3, parent4):
assert parent.temp_data == parent_data
assert parent.child1_model.temp_data == child1_data
assert parent.child2_model.temp_data == child2_data
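# The second pass is the interesting one: the original child .nemo files (tmpdir_child) are gone by then,
# so a parent restored from its own checkpoint must be able to re-package its children's artifacts
# purely from what it carries itself.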
@pytest.mark.unit
def test_mock_model_nested_double_with_resources(self):
"""
Test nested model: parent -> child_with_child -> child; the model and each child can be saved/restored separately.
All models can contain resources.
"""
with tempfile.NamedTemporaryFile('w') as file_child, tempfile.NamedTemporaryFile(
'w'
) as file_child_with_child, tempfile.NamedTemporaryFile('w') as file_parent:
# write text data, use these files as resources
parent_data = ["*****\n"]
child_with_child_data = ["+++++\n"]
child_data = ["-----\n"]
file_parent.writelines(parent_data)
file_parent.flush()
file_child_with_child.writelines(child_with_child_data)
file_child_with_child.flush()
file_child.writelines(child_data)
file_child.flush()
# construct child model (leaf) with resource
cfg_child = _mock_model_config()
cfg_child.model.temp_file = file_child.name
child = MockModel(cfg=cfg_child.model, trainer=None)
child = child.to('cpu')
with tempfile.TemporaryDirectory() as tmpdir_parent:
parent_path = os.path.join(tmpdir_parent, "parent.nemo")
with tempfile.TemporaryDirectory() as tmpdir_child_with_child:
child_with_child_path = os.path.join(tmpdir_child_with_child, 'child_with_child.nemo')
with tempfile.TemporaryDirectory() as tmpdir_child:
# save child
child_path = os.path.join(tmpdir_child, 'child.nemo')
child.save_to(child_path)
# create child model with child
cfg_child_with_child = _mock_model_with_children_config(
child1_model_path=None, child2_model_path=child_path
)
cfg_child_with_child.model.temp_file = file_child_with_child.name
child_with_child = MockModelWithChildren(cfg_child_with_child.model)
child_with_child.save_to(child_with_child_path)
# create parent model with child-with-child, leaf checkpoint is not available here
cfg_parent = _mock_model_with_children_config(
child1_model_path=None, child2_model_path=child_with_child_path
)
cfg_parent.model.temp_file = file_parent.name
parent = MockModelWithChildren(cfg_parent.model)
parent.save_to(parent_path)
# restore, separate children checkpoints are not available here
# tmpdir_child, tmpdir_child_with_child are destroyed
parent = ModelPT.restore_from(parent_path)
# model is transparent, children and model itself can be saved/restored
child = self.__test_restore_elsewhere(parent.child2_model.child2_model, map_location='cpu')
child_with_child = self.__test_restore_elsewhere(parent.child2_model, map_location='cpu')
parent = self.__test_restore_elsewhere(parent, map_location='cpu')
# test resources for all restored models
# leaf model
assert child.temp_data == child_data
# child with child
assert child_with_child.temp_data == child_with_child_data
assert child_with_child.child2_model.temp_data == child_data
# parent
assert parent.temp_data == parent_data
assert parent.child2_model.temp_data == child_with_child_data
assert parent.child2_model.child2_model.temp_data == child_data
# check named_nemo_modules: parent -> child2 -> child2.child2,
# tuples of (attribute_path, cfg_path, module)
named_nemo_modules = list(parent.named_nemo_modules())
etalon_nemo_modules = [
("", "", parent),
("child2_model", "child2_model", parent.child2_model),
("child2_model.child2_model", "child2_model.child2_model", parent.child2_model.child2_model),
]
assert len(named_nemo_modules) == len(etalon_nemo_modules)
for etalon, actual in zip(etalon_nemo_modules, named_nemo_modules):
assert etalon[0] == actual[0]
assert etalon[1] == actual[1]
assert etalon[2] is actual[2]
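# named_nemo_modules yields (attribute_path, cfg_path, module) for the model itself and every nested
# NeMo model; here attribute path and config path coincide because the default config key matches the
# attribute name (contrast with test_mock_model_nested_custom_config_field below).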
@pytest.mark.unit
@pytest.mark.with_downloads
def test_mock_model_nested_child_from_pretrained(self):
"""
Test nested model with child initialized from pretrained model
"""
cfg = _mock_model_with_child_encdecctcbpe_config("stt_en_conformer_ctc_small")
parent = MockModelWithChildEncDecCTCBPE(cfg=cfg.model, trainer=None)
with tempfile.TemporaryDirectory() as tmpdir_parent:
parent_path = os.path.join(tmpdir_parent, "parent.nemo")
# save, then restore
parent.save_to(parent_path)
parent = ModelPT.restore_from(parent_path)
# test child can be saved/restored
_ = self.__test_restore_elsewhere(parent.ctc_model, map_location='cpu')
# test parent can be saved/restored
parent = self.__test_restore_elsewhere(parent, map_location='cpu')
assert isinstance(parent.ctc_model, EncDecCTCModel)
@pytest.mark.unit
def test_mock_model_nested_custom_config_field(self):
"""
Test nested model with custom config field not equal to attribute name
Config is stored in `child1_model_config`
Child model is stored in `child1_model` attribute
"""
with tempfile.NamedTemporaryFile('w') as file_child1, tempfile.NamedTemporaryFile('w') as file_parent:
# write text data, use these files as resources
parent_data = ["*****\n"]
child1_data = ["+++++\n"]
file_parent.writelines(parent_data)
file_parent.flush()
file_child1.writelines(child1_data)
file_child1.flush()
cfg = _mock_model_with_child_custom_config_path_config()
cfg.model.temp_file = file_parent.name
cfg.model.child1_model_config.temp_file = file_child1.name
# construct parent model
parent = MockModelWithChildCustomConfigPath(cfg=cfg.model, trainer=None)
with tempfile.TemporaryDirectory() as tmpdir_parent:
parent_path = os.path.join(tmpdir_parent, "parent.nemo")
# save, then restore
parent.save_to(parent_path)
parent = ModelPT.restore_from(parent_path)
# test child can be saved/restored
_ = self.__test_restore_elsewhere(parent.child1_model, map_location='cpu')
# test parent can be saved/restored
parent = self.__test_restore_elsewhere(parent, map_location='cpu')
# check data
assert parent.temp_data == parent_data
assert parent.child1_model.temp_data == child1_data
# check named_nemo_modules: parent -> child, tuples of (attribute_path, cfg_path, module)
named_nemo_modules = list(parent.named_nemo_modules())
etalon_nemo_modules = [("", "", parent), ("child1_model", "child1_model_config", parent.child1_model)]
assert len(named_nemo_modules) == len(etalon_nemo_modules)
for etalon, actual in zip(etalon_nemo_modules, named_nemo_modules):
assert etalon[0] == actual[0]
assert etalon[1] == actual[1]
assert etalon[2] is actual[2]
@pytest.mark.unit
def test_using_nemo_checkpoint_as_artifact_disallowed(self):
"""
Test that using a .nemo checkpoint as an artifact is disallowed
"""
cfg_child = _mock_model_config()
child = MockModel(cfg=cfg_child.model, trainer=None).to("cpu")
with tempfile.TemporaryDirectory() as tmpdir:
child_path = os.path.join(tmpdir, "child.nemo")
child.save_to(child_path)
cfg_parent = _mock_model_incorrect_with_nemo_artifact_config(child_path)
with pytest.raises(NeMoBaseException):
# registering .nemo checkpoint as an artifact is not allowed
_ = MockModelIncorrectWithNemoArtifact(cfg=cfg_parent.model, trainer=None)
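# Rationale: a .nemo file is a full checkpoint, not a plain resource, so registering one through the
# artifact mechanism is rejected; nested models are presumably expected to go through the dedicated
# child-model config mechanism exercised by the tests above instead.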
@pytest.mark.unit
def test_restore_from_save_restore_connector_extracted_dir(self):
class MySaveRestoreConnector(save_restore_connector.SaveRestoreConnector):
def save_to(self, model, save_path: str):
save_path = save_path.replace(".nemo", "_XYZ.nemo")
super().save_to(model, save_path)
class MockModelV2(MockModel):
pass
with tempfile.TemporaryDirectory() as extracted_tempdir:
with tempfile.TemporaryDirectory() as tmpdir:
# Update config
cfg = _mock_model_config()
# Create model
save_path = os.path.join(tmpdir, 'save_custom.nemo')
model_with_custom_connector = MockModel(cfg=cfg.model, trainer=None)
model_with_custom_connector._save_restore_connector = MySaveRestoreConnector()
model_with_custom_connector.save_to(save_path)
nemo_filepath = os.path.join(tmpdir, 'save_custom_XYZ.nemo')
assert os.path.exists(nemo_filepath)
# extract the contents to this dir a priori
# simulate by extracting now before calling restore_from
connector = MySaveRestoreConnector()
MySaveRestoreConnector._unpack_nemo_file(nemo_filepath, extracted_tempdir)
assert get_size(extracted_tempdir) > 0
# delete the old directory and preserve only the new extracted directory (escape scope of old dir)
# next, set the model's extracted directory path
connector.model_extracted_dir = extracted_tempdir
# note, we pass in the "old" nemo_filepath, stored somewhere other than the extracted directory
# this nemo_filepath is no longer valid, and has been deleted.
restored_model = MockModelV2.restore_from(nemo_filepath, save_restore_connector=connector)
assert type(restored_model) == MockModelV2
assert type(restored_model._save_restore_connector) == MySaveRestoreConnector
# assert models have correct restoration information and paths
appstate = AppState()
original_metadata = appstate.get_model_metadata_from_guid(model_with_custom_connector.model_guid)
assert original_metadata.restoration_path is None
restored_metadata = appstate.get_model_metadata_from_guid(restored_model.model_guid)
assert restored_metadata.restoration_path is not None
# assert that the restore path was the path of the pre-extracted directory
# irrespective of whether an old `nemo_filepath` (which doesn't exist anymore) was passed to restore_from.
assert extracted_tempdir in restored_metadata.restoration_path
assert extracted_tempdir not in nemo_filepath
assert not os.path.exists(nemo_filepath)
# test for parameter equality
model_with_custom_connector = model_with_custom_connector.to('cpu')
restored_model = restored_model.to('cpu')
original_state_dict = model_with_custom_connector.state_dict()
restored_state_dict = restored_model.state_dict()
for orig, restored in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[orig] - restored_state_dict[restored]).abs().mean() < 1e-6
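# When connector.model_extracted_dir is set, restore_from presumably reads directly from the
# pre-extracted directory, which is why restoration succeeds and restoration_path points at
# extracted_tempdir even though the original .nemo path no longer exists.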
@pytest.mark.unit
def test_hf_model_filter(self):
filt = ModelPT.get_hf_model_filter()
assert isinstance(filt, ModelFilter)
assert filt.library == 'nemo'
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_hf_model_info(self):
filt = ModelPT.get_hf_model_filter()
# check no override results
model_infos = ModelPT.search_huggingface_models(model_filter=None)
assert len(model_infos) > 0
# check with default override results (should match above)
default_model_infos = ModelPT.search_huggingface_models(model_filter=filt)
assert len(model_infos) == len(default_model_infos)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_hf_model_info_with_card_data(self):
filt = ModelPT.get_hf_model_filter()
# check no override results
model_infos = ModelPT.search_huggingface_models(model_filter=filt)
assert len(model_infos) > 0
assert not hasattr(model_infos[0], 'cardData')
# check overridden defaults
filt.resolve_card_info = True
model_infos = ModelPT.search_huggingface_models(model_filter=filt)
assert len(model_infos) > 0
for info in model_infos:
if hasattr(info, 'cardData'):
assert info.cardData is not None
break
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_hf_model_info_with_limited_results(self):
filt = ModelPT.get_hf_model_filter()
# check no override results
model_infos = ModelPT.search_huggingface_models(model_filter=filt)
assert len(model_infos) > 0
# check overridden defaults
filt.limit_results = 5
new_model_infos = ModelPT.search_huggingface_models(model_filter=filt)
assert len(new_model_infos) <= 5
assert len(new_model_infos) < len(model_infos)
| NeMo-main | tests/core/test_save_restore.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, NamedTuple
import pytest
import torch
from nemo.core import Typing, typecheck
from nemo.core.neural_types import *
# Perform recursive shape assert
def recursive_assert_shape(x, shape):
if isinstance(x, (list, tuple)):
for xi in x:
recursive_assert_shape(xi, shape)
return
assert x.shape == shape
# Perform recursive type assert
def recursive_assert_homogeneous_type(x, type_val):
if isinstance(x, (list, tuple)):
for xi in x:
recursive_assert_homogeneous_type(xi, type_val)
return
assert x.neural_type.compare(type_val) == NeuralTypeComparisonResult.SAME
class TestNeuralTypeCheckSystem:
@pytest.mark.unit
def test_no_types_passthrough(self):
class NoTypes(Typing):
@typecheck()
def __call__(self, x):
return torch.tensor(1.0)
obj = NoTypes()
result = obj(torch.tensor(1.0))
assert result == torch.tensor(1.0)
assert not hasattr(result, 'neural_type')
@pytest.mark.unit
def test_input_output_types(self):
class InputOutputTypes(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
obj = InputOutputTypes()
result = obj(x=torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert result.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
# Test passing wrong key for input
with pytest.raises(TypeError):
_ = obj(a=torch.zeros(10))
# Test using positional args
with pytest.raises(TypeError):
_ = obj(torch.zeros(10))
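# In short, once input_types is declared, calls are keyword-only with matching port names, e.g.:
#   obj(x=torch.zeros(10))   # ok
#   obj(torch.zeros(10))     # TypeError: positional args rejected
#   obj(a=torch.zeros(10))   # TypeError: unknown port name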
@pytest.mark.unit
def test_input_types_only(self):
class InputTypes(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
obj = InputTypes()
result = obj(x=torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert hasattr(result, 'neural_type') is False
@pytest.mark.unit
def test_multiple_input_types_only(self):
class InputTypes(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType()), "y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x, y):
x += y
return x
obj = InputTypes()
result = obj(x=torch.zeros(10), y=torch.ones(10))
assert result.sum() == torch.tensor(10.0)
assert hasattr(result, 'neural_type') is False
@pytest.mark.unit
def test_output_types_only(self):
class OutputTypes(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
obj = OutputTypes()
result = obj(x=torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert result.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
# Test passing positional args
# Positional args are allowed if input_types is not set!
result = obj(torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
@pytest.mark.unit
def test_multiple_output_types_only(self):
class MultipleOutputTypes(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType()), "z": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
y = x + 1
z = x + 2
return y, z
obj = MultipleOutputTypes()
result_y, result_z = obj(x=torch.zeros(10))
assert result_y.sum() == torch.tensor(10.0)
assert result_y.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert result_z.sum() == torch.tensor(20.0)
assert result_z.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_multiple_output_types_only_namedtuple(self):
class NamedTupleOutputType(NamedTuple):
y: torch.Tensor
z: torch.Tensor
class MultipleOutputTypesWithNamedTuple(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType()), "z": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
y = x + 1
z = x + 2
return NamedTupleOutputType(y=y, z=z)
obj = MultipleOutputTypesWithNamedTuple()
result = obj(x=torch.zeros(10))
assert result.y.sum() == torch.tensor(10.0)
assert result.y.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert result.z.sum() == torch.tensor(20.0)
assert result.z.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_multiple_mixed_output_types_only(self):
class MultipleMixedOutputTypes(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType()), "z": [NeuralType(('B',), ElementType())]}
@typecheck()
def __call__(self, x):
y = x + 1
z = x + 2
return y, [z, z]
obj = MultipleMixedOutputTypes()
result_y, result_z = obj(x=torch.zeros(10))
assert result_y.sum() == torch.tensor(10.0)
assert result_y.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert result_z[0].sum() == torch.tensor(20.0)
assert result_z[0].neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert result_z[1].sum() == torch.tensor(20.0)
assert result_z[1].neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_multiple_mixed_output_types_only_namedtuple(self):
class NamedTupleOutputType(NamedTuple):
y: torch.Tensor
zs: List[torch.Tensor]
class MultipleMixedOutputTypes(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType()), "zs": [NeuralType(('B',), ElementType())]}
@typecheck()
def __call__(self, x):
y = x + 1
z = x + 2
return NamedTupleOutputType(y=y, zs=[z, z])
obj = MultipleMixedOutputTypes()
result_y, result_z = obj(x=torch.zeros(10))
assert result_y.sum() == torch.tensor(10.0)
assert result_y.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert result_z[0].sum() == torch.tensor(20.0)
assert result_z[0].neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert result_z[1].sum() == torch.tensor(20.0)
assert result_z[1].neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_multiple_mixed_output_types_only_mismatched(self):
class MultipleMixedOutputTypes(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType()), "z": [NeuralType(('B',), ElementType())]}
@typecheck()
def __call__(self, x):
# Use list of y, single z, contrary to signature
y = x + 1
z = x + 2
return [y, y], z
obj = MultipleMixedOutputTypes()
with pytest.raises(TypeError):
result_y, result_z = obj(x=torch.zeros(10))
@pytest.mark.unit
def test_multiple_mixed_output_types_only_namedtuple_mismatched(self):
class NamedTupleOutputType(NamedTuple):
ys: List[torch.Tensor]
z: torch.Tensor
class MultipleMixedOutputTypes(Typing):
@property
def output_types(self):
return {"ys": NeuralType(('B',), ElementType()), "z": [NeuralType(('B',), ElementType())]}
@typecheck()
def __call__(self, x):
# Use list of y, single z, contrary to signature
y = x + 1
z = x + 2
return NamedTupleOutputType(ys=[y, y], z=z)
obj = MultipleMixedOutputTypes()
with pytest.raises(TypeError):
_ = obj(x=torch.zeros(10))
@pytest.mark.unit
def test_incorrect_inheritance(self):
class IncorrectInheritance(object):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
obj = IncorrectInheritance()
with pytest.raises(RuntimeError):
_ = obj(x=torch.zeros(10))
@pytest.mark.unit
def test_port_definition_rejection(self):
class InputPortDefinitionRejection(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"w": NeuralType(('B',), ElementType()), "u": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x, y):
x += 1
y -= 1
return x, y
# Test input port mismatch
obj = InputPortDefinitionRejection()
with pytest.raises(TypeError):
_ = obj(x=torch.zeros(10), y=torch.zeros(10))
class OutputPortDefinitionRejection(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {
"w": NeuralType(('B',), ElementType()),
}
@typecheck()
def __call__(self, x):
return x + 1, x - 1
obj = OutputPortDefinitionRejection()
with pytest.raises(TypeError):
_ = obj(x=torch.zeros(10))
@pytest.mark.unit
def test_port_shape_rejection(self):
class InputPortShapeRejection(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B', 'T'), ElementType())} # expect rank 2 matrix
@property
def output_types(self):
return {"w": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
# Test input port mismatch
obj = InputPortShapeRejection()
with pytest.raises(TypeError):
_ = obj(x=torch.zeros(10))
class OutputPortShapeRejection(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {
"w": NeuralType(('B', 'T', 'D'), ElementType()), # expect rank 3 matrix
}
@typecheck()
def __call__(self, x):
return x + 1
obj = OutputPortShapeRejection()
with pytest.raises(TypeError):
_ = obj(x=torch.zeros(10))
@pytest.mark.unit
def test_positional_args(self):
# Test positional check on input type
class InputPositional(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
obj = InputPositional()
with pytest.raises(TypeError):
_ = obj(torch.zeros(10))
# Test positional pass-through for only output ports defined
# NOTE: This is required behaviour to support type checking of NeMo Dataset class
# during collate_fn() call.
class OutputPositionalPassthrough(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
obj = OutputPositionalPassthrough()
result = obj(torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
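# Positional calls are only tolerated when input_types is undefined (the collate_fn-style case in the
# NOTE above); any declared input port forces keyword arguments.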
@pytest.mark.unit
def test_optional_types(self):
class InputOptionalTypes(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType()), "y": NeuralType(('B',), ElementType(), optional=True)}
@typecheck()
def __call__(self, x, y=None):
if y is None:
x += 1
else:
x += y
return x
obj = InputOptionalTypes()
result = obj(x=torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert hasattr(result, 'neural_type') is False
result2 = obj(x=torch.zeros(10), y=torch.full([10], fill_value=5, dtype=torch.int32))
assert result2.sum() == torch.tensor(10 * 5)
assert hasattr(result2, 'neural_type') is False
@pytest.mark.unit
def test_input_output_neural_types(self):
class NodeA(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": NeuralType(('B', 'D'), LogitsType())}
@typecheck()
def __call__(self, x):
y = torch.randn(x.shape[0], 4)
return y
class NodeB(Typing):
@property
def input_types(self):
return {"w": NeuralType(('B', 'D'), LogitsType())}
@property
def output_types(self):
return {"u": NeuralType(('B',), LabelsType())}
@typecheck()
def __call__(self, w):
_, u = w.max(-1)
return u
nodeA = NodeA()
nodeB = NodeB()
outA = nodeA(x=torch.zeros(10))
outB = nodeB(w=outA)
assert outB.shape == torch.Size([10])
assert outB.neural_type.compare(NeuralType(('B',), LabelsType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_nested_input_output_neural_types(self):
class NestedNodeA(Typing):
@property
def input_types(self):
return {"x": [[NeuralType(('B',), ElementType())]]}
@property
def output_types(self):
return {
"y0": [NeuralType(('B', 'D'), LogitsType())],
"y1": [NeuralType(('B', 'D'), LogitsType())],
}
@typecheck(ignore_collections=False)
def __call__(self, x):
# input x = [[x1, x2], [x3]]
x0 = x[0][0]
y = torch.randn(x0.shape[0], 4)
# Output is same as
# 1) return ([y, y], [y])
# 2) return [y, y], [y]
return [[y, y], [y]]
# Non-homogeneous output types
class NestedNodeB(Typing):
@property
def input_types(self):
return {"w": [[NeuralType(('B', 'D'), LogitsType())]]}
@property
def output_types(self):
return {
"u0": [NeuralType(('B',), LogprobsType())], # check non homogeneous type
"u1": [NeuralType(('B',), LabelsType())],
}
@typecheck(ignore_collections=False)
def __call__(self, w):
# input x = [[x1, x2], [x3]]
_, u00 = w[0][0].max(-1)
_, u01 = w[0][1].max(-1)
_, u10 = w[1][0].max(-1)
# Output is same as
# 1) return ([u00, u01], [u10])
# 2) return [u00, u01], [u10]
return [[u00, u01], [u10]]
nodeA = NestedNodeA()
nodeB = NestedNodeB()
input_nest = [[torch.zeros(10), torch.zeros(10)], [torch.zeros(10)]]
outA = nodeA(x=input_nest)
outB = nodeB(w=outA)
# Perform recursive shape assert
recursive_assert_shape(outB, torch.Size([10]))
# Assert non-homogeneous type assertions
assert outB[0][0].neural_type.compare(NeuralType(('B',), LogprobsType())) == NeuralTypeComparisonResult.SAME
assert outB[0][1].neural_type.compare(NeuralType(('B',), LogprobsType())) == NeuralTypeComparisonResult.SAME
assert outB[1][0].neural_type.compare(NeuralType(('B',), LabelsType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_nested_input_output_neural_types_ignore_collections(self):
class NestedNodeA(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {
"y0": NeuralType(('B', 'D'), LogitsType()),
"y1": NeuralType(('B', 'D'), LogitsType()),
}
@typecheck(ignore_collections=True)
def __call__(self, x):
# input x = [[x1, x2], [x3]]
x0 = x[0][0]
y = torch.randn(x0.shape[0], 4)
return [[y, y], [y]]
# Non-homogeneous output types
class NestedNodeB(Typing):
@property
def input_types(self):
return {"w": NeuralType(('B', 'D'), LogitsType())}
@property
def output_types(self):
return {
"u0": NeuralType(('B',), LogprobsType()), # check non homogeneous type
"u1": NeuralType(('B',), LabelsType()),
}
@typecheck(ignore_collections=True)
def __call__(self, w):
# input x = [[x1, x2], [x3]]
_, u00 = w[0][0].max(-1)
_, u01 = w[0][1].max(-1)
_, u10 = w[1][0].max(-1)
return [[u00, u01], [u10]]
nodeA = NestedNodeA()
nodeB = NestedNodeB()
input_nest = [[torch.zeros(10), torch.zeros(10)], [torch.zeros(10)]]
outA = nodeA(x=input_nest)
outB = nodeB(w=outA)
# Perform recursive shape assert
recursive_assert_shape(outB, torch.Size([10]))
# Assert non-homogeneous type assertions
assert outB[0][0].neural_type.compare(NeuralType(('B',), LogprobsType())) == NeuralTypeComparisonResult.SAME
assert outB[0][1].neural_type.compare(NeuralType(('B',), LogprobsType())) == NeuralTypeComparisonResult.SAME
assert outB[1][0].neural_type.compare(NeuralType(('B',), LabelsType())) == NeuralTypeComparisonResult.SAME
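# With ignore_collections=True the declared (non-nested) port types are matched against the leaf
# tensors only, so the same [[...], [...]] nests accepted by the previous test still pass even though
# the declarations here are not wrapped in lists.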
@pytest.mark.unit
def test_nested_mixed_input_output_neural_types(self):
class NestedMixedNodeA(Typing):
@property
def input_types(self):
return {"x1": NeuralType(('B',), ElementType()), "x2": [[NeuralType(('B',), ElementType())]]}
@property
def output_types(self):
return {
"y0": NeuralType(('B', 'D'), LogprobsType()),
"y1": [[NeuralType(('B', 'D'), LogitsType())]],
}
@typecheck(ignore_collections=False)
def __call__(self, x1, x2):
# input x = [[x1, x2], [x3]]
x0 = x2[0][0]
y = torch.randn(x0.shape[0], 4)
return y, [[y, y], [y]]
# Non-homogeneous output types
class NestedMixedNodeB(Typing):
@property
def input_types(self):
return {"w": [[NeuralType(('B', 'D'), LogitsType())]]}
@property
def output_types(self):
return {
"u0": [NeuralType(('B',), LogprobsType())], # check non homogeneous type
"u1": NeuralType(('B',), LabelsType()),
}
@typecheck(ignore_collections=False)
def __call__(self, w):
# input x = [[x1, x2], [x3]]
_, u00 = w[0][0].max(-1)
_, u01 = w[0][1].max(-1)
_, u10 = w[1][0].max(-1)
return [u00, u01], u10
nodeA = NestedMixedNodeA()
nodeB = NestedMixedNodeB()
input_nest = [[torch.zeros(10), torch.zeros(10)], [torch.zeros(10)]]
out_y, outA = nodeA(x1=torch.zeros(10), x2=input_nest)
outB, out_u = nodeB(w=outA)
# Perform recursive shape assert
assert out_y.neural_type.compare(NeuralType(('B', 'D'), LogprobsType()))
recursive_assert_shape(outB, torch.Size([10]))
# Assert non-homogeneous type assertions
assert outB[0].neural_type.compare(NeuralType(('B',), LogprobsType())) == NeuralTypeComparisonResult.SAME
assert outB[1].neural_type.compare(NeuralType(('B',), LogprobsType())) == NeuralTypeComparisonResult.SAME
assert out_u.neural_type.compare(NeuralType(('B',), LabelsType())) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_multi_forward_type(self):
class AdaptiveTypeCheck(Typing):
@property
def input_types(self):
if self.mode == 'train':
return {"x": NeuralType(('B',), ElementType())}
elif self.mode == 'infer':
return {"y": NeuralType(('B',), ChannelType())}
elif self.mode == 'eval':
return {"x": NeuralType(('B',), ElementType()), "y": NeuralType(('B',), ChannelType())}
else:
raise ValueError("Wrong mode of operation")
@property
def output_types(self):
if self.mode == 'train':
return {"u": NeuralType(('B',), ElementType())}
elif self.mode == 'infer':
return {"v": NeuralType(('B',), ChannelType())}
elif self.mode == 'eval':
return {"u": NeuralType(('B',), ElementType()), "v": NeuralType(('B',), ChannelType())}
else:
raise ValueError("Wrong mode of operation")
def __init__(self):
self.mode = 'train'
def __call__(self, **kwargs):
# __call__ should forward to the appropriate method for the current mode
if self.mode == 'train':
return self.train_forward(x=kwargs['x'])
elif self.mode == 'eval':
return self.eval_forward(x=kwargs['x'], y=kwargs['y'])
elif self.mode == 'infer':
return self.infer_forward(y=kwargs['y'])
@typecheck()
def train_forward(self, x):
return x + 10
@typecheck()
def eval_forward(self, x, y):
return x - 1, y - 1
@typecheck()
def infer_forward(self, y):
return y - 10
@property
def mode(self):
return self._mode
@mode.setter
def mode(self, val):
if val not in ['train', 'infer', 'eval']:
raise ValueError('mode must be either train, infer, or eval')
self._mode = val
obj = AdaptiveTypeCheck()
x = torch.zeros(10)
y = torch.full([10], fill_value=5, dtype=torch.int32)
obj.mode = 'train'
x = obj(x=x)
assert torch.all(x == 10)
assert x.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
obj.mode = 'eval'
x, y = obj(x=x, y=y)
assert torch.all(x == 9)
assert torch.all(y == 4)
assert x.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert y.neural_type.compare(NeuralType(('B',), ChannelType())) == NeuralTypeComparisonResult.SAME
obj.mode = 'infer'
y = obj(y=y)
assert torch.all(y == -6)
assert y.neural_type.compare(NeuralType(('B',), ChannelType())) == NeuralTypeComparisonResult.SAME
# Now perform assertions of wrong mode with wrong input combinations
obj.mode = 'train'
# In train mode, call eval_forward (its ports are not valid for train mode)
with pytest.raises(TypeError):
_ = obj.eval_forward(x=x, y=y)
with pytest.raises(TypeError):
# wrong input + wrong mode
_ = obj.infer_forward(y=x)
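# Summary: a single Typing class can expose different port signatures per mode via its
# input_types/output_types properties; calling a method whose arguments do not match the currently
# declared ports raises TypeError, as asserted above.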
@pytest.mark.unit
def test_input_type_override(self):
class InputTypesOverride(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
@typecheck(input_types={"y": NeuralType(('B',), CategoricalValuesType())})
def forward(self, y):
y -= 1
return y
obj = InputTypesOverride()
result = obj(x=torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert hasattr(result, 'neural_type') is False
# Test override
result2 = obj.forward(y=torch.zeros(10))
assert result2.sum() == torch.tensor(-10.0)
assert hasattr(result2, 'neural_type') is False
@pytest.mark.unit
def test_output_type_override(self):
class OutputTypes(Typing):
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x):
x += 1
return x
@typecheck(output_types={"z": NeuralType(('B',), CategoricalValuesType())})
def forward(self, z):
z -= 1
return z
obj = OutputTypes()
result = obj(x=torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert result.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
# Test passing positional args
# Positional args are allowed if input_types is not set!
result = obj(torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
# Test override
result2 = obj.forward(z=torch.zeros(10))
assert result2.sum() == torch.tensor(-10.0)
assert hasattr(result2, 'neural_type')
assert (
result2.neural_type.compare(NeuralType(('B',), CategoricalValuesType())) == NeuralTypeComparisonResult.SAME
)
@pytest.mark.unit
def test_multi_type_override(self):
class AdaptiveTypeCheck(Typing):
@property
def input_types(self):
# __call__ assumed to be for inference only,
# therefore infer types checked at class scope
return {"y": NeuralType(('B',), ChannelType())}
@property
def output_types(self):
# __call__ assumed to be for inference only,
# therefore infer types checked at class scope
return {"v": NeuralType(('B',), ChannelType())}
def __call__(self, **kwargs):
# __call__ should forward to the appropriate method for the current mode
# Let the default "forward" call be the infer mode (this is up to the developer)
# Therefore the default class-level types == infer types
return self.infer_forward(y=kwargs['y'])
@typecheck(
input_types={"x": NeuralType(('B',), ElementType())},
output_types={"u": NeuralType(('B',), ElementType())},
)
def train_forward(self, x):
return x + 10
@typecheck(
input_types={"x": NeuralType(('B',), ElementType()), "y": NeuralType(('B',), ChannelType())},
output_types={"u": NeuralType(('B',), ElementType()), "v": NeuralType(('B',), ChannelType())},
)
def eval_forward(self, x, y):
return x - 1, y - 1
@typecheck(
input_types={"y": NeuralType(('B',), ChannelType())},
output_types={"v": NeuralType(('B',), ChannelType())},
)
def infer_forward(self, y):
return y - 10
obj = AdaptiveTypeCheck()
x = torch.zeros(10)
y = torch.full([10], fill_value=5, dtype=torch.int32)
# infer mode
y = obj(y=y)
assert torch.all(y == -5)
assert y.neural_type.compare(NeuralType(('B',), ChannelType())) == NeuralTypeComparisonResult.SAME
x, y = obj.eval_forward(x=x, y=y)
assert torch.all(x == -1)
assert torch.all(y == -6)
assert x.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
assert y.neural_type.compare(NeuralType(('B',), ChannelType())) == NeuralTypeComparisonResult.SAME
x = obj.train_forward(x=x)
assert torch.all(x == 9)
assert x.neural_type.compare(NeuralType(('B',), ElementType())) == NeuralTypeComparisonResult.SAME
# Call train_forward with eval_forward's signature (extra y argument)
with pytest.raises(TypeError):
_ = obj.train_forward(x=x, y=y)
with pytest.raises(TypeError):
# wrong input + wrong mode
_ = obj.infer_forward(x=x)
@pytest.mark.unit
def test_disable_typecheck(self):
class InputOutputTypes(Typing):
@property
def input_types(self):
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": NeuralType(('B',), ElementType())}
@typecheck()
def __call__(self, x, **kwargs):
x += 1
return x
# Disable typecheck tests
with typecheck.disable_checks():
obj = InputOutputTypes()
# Execute function without kwarg
result = obj(torch.zeros(10))
assert result.sum() == torch.tensor(10.0)
assert hasattr(result, 'neural_type') is False
# Test passing wrong key for input
_ = obj(a=torch.zeros(10), x=torch.zeros(5))
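# Illustrative usage (sketch): typecheck.disable_checks() is a context manager that turns off both
# input and output validation, e.g.
#   with typecheck.disable_checks():
#       out = module(some_tensor)  # no port-name or shape checks, no neural_type attached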
@pytest.mark.unit
def test_nested_shape_mismatch(self):
class NestedShapeMismatch(Typing):
@property
def input_types(self):
return {"x": [[NeuralType(('D',), ElementType())]]} # Each element of nest will have 4 values
@property
def output_types(self):
return {"y": [[NeuralType(('D',), ElementType())]]} # Each element of nest will have 4 values
@typecheck()
def __call__(self, x):
# v-- this is to satisfy the 1-output constraint; Python will otherwise interpret x as a 3-output value
return x
def bb(dim=4):
return torch.zeros(dim)
obj = NestedShapeMismatch()
# Arbitrary nest 1 (should pass)
data = [[bb(), bb(), bb()], [bb()], [bb(), bb()]]
result = obj(x=data)
recursive_assert_shape(result, torch.Size([4]))
recursive_assert_homogeneous_type(result, NeuralType(('D',), ElementType()))
# Arbitrary nest 2 (should pass)
def bb(dim=4):
return torch.zeros(dim, dim)
data = [[bb(), bb(), bb()], [bb()], [bb(), bb()]]
# Fails since input shape is incorrect
with pytest.raises(TypeError):
_ = obj(x=data)
# Arbitrary nest 3
def bb(dim=4):
return torch.zeros(dim)
data = [[[bb(), bb(), bb()]], [[bb()], [bb(), bb()]]]
# Check should fail since nest level is 3!
with pytest.raises(TypeError):
result = obj(x=data)
@pytest.mark.unit
def test_nested_mixed_shape_mismatch(self):
class NestedMixedShapeMismatch(Typing):
@property
def input_types(self):
return {"x": [[NeuralType(('D',), ElementType())]]} # Each element of nest will have 4 values
@property
def output_types(self):
return {"y": [NeuralType(('D',), ElementType())]} # Each element of nest will have 4 values
@typecheck()
def __call__(self, x):
# v-- this is to satisfy the 1-output constraint; Python will otherwise interpret x as a 3-output value
x = x[0]
return x
def bb(dim=4):
return torch.zeros(dim)
obj = NestedMixedShapeMismatch()
# Arbitrary nest 1 (should pass)
data = [[bb(), bb(), bb()], [bb()], [bb(), bb()]]
result = obj(x=data)
recursive_assert_shape(result, torch.Size([4]))
recursive_assert_homogeneous_type(result, NeuralType(('D',), ElementType()))
# Arbitrary nest 2 (should pass)
def bb(dim=4):
return torch.zeros(dim, dim)
data = [[bb(), bb(), bb()], [bb()], [bb(), bb()]]
# Fails since input shape is incorrect
with pytest.raises(TypeError):
_ = obj(x=data)
# Arbitrary nest 3
def bb(dim=4):
return torch.zeros(dim)
data = [[[bb(), bb(), bb()]], [[bb()], [bb(), bb()]]]
# Check should fail since nest level is 3!
with pytest.raises(TypeError):
result = obj(x=data)
@pytest.mark.unit
def test_input_container_neural_types(self):
class NodeA(Typing):
@property
def input_types(self):
return {"x": [NeuralType(('B',), ElementType())]}
@property
def output_types(self):
return {"y": NeuralType(('B', 'D'), LogitsType())}
@typecheck()
def __call__(self, x: list):
x1, x2, x3 = x # unpack x
y = torch.randn(x1.shape[0], 4)
return y
nodeA = NodeA()
outA = nodeA(x=[torch.zeros(10), torch.zeros(10), torch.zeros(10)])
assert outA.neural_type.compare(NeuralType(('B', 'D'), LogitsType()))
@pytest.mark.unit
def test_input_container_neural_types_incorrect(self):
class NodeA(Typing):
@property
def input_types(self):
# Nest depth level of 2
return {"x": [[NeuralType(('B',), ElementType())]]}
@property
def output_types(self):
return {"y": NeuralType(('B', 'D'), LogitsType())}
@typecheck()
def __call__(self, x: list):
x1, x2, x3 = x # unpack x
y = torch.randn(x1.shape[0], 4)
return y
nodeA = NodeA()
# Input nest level of 1
with pytest.raises(TypeError):
outA = nodeA(x=[torch.zeros(10), torch.zeros(10), torch.zeros(10)])
@pytest.mark.unit
def test_output_container_neural_types_incorrect(self):
class NodeA(Typing):
@property
def input_types(self):
# Nest depth level of 2
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": [[NeuralType(('B', 'D'), LogitsType())]]}
@typecheck()
def __call__(self, x):
y = torch.randn(x.shape[0], 4)
return y, y, y
nodeA = NodeA()
# Input nest level of 1
with pytest.raises(TypeError):
outA = nodeA(x=torch.zeros(10))
@pytest.mark.unit
def test_output_container_neural_types_no_tuple_wrap(self):
class NodeA(Typing):
@property
def input_types(self):
# Nest depth level of 2
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": [NeuralType(('B', 'D'), LogitsType())]}
@typecheck()
def __call__(self, x):
y = torch.randn(x.shape[0], 4)
y = [y, y, y]
return y
nodeA = NodeA()
# Input nest level of 1
outA = nodeA(x=torch.zeros(10))
assert len(outA) == 3
for i in range(len(outA)):
assert outA[i].neural_type.compare(NeuralType(('B', 'D'), LogitsType()))
@pytest.mark.unit
def test_output_container_neural_types_explicit_tuple_wrap(self):
class NodeA(Typing):
@property
def input_types(self):
# Nest depth level of 2
return {"x": NeuralType(('B',), ElementType())}
@property
def output_types(self):
return {"y": [NeuralType(('B', 'D'), LogitsType())]}
@typecheck()
def __call__(self, x):
y = torch.randn(x.shape[0], 4)
y = [y, y, y]
return (y,)
nodeA = NodeA()
# Input nest level of 1
outA = nodeA(x=torch.zeros(10))
assert len(outA) == 1
assert len(outA[0]) == 3
for i in range(len(outA)):
assert outA[0][i].neural_type.compare(NeuralType(('B', 'D'), LogitsType()))
| NeMo-main | tests/core/test_typecheck.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import os
import re
from pathlib import Path
from typing import Any
import pytest
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from omegaconf.errors import OmegaConfBaseException
from pytorch_lightning import Callback
from pytorch_lightning.loops import _TrainingEpochLoop
from nemo.constants import NEMO_ENV_VARNAME_VERSION
from nemo.core.classes import ModelPT
from nemo.utils.exp_manager import (
CheckpointMisconfigurationError,
LoggerMisconfigurationError,
NotFoundError,
exp_manager,
)
class MyTestOptimizer(torch.optim.Optimizer):
def __init__(self, params):
self._step = 0
super().__init__(params, {})
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if self._step == 0:
p.data = 0.1 * torch.ones(p.shape)
elif self._step == 1:
p.data = 0.0 * torch.ones(p.shape)
else:
p.data = 0.01 * torch.ones(p.shape)
self._step += 1
return loss
class DoNothingOptimizer(torch.optim.Optimizer):
def __init__(self, params):
self._step = 0
super().__init__(params, {})
@torch.no_grad()
def step(self, closure=None):
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
self._step += 1
return loss
class OnesDataset(torch.utils.data.Dataset):
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.ones(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(ModelPT):
def __init__(self, *args, **kwargs):
cfg = OmegaConf.structured({})
super().__init__(cfg)
pl.seed_everything(1234)
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
def train_dataloader(self):
dataset = OnesDataset(2)
return torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=8)
def val_dataloader(self):
dataset = OnesDataset(10)
return torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=8)
def forward(self, batch):
output = self.l1(batch)
output = torch.nn.functional.l1_loss(output, torch.zeros(output.size()).to(output.device))
return output
def validation_step(self, batch, batch_idx):
self.loss = self(batch)
return self.loss
def training_step(self, batch, batch_idx):
return self(batch)
def configure_optimizers(self):
return MyTestOptimizer(self.parameters())
# return torch.optim.Adam(self.parameters(), lr=0.1)
def list_available_models(self):
pass
def setup_training_data(self):
pass
def setup_validation_data(self):
pass
def on_validation_epoch_end(self):
self.log("val_loss", torch.stack([self.loss]).mean())
class DoNothingModel(ExampleModel):
def configure_optimizers(self):
return DoNothingOptimizer(self.parameters())
class TestExpManager:
@pytest.mark.unit
def test_omegaconf(self):
"""Ensure omegaconf raises an error when an unexcepted argument is passed"""
with pytest.raises(OmegaConfBaseException):
exp_manager(pl.Trainer(accelerator='cpu'), {"unused": 1})
@pytest.mark.unit
def test_trainer_loggers(self, tmp_path):
""" Test that a trainer with logger errors out with a number of arguments. Test that it works with
create_tensorboard_logger set to False
"""
test_trainer = pl.Trainer(accelerator='cpu') # Should create logger and modelcheckpoint
with pytest.raises(LoggerMisconfigurationError): # Fails because exp_manager defaults to trainer
exp_manager(test_trainer, {"exp_dir": str(tmp_path)})
with pytest.raises(LoggerMisconfigurationError): # Fails because exp_manager defaults to trainer
exp_manager(test_trainer, {"explicit_log_dir": str(tmp_path)})
with pytest.raises(LoggerMisconfigurationError): # Fails because exp_manager defaults to trainer
exp_manager(test_trainer, {"resume_if_exists": True})
# Check that exp_manager uses trainer.logger, it's exp_dir, name, and version
log_dir = exp_manager(test_trainer, {"create_tensorboard_logger": False, "create_checkpoint_callback": False})
assert log_dir.resolve() == Path("./lightning_logs/version_0").resolve()
assert Path("./lightning_logs").exists()
assert Path("./lightning_logs/version_0").exists()
# Check that a trainer without a logger gets a logger attached to it
test_trainer = pl.Trainer(accelerator='cpu', logger=False)
log_dir = exp_manager(
test_trainer,
{"create_tensorboard_logger": True, "create_checkpoint_callback": False, "exp_dir": str(tmp_path)},
)
assert isinstance(test_trainer.logger, pl.loggers.TensorBoardLogger)
test_trainer = pl.Trainer(accelerator='cpu', logger=False)
# Check that a create_wandb_logger=True errors out unless wandb_logger_kwargs is passed.
with pytest.raises(ValueError):
log_dir = exp_manager(
test_trainer,
{
"create_tensorboard_logger": False,
"create_checkpoint_callback": False,
"exp_dir": str(tmp_path),
"create_wandb_logger": True,
},
)
# Check that a WandbLogger is attached to logger if create_wandb_logger=True and wandb_logger_kwargs has name
# and project
log_dir = exp_manager(
test_trainer,
{
"create_tensorboard_logger": False,
"create_checkpoint_callback": False,
"exp_dir": str(tmp_path),
"create_wandb_logger": True,
"wandb_logger_kwargs": {"name": "", "project": "", "offline": True},
},
)
assert isinstance(test_trainer.logger, pl.loggers.WandbLogger)
@pytest.mark.unit
def test_checkpoint_configurations(self):
""" Test that trainer creating modelcheckpoint and asking exp_manager to do it too results in errors, but
is error free if only one is asked to do so.
"""
disable_tb_logger = {"create_tensorboard_logger": False}
test_trainer = pl.Trainer(accelerator='cpu') # Should create logger and modelcheckpoint
with pytest.raises(CheckpointMisconfigurationError): # Fails because both try to create modelcheckpoint
exp_manager(test_trainer, disable_tb_logger)
# Should succeed without error
exp_manager(test_trainer, {"create_checkpoint_callback": False, "create_tensorboard_logger": False})
test_trainer_2 = pl.Trainer(accelerator='cpu', enable_checkpointing=False)
exp_manager(test_trainer_2, disable_tb_logger) # Should succeed without error
@pytest.mark.unit
def test_default_log_dir(self):
"""Check the default of ./nemo_experiments/default/datetime works as intended"""
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
log_dir = exp_manager(test_trainer, {"create_tensorboard_logger": False, "create_checkpoint_callback": False})
assert (log_dir / "..").resolve() == Path("./nemo_experiments/default/").resolve()
assert Path("./nemo_experiments").exists()
assert Path("./nemo_experiments/default/").exists()
sub_dirs = [x for x in Path("./nemo_experiments/default/").iterdir() if x.is_dir()]
assert len(sub_dirs) == 1
assert re.match(r"[0-9]{4}-[0-9]{2}-[0-9]{2}_[0-9]{2}-[0-9]{2}-[0-9]{2}", sub_dirs[0].name)
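# Default layout: ./nemo_experiments/<name>/<version>, where <name> defaults to "default" and
# <version> is a datetime stamp (YYYY-MM-DD_HH-MM-SS) unless use_datetime_version is disabled
# (see test_log_dir_overrides below).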
@pytest.mark.unit
def test_log_dir_overrides(self, monkeypatch, tmp_path):
"""Check a variety of trainer options with exp_manager"""
# Checks that explicit_log_dir ignores exp_dir, name, and version
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
log_dir = exp_manager(test_trainer, {"explicit_log_dir": str(tmp_path / "test_log_dir_overrides")})
assert log_dir.resolve() == (tmp_path / "test_log_dir_overrides").resolve()
assert Path(tmp_path).exists()
assert Path(tmp_path / "test_log_dir_overrides").exists()
# Checks that exp_manager uses exp_dir, default name, and explicit version
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
log_dir = exp_manager(test_trainer, {"exp_dir": str(tmp_path / "test_no_name"), "version": 957})
assert log_dir.resolve() == (tmp_path / "test_no_name" / "default" / "957").resolve()
assert Path(tmp_path).exists()
assert Path(tmp_path / "test_no_name" / "default" / "957").exists()
monkeypatch.delenv(NEMO_ENV_VARNAME_VERSION)
# Checks that use_datetime_version False toggle works
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
log_dir = exp_manager(test_trainer, {"exp_dir": str(tmp_path / "test_no_name"), "use_datetime_version": False})
assert log_dir.resolve() == (tmp_path / "test_no_name" / "default" / "version_0").resolve()
assert Path(tmp_path).exists()
assert Path(tmp_path / "test_no_name" / "default" / "version_0").exists()
monkeypatch.delenv(NEMO_ENV_VARNAME_VERSION)
# Checks that use_datetime_version False toggle works and version increments
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
log_dir = exp_manager(test_trainer, {"exp_dir": str(tmp_path / "test_no_name"), "use_datetime_version": False})
assert log_dir.resolve() == (tmp_path / "test_no_name" / "default" / "version_1").resolve()
assert Path(tmp_path).exists()
assert Path(tmp_path / "test_no_name" / "default" / "version_1").exists()
@pytest.mark.unit
def test_resume(self, tmp_path):
""" Tests the resume capabilities of exp_manager"""
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
# Error because explicit_log_dir does not exist
with pytest.raises(NotFoundError):
exp_manager(
test_trainer,
{
"exp_dir": str(tmp_path / "test_resume"),
"resume_if_exists": True,
"explicit_log_dir": "Does_not_exist",
},
)
# Error because checkpoints folder does not exist
with pytest.raises(NotFoundError):
exp_manager(test_trainer, {"resume_if_exists": True, "exp_dir": str(tmp_path / "test_resume")})
# No error because we tell exp_manager to ignore NotFoundError
exp_manager(
test_trainer,
{
"resume_if_exists": True,
"exp_dir": str(tmp_path / "test_resume_2"),
"resume_ignore_no_checkpoint": True,
},
)
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints").mkdir(parents=True)
# Error because checkpoints do not exist in folder
with pytest.raises(NotFoundError):
exp_manager(
test_trainer,
{
"resume_if_exists": True,
"explicit_log_dir": str(tmp_path / "test_resume" / "default" / "version_0"),
},
)
Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--end.ckpt").touch()
# Error because an *end.ckpt is in the folder, indicating that training has already finished
with pytest.raises(ValueError):
exp_manager(
test_trainer,
{
"resume_if_exists": True,
"explicit_log_dir": str(tmp_path / "test_resume" / "default" / "version_0"),
},
)
Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--end.ckpt").unlink()
Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt").touch()
Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel2--last.ckpt").touch()
# Error because multiple *last.ckpt files are in the folder; with more than one, we don't know which to restore
with pytest.raises(ValueError):
exp_manager(
test_trainer,
{
"resume_if_exists": True,
"explicit_log_dir": str(tmp_path / "test_resume" / "default" / "version_0"),
},
)
# Finally succeed
Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel2--last.ckpt").unlink()
log_dir = exp_manager(
test_trainer,
{"resume_if_exists": True, "explicit_log_dir": str(tmp_path / "test_resume" / "default" / "version_0")},
)
checkpoint = Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt")
assert Path(test_trainer.ckpt_path).resolve() == checkpoint.resolve()
# Succeed again and make sure that run_0 exists and previous log files were moved
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
exp_manager(test_trainer, {"resume_if_exists": True, "explicit_log_dir": str(log_dir)})
checkpoint = Path(tmp_path / "test_resume" / "default" / "version_0" / "checkpoints" / "mymodel--last.ckpt")
assert Path(test_trainer.ckpt_path).resolve() == checkpoint.resolve()
prev_run_dir = Path(tmp_path / "test_resume" / "default" / "version_0" / "run_0")
assert prev_run_dir.exists()
prev_log = Path(tmp_path / "test_resume" / "default" / "version_0" / "run_0" / "lightning_logs.txt")
assert prev_log.exists()
# Error because `dirpath` is specified but contains no checkpoint
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False)
dirpath_checkpoint_dir = Path(tmp_path / "test_resume" / "dirpath_test" / "ckpts")
dirpath_checkpoint_dir.mkdir(parents=True)
with pytest.raises(NotFoundError):
exp_manager(
test_trainer,
{
"resume_if_exists": True,
"checkpoint_callback_params": {"dirpath": str(dirpath_checkpoint_dir)},
"explicit_log_dir": str(log_dir),
},
)
# Check that model loads from `dirpath` and not <log_dir>/checkpoints
dirpath_log_dir = Path(tmp_path / "test_resume" / "dirpath_test" / "logs")
dirpath_log_dir.mkdir(parents=True)
dirpath_checkpoint = Path(dirpath_checkpoint_dir / "mymodel--last.ckpt")
dirpath_checkpoint.touch()
exp_manager(
test_trainer,
{
"resume_if_exists": True,
"checkpoint_callback_params": {"dirpath": str(dirpath_checkpoint_dir)},
"explicit_log_dir": str(dirpath_log_dir),
},
)
assert Path(test_trainer.ckpt_path).resolve() == dirpath_checkpoint.resolve()
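# Resume summary, as exercised above: resume_if_exists looks for *last.ckpt under <log_dir>/checkpoints
# (or under checkpoint_callback_params.dirpath when given); a missing folder or checkpoint raises
# NotFoundError unless resume_ignore_no_checkpoint is set, an *end.ckpt or multiple *last.ckpt files
# raise ValueError, and on success trainer.ckpt_path points at the checkpoint while previous log files
# are moved into run_<n>.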
@pytest.mark.unit
def test_nemo_checkpoint_save_best_model_1(self, tmp_path):
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=4)
exp_manager(
test_trainer,
{"checkpoint_callback_params": {"save_best_model": True}, "explicit_log_dir": str(tmp_path / "test")},
)
model = ExampleModel()
test_trainer.fit(model)
assert Path(str(tmp_path / "test" / "checkpoints" / "default.nemo")).exists()
model = ExampleModel.restore_from(str(tmp_path / "test" / "checkpoints" / "default.nemo"))
assert float(model(torch.tensor([1.0, 1.0], device=model.device))) == 0.0
@pytest.mark.unit
def test_nemo_checkpoint_save_best_model_2(self, tmp_path):
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=4)
exp_manager(
test_trainer, {"explicit_log_dir": str(tmp_path / "test")},
)
model = ExampleModel()
test_trainer.fit(model)
assert Path(str(tmp_path / "test" / "checkpoints" / "default.nemo")).exists()
model = ExampleModel.restore_from(str(tmp_path / "test" / "checkpoints" / "default.nemo"))
assert math.fabs(float(model(torch.tensor([1.0, 1.0], device=model.device))) - 0.03) < 1e-5
@pytest.mark.unit
def test_nemo_checkpoint_always_save_nemo(self, tmp_path):
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=4)
exp_manager(
test_trainer,
{
"checkpoint_callback_params": {"save_best_model": True, "always_save_nemo": True},
"explicit_log_dir": str(tmp_path / "test"),
},
)
model = ExampleModel()
test_trainer.fit(model)
assert Path(str(tmp_path / "test" / "checkpoints" / "default.nemo")).exists()
model = ExampleModel.restore_from(str(tmp_path / "test" / "checkpoints" / "default.nemo"))
assert float(model(torch.tensor([1.0, 1.0], device=model.device))) == 0.0
@pytest.mark.unit
def test_nemo_checkpoint_make_checkpoint_dir(self, tmp_path):
test_trainer = pl.Trainer(
accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=4, check_val_every_n_epoch=5
)
exp_manager(
test_trainer,
{
"checkpoint_callback_params": {"save_best_model": True, "always_save_nemo": True},
"explicit_log_dir": str(tmp_path / "test"),
},
)
model = ExampleModel()
test_trainer.fit(model)
assert Path(str(tmp_path / "test" / "checkpoints" / "default.nemo")).exists()
@pytest.mark.unit
def test_nemo_checkpoint_restore_model(self, tmp_path):
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=4)
exp_manager(
test_trainer,
{
"checkpoint_callback_params": {"save_top_k": 1, "save_last": True},
"explicit_log_dir": str(tmp_path / "test"),
},
)
model = ExampleModel()
test_trainer.fit(model)
checkpoint = list(Path(str(tmp_path / "test" / "checkpoints")).glob("*.ckpt"))
# Make sure that only the best and the last checkpoints are saved
assert len(checkpoint) == 2
assert math.fabs(float(model(torch.tensor([1.0, 1.0], device=model.device))) - 0.03) < 1e-5
test_trainer = pl.Trainer(accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=5)
exp_manager(
test_trainer,
{
"checkpoint_callback_params": {"save_top_k": 1, "save_last": False},
"explicit_log_dir": str(tmp_path / "test"),
"resume_if_exists": True,
"resume_past_end": True,
},
)
model = DoNothingModel()
model.l1.weight = torch.nn.Parameter(torch.tensor((0.0, 0.0)).unsqueeze(0))
model.l1.bias = torch.nn.Parameter(torch.tensor(1.0))
assert math.fabs(float(model(torch.tensor([1.0, 1.0], device=model.device))) - 1.0) < 1e-5
test_trainer.fit(model)
assert math.fabs(float(model(torch.tensor([1.0, 1.0], device=model.device))) - 0.03) < 1e-5
@pytest.mark.unit
def test_last_checkpoint_saved(self, tmp_path):
max_steps = 64
tmp_path = tmp_path / "test_1"
class TestModel(ExampleModel):
def train_dataloader(self):
dataset = OnesDataset(64)
return torch.utils.data.DataLoader(dataset, batch_size=1)
trainer = pl.Trainer(
accelerator='cpu', enable_checkpointing=False, logger=False, max_steps=max_steps, val_check_interval=0.33
)
exp_manager(
trainer,
{
"explicit_log_dir": str(tmp_path),
"checkpoint_callback_params": {"filename": f"{{val_loss:.4f}}-{{epoch}}-{{step}}"},
},
)
model = TestModel()
trainer.fit(model)
checkpoint_dir = Path(str(tmp_path / "checkpoints"))
model_path = checkpoint_dir / "val_loss=0.0300-epoch=1-step=64-last.ckpt"
last_saved_checkpoint = torch.load(model_path)
assert max_steps == last_saved_checkpoint['global_step']
# restart training, ensure global step starts correctly
class AssertCallback(Callback):
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
assert trainer.global_step == max_steps
def on_train_batch_end(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch: Any, batch_idx: int
) -> None:
# we should only be running for one more step.
assert trainer.global_step == max_steps + 1
trainer = pl.Trainer(
accelerator='cpu',
enable_checkpointing=False,
logger=False,
max_steps=65,
val_check_interval=0.33,
callbacks=AssertCallback(),
)
exp_manager(
trainer,
{
"explicit_log_dir": str(tmp_path),
"checkpoint_callback_params": {"filename": f"{{val_loss:.4f}}-{{epoch}}-{{step}}"},
},
)
model = TestModel()
trainer.fit(model, ckpt_path=model_path)
@pytest.mark.unit
def test_resume_checkpoint_skip_validation(self, tmp_path):
"""Test to ensure that when we resume from a checkpoint, we do not re-run validation unnecessarily."""
tmp_path = tmp_path / "test_2"
def run_training(resume_path=None):
class TestModel(ExampleModel):
def train_dataloader(self):
dataset = OnesDataset(10)
return torch.utils.data.DataLoader(dataset, batch_size=1)
class AssertCallback(Callback):
recorded_validations = 0
recorded_train_steps = 0
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.recorded_validations += 1
def on_train_batch_end(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs, batch: Any, batch_idx: int
) -> None:
self.recorded_train_steps += 1
def on_train_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if resume_path is not None:
# we should only run validation at the end of training.
assert self.recorded_validations == 1
# we continue from half way
assert self.recorded_train_steps == len(pl_module.train_dataloader()) // 2
else:
# we've run validation within the middle of training and at the end of training.
assert self.recorded_validations == 2
assert self.recorded_train_steps == len(pl_module.train_dataloader())
model = TestModel()
trainer = pl.Trainer(
accelerator='cpu',
enable_checkpointing=False,
logger=False,
callbacks=[AssertCallback()],
val_check_interval=0.5,
num_sanity_val_steps=0,
max_epochs=1,
)
exp_manager(
trainer,
{"explicit_log_dir": str(tmp_path), "checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"}},
)
trainer.fit(model, ckpt_path=resume_path)
run_training()
resume_path = tmp_path / 'checkpoints/epoch=0-step=5.ckpt'
run_training(resume_path)
def test_warning_validation_skipping_when_custom_epoch_loop(self, tmp_path):
"""When using validation skipping on restart with a custom epoch loop, we warn the user that we skip
support to not interfere with their custom logic.
"""
tmp_path = tmp_path / "test_3"
class CustomLoop(_TrainingEpochLoop):
...
trainer = pl.Trainer(
accelerator='cpu', enable_checkpointing=False, logger=False, max_epochs=1, val_check_interval=0.33
)
## _TrainingEpochLoop in PTL 2.0 takes trainer as an arg
loop = CustomLoop(trainer)
trainer.fit_loop.epoch_loop = loop
with pytest.warns(UserWarning, match="Detected custom epoch loop"):
exp_manager(trainer, {"explicit_log_dir": str(tmp_path)})
| NeMo-main | tests/core/test_exp_manager.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import random
import omegaconf
import pytest
import pytorch_lightning as pl
import torch
import torch.optim
from pytorch_lightning.utilities import rank_zero_only
from nemo.core import config, optim
from nemo.core.optim.lr_scheduler import AVAILABLE_SCHEDULERS
from nemo.core.optim.optimizers import AVAILABLE_OPTIMIZERS
from nemo.utils import logging
class TempModel(torch.nn.Module):
def __init__(self):
super(TempModel, self).__init__()
self.layer = torch.nn.Linear(5, 1)
def forward(self, x):
x = self.layer(x)
return x
class OptCounter(torch.optim.SGD):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for group in self.param_groups:
group.setdefault('count', 0)
def step(self, closure=None):
for group in self.param_groups:
group['count'] += 1
super().step(closure)
class RandomDataset(torch.utils.data.Dataset):
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.randn(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(pl.LightningModule):
def __init__(self, batch_size, dataset_len, drop_last, max_steps):
super().__init__()
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
self.batch_size = batch_size
self.dataset_len = dataset_len
self.drop_last = drop_last
self.max_steps = max_steps
def train_dataloader(self):
dataset = RandomDataset(self.dataset_len)
return torch.utils.data.DataLoader(dataset, batch_size=self.batch_size, drop_last=self.drop_last)
def training_step(self, batch, batch_idx):
output = self.l1(batch)
output = torch.nn.functional.l1_loss(output, torch.ones(output.size()).to(output.device))
return {"loss": output}
def configure_optimizers(self):
self.my_opt = OptCounter(self.parameters(), lr=0.02)
return self.my_opt
class Callback(pl.callbacks.Callback):
@rank_zero_only
def on_train_end(self, trainer, module):
count = module.my_opt.param_groups[0]['count']
if trainer.global_step != count or trainer.global_step != module.max_steps:
logging.debug(f"max_epochs: {trainer.max_epochs}")
logging.debug(f"accumulate_grad_batches: {trainer.accumulate_grad_batches}")
logging.debug(f"limit_train_batches: {trainer.limit_train_batches}")
logging.debug(f"num_devices: {trainer.num_devices}")
logging.debug(f"batch_size: {module.batch_size}")
logging.debug(f"dataset_len: {module.dataset_len}")
logging.debug(f"drop_last: {module.drop_last}")
logging.debug(f"{len(trainer.train_dataloader)}")
logging.debug(f"{trainer.num_training_batches }")
self.assert_counts(trainer, module, count)
def assert_counts(self, trainer, module, count):
assert trainer.global_step == count, f"{trainer.global_step} != {count} != {module.max_steps}"
assert trainer.global_step == module.max_steps, f"{trainer.global_step} != {count} != {module.max_steps}"
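# Illustrative sketch (not part of the original tests): the count checked by the Callback
# above is, conceptually, "optimizer steps per epoch x number of epochs". A hedged,
# simplified version of that arithmetic, ignoring limit_train_batches and multi-device
# sharding (the tests themselves rely on optim.lr_scheduler.compute_max_steps):
def _sketch_expected_optimizer_steps(max_epochs, dataset_len, batch_size, accumulate_grad_batches, drop_last):
    batches_per_epoch = dataset_len // batch_size if drop_last else math.ceil(dataset_len / batch_size)
    optimizer_steps_per_epoch = math.ceil(batches_per_epoch / accumulate_grad_batches)
    return optimizer_steps_per_epoch * max_epochs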
class SchedulerNoOpCallback(Callback):
def on_train_batch_end(self, trainer: pl.Trainer, pl_module, outputs, batch, batch_idx):
# pl_module.max_steps is "original" max steps without trainer extra steps.
if (trainer.global_step + 1) % 3 == 0 and (trainer.global_step + 1) < pl_module.max_steps:
schedulers = trainer.lr_scheduler_configs
for scheduler in schedulers:
                # Decrement the counter by 2, then call scheduler.step() to perform a no-op
                # as well as update the optimizer lr in all param groups
scheduler.scheduler.last_epoch -= 2
scheduler.scheduler.step()
# Increase the max step count by 1
trainer.fit_loop.epoch_loop.max_steps = trainer.fit_loop.epoch_loop.max_steps + 1
def assert_counts(self, trainer, module, count):
num_skips = module.max_steps // 3
extra_steps = module.max_steps + num_skips
assert trainer.global_step == count, f"{trainer.global_step} != {count} != {extra_steps}"
assert trainer.global_step == extra_steps, f"{trainer.global_step} != {count} != {extra_steps}"
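# Illustrative sketch (not part of the original tests): the rewind-and-step trick used by
# SchedulerNoOpCallback above, shown in isolation. It relies on the scheduler computing its
# lr as a closed-form function of `last_epoch`; LambdaLR is used here purely for demonstration.
def _sketch_scheduler_noop_step():
    layer = torch.nn.Linear(2, 1)
    opt = torch.optim.SGD(layer.parameters(), lr=0.1)
    sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=lambda step: 1.0 / (1 + step))
    opt.step()
    sched.step()  # last_epoch == 1
    lr_after_first = sched.get_last_lr()[0]
    opt.step()
    sched.step()  # last_epoch == 2, lr has decayed one step further
    sched.last_epoch -= 2  # rewind the internal counter by two ...
    sched.step()  # ... and step once: lr is back to its value after the first step
    assert abs(sched.get_last_lr()[0] - lr_after_first) < 1e-12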
class TestOptimizersSchedulers:
INITIAL_LR = 0.1
MIN_LR = 1e-3
MAX_STEPS = 10
D_MODEL = 16
    # Apex optimizers require CUDA, and these tests run on CPU only
@pytest.mark.unit
def test_get_optimizer(self):
model = TempModel()
if torch.cuda.is_available():
model.cuda()
for opt_name in AVAILABLE_OPTIMIZERS.keys():
if opt_name == 'fused_adam':
if not torch.cuda.is_available():
continue
if opt_name == 'distributed_fused_adam':
# TODO: this test fails when run with all other tests, we need to move this test to nightly or CI
continue
# if not torch.cuda.is_available() or not torch.distributed.is_nccl_available():
# continue
# if not torch.distributed.is_initialized():
# torch.distributed.init_process_group(
# 'nccl', world_size=1, rank=0, store=torch.distributed.HashStore(),
# )
opt_cls = optim.get_optimizer(opt_name)
if opt_name == 'adafactor':
# Adafactor's default mode uses relative_step without any lr.
opt = opt_cls(model.parameters())
else:
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
assert isinstance(opt, AVAILABLE_OPTIMIZERS[opt_name])
@pytest.mark.unit
def test_register_optimizer(self):
class TempOpt(torch.optim.SGD):
pass
class TempOptParams(config.optimizers.SGDParams):
pass
optim.register_optimizer('TempOpt', TempOpt, TempOptParams)
model = TempModel()
opt_cls = optim.get_optimizer('TempOpt')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
assert isinstance(opt, TempOpt)
@pytest.mark.unit
def test_optim_config_parse_bypass(self):
basic_optim_config = {'weight_decay': 0.001, 'betas': [0.8, 0.5]}
parsed_params = optim.parse_optimizer_args('novograd', basic_optim_config)
assert parsed_params['weight_decay'] == basic_optim_config['weight_decay']
assert parsed_params['betas'][0] == basic_optim_config['betas'][0]
assert parsed_params['betas'][1] == basic_optim_config['betas'][1]
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = optim.parse_optimizer_args('novograd', dict_config)
assert parsed_params['weight_decay'] == dict_config['weight_decay']
assert parsed_params['betas'][0] == dict_config['betas'][0]
assert parsed_params['betas'][1] == dict_config['betas'][1]
@pytest.mark.unit
def test_optim_config_parse_arg_by_name(self):
basic_optim_config = {'name': 'auto', 'weight_decay': 0.001, 'betas': [0.8, 0.5]}
parsed_params = optim.parse_optimizer_args('novograd', basic_optim_config)
assert parsed_params['weight_decay'] == basic_optim_config['weight_decay']
assert parsed_params['betas'][0] == basic_optim_config['betas'][0]
assert parsed_params['betas'][1] == basic_optim_config['betas'][1]
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = optim.parse_optimizer_args('novograd', dict_config)
assert parsed_params['weight_decay'] == dict_config['weight_decay']
assert parsed_params['betas'][0] == dict_config['betas'][0]
assert parsed_params['betas'][1] == dict_config['betas'][1]
with pytest.raises(omegaconf.errors.ConfigKeyError):
optim.parse_optimizer_args('sgd', dict_config)
@pytest.mark.unit
def test_optim_config_parse_arg_by_target(self):
basic_optim_config = {
'_target_': 'nemo.core.config.NovogradParams',
'params': {'weight_decay': 0.001, 'betas': [0.8, 0.5]},
}
basic_optim_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = optim.parse_optimizer_args('novograd', basic_optim_config)
assert parsed_params['weight_decay'] == basic_optim_config['params']['weight_decay']
assert parsed_params['betas'][0] == basic_optim_config['params']['betas'][0]
assert parsed_params['betas'][1] == basic_optim_config['params']['betas'][1]
dict_config = omegaconf.OmegaConf.create(basic_optim_config)
parsed_params = optim.parse_optimizer_args('novograd', dict_config)
assert parsed_params['weight_decay'] == dict_config['params']['weight_decay']
assert parsed_params['betas'][0] == dict_config['params']['betas'][0]
assert parsed_params['betas'][1] == dict_config['params']['betas'][1]
# Names are ignored when passing class path
# This will be captured during optimizer instantiation
output_config = optim.parse_optimizer_args('sgd', dict_config)
sgd_config = vars(config.SGDParams())
novograd_config = vars(config.NovogradParams())
assert set(output_config.keys()) != set(sgd_config.keys())
assert set(output_config.keys()) == set(novograd_config)
@pytest.mark.unit
def test_get_scheduler(self):
model = TempModel()
optimizer = optim.Novograd(model.parameters(), lr=self.INITIAL_LR)
for sched_name in AVAILABLE_SCHEDULERS.keys():
sched_cls = optim.lr_scheduler.get_scheduler(sched_name)
try:
sched = sched_cls(optimizer)
assert isinstance(sched, AVAILABLE_SCHEDULERS[sched_name])
continue
except Exception:
pass
try:
sched = sched_cls(optimizer, max_steps=self.MAX_STEPS)
assert isinstance(sched, AVAILABLE_SCHEDULERS[sched_name])
continue
except Exception:
pass
@pytest.mark.unit
def test_register_scheduler(self):
class TempSched(optim.lr_scheduler.CosineAnnealing):
pass
class TempSchedParams(config.schedulers.CosineAnnealingParams):
pass
optim.lr_scheduler.register_scheduler('TempSched', TempSched, TempSchedParams)
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
sched_cls = optim.lr_scheduler.get_scheduler('TempSched')
sched = sched_cls(opt, max_steps=self.MAX_STEPS)
assert isinstance(sched, TempSched)
@pytest.mark.unit
def test_sched_config_parse_simple(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
basic_sched_config = {'name': 'CosineAnnealing', 'max_steps': 10}
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
@pytest.mark.unit
def test_sched_config_parse_from_cls(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
basic_sched_config = {
'_target_': 'nemo.core.config.CosineAnnealingParams',
'params': {'min_lr': 0.1},
'max_steps': self.MAX_STEPS,
}
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
assert isinstance(scheduler_setup['scheduler'], optim.lr_scheduler.CosineAnnealing)
@pytest.mark.unit
def test_sched_config_parse_reduce_on_plateau(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
reduce_on_plateau_parameters = {
'mode': 'min',
'factor': 0.5,
'patience': 1,
'threshold': 1e-4,
'threshold_mode': 'rel',
'min_lr': 1e-6,
'eps': 1e-7,
'verbose': True,
'cooldown': 1,
}
basic_sched_config = {
'name': 'ReduceLROnPlateau',
'monitor': 'val_loss',
'reduce_on_plateau': True,
'max_steps': self.MAX_STEPS,
}
basic_sched_config.update(reduce_on_plateau_parameters)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, basic_sched_config)
assert isinstance(scheduler_setup['scheduler'], torch.optim.lr_scheduler.ReduceLROnPlateau)
for k, v in reduce_on_plateau_parameters.items():
if k == 'min_lr':
k += 's'
v = [v]
found_v = getattr(scheduler_setup['scheduler'], k)
assert (
found_v == v
), f"Wrong value `{repr(found_v)}` for `ReduceLROnPlateau` parameter `{k}`. Expected `{repr(v)}`."
dict_config = omegaconf.OmegaConf.create(basic_sched_config)
scheduler_setup = optim.lr_scheduler.prepare_lr_scheduler(opt, dict_config)
assert isinstance(scheduler_setup['scheduler'], torch.optim.lr_scheduler.ReduceLROnPlateau)
for k, v in reduce_on_plateau_parameters.items():
if k == 'min_lr':
k += 's'
v = [v]
found_v = getattr(scheduler_setup['scheduler'], k)
assert (
found_v == v
), f"Wrong value `{repr(found_v)}` for `ReduceLROnPlateau` parameter `{k}`. Expected `{repr(v)}`."
@pytest.mark.unit
def test_WarmupPolicy(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupPolicy(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] == self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.WarmupPolicy(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 4:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] == self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_WarmupHoldPolicy(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupHoldPolicy(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] == self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 4:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] == self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup + Hold steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 4:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] == self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_WarmupAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.WarmupAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.WarmupAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup + Hold steps available
policy = optim.lr_scheduler.WarmupHoldPolicy(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 4:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] == self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_SquareAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.SquareAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.SquareAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_SquareRootAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.SquareRootAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.SquareRootAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_CosineAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.CosineAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.CosineAnnealing(opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup + Constant steps available
policy = optim.lr_scheduler.CosineAnnealing(
opt, warmup_steps=3, constant_steps=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 3:
assert policy.get_last_lr()[0] <= self.INITIAL_LR + 1e-5
elif i > 3 and i <= 8:
assert policy.get_last_lr()[0] == policy._get_lr(i)[0]
else:
assert policy.get_last_lr()[0] == self.MIN_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Noam scheduler should decay past MAX_STEPS - run two schedulers in parallel to test it
@pytest.mark.unit
def test_NoamAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt1 = opt_cls(model.parameters(), lr=self.INITIAL_LR)
opt2 = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy1 = optim.lr_scheduler.NoamAnnealing(
opt1, d_model=self.D_MODEL, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
policy2 = optim.lr_scheduler.NoamAnnealing(
opt2, d_model=self.D_MODEL, max_steps=self.MAX_STEPS * 2, min_lr=self.MIN_LR
)
initial_lr = policy1.get_last_lr()[0]
assert initial_lr == self.D_MODEL ** (-0.5) * self.INITIAL_LR
for i in range(self.MAX_STEPS * 2):
assert self.MIN_LR < policy1.get_last_lr()[0] <= self.INITIAL_LR
assert policy1.get_last_lr()[0] == policy2.get_last_lr()[0]
opt1.step()
opt2.step()
policy1.step()
policy2.step()
# Warmup steps available
policy1 = optim.lr_scheduler.NoamAnnealing(
opt1, d_model=self.D_MODEL, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
policy2 = optim.lr_scheduler.NoamAnnealing(
opt2, d_model=self.D_MODEL, warmup_steps=5, max_steps=self.MAX_STEPS * 2, min_lr=self.MIN_LR
)
initial_lr = policy1.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS * 2):
if i <= 5:
assert policy1.get_last_lr()[0] <= self.INITIAL_LR
else:
assert self.MIN_LR < policy1.get_last_lr()[0] < self.INITIAL_LR
assert policy1.get_last_lr()[0] == policy2.get_last_lr()[0]
opt1.step()
opt2.step()
policy1.step()
policy2.step()
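    # Note (illustrative, not an original assertion): the classic Noam schedule is
    #   lr(step) = base_lr * d_model ** -0.5 * min(step ** -0.5, step * warmup_steps ** -1.5)
    # which is consistent with the no-warmup check above (d_model ** -0.5 * INITIAL_LR at the
    # first step) and explains why the schedule keeps decaying past max_steps, hence the two
    # schedulers run in parallel with different max_steps values.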
@pytest.mark.unit
def test_PolynomialDecayAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.PolynomialDecayAnnealing(
opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.PolynomialDecayAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_PolynomialHoldDecayAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, power=2, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, power=2, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup + Hold steps available
policy = optim.lr_scheduler.PolynomialHoldDecayAnnealing(
opt, warmup_steps=5, hold_steps=3, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR, power=2
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 4:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
elif i <= 8:
assert policy.get_last_lr()[0] == self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_InverseSquareRootAnnealing(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.InverseSquareRootAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
# Warmup steps available
policy = optim.lr_scheduler.InverseSquareRootAnnealing(
opt, warmup_steps=5, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR
)
initial_lr = policy.get_last_lr()[0]
assert initial_lr < self.INITIAL_LR
for i in range(self.MAX_STEPS):
if i <= 5:
assert policy.get_last_lr()[0] <= self.INITIAL_LR
else:
assert policy.get_last_lr()[0] < self.INITIAL_LR
opt.step()
policy.step()
policy.step()
final_lr = policy.get_last_lr()[0]
assert final_lr == self.MIN_LR
@pytest.mark.unit
def test_CosineAnnealing_with_noop_steps(self):
model = TempModel()
opt_cls = optim.get_optimizer('novograd')
opt = opt_cls(model.parameters(), lr=self.INITIAL_LR)
# No warmup case
policy = optim.lr_scheduler.CosineAnnealing(opt, max_steps=self.MAX_STEPS, min_lr=self.MIN_LR)
initial_lr = policy.get_last_lr()[0]
assert initial_lr == self.INITIAL_LR
update_steps = 0
for i in range(self.MAX_STEPS):
assert policy.get_last_lr()[0] <= self.INITIAL_LR
opt.step()
policy.step()
# Perform a No-Op for scheduler every 2 steps
if i % 2 == 0:
policy.last_epoch -= 1
else:
update_steps += 1
policy.step()
update_steps += 1
assert update_steps < self.MAX_STEPS
final_lr = policy.get_last_lr()[0]
assert final_lr > self.MIN_LR
# update step = true number of updates performed after some number of skipped steps
true_end_lr = policy._get_lr(step=update_steps)[0]
assert final_lr == true_end_lr
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
def test_max_step_computation(self):
def train(
max_epochs, accumulate_grad_batches, limit_train_batches, devices, batch_size, dataset_len, drop_last
):
trainer = pl.Trainer(
max_epochs=max_epochs,
strategy="ddp_spawn",
accelerator="cpu",
devices=devices,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
enable_checkpointing=False,
enable_progress_bar=False,
)
max_steps = optim.lr_scheduler.compute_max_steps(
max_epochs, accumulate_grad_batches, limit_train_batches, devices, dataset_len, batch_size, drop_last,
)
model = ExampleModel(batch_size, dataset_len, drop_last, max_steps)
trainer.callbacks.append(Callback())
trainer.fit(model)
        # This test will break once NeMo and Lightning upgrade to PyTorch 1.7.0, due to a bug fix in PyTorch 1.7.0
train(
31,
accumulate_grad_batches=1,
limit_train_batches=1.0,
devices=9,
batch_size=60,
dataset_len=1613,
drop_last=True,
)
train(
5,
accumulate_grad_batches=1,
limit_train_batches=0.5,
devices=4,
batch_size=97,
dataset_len=498,
drop_last=False,
)
train(
5,
accumulate_grad_batches=8,
limit_train_batches=0.5,
devices=4,
batch_size=54,
dataset_len=629,
drop_last=True,
)
train(
5,
accumulate_grad_batches=1,
limit_train_batches=0.5,
devices=1,
batch_size=68,
dataset_len=488,
drop_last=False,
)
for _ in range(5):
drop_last = bool(random.randint(0, 1))
accumulate_grad_batches = random.randint(1, 10)
limit_train_batches_int = random.randint(1, 10)
limit_train_batches_float = random.uniform(0.5, 1)
limit_train_batches = random.choice([limit_train_batches_int, limit_train_batches_float])
max_epochs = random.randint(4, 20)
devices = random.randint(1, 5)
dataset_len = random.randint(20, devices * 500)
batch_size = random.randint(math.ceil(5.0 / devices), min(dataset_len // devices, 128))
train(
max_epochs, accumulate_grad_batches, limit_train_batches, devices, batch_size, dataset_len, drop_last,
)
@pytest.mark.unit
@pytest.mark.run_only_on('CPU')
def test_max_step_computation_with_sched_no_ops(self):
def train(
max_steps, accumulate_grad_batches, limit_train_batches, devices, batch_size, dataset_len, drop_last
):
trainer = pl.Trainer(
max_steps=max_steps,
strategy="ddp_spawn",
accelerator="cpu",
devices=devices,
accumulate_grad_batches=accumulate_grad_batches,
limit_train_batches=limit_train_batches,
enable_checkpointing=False,
enable_progress_bar=False,
)
model = ExampleModel(batch_size, dataset_len, drop_last, max_steps)
trainer.callbacks.append(SchedulerNoOpCallback())
trainer.fit(model)
        # This test will break once NeMo and Lightning upgrade to PyTorch 1.7.0, due to a bug fix in PyTorch 1.7.0
train(
max_steps=20,
accumulate_grad_batches=1,
limit_train_batches=1.0,
devices=4,
batch_size=60,
dataset_len=2000,
drop_last=True,
)
| NeMo-main | tests/core/test_optimizers_schedulers.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from omegaconf import DictConfig
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.asr.modules import SpectrogramAugmentation
from nemo.core.classes.common import Serialization
def get_class_path(cls):
return f"{cls.__module__}.{cls.__name__}"
class MockSerializationImpl(Serialization):
def __init__(self, cfg: DictConfig):
self.cfg = cfg
self.value = self.__class__.__name__
class MockSerializationImplV2(MockSerializationImpl):
pass
class TestSerialization:
@pytest.mark.unit
def test_from_config_dict_with_cls(self):
"""Here we test that instantiation works for configs with cls class path in them.
Note that just Serialization.from_config_dict can be used to create an object"""
config = DictConfig(
{
'cls': 'nemo.collections.asr.modules.SpectrogramAugmentation',
'params': {'rect_freq': 50, 'rect_masks': 5, 'rect_time': 120,},
}
)
obj = Serialization.from_config_dict(config=config)
assert isinstance(obj, SpectrogramAugmentation)
@pytest.mark.unit
def test_from_config_dict_without_cls(self):
"""Here we test that instantiation works for configs without cls class path in them.
IMPORTANT: in this case, correct class type should call from_config_dict. This should work for Models."""
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 1024,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
'params': {
'feat_in': 1024,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
},
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
obj = EncDecCTCModel.from_config_dict(config=modelConfig)
assert isinstance(obj, EncDecCTCModel)
@pytest.mark.unit
def test_config_updated(self):
config = DictConfig(
{
'cls': 'nemo.collections.asr.modules.SpectrogramAugmentation',
'params': {'rect_freq': 50, 'rect_masks': 5, 'rect_time': 120,},
}
)
obj = Serialization.from_config_dict(config=config)
new_config = obj.to_config_dict()
assert config != new_config
assert 'params' not in new_config
assert 'cls' not in new_config
assert '_target_' in new_config
@pytest.mark.unit
def test_base_class_instantiation(self):
# Target class is V2 impl, calling class is Serialization (base class)
config = DictConfig({'target': get_class_path(MockSerializationImplV2)})
obj = Serialization.from_config_dict(config=config)
new_config = obj.to_config_dict()
assert config == new_config
assert isinstance(obj, MockSerializationImplV2)
assert obj.value == "MockSerializationImplV2"
@pytest.mark.unit
def test_self_class_instantiation(self):
# Target class is V1 impl, calling class is V1 (same class)
config = DictConfig({'target': get_class_path(MockSerializationImpl)})
obj = MockSerializationImpl.from_config_dict(config=config) # Serialization is base class
new_config = obj.to_config_dict()
assert config == new_config
assert isinstance(obj, MockSerializationImpl)
assert obj.value == "MockSerializationImpl"
@pytest.mark.unit
def test_sub_class_instantiation(self):
# Target class is V1 impl, calling class is V2 (sub class)
config = DictConfig({'target': get_class_path(MockSerializationImpl)})
obj = MockSerializationImplV2.from_config_dict(config=config) # Serialization is base class
new_config = obj.to_config_dict()
assert config == new_config
assert isinstance(obj, MockSerializationImplV2)
assert obj.value == "MockSerializationImplV2"
| NeMo-main | tests/core/test_serialization.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo.core.neural_types import (
AcousticEncodedRepresentation,
AudioSignal,
AxisKind,
AxisKindAbstract,
AxisType,
ChannelType,
ElementType,
MelSpectrogramType,
MFCCSpectrogramType,
NeuralType,
NeuralTypeComparisonResult,
SpectrogramType,
VoidType,
)
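# Illustrative sketch (not part of the original tests): the single-character axis shorthand
# used throughout the tests below maps onto AxisKind members (e.g. 'B' -> Batch, 'T' -> Time,
# 'D' -> Dimension), so the short and long spellings describe the same neural type.
def _sketch_axis_shorthand_equivalence():
    short_form = NeuralType(('B', 'T'), AudioSignal(16000))
    long_form = NeuralType(
        axes=(AxisType(AxisKind.Batch, None), AxisType(AxisKind.Time, None)),
        elements_type=AudioSignal(16000),
    )
    assert short_form.compare(long_form) == NeuralTypeComparisonResult.SAME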
class TestNeuralTypeSystem:
@pytest.mark.unit
def test_short_vs_long_version(self):
long_version = NeuralType(
axes=(AxisType(AxisKind.Batch, None), AxisType(AxisKind.Dimension, None), AxisType(AxisKind.Time, None)),
elements_type=AcousticEncodedRepresentation(),
)
short_version = NeuralType(('B', 'D', 'T'), AcousticEncodedRepresentation())
assert long_version.compare(short_version) == NeuralTypeComparisonResult.SAME
assert short_version.compare(long_version) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_parameterized_type_audio_sampling_frequency(self):
audio16K = NeuralType(axes=('B', 'T'), elements_type=AudioSignal(16000))
audio8K = NeuralType(axes=('B', 'T'), elements_type=AudioSignal(8000))
another16K = NeuralType(axes=('B', 'T'), elements_type=AudioSignal(16000))
assert audio8K.compare(audio16K) == NeuralTypeComparisonResult.SAME_TYPE_INCOMPATIBLE_PARAMS
assert audio16K.compare(audio8K) == NeuralTypeComparisonResult.SAME_TYPE_INCOMPATIBLE_PARAMS
assert another16K.compare(audio16K) == NeuralTypeComparisonResult.SAME
assert audio16K.compare(another16K) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_transpose_same_1(self):
type1 = NeuralType(axes=('B', 'T', 'C'))
type2 = NeuralType(axes=('T', 'B', 'C'))
assert type1.compare(type2) == NeuralTypeComparisonResult.TRANSPOSE_SAME
assert type2.compare(type1) == NeuralTypeComparisonResult.TRANSPOSE_SAME
@pytest.mark.unit
def test_transpose_same_2(self):
audio16K = NeuralType(axes=('B', 'T'), elements_type=AudioSignal(16000))
audio16K_t = NeuralType(axes=('T', 'B'), elements_type=AudioSignal(16000))
assert audio16K.compare(audio16K_t) == NeuralTypeComparisonResult.TRANSPOSE_SAME
@pytest.mark.unit
def test_inheritance_spec_augment_example(self):
input = NeuralType(('B', 'D', 'T'), SpectrogramType())
out1 = NeuralType(('B', 'D', 'T'), MelSpectrogramType())
out2 = NeuralType(('B', 'D', 'T'), MFCCSpectrogramType())
assert out1.compare(out2) == NeuralTypeComparisonResult.INCOMPATIBLE
assert out2.compare(out1) == NeuralTypeComparisonResult.INCOMPATIBLE
assert input.compare(out1) == NeuralTypeComparisonResult.GREATER
assert input.compare(out2) == NeuralTypeComparisonResult.GREATER
assert out1.compare(input) == NeuralTypeComparisonResult.LESS
assert out2.compare(input) == NeuralTypeComparisonResult.LESS
@pytest.mark.unit
def test_singletone(self):
loss_output1 = NeuralType(axes=None)
loss_output2 = NeuralType(axes=None)
assert loss_output1.compare(loss_output2) == NeuralTypeComparisonResult.SAME
assert loss_output2.compare(loss_output1) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_list_of_lists(self):
T1 = NeuralType(
axes=(
AxisType(kind=AxisKind.Batch, size=None, is_list=True),
AxisType(kind=AxisKind.Time, size=None, is_list=True),
AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
),
elements_type=ChannelType(),
)
T2 = NeuralType(
axes=(
AxisType(kind=AxisKind.Batch, size=None, is_list=False),
AxisType(kind=AxisKind.Time, size=None, is_list=False),
AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
),
elements_type=ChannelType(),
)
# TODO: should this be incompatible instead???
assert T1.compare(T2), NeuralTypeComparisonResult.TRANSPOSE_SAME
@pytest.mark.unit
def test_void(self):
btc_spctr = NeuralType(('B', 'T', 'C'), SpectrogramType())
btc_spct_bad = NeuralType(('B', 'T'), SpectrogramType())
btc_void = NeuralType(('B', 'T', 'C'), VoidType())
assert btc_void.compare(btc_spctr) == NeuralTypeComparisonResult.SAME
assert btc_spctr.compare(btc_void) == NeuralTypeComparisonResult.INCOMPATIBLE
assert btc_void.compare(btc_spct_bad) == NeuralTypeComparisonResult.INCOMPATIBLE
@pytest.mark.unit
def test_big_void(self):
big_void_1 = NeuralType(elements_type=VoidType())
big_void_2 = NeuralType()
btc_spctr = NeuralType(('B', 'T', 'C'), SpectrogramType())
btc_spct_bad = NeuralType(('B', 'T'), SpectrogramType())
t1 = NeuralType(
axes=(
AxisType(kind=AxisKind.Batch, size=None, is_list=True),
AxisType(kind=AxisKind.Time, size=None, is_list=True),
AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
),
elements_type=ChannelType(),
)
t2 = NeuralType(
axes=(
AxisType(kind=AxisKind.Batch, size=None, is_list=False),
AxisType(kind=AxisKind.Time, size=None, is_list=False),
AxisType(kind=AxisKind.Dimension, size=32, is_list=False),
AxisType(kind=AxisKind.Dimension, size=128, is_list=False),
AxisType(kind=AxisKind.Dimension, size=256, is_list=False),
),
elements_type=ChannelType(),
)
assert big_void_1.compare(btc_spctr) == NeuralTypeComparisonResult.SAME
assert big_void_1.compare(btc_spct_bad) == NeuralTypeComparisonResult.SAME
assert big_void_1.compare(t1) == NeuralTypeComparisonResult.SAME
assert big_void_1.compare(t2) == NeuralTypeComparisonResult.SAME
assert big_void_2.compare(btc_spctr) == NeuralTypeComparisonResult.SAME
assert big_void_2.compare(btc_spct_bad) == NeuralTypeComparisonResult.SAME
assert big_void_2.compare(t1) == NeuralTypeComparisonResult.SAME
assert big_void_2.compare(t2) == NeuralTypeComparisonResult.SAME
@pytest.mark.unit
def test_unspecified_dimensions(self):
t0 = NeuralType(
(AxisType(AxisKind.Batch, 64), AxisType(AxisKind.Time, 10), AxisType(AxisKind.Dimension, 128)),
SpectrogramType(),
)
t1 = NeuralType(('B', 'T', 'C'), SpectrogramType())
assert t1.compare(t0), NeuralTypeComparisonResult.SAME
assert t0.compare(t1), NeuralTypeComparisonResult.DIM_INCOMPATIBLE
@pytest.mark.unit
def test_any_axis(self):
t0 = NeuralType(('B', 'Any', 'Any'), VoidType())
t1 = NeuralType(('B', 'Any', 'Any'), SpectrogramType())
t2 = NeuralType(('B', 'T', 'C'), SpectrogramType())
assert t0.compare(t1) == NeuralTypeComparisonResult.SAME
assert t0.compare(t2) == NeuralTypeComparisonResult.SAME
assert t1.compare(t2) == NeuralTypeComparisonResult.SAME
assert t2.compare(t1) == NeuralTypeComparisonResult.INCOMPATIBLE
assert t1.compare(t0) == NeuralTypeComparisonResult.INCOMPATIBLE
@pytest.mark.unit
def test_struct(self):
class BoundingBox(ElementType):
def __str__(self):
return "bounding box from detection model"
def fields(self):
return ("X", "Y", "W", "H")
        # Also add a new, user-defined axis kind
class AxisKind2(AxisKindAbstract):
Image = 0
T1 = NeuralType(
elements_type=BoundingBox(),
axes=(
AxisType(kind=AxisKind.Batch, size=None, is_list=True),
AxisType(kind=AxisKind2.Image, size=None, is_list=True),
),
)
class BadBoundingBox(ElementType):
def __str__(self):
return "bad bounding box from detection model"
def fields(self):
return ("X", "Y", "H")
T2 = NeuralType(
elements_type=BadBoundingBox(),
axes=(
AxisType(kind=AxisKind.Batch, size=None, is_list=True),
AxisType(kind=AxisKind2.Image, size=None, is_list=True),
),
)
assert T2.compare(T1) == NeuralTypeComparisonResult.INCOMPATIBLE
| NeMo-main | tests/core/test_neural_types.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.core import NeuralModule
from nemo.core.classes.mixins import AdapterModuleMixin, access_mixins, adapter_mixin_strategies, adapter_mixins
from nemo.utils import config_utils
class DefaultModule(NeuralModule):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(50, 50)
self.bn = torch.nn.BatchNorm1d(50)
def forward(self, x):
x = self.fc(x)
x = self.bn(x)
out = x
return out
def num_params(self):
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
class DefaultModuleAdapter(DefaultModule, AdapterModuleMixin):
def forward(self, x):
x = super(DefaultModuleAdapter, self).forward(x)
if self.is_adapter_available():
# For testing purposes, cache the adapter names
self._adapter_names = self.get_enabled_adapters()
# call forward over model adapters, summing them up
x = self.forward_enabled_adapters(x)
return x
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
cfg = {
'_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',
'in_features': in_features,
'dim': dim,
'norm_position': norm_pos,
}
return cfg
def get_classpath(cls):
return f'{cls.__module__}.{cls.__name__}'
if adapter_mixins.get_registered_adapter(DefaultModule) is None:
adapter_mixins.register_adapter(DefaultModule, DefaultModuleAdapter)
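# Illustrative sketch (not part of the original tests): once the adapter-capable variant is
# registered, the registry can be queried for it, and adding a (zero-initialized) adapter to
# a module introduces extra trainable parameters.
def _sketch_adapter_registry_round_trip():
    meta = adapter_mixins.get_registered_adapter(DefaultModule)
    assert meta is not None and meta.adapter_class is DefaultModuleAdapter

    module = DefaultModuleAdapter()
    params_before = module.num_params()
    module.add_adapter(name='sketch_adapter', cfg=get_adapter_cfg())
    assert module.num_params() > params_before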
class TestAdapterStrategy:
@pytest.mark.unit
def test_ResidualAddAdapterStrategyConfig(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_mixin_strategies.ResidualAddAdapterStrategy,
adapter_mixin_strategies.ResidualAddAdapterStrategyConfig,
ignore_args=IGNORED_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_strategy_default(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
module = DefaultModuleAdapter()
module.add_adapter(name='temp', cfg=get_adapter_cfg())
adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
# update the strategy
adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy()
adapter.adapter_strategy = adapter_strategy
with torch.no_grad():
assert adapter_strategy.stochastic_depth == 0.0
out = adapter_strategy.forward(x, adapter, module=module)
assert (out - x).abs().mean() < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('stochastic_depth', [0.0, 1.0])
def test_strategy_stochasic_depth(self, stochastic_depth):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
module = DefaultModuleAdapter()
module.add_adapter(name='temp', cfg=get_adapter_cfg())
# extract adapter
adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
# reinitialize the final layer of the adapter module (so that it is not zero init)
adapter.module[-1].weight.data += 1
# get just module output
module.set_enabled_adapters('temp', enabled=False)
module_out = module(x)
# get module + adapter output
module.set_enabled_adapters('temp', enabled=True)
module_adapter_out = module(x)
assert (
module_out - module_adapter_out
).abs().sum() > 0 # results should not be the same after adapter forward now
adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy(stochastic_depth=stochastic_depth)
adapter.adapter_strategy = adapter_strategy
module.eval()
with torch.inference_mode(): # stochastic depth disabled, no grad tracking
assert adapter.adapter_strategy.stochastic_depth == stochastic_depth
out = adapter_strategy.forward(module_out, adapter, module=module)
assert (out - module_adapter_out).abs().mean() < 1e-5
module.train()
with torch.inference_mode(): # stochastic depth enabled, but no grad tracking during training mode
out = adapter_strategy.forward(module_out, adapter, module=module)
if stochastic_depth == 0.0:
check = module_adapter_out
else:
check = module_out
assert (out - check).abs().mean() < 1e-5
@pytest.mark.unit
def test_strategy_l2_lambda(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
module = DefaultModuleAdapter()
module.add_adapter(name='temp', cfg=get_adapter_cfg())
module.train()
adapter = module.adapter_layer[module.get_enabled_adapters()[0]]
# update the strategy
adapter_strategy = adapter_mixin_strategies.ResidualAddAdapterStrategy(l2_lambda=0.01)
adapter.adapter_strategy = adapter_strategy
with torch.no_grad():
access_mixins.AccessMixin.reset_registry(module)
assert access_mixins.AccessMixin.is_access_enabled() is False
assert adapter_strategy.stochastic_depth == 0.0
assert adapter_strategy.l2_lambda > 0.0
out = adapter_strategy.forward(x, adapter, module=module)
assert (out - x).abs().mean() < 1e-5
# extract losses
assert access_mixins.AccessMixin.is_access_enabled() is True
auxiliary_losses = access_mixins.AccessMixin.get_module_registry(module)
loss = list(auxiliary_losses.values())[0]
assert 'adapter_loss' in loss
assert loss['adapter_loss'][0] == torch.tensor(0.0) # initially adapter is 0 init, no loss required.
| NeMo-main | tests/core/mixins/adapters/test_adapter_strategy.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.core import NeuralModule
from nemo.core.classes.mixins import adapter_mixin_strategies, adapter_mixins
from nemo.core.classes.mixins.adapter_mixins import AdapterModuleMixin
class DefaultModule(NeuralModule):
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(50, 50)
self.bn = torch.nn.BatchNorm1d(50)
def forward(self, x):
x = self.fc(x)
x = self.bn(x)
out = x
return out
def num_params(self):
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
class DefaultModuleAdapter(DefaultModule, AdapterModuleMixin):
def forward(self, x):
x = super(DefaultModuleAdapter, self).forward(x)
if self.is_adapter_available():
# For testing purposes, cache the adapter names
self._adapter_names = self.get_enabled_adapters()
# call forward over model adapters, summing them up
x = self.forward_enabled_adapters(x)
return x
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
cfg = {
'_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',
'in_features': in_features,
'dim': dim,
'norm_position': norm_pos,
}
return cfg
def get_classpath(cls):
return f'{cls.__module__}.{cls.__name__}'
if adapter_mixins.get_registered_adapter(DefaultModule) is None:
adapter_mixins.register_adapter(DefaultModule, DefaultModuleAdapter)
class TestAdapterMixin:
@pytest.mark.unit
def test_module_registered_adapter_by_class_path(self):
classpath = get_classpath(DefaultModule)
adapter_meta = adapter_mixins.get_registered_adapter(classpath)
assert adapter_meta is not None
assert adapter_meta.base_class == DefaultModule
assert adapter_meta.adapter_class == DefaultModuleAdapter
@pytest.mark.unit
def test_module_registered_adapter_by_class(self):
adapter_meta = adapter_mixins.get_registered_adapter(DefaultModule)
assert adapter_meta is not None
assert adapter_meta.base_class == DefaultModule
assert adapter_meta.adapter_class == DefaultModuleAdapter
@pytest.mark.unit
def test_module_registered_adapter_by_adapter_class(self):
adapter_meta = adapter_mixins.get_registered_adapter(DefaultModuleAdapter)
assert adapter_meta is not None
assert adapter_meta.base_class == DefaultModule
assert adapter_meta.adapter_class == DefaultModuleAdapter
@pytest.mark.unit
def test_single_adapter(self):
model = DefaultModuleAdapter()
original_num_params = model.num_params()
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_params()
assert new_num_params > original_num_params
@pytest.mark.unit
def test_multiple_adapter(self):
model = DefaultModuleAdapter()
original_num_params = model.num_params()
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_params()
assert new_num_params > original_num_params
original_num_params = new_num_params
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
new_num_params = model.num_params()
assert new_num_params > original_num_params
@pytest.mark.unit
def test_forward_linear_pre(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
model = DefaultModuleAdapter()
        original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_output = model(x)
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
def test_forward_linear_post(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
model = DefaultModuleAdapter()
        original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg(norm_pos='post'))
new_output = model(x)
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
def test_multi_adapter_forward(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
model = DefaultModuleAdapter()
        original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
new_output = model(x)
assert model._adapter_names == ['adapter_0', 'adapter_1']
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
def test_multi_adapter_partial_forward(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
model = DefaultModuleAdapter()
        original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
model.set_enabled_adapters(name='adapter_0', enabled=False)
new_output = model(x)
assert model._adapter_names == ['adapter_1']
assert torch.mean(torch.abs(origial_output - new_output)) < 1e-5
@pytest.mark.unit
def test_forward_unfrozen_adapters(self):
model = DefaultModuleAdapter()
original_num_params = model.num_params()
dim = 10
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg(dim=dim))
model.freeze()
model.unfreeze_enabled_adapters()
assert original_num_params == 2650
original_params = 0
adapter_params = 0
for name, param in model.named_parameters():
if 'adapter' not in name:
assert param.requires_grad is False
original_params += param.numel()
else:
assert param.requires_grad is True
adapter_params += param.numel()
for mname, module in model.named_modules():
if isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
assert module.track_running_stats is False
assert original_params > adapter_params
@pytest.mark.unit
def test_forward_linear_no_strategy(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
model = DefaultModuleAdapter()
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
# delete the strategy
adapter_module = model.adapter_layer[model.get_enabled_adapters()[0]]
del adapter_module.adapter_strategy
with pytest.raises(AttributeError):
_ = model(x)
@pytest.mark.unit
def test_forward_linear_replaced_strategy(self):
class MultiplyAdapterStrategy(adapter_mixin_strategies.AbstractAdapterStrategy):
def forward(self, input: torch.Tensor, adapter: torch.nn.Module, *, module: AdapterModuleMixin):
out = adapter(input)
return input * out
torch.random.manual_seed(0)
x = torch.randn(2, 50)
model = DefaultModuleAdapter()
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
# modify the strategy
adapter_module = model.adapter_layer[model.get_enabled_adapters()[0]]
adapter_module.adapter_strategy = MultiplyAdapterStrategy()
out = model(x)
# result of adapter is zero tensor, output multiplied by adapter result should be zero
assert (out > 0.0).any() == torch.tensor(False)
| NeMo-main | tests/core/mixins/adapters/test_adapter_mixin.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from typing import Tuple
import pytest
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from nemo.core import ModelPT, NeuralModule
from nemo.core.classes.mixins import adapter_mixin_strategies, adapter_mixins
from nemo.core.classes.mixins.adapter_mixins import AdapterModelPTMixin, AdapterModuleMixin
from nemo.utils import logging, logging_mode
class DefaultModule(NeuralModule):
""" Define a default neural module (without adapter support)"""
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(50, 50)
self.bn = torch.nn.BatchNorm1d(50)
def forward(self, x):
x = self.fc(x)
x = self.bn(x)
out = x
return out
def num_params(self):
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
class DefaultModuleAdapter(DefaultModule, AdapterModuleMixin):
""" Subclass the DefaultModule, adding adapter module support"""
def forward(self, x):
x = super(DefaultModuleAdapter, self).forward(x)
if self.is_adapter_available():
# For testing purposes, cache the adapter names
self._adapter_names = self.get_enabled_adapters()
# call forward over model adapters, summing them up
x = self.forward_enabled_adapters(x)
return x
class DefaultModelAdapterMixin(AdapterModelPTMixin):
""" Mixin class that implements this model's specific overrides to AdapterModelPTMixin
It will contain two modules, an encoder and a decoder, and both can have adapters.
By default, encoder adapters are enabled, and decoder adapters are disabled. Decoder adapters
can be enabled via the global_cfg in model.cfg.adapters.
It checks and forwards function calls to the corresponding modules.
It supports both global adapters and module adapters for testing purposes.
"""
def setup_adapters(self):
supports_adapters = False
# Check whether the inheriting class' modules support adapters or not
if hasattr(self, 'encoder') and isinstance(self.encoder, AdapterModuleMixin):
supports_adapters |= True
if hasattr(self, 'decoder') and isinstance(self.decoder, AdapterModuleMixin):
supports_adapters |= True
if supports_adapters:
super().setup_adapters()
def add_adapter(self, name: str, cfg: DictConfig):
# Setup the config for adapters
super().add_adapter(name, cfg)
# Resolve module name and adapter name
module_name, adapter_name = self.resolve_adapter_module_name_(name)
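# e.g. 'encoder:adapter_0' resolves to ('encoder', 'adapter_0'), while a plain 'adapter_0'
# resolves to ('', 'adapter_0') and is treated as a global adapter below.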
# Try to retrieve global adapter config
global_config = self._get_global_cfg()
# forward the method call to the individual modules
# If module name is empty, it is a global adapter, otherwise it is a local adapter
if (module_name == '' and global_config.get('encoder_adapter', True)) or (module_name == 'encoder'):
if hasattr(self, 'encoder'):
self.encoder.add_adapter(name, cfg)
if (module_name == '' and global_config.get('decoder_adapter', False)) or (module_name == 'decoder'):
if hasattr(self, 'decoder'):
self.decoder.add_adapter(name, cfg)
def set_enabled_adapters(self, name=None, enabled: bool = True):
# check if valid model with some adapter support
super().set_enabled_adapters(name, enabled)
# Resolve module name and adapter name
if name is not None:
module_name, _ = self.resolve_adapter_module_name_(name)
else:
module_name = None
# Try to retrieve global adapter config
global_config = self._get_global_cfg()
# Forward the method call to the individual modules
if name is None or global_config.get('encoder_adapter', True) or module_name in ('', 'encoder'):
if hasattr(self, 'encoder') and self.encoder.is_adapter_available():
self.encoder.set_enabled_adapters(name, enabled)
if name is None or global_config.get('decoder_adapter', False) or module_name == 'decoder':
if hasattr(self, 'decoder') and self.decoder.is_adapter_available():
self.decoder.set_enabled_adapters(name, enabled)
def get_enabled_adapters(self) -> list:
enabled_adapters = super().get_enabled_adapters()
# Forward the method call to the individual modules
if hasattr(self, 'encoder') and isinstance(self.encoder, AdapterModuleMixin):
encoder_adapters = self.encoder.get_enabled_adapters()
enabled_adapters.extend(encoder_adapters)
if hasattr(self, 'decoder') and isinstance(self.decoder, AdapterModuleMixin):
decoder_adapters = self.decoder.get_enabled_adapters()
enabled_adapters.extend(decoder_adapters)
return enabled_adapters
def is_adapter_available(self) -> bool:
adapters_available = super().is_adapter_available()
# Try to retrieve global adapter config
# Forward the method call to the individual modules
if hasattr(self, 'encoder') and isinstance(self.encoder, AdapterModuleMixin):
print("Encoder is adapter available", self.encoder.is_adapter_available())
adapters_available |= self.encoder.is_adapter_available()
if hasattr(self, 'decoder') and isinstance(self.decoder, AdapterModuleMixin):
adapters_available |= self.decoder.is_adapter_available()
return adapters_available
def check_valid_model_with_adapter_support_(self):
global_cfg = DictConfig({})
if self.adapter_global_cfg_key in self.adapter_cfg:
global_cfg = self.adapter_cfg[self.adapter_global_cfg_key]
encoder_adapter = global_cfg.get('encoder_adapter', True)
decoder_adapter = global_cfg.get('decoder_adapter', False)
if encoder_adapter and not hasattr(self, 'encoder'):
logging.warning("Encoder not available", mode=logging_mode.ONCE)
elif encoder_adapter and not isinstance(self.encoder, AdapterModuleMixin):
logging.warning("Encoder does not support adapters !", mode=logging_mode.ONCE)
if decoder_adapter and not hasattr(self, 'decoder'):
logging.warning("Decoder is not available", mode=logging_mode.ONCE)
elif decoder_adapter and not isinstance(self.decoder, AdapterModuleMixin):
logging.warning("Decoder does not support adapters !", mode=logging_mode.ONCE)
def resolve_adapter_module_name_(self, name: str) -> Tuple[str, str]:
# resolve name and module
valid_module_names = self.adapter_module_names
module_name, adapter_name = super().resolve_adapter_module_name_(name)
if module_name not in valid_module_names:
raise ValueError(f"Provided module name `{module_name}` is not in valid list : {valid_module_names}")
return (module_name, adapter_name)
def _get_global_cfg(self):
global_config = DictConfig({})
if 'adapters' in self.cfg and self.adapter_global_cfg_key in self.cfg.adapters:
global_config = self.adapter_cfg[self.adapter_global_cfg_key]
return global_config
@property
def adapter_module_names(self) -> list:
valid_adapter_modules = ['', 'encoder', 'decoder']
return valid_adapter_modules
class DefaultAdapterModel(ModelPT, DefaultModelAdapterMixin):
def __init__(self, cfg, trainer=None):
super().__init__(cfg, trainer=trainer)
self.encoder = instantiate(cfg.encoder) # type: DefaultModuleAdapter
self.decoder = instantiate(cfg.decoder) # type: DefaultModuleAdapter
# Required to be called for adapter support
self.setup_adapters()
def forward(self, x):
y = self.encoder(x)
z = self.decoder(y)
return z
def list_available_models(cls):
return []
def setup_training_data(self, train_data_config):
self._update_dataset_config('train', train_data_config)
self._train_dl = None
def setup_validation_data(self, val_data_config):
self._update_dataset_config('validation', val_data_config)
self._validation_dl = None
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
cfg = {
'_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',
'in_features': in_features,
'dim': dim,
'norm_position': norm_pos,
}
return cfg
def get_model_config(in_features=50, update_adapter_cfg: bool = True):
config = OmegaConf.create(
{
'in_features': in_features,
'encoder': {'_target_': get_classpath(DefaultModule)},
'decoder': {'_target_': get_classpath(DefaultModule)},
}
)
if update_adapter_cfg:
enc_adapter_metadata = adapter_mixins.get_registered_adapter(config.encoder._target_)
if enc_adapter_metadata is not None:
config.encoder._target_ = enc_adapter_metadata.adapter_class_path
dec_adapter_metadata = adapter_mixins.get_registered_adapter(config.decoder._target_)
if dec_adapter_metadata is not None:
config.decoder._target_ = dec_adapter_metadata.adapter_class_path
return config
def update_adapter_global_cfg(cfg: DictConfig, encoder_adapter=True, decoder_adapter=False):
if 'adapters' not in cfg:
cfg.adapters = adapter_mixins._prepare_default_adapter_config(
global_key=AdapterModuleMixin.adapter_global_cfg_key, meta_key=AdapterModuleMixin.adapter_metadata_cfg_key
)
cfg.adapters.global_cfg.encoder_adapter = encoder_adapter
cfg.adapters.global_cfg.decoder_adapter = decoder_adapter
return cfg
def get_classpath(cls):
return f'{cls.__module__}.{cls.__name__}'
if adapter_mixins.get_registered_adapter(DefaultModule) is None:
adapter_mixins.register_adapter(DefaultModule, DefaultModuleAdapter)
class TestAdapterModelMixin:
@pytest.mark.unit
def test_base_model_no_support_for_adapters(self, caplog):
logging._logger.propagate = True
original_verbosity = logging.get_verbosity()
logging.set_verbosity(logging.WARNING)
caplog.set_level(logging.WARNING)
cfg = get_model_config(in_features=50, update_adapter_cfg=False)
model = DefaultAdapterModel(cfg)
with pytest.raises(AttributeError):
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
# check that the warning message indicates that the module is not available
assert """Encoder does not support adapters !""" in caplog.text
caplog.clear()
model.get_enabled_adapters()
# check that there is no warning message, since it should log only once.
assert """Encoder does not support adapters !""" not in caplog.text
logging._logger.propagate = False
logging.set_verbosity(original_verbosity)
@pytest.mark.unit
def test_single_adapter(self):
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_single_encoder_module_adapter(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='encoder:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.decoder.is_adapter_available() is False
adapter_cfg = model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[model.get_enabled_adapters()[0]] == 'encoder' # encoder
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'temp.nemo')
model.save_to(path)
shutil.move(path, outer_tmpdir)
outer_path = os.path.join(outer_tmpdir, 'temp.nemo')
new_model = DefaultAdapterModel.restore_from(outer_path) # type: DefaultAdapterModel
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights == new_model.num_weights
assert new_model.decoder.is_adapter_available() is False
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[model.get_enabled_adapters()[0]] == 'encoder' # encoder
@pytest.mark.unit
def test_single_decoder_module_adapter(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.encoder.is_adapter_available() is False
adapter_cfg = model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[model.get_enabled_adapters()[0]] == 'decoder' # decoder module
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
path = os.path.join(tmpdir, 'temp.nemo')
model.save_to(path)
shutil.move(path, outer_tmpdir)
outer_path = os.path.join(outer_tmpdir, 'temp.nemo')
new_model = DefaultAdapterModel.restore_from(outer_path) # type: DefaultAdapterModel
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights == new_model.num_weights
assert new_model.encoder.is_adapter_available() is False
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[new_model.get_enabled_adapters()[0]] == 'decoder' # decoder module
@pytest.mark.unit
def test_single_adapter_default_metaconfig(self):
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
adapter_cfg = model.cfg.adapters
assert model.adapter_global_cfg_key in adapter_cfg
assert model.adapter_metadata_cfg_key in adapter_cfg[model.adapter_global_cfg_key]
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
assert meta_cfg is not None
assert 'modules' in meta_cfg
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[model.get_enabled_adapters()[0]] == '' # default module
@pytest.mark.unit
def test_all_disabled_adapters(self):
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=False, decoder_adapter=False)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params == original_num_params
assert model.is_adapter_available() is False
@pytest.mark.unit
def test_set_enabled_all_adapters_with_no_name(self):
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=True)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
model.add_adapter(name='decoder:adapter_1', cfg=get_adapter_cfg())
new_num_params = model.num_weights
model.set_enabled_adapters(enabled=False)
assert new_num_params > original_num_params
assert model.is_adapter_available() is True
assert len(model.get_enabled_adapters()) == 0
@pytest.mark.unit
def test_set_enabled_all_adapters_with_no_name_only_decoder(self):
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=True)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='decoder:adapter_1', cfg=get_adapter_cfg())
new_num_params = model.num_weights
model.set_enabled_adapters(enabled=False)
assert new_num_params > original_num_params
assert model.is_adapter_available() is True
assert len(model.get_enabled_adapters()) == 0
@pytest.mark.unit
def test_enc_dec_enabled_adapters(self):
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=False)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=True)
model2 = DefaultAdapterModel(cfg)
model2.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_encdec_num_params = model2.num_weights
assert new_encdec_num_params > new_num_params
@pytest.mark.unit
@pytest.mark.parametrize('enc', [True, False])
@pytest.mark.parametrize('dec', [True, False])
def test_multiple_adapter(self, enc, dec):
if enc is False and dec is False:
return # need at least one adapter active
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=enc, decoder_adapter=dec)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
original_num_params = new_num_params
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_multiple_adapter_non_unique_adapter_name(self):
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=True)
model = DefaultAdapterModel(cfg)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
with pytest.raises(ValueError):
model.add_adapter(name='encoder:adapter_0', cfg=get_adapter_cfg())
with pytest.raises(ValueError):
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg())
@pytest.mark.unit
@pytest.mark.parametrize('enc', [True, False])
@pytest.mark.parametrize('dec', [True, False])
def test_forward_linear_pre(self, enc, dec):
if enc is False and dec is False:
return # need at least one adapter active
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=enc, decoder_adapter=dec)
model = DefaultAdapterModel(cfg)
original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_output = model(x)
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('enc', [True, False])
@pytest.mark.parametrize('dec', [True, False])
def test_forward_linear_post(self, enc, dec):
if enc is False and dec is False:
return # need at least one adapter active
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=enc, decoder_adapter=dec)
model = DefaultAdapterModel(cfg)
original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg(norm_pos='post'))
new_output = model(x)
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('enc', [True, False])
@pytest.mark.parametrize('dec', [True, False])
def test_multi_adapter_forward(self, enc, dec):
if enc is False and dec is False:
return # need at least one adapter active
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=enc, decoder_adapter=dec)
model = DefaultAdapterModel(cfg)
original_output = model(x)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
new_output = model(x)
if enc:
assert model.encoder._adapter_names == ['adapter_0', 'adapter_1']
if dec:
assert model.decoder._adapter_names == ['adapter_0', 'adapter_1']
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('enc', [True, False])
@pytest.mark.parametrize('dec', [True, False])
def test_multi_adapter_partial_forward_global_module_different(self, enc, dec):
if enc is False and dec is False:
return # need at least one adapter active
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=enc, decoder_adapter=dec)
model = DefaultAdapterModel(cfg)
original_output = model(x)
# add encoder adapters
if enc:
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
model.add_adapter(name='encoder:adapter_1', cfg=get_adapter_cfg())
# add decoder adapters
if dec:
model.add_adapter(name='decoder:adapter_2', cfg=get_adapter_cfg())
model.add_adapter(name='decoder:adapter_3', cfg=get_adapter_cfg())
# disable encoder adapters
if enc:
model.set_enabled_adapters(name='adapter_0', enabled=False)
# disable decoder adapters
if dec:
model.set_enabled_adapters(name='adapter_3', enabled=False)
# perform forward
new_output = model(x)
if enc:
assert model.encoder._adapter_names == ['adapter_1']
if dec:
assert model.decoder._adapter_names == ['adapter_2']
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name1', ['adapter_0', 'encoder:adapter_0'])
@pytest.mark.parametrize('name2', ['adapter_1', 'encoder:adapter_1'])
def test_multi_adapter_partial_forward_global_module_same_output(self, name1, name2):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=False)
model = DefaultAdapterModel(cfg)
original_output = model(x)
model.add_adapter(name=name1, cfg=get_adapter_cfg())
model.add_adapter(name=name2, cfg=get_adapter_cfg())
model.set_enabled_adapters(name=name1, enabled=False)
new_output = model(x)
resolved_name2 = model.resolve_adapter_module_name_(name2)[-1]
assert model.get_enabled_adapters() == [resolved_name2]
assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('enc', [True, False])
@pytest.mark.parametrize('dec', [True, False])
def test_forward_unfrozen_adapters(self, enc, dec):
if enc is False and dec is False:
return # need at least one adapter active
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=enc, decoder_adapter=dec)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
dim = 10
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg(dim=dim))
model.freeze()
model.unfreeze_enabled_adapters()
assert original_num_params == 5300
original_params = 0
adapter_params = 0
for name, param in model.named_parameters():
if 'adapter' not in name:
assert param.requires_grad is False
original_params += param.numel()
else:
assert param.requires_grad is True
adapter_params += param.numel()
for mname, module in model.named_modules():
if isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
assert module.track_running_stats is False
assert original_params > adapter_params
enc_params = model.encoder.num_params()
dec_params = model.decoder.num_params()
assert adapter_params == enc_params + dec_params
@pytest.mark.unit
def test_forward_linear_no_strategy(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=False)
model = DefaultAdapterModel(cfg)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
# delete the strategy
adapter_module = model.encoder.adapter_layer[model.get_enabled_adapters()[0]]
del adapter_module.adapter_strategy
with pytest.raises(AttributeError):
_ = model(x)
@pytest.mark.unit
def test_forward_linear_replaced_strategy(self):
class MultiplyAdapterStrategy(adapter_mixin_strategies.AbstractAdapterStrategy):
def forward(self, input: torch.Tensor, adapter: torch.nn.Module, *, module: AdapterModuleMixin):
out = adapter(input)
return input * out
torch.random.manual_seed(0)
x = torch.randn(2, 50)
cfg = get_model_config(in_features=50)
# Enable adapters on both the encoder and the decoder
cfg = update_adapter_global_cfg(cfg, encoder_adapter=True, decoder_adapter=True)
model = DefaultAdapterModel(cfg)
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
# modify the strategy of both encoder and decoder
adapter_module = model.encoder.adapter_layer[model.get_enabled_adapters()[0]]
adapter_module.adapter_strategy = MultiplyAdapterStrategy()
adapter_module = model.decoder.adapter_layer[model.get_enabled_adapters()[0]]
adapter_module.adapter_strategy = MultiplyAdapterStrategy()
out = model(x)
# result of adapter is zero tensor, output multiplied by adapter result should be zero
assert (out > 0.0).any() == torch.tensor(False)
@pytest.mark.unit
def test_save_adapter_with_no_adapters_added(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
with pytest.raises(AttributeError):
model.save_adapters(filepath='temp.pt', name=None)
@pytest.mark.unit
def test_single_decoder_save_load_adapter_only_exact_name(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg(dim=5))
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.encoder.is_adapter_available() is False
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
adapter_path = os.path.join(tmpdir, 'temp.pt')
model.save_adapters(adapter_path, name='decoder:adapter_0')
model_path = os.path.join('temp.nemo')
model.save_to(model_path)
shutil.move(adapter_path, outer_tmpdir)
shutil.move(model_path, outer_tmpdir)
outer_adapter_path = os.path.join(outer_tmpdir, 'temp.pt')
outer_model_path = os.path.join(outer_tmpdir, 'temp.nemo')
# Assert size of this params
adapter_filesize = os.path.getsize(outer_adapter_path)
model_filesize = os.path.getsize(outer_model_path)
assert model_filesize > adapter_filesize
# restore adapter to new model (without any decoder adapter)
new_model = DefaultAdapterModel(cfg)
new_model.load_adapters(outer_adapter_path, name='decoder:adapter_0')
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights == new_model.num_weights
assert new_model.encoder.is_adapter_available() is False
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[new_model.get_enabled_adapters()[0]] == 'decoder' # decoder module
original_state_dict = model.decoder.adapter_layer.state_dict()
restored_state_dict = new_model.decoder.adapter_layer.state_dict()
for ogkey, newkey in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[ogkey] - restored_state_dict[newkey]).abs().mean() < 1e-6
@pytest.mark.unit
@pytest.mark.parametrize('restore_name', [None, 'adapter_0'])
def test_single_decoder_save_load_adapter_only_global_name(self, restore_name):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg(dim=5))
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.decoder.is_adapter_available() is False
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
adapter_path = os.path.join(tmpdir, 'temp.pt')
model.save_adapters(adapter_path, name='adapter_0')
model_path = os.path.join('temp.nemo')
model.save_to(model_path)
shutil.move(adapter_path, outer_tmpdir)
shutil.move(model_path, outer_tmpdir)
outer_adapter_path = os.path.join(outer_tmpdir, 'temp.pt')
outer_model_path = os.path.join(outer_tmpdir, 'temp.nemo')
# Assert size of this params
adapter_filesize = os.path.getsize(outer_adapter_path)
model_filesize = os.path.getsize(outer_model_path)
assert model_filesize > adapter_filesize
# restore adapter to new model (without any encoder adapter)
new_model = DefaultAdapterModel(cfg)
new_model.load_adapters(outer_adapter_path, name=restore_name)
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights == new_model.num_weights
assert new_model.decoder.is_adapter_available() is False
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[new_model.get_enabled_adapters()[0]] == '' # global adapter
original_state_dict = model.encoder.adapter_layer.state_dict()
restored_state_dict = new_model.encoder.adapter_layer.state_dict()
for ogkey, newkey in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[ogkey] - restored_state_dict[newkey]).abs().mean() < 1e-6
@pytest.mark.unit
def test_multiple_decoder_save_load_adapter_only_exact_name(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg(dim=5))
model.add_adapter(name='encoder:adapter_1', cfg=get_adapter_cfg(dim=5))
new_num_params = model.num_weights
assert new_num_params > original_num_params
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
adapter_path = os.path.join(tmpdir, 'temp.pt')
model.save_adapters(adapter_path, name='decoder:adapter_0')
model_path = os.path.join('temp.nemo')
model.save_to(model_path)
shutil.move(adapter_path, outer_tmpdir)
shutil.move(model_path, outer_tmpdir)
outer_adapter_path = os.path.join(outer_tmpdir, 'temp.pt')
outer_model_path = os.path.join(outer_tmpdir, 'temp.nemo')
# Assert size of this params
adapter_filesize = os.path.getsize(outer_adapter_path)
model_filesize = os.path.getsize(outer_model_path)
assert model_filesize > adapter_filesize
# restore adapter to new model (without any decoder adapter)
new_model = DefaultAdapterModel(cfg)
new_model.load_adapters(outer_adapter_path, name='decoder:adapter_0')
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights > new_model.num_weights # the new model has only one adapter not both
assert new_model.encoder.is_adapter_available() is False # encoder adapter not available in new model
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[new_model.get_enabled_adapters()[0]] == 'decoder' # decoder
original_state_dict = model.decoder.adapter_layer.state_dict()
restored_state_dict = new_model.decoder.adapter_layer.state_dict()
for ogkey, newkey in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[ogkey] - restored_state_dict[newkey]).abs().mean() < 1e-6
@pytest.mark.unit
@pytest.mark.parametrize("decoder", ["adapter_0",]) # "decoder:adapter_0"
@pytest.mark.parametrize("encoder", ["adapter_1",]) # "encoder:adapter_1"
def test_multiple_save_load_adapter_with_multiple_load(self, decoder, encoder):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
model.add_adapter(name=decoder, cfg=get_adapter_cfg(dim=5))
model.add_adapter(name=encoder, cfg=get_adapter_cfg(dim=5))
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert len(model.get_enabled_adapters()) == 2
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
adapter_path = os.path.join(tmpdir, 'temp.pt')
adapter_path_2 = os.path.join(tmpdir, 'temp-2.pt')
print("saving adapter ", decoder)
model.save_adapters(adapter_path, name=decoder)
print("Saving adapter ", encoder)
model.save_adapters(adapter_path_2, name=encoder)
model_path = os.path.join('temp.nemo')
model.save_to(model_path)
shutil.move(adapter_path, outer_tmpdir)
shutil.move(adapter_path_2, outer_tmpdir)
shutil.move(model_path, outer_tmpdir)
outer_adapter_path = os.path.join(outer_tmpdir, 'temp.pt')
outer_adapter_path_2 = os.path.join(outer_tmpdir, 'temp-2.pt')
outer_model_path = os.path.join(outer_tmpdir, 'temp.nemo')
# Assert size of this params
adapter_filesize = os.path.getsize(outer_adapter_path)
adapter_2_filesize = os.path.getsize(outer_adapter_path_2)
model_filesize = os.path.getsize(outer_model_path)
assert model_filesize > adapter_filesize
assert model_filesize > adapter_2_filesize
# restore adapter to new model (without any decoder adapter)
new_model = DefaultAdapterModel(cfg)
new_model.load_adapters(outer_adapter_path, name=decoder)
# Separately load another adapter after the first one
new_model.load_adapters(outer_adapter_path_2, name=encoder)
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights == new_model.num_weights # both adapters were restored, so the weights match
assert new_model.encoder.is_adapter_available() is True # encoder adapter is available in new model
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
enabled_adapters = new_model.get_enabled_adapters()
assert len(enabled_adapters) == 2
if "decoder:" in decoder:
original_state_dict = model.decoder.adapter_layer.state_dict()
restored_state_dict = new_model.decoder.adapter_layer.state_dict()
else:
# Default adapter position is on encoder
original_state_dict = model.encoder.adapter_layer.state_dict()
restored_state_dict = new_model.encoder.adapter_layer.state_dict()
for ogkey, newkey in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[ogkey] - restored_state_dict[newkey]).abs().mean() < 1e-6
@pytest.mark.unit
def test_multiple_decoder_save_load_adapter_dual_name(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
# one adapter will have module name, other will have global name
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg(dim=5))
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg(dim=5))
new_num_params = model.num_weights
assert new_num_params > original_num_params
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
adapter_path = os.path.join(tmpdir, 'temp.pt')
model.save_adapters(adapter_path, name=None) # save all adapters
model_path = os.path.join('temp.nemo')
model.save_to(model_path)
shutil.move(adapter_path, outer_tmpdir)
shutil.move(model_path, outer_tmpdir)
outer_adapter_path = os.path.join(outer_tmpdir, 'temp.pt')
outer_model_path = os.path.join(outer_tmpdir, 'temp.nemo')
# Assert size of this params
adapter_filesize = os.path.getsize(outer_adapter_path)
model_filesize = os.path.getsize(outer_model_path)
assert model_filesize > adapter_filesize
# restore adapter to new model (without any decoder adapter)
new_model = DefaultAdapterModel(cfg)
new_model.load_adapters(outer_adapter_path, name='decoder:adapter_0') # load just one adapter from 2 saved
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights > new_model.num_weights # the new model has only one adapter not both
assert new_model.encoder.is_adapter_available() is False # encoder adapter not available in new model
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[new_model.get_enabled_adapters()[0]] == 'decoder' # decoder
original_state_dict = model.decoder.adapter_layer.state_dict()
restored_state_dict = new_model.decoder.adapter_layer.state_dict()
for ogkey, newkey in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[ogkey] - restored_state_dict[newkey]).abs().mean() < 1e-6
@pytest.mark.unit
def test_single_decoder_save_load_adapter_only_partial_name(self):
# create a model config, but do not add global_cfg to it
# we want to test just module level adapter
cfg = get_model_config(in_features=50)
model = DefaultAdapterModel(cfg)
original_num_params = model.num_weights
# build adapter with exact name in decoder module only
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg(dim=5))
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.encoder.is_adapter_available() is False
# save restore test
with tempfile.TemporaryDirectory() as outer_tmpdir:
with tempfile.TemporaryDirectory() as tmpdir:
adapter_path = os.path.join(tmpdir, 'temp.pt')
# save adapter with partial name- just adapter_0
model.save_adapters(adapter_path, name='adapter_0')
model_path = os.path.join('temp.nemo')
model.save_to(model_path)
shutil.move(adapter_path, outer_tmpdir)
shutil.move(model_path, outer_tmpdir)
outer_adapter_path = os.path.join(outer_tmpdir, 'temp.pt')
outer_model_path = os.path.join(outer_tmpdir, 'temp.nemo')
# Assert size of this params
adapter_filesize = os.path.getsize(outer_adapter_path)
model_filesize = os.path.getsize(outer_model_path)
assert model_filesize > adapter_filesize
# restore adapter to new model (without any decoder adapter)
new_model = DefaultAdapterModel(cfg)
# load adapter with partial name only - just adapter_0 - should work
new_model.load_adapters(outer_adapter_path, name='adapter_0')
# restore adapter to new model (without any decoder adapter)
new_model = DefaultAdapterModel(cfg)
# properly load with correct key
new_model.load_adapters(outer_adapter_path, name='decoder:adapter_0')
assert isinstance(new_model, AdapterModelPTMixin)
assert len(new_model.get_enabled_adapters()) > 0
assert model.num_weights == new_model.num_weights
assert new_model.encoder.is_adapter_available() is False
adapter_cfg = new_model.cfg.adapters
meta_cfg = adapter_cfg[model.adapter_global_cfg_key][model.adapter_metadata_cfg_key]
modules_cfg = meta_cfg['modules']
assert modules_cfg is not None
assert modules_cfg[new_model.get_enabled_adapters()[0]] == 'decoder' # decoder module
original_state_dict = model.decoder.adapter_layer.state_dict()
restored_state_dict = new_model.decoder.adapter_layer.state_dict()
for ogkey, newkey in zip(original_state_dict.keys(), restored_state_dict.keys()):
assert (original_state_dict[ogkey] - restored_state_dict[newkey]).abs().mean() < 1e-6
| NeMo-main | tests/core/mixins/adapters/test_adapter_model_mixin.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import torch
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import rank_zero_only
from nemo.core import ModelPT
from nemo.utils import logging
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
class OnesDataset(torch.utils.data.Dataset):
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.ones(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(ModelPT):
def __init__(self, *args, **kwargs):
cfg = OmegaConf.structured({})
super().__init__(cfg, trainer=kwargs.get('trainer', None))
# dummy parameter in order to allow DDP to execute
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
def train_dataloader(self):
return None
def val_dataloader(self):
return None
def predict_dataloader(self):
dataset = OnesDataset(2)
return torch.utils.data.DataLoader(dataset, batch_size=2)
def forward(self, batch):
return batch.mean()
def validation_step(self, batch, batch_idx):
loss = self(batch)
self.validation_step_outputs.append(loss)
return loss
def training_step(self, batch, batch_idx):
return self(batch)
def list_available_models(self):
pass
def setup_training_data(self):
pass
def setup_validation_data(self):
pass
def on_validation_epoch_end(self):
self.log("val_loss", torch.stack(self.validation_step_outputs).mean())
self.validation_step_outputs.clear() # free memory
def instantiate_multinode_ddp_if_possible():
num_gpus = torch.cuda.device_count()
## Change logger=None to logger=False to support PTL 2.0
trainer = Trainer(devices=num_gpus, accelerator='gpu', strategy='ddp', logger=False, enable_checkpointing=False)
exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="")
exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg))
return trainer
def setup_model(trainer: Trainer):
model = ExampleModel(trainer=trainer)
logging.info(f"M.Global Rank:{model.global_rank}")
logging.info(f"M.Local Rank:{model.local_rank}")
logging.info(f"M.World Size:{model.trainer.world_size}")
trainer.predict(model)
return model
def get_rank_info(texts: list, rank_key: str) -> int:
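# Each matching log line has the form '<rank_key>:<value>' (e.g. 'M.Global Rank:0', as
# written by setup_model above), so the integer after the last ':' is the rank value.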
for line in texts:
if rank_key in line:
rank_value = line.split(":")[-1]
rank_value = int(rank_value)
return rank_value
print("Could not find the correct rank key !")
exit(1)
@rank_zero_only
def check_model_ranks(model: ExampleModel):
basedir = os.path.join('./ddp_check/', 'default', 'version_0')
file_template = "nemo_log_globalrank-{rank}_localrank-{rank}.txt"
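# e.g. rank 0 maps to 'nemo_log_globalrank-0_localrank-0.txt'; this single-node check
# assumes global and local ranks coincide.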
world_size = torch.cuda.device_count()
for rank in range(world_size):
filename = file_template.format(rank=rank)
filepath = os.path.join(basedir, filename)
with open(filepath, 'r', encoding='utf-8') as f:
texts = f.readlines()
texts = [t.replace("\n", "") for t in texts]
log_global_rank = get_rank_info(texts, rank_key='M.Global Rank')
log_world_size = get_rank_info(texts, rank_key='M.World Size')
if log_global_rank != rank:
print("Logged global rank is not equal to trainer.global_rank !")
exit(1)
if log_world_size != world_size:
print("Logged world size if not equal to trainer.world_size !")
exit(1)
@rank_zero_only
def cleanup():
if os.path.exists('./ddp_check'):
shutil.rmtree('./ddp_check', ignore_errors=True)
def run_checks():
cleanup()
trainer = instantiate_multinode_ddp_if_possible()
model = setup_model(trainer)
check_model_ranks(model)
print("DDP checks passed !")
cleanup()
if __name__ == '__main__':
run_checks()
| NeMo-main | tests/core_ptl/check_for_ranks.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import pytest
import torch
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from nemo.core import ModelPT
from nemo.utils import logging
from nemo.utils.exp_manager import CallbackParams, ExpManagerConfig, StatelessTimer, exp_manager
class OnesDataset(torch.utils.data.Dataset):
def __init__(self, dataset_len):
super().__init__()
self.__dataset_len = dataset_len
def __getitem__(self, *args):
return torch.ones(2)
def __len__(self):
return self.__dataset_len
class ExampleModel(ModelPT):
def __init__(self, *args, **kwargs):
cfg = OmegaConf.structured({})
super().__init__(cfg, trainer=kwargs.get('trainer', None))
# dummy parameter in order to allow DDP to execute
self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1)
def train_dataloader(self):
dataset = OnesDataset(10000)
return torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=4)
def val_dataloader(self):
dataset = OnesDataset(10)
return torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=4)
def predict_dataloader(self):
dataset = OnesDataset(10)
return torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=4)
def forward(self, batch):
return (self.l1(batch) - batch.mean(dim=1)).mean()
def validation_step(self, batch, batch_idx):
loss = (self.l1(batch) - batch.mean(dim=1)).mean()
self.validation_step_outputs.append(loss)
return loss
def training_step(self, batch, batch_idx):
return (self.l1(batch) - batch.mean(dim=1)).mean()
def list_available_models(self):
pass
def setup_training_data(self):
pass
def setup_validation_data(self):
pass
def on_validation_epoch_end(self):
if not self.validation_step_outputs:
return
self.log("val_loss", torch.stack(self.validation_step_outputs).mean(), sync_dist=True)
self.validation_step_outputs.clear() # free memory
class TestStatelessTimer:
def setup_model(self):
# Stateless timer for 3 seconds.
# Max steps shouldn't matter, since it should stop after 3 seconds based on the timer.
# Val check interval makes sure a checkpoint is written and can be restored from.
callback_params = CallbackParams()
callback_params.monitor = "val_loss"
callback_params.save_top_k = 1
trainer = Trainer(
devices=1,
val_check_interval=5,
max_steps=10000,
accelerator='gpu',
strategy='ddp',
logger=False,
enable_checkpointing=False,
)
exp_manager_cfg = ExpManagerConfig(
explicit_log_dir='./ptl_stateless_timer_check/',
use_datetime_version=False,
version="",
resume_ignore_no_checkpoint=True,
create_checkpoint_callback=True,
checkpoint_callback_params=callback_params,
resume_if_exists=True,
max_time_per_run="00:00:00:03",
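# "00:00:00:03" is assumed to be a DD:HH:MM:SS duration string, i.e. the 3 second budget noted above.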
)
exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg))
model = ExampleModel(trainer=trainer)
trainer.fit(model)
return trainer
def cleanup(self):
if os.path.exists('./ptl_stateless_timer_check'):
shutil.rmtree('./ptl_stateless_timer_check', ignore_errors=True)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_stateless_timer(self):
self.cleanup()
trainer = self.setup_model()
global_step_1 = trainer.global_step
trainer = self.setup_model()
global_step_2 = trainer.global_step
trainer = self.setup_model()
global_step_3 = trainer.global_step
logging.info(f"Global steps : {global_step_1}, {global_step_2}, {global_step_3}")
assert global_step_3 > global_step_2 > global_step_1
self.cleanup()
| NeMo-main | tests/core_ptl/test_ptl_stateless_timer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import inspect
import os
import traceback
import torch
import wrapt
from nemo.core import Model
from nemo.utils import model_utils
DOMAINS = ['asr', 'tts', 'nlp']
def process_args():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--domain', choices=DOMAINS, type=str)
args = parser.parse_args()
return args
###############################
def _build_import_path(domain, subdomains: list, imp):
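# e.g. _build_import_path('asr', ['models'], 'EncDecCTCModel') -> 'nemo.collections.asr.models.EncDecCTCModel'
# (class name used purely as an illustration).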
import_path = ["nemo", "collections", domain]
import_path.extend(subdomains)
import_path.append(imp)
path = ".".join(import_path)
return path
def _get_class_from_path(domain, subdomains, imp):
path = _build_import_path(domain, subdomains, imp)
class_ = None
result = None
try:
class_ = model_utils.import_class_by_path(path)
if inspect.isclass(class_):
# Is the class wrapped in a wrapt.decorator at the class level? Unwrap for checks.
if isinstance(class_, wrapt.FunctionWrapper):
class_ = class_.__wrapped__
# Subclass tests
if issubclass(class_, (Model, torch.nn.Module)):
result = class_
else:
class_ = None
error = None
except Exception:
error = traceback.format_exc()
return class_, result, error
def _test_domain_module_imports(module, domain, subdomains: list):
module_list = []
failed_list = []
error_list = []
error = None
if len(subdomains) > 0:
basepath = module.__path__[0]
nemo_index = basepath.rfind("nemo")
basepath = basepath[nemo_index:].replace(os.path.sep, ".")
new_path = '.'.join([basepath, *subdomains])
try:
module = importlib.import_module(new_path)
except Exception:
print(f"Could not import `{new_path}` ; Traceback below :")
error = traceback.format_exc()
error_list.append(error)
if error is None:
for imp in dir(module):
class_, result, error = _get_class_from_path(domain, subdomains, imp)
if result is not None:
module_list.append(class_)
elif class_ is not None:
failed_list.append(class_)
if error is not None:
error_list.append(error)
for module in module_list:
print("Module successfully imported :", module)
print()
for module in failed_list:
print("Module did not match a valid signature of NeMo Model (hence ignored):", module)
print()
if len(error_list) > 0:
print("Imports crashed with following traceback !")
for error in error_list:
print("*" * 100)
print()
print(error)
print()
print("*" * 100)
print()
if len(error_list) > 0:
return False
else:
return True
###############################
def test_domain_asr(args):
import nemo.collections.asr as nemo_asr
all_passed = _test_domain_module_imports(nemo_asr, domain=args.domain, subdomains=['models'])
if not all_passed:
exit(1)
def test_domain_nlp(args):
# If even this fails, just fail entirely.
import nemo.collections.nlp as nemo_nlp
# Basic NLP test
all_passed = _test_domain_module_imports(nemo_nlp, domain=args.domain, subdomains=['models'])
# Megatron Test
all_passed = (
_test_domain_module_imports(
nemo_nlp, domain=args.domain, subdomains=['models', 'language_modeling', 'megatron_base_model']
)
and all_passed
)
all_passed = (
_test_domain_module_imports(
nemo_nlp, domain=args.domain, subdomains=['models', 'language_modeling', 'megatron_bert_model']
)
and all_passed
)
all_passed = (
_test_domain_module_imports(
nemo_nlp, domain=args.domain, subdomains=['models', 'language_modeling', 'megatron_glue_model']
)
and all_passed
)
all_passed = (
_test_domain_module_imports(
nemo_nlp, domain=args.domain, subdomains=['models', 'language_modeling', 'megatron_gpt_model']
)
and all_passed
)
all_passed = (
_test_domain_module_imports(
nemo_nlp,
domain=args.domain,
subdomains=['models', 'language_modeling', 'megatron_lm_encoder_decoder_model'],
)
and all_passed
)
all_passed = (
_test_domain_module_imports(
nemo_nlp,
domain=args.domain,
subdomains=['models', 'language_modeling', 'megatron_gpt_prompt_learning_model'],
)
and all_passed
)
all_passed = (
_test_domain_module_imports(
nemo_nlp, domain=args.domain, subdomains=['models', 'language_modeling', 'megatron_t5_model']
)
and all_passed
)
if not all_passed:
exit(1)
def test_domain_tts(args):
import nemo.collections.tts as nemo_tts
all_passed = _test_domain_module_imports(nemo_tts, domain=args.domain, subdomains=['models'])
if not all_passed:
exit(1)
###############################
def test_domain(args):
domain = args.domain
if domain == 'asr':
test_domain_asr(args)
elif domain == 'nlp':
test_domain_nlp(args)
elif domain == 'tts':
test_domain_tts(args)
else:
raise RuntimeError(f"Cannot resolve domain : {domain}")
def run_checks():
args = process_args()
test_domain(args)
if __name__ == '__main__':
run_checks()
| NeMo-main | tests/core_ptl/check_imports.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
import pytest
from nemo import __version__ as NEMO_VERSION
from nemo.utils.data_utils import (
ais_binary,
ais_endpoint_to_dir,
bucket_and_object_from_uri,
datastore_path_to_webdataset_url,
is_datastore_path,
resolve_cache_dir,
)
class TestDataUtils:
@pytest.mark.unit
def test_resolve_cache_dir(self):
"""Test cache dir path.
"""
TEST_NEMO_ENV_CACHE_DIR = 'TEST_NEMO_ENV_CACHE_DIR'
with mock.patch('nemo.constants.NEMO_ENV_CACHE_DIR', TEST_NEMO_ENV_CACHE_DIR):
envar_to_resolved_path = {
'/path/to/cache': '/path/to/cache',
'relative/path': os.path.join(os.getcwd(), 'relative/path'),
'': os.path.expanduser(f'~/.cache/torch/NeMo/NeMo_{NEMO_VERSION}'),
}
for envar, expected_path in envar_to_resolved_path.items():
# Set envar
os.environ[TEST_NEMO_ENV_CACHE_DIR] = envar
# Check path
uut_path = resolve_cache_dir().as_posix()
assert uut_path == expected_path, f'Expected: {expected_path}, got {uut_path}'
@pytest.mark.unit
def test_is_datastore_path(self):
"""Test checking for datastore path.
"""
# Positive examples
assert is_datastore_path('ais://positive/example')
# Negative examples
assert not is_datastore_path('ais/negative/example')
assert not is_datastore_path('/negative/example')
assert not is_datastore_path('negative/example')
@pytest.mark.unit
def test_bucket_and_object_from_uri(self):
"""Test getting bucket and object from URI.
"""
# Positive examples
assert bucket_and_object_from_uri('ais://bucket/object') == ('bucket', 'object')
assert bucket_and_object_from_uri('ais://bucket_2/object/is/here') == ('bucket_2', 'object/is/here')
# Negative examples: invalid URI
with pytest.raises(ValueError):
bucket_and_object_from_uri('/local/file')
with pytest.raises(ValueError):
bucket_and_object_from_uri('local/file')
@pytest.mark.unit
def test_ais_endpoint_to_dir(self):
"""Test converting an AIS endpoint to dir.
"""
assert ais_endpoint_to_dir('http://local:123') == os.path.join('local', '123')
assert ais_endpoint_to_dir('http://1.2.3.4:567') == os.path.join('1.2.3.4', '567')
with pytest.raises(ValueError):
ais_endpoint_to_dir('local:123')
@pytest.mark.unit
def test_ais_binary(self):
"""Test cache dir path.
"""
with mock.patch('shutil.which', lambda x: '/test/path/ais'):
assert ais_binary() == '/test/path/ais'
# Negative example: AIS binary cannot be found
with mock.patch('shutil.which', lambda x: None), mock.patch('os.path.isfile', lambda x: None):
with pytest.raises(RuntimeError):
ais_binary()
@pytest.mark.unit
def test_datastore_path_to_webdataset_url(self):
"""Test conversion of data store path to an URL for WebDataset.
"""
assert datastore_path_to_webdataset_url('ais://test/path') == 'pipe:ais get ais://test/path - || true'
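# Illustrative sketch (not part of the original test file): a minimal reimplementation of the
# behaviour the assertions above expect. The helper names `_split_ais_uri` and `_to_webdataset_url`
# are hypothetical stand-ins for the real `nemo.utils.data_utils` functions.
def _split_ais_uri(uri: str):
    """Split an 'ais://bucket/object' URI into (bucket, object), mirroring the positive cases above."""
    prefix = 'ais://'
    if not uri.startswith(prefix):
        raise ValueError(f'Invalid AIS URI: {uri}')
    bucket, _, obj = uri[len(prefix):].partition('/')
    return bucket, obj
def _to_webdataset_url(path: str) -> str:
    """Build the WebDataset pipe URL expected by the last test above."""
    return f'pipe:ais get {path} - || true'
assert _split_ais_uri('ais://bucket/object') == ('bucket', 'object')
assert _to_webdataset_url('ais://test/path') == 'pipe:ais get ais://test/path - || true'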
| NeMo-main | tests/utils/test_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.utils.enum import PrettyStrEnum
class ASRModelType(PrettyStrEnum):
CTC = "ctc"
RNNT = "rnnt"
class TestPrettyStrEnum:
def test_incorrect_value(self):
"""Test pretty error message for invalid value"""
        try:
            ASRModelType("incorrect")
        except ValueError as e:
            assert str(e) == "incorrect is not a valid ASRModelType. Possible choices: ctc, rnnt"
        else:
            raise AssertionError("ValueError was not raised for an invalid value")
def test_correct_value(self):
"""Test that correct value is accepted"""
assert ASRModelType("ctc") == ASRModelType.CTC
def test_str(self):
"""
Test that str() returns the source value,
useful for serialization/deserialization and user-friendly logging
"""
assert str(ASRModelType("ctc")) == "ctc"
| NeMo-main | tests/utils/test_enum.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import soundfile as sf
import torch
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
@pytest.fixture()
def set_device():
return torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@pytest.fixture()
def language_specific_text_example():
return {
"en": "Caslon's type is clear and neat, and fairly well designed;",
"de": "Ich trinke gerne Kräutertee mit Lavendel.",
"es": "Los corazones de pollo son una delicia.",
"zh": "双辽境内除东辽河、西辽河等5条河流",
}
@pytest.fixture()
def supported_languages(language_specific_text_example):
return sorted(language_specific_text_example.keys())
@pytest.fixture()
def get_language_id_from_pretrained_model_name(supported_languages):
def _validate(pretrained_model_name):
language_id = pretrained_model_name.split("_")[1]
if language_id not in supported_languages:
pytest.fail(
f"`PretrainedModelInfo.pretrained_model_name={pretrained_model_name}` does not follow the naming "
f"convention as `tts_languageID_model_*`, or `languageID` is not supported in {supported_languages}."
)
return language_id
return _validate
@pytest.fixture()
def mel_spec_example(set_device):
    # Specify a mel-spectrogram value range close to the ones generated in practice. The values could also be
    # mocked with `torch.randn` for testing purposes.
min_val = -11.0
max_val = 0.5
batch_size = 1
n_mel_channels = 80
n_frames = 330
spec = (min_val - max_val) * torch.rand(batch_size, n_mel_channels, n_frames, device=set_device) + max_val
return spec
@pytest.fixture()
def audio_text_pair_example_english(test_data_dir, set_device):
manifest_path = os.path.join(test_data_dir, 'tts/mini_ljspeech/manifest.json')
data = read_manifest(manifest_path)
audio_filepath = data[-1]["audio_filepath"]
text_raw = data[-1]["text"]
# Load audio
audio_data, orig_sr = sf.read(audio_filepath)
audio = torch.tensor(audio_data, dtype=torch.float, device=set_device).unsqueeze(0)
audio_len = torch.tensor(audio_data.shape[0], device=set_device).long().unsqueeze(0)
return audio, audio_len, text_raw
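# Illustrative sketch (assumption, not part of the original fixtures): how a test might consume
# `mel_spec_example`. The test name is hypothetical; it only relies on the fixture sampling
# values uniformly in [-11.0, 0.5] via the (min_val - max_val) * rand + max_val construction.
def test_mel_spec_example_range(mel_spec_example):
    assert mel_spec_example.shape == (1, 80, 330)
    assert mel_spec_example.min() >= -11.0
    assert mel_spec_example.max() <= 0.5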
| NeMo-main | tests/fixtures/tts.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018-2020 William Falcon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
import torch
from .pl_utils import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES
@dataclass(frozen=True)
class LossInput:
"""
The input for ``nemo.collections.common.metrics.GlobalAverageLossMetric`` metric tests.
Args:
loss_sum_or_avg: a one dimensional float tensor which contains losses for averaging. Each element is either a
sum or mean of several losses depending on the parameter ``take_avg_loss`` of the
``nemo.collections.common.metrics.GlobalAverageLossMetric`` class.
        num_measurements: a one dimensional integer tensor which contains the number of measurements whose sums or
            average values are stored in ``loss_sum_or_avg``.
"""
loss_sum_or_avg: torch.Tensor
num_measurements: torch.Tensor
NO_ZERO_NUM_MEASUREMENTS = LossInput(
loss_sum_or_avg=torch.rand(NUM_BATCHES) * 2.0 - 1.0, num_measurements=torch.randint(1, 100, (NUM_BATCHES,)),
)
SOME_NUM_MEASUREMENTS_ARE_ZERO = LossInput(
loss_sum_or_avg=torch.rand(NUM_BATCHES) * 2.0 - 1.0,
num_measurements=torch.cat(
(
torch.randint(1, 100, (NUM_BATCHES // 2,), dtype=torch.int32),
torch.zeros(NUM_BATCHES - NUM_BATCHES // 2, dtype=torch.int32),
)
),
)
ALL_NUM_MEASUREMENTS_ARE_ZERO = LossInput(
loss_sum_or_avg=torch.rand(NUM_BATCHES) * 2.0 - 1.0, num_measurements=torch.zeros(NUM_BATCHES, dtype=torch.int32),
)
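# Illustrative sketch (assumption, not part of the original file): when ``take_avg_loss=True``
# the expected global average for these inputs is the measurement-weighted mean,
# sum(loss_avg_i * n_i) / sum(n_i). The helper name below is hypothetical.
def _expected_global_average(inp: LossInput) -> torch.Tensor:
    weighted = inp.loss_sum_or_avg * inp.num_measurements
    return weighted.sum() / inp.num_measurements.sum()
assert torch.isfinite(_expected_global_average(NO_ZERO_NUM_MEASUREMENTS))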
| NeMo-main | tests/collections/common/loss_inputs.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from contextlib import contextmanager
from pathlib import Path
from unittest import mock
import numpy as np
import pytest
from nemo.collections.common.parts.preprocessing.manifest import get_full_path
from nemo.collections.common.parts.utils import flatten
class TestListUtils:
@pytest.mark.unit
def test_flatten(self):
"""Test flattening an iterable with different values: str, bool, int, float, complex.
"""
test_cases = []
test_cases.append({'input': ['aa', 'bb', 'cc'], 'golden': ['aa', 'bb', 'cc']})
test_cases.append({'input': ['aa', ['bb', 'cc']], 'golden': ['aa', 'bb', 'cc']})
test_cases.append({'input': ['aa', [['bb'], [['cc']]]], 'golden': ['aa', 'bb', 'cc']})
test_cases.append({'input': ['aa', [[1, 2], [[3]], 4]], 'golden': ['aa', 1, 2, 3, 4]})
test_cases.append({'input': [True, [2.5, 2.0 + 1j]], 'golden': [True, 2.5, 2.0 + 1j]})
for n, test_case in enumerate(test_cases):
assert flatten(test_case['input']) == test_case['golden'], f'Test case {n} failed!'
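# Illustrative sketch (assumption): a minimal recursive flatten equivalent to the behaviour
# asserted above; the real implementation lives in nemo.collections.common.parts.utils and the
# helper name here is hypothetical.
def _flatten_sketch(items):
    out = []
    for item in items:
        if isinstance(item, (list, tuple)):
            out.extend(_flatten_sketch(item))
        else:
            out.append(item)
    return out
assert _flatten_sketch(['aa', [['bb'], [['cc']]]]) == ['aa', 'bb', 'cc']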
class TestPreprocessingUtils:
@pytest.mark.unit
def test_get_full_path_local(self, tmpdir):
"""Test with local paths
"""
# Create a few files
num_files = 10
audio_files_relative_path = [f'file_{n}.test' for n in range(num_files)]
audio_files_absolute_path = [os.path.join(tmpdir, a_file_rel) for a_file_rel in audio_files_relative_path]
data_dir = tmpdir
manifest_file = os.path.join(data_dir, 'manifest.json')
# Context manager to create dummy files
@contextmanager
def create_files(paths):
# Create files
for a_file in paths:
Path(a_file).touch()
yield
# Remove files
for a_file in paths:
Path(a_file).unlink()
# 1) Test with absolute paths and while files don't exist.
# Note: it's still expected the path will be resolved correctly, since it will be
# expanded using manifest_file.parent or data_dir and relative path.
# - single file
for n in range(num_files):
assert (
get_full_path(audio_files_absolute_path[n], manifest_file=manifest_file)
== audio_files_absolute_path[n]
)
assert get_full_path(audio_files_absolute_path[n], data_dir=data_dir) == audio_files_absolute_path[n]
# - all files in a list
assert get_full_path(audio_files_absolute_path, manifest_file=manifest_file) == audio_files_absolute_path
assert get_full_path(audio_files_absolute_path, data_dir=data_dir) == audio_files_absolute_path
# 2) Test with absolute paths and existing files.
with create_files(audio_files_absolute_path):
# - single file
for n in range(num_files):
assert (
get_full_path(audio_files_absolute_path[n], manifest_file=manifest_file)
== audio_files_absolute_path[n]
)
assert get_full_path(audio_files_absolute_path[n], data_dir=data_dir) == audio_files_absolute_path[n]
# - all files in a list
assert get_full_path(audio_files_absolute_path, manifest_file=manifest_file) == audio_files_absolute_path
assert get_full_path(audio_files_absolute_path, data_dir=data_dir) == audio_files_absolute_path
# 3) Test with relative paths while files don't exist.
# This is a situation we may have with a tarred dataset.
# In this case, we expect to return the relative path.
# - single file
for n in range(num_files):
assert (
get_full_path(audio_files_relative_path[n], manifest_file=manifest_file)
== audio_files_relative_path[n]
)
assert get_full_path(audio_files_relative_path[n], data_dir=data_dir) == audio_files_relative_path[n]
# - all files in a list
assert get_full_path(audio_files_relative_path, manifest_file=manifest_file) == audio_files_relative_path
assert get_full_path(audio_files_relative_path, data_dir=data_dir) == audio_files_relative_path
# 4) Test with relative paths and existing files.
# In this case, we expect to return the absolute path.
with create_files(audio_files_absolute_path):
# - single file
for n in range(num_files):
assert (
get_full_path(audio_files_relative_path[n], manifest_file=manifest_file)
== audio_files_absolute_path[n]
)
assert get_full_path(audio_files_relative_path[n], data_dir=data_dir) == audio_files_absolute_path[n]
# - all files in a list
assert get_full_path(audio_files_relative_path, manifest_file=manifest_file) == audio_files_absolute_path
assert get_full_path(audio_files_relative_path, data_dir=data_dir) == audio_files_absolute_path
@pytest.mark.unit
def test_get_full_path_ais(self, tmpdir):
"""Test with paths on AIStore.
"""
# Create a few files
num_files = 10
audio_files_relative_path = [f'file_{n}.test' for n in range(num_files)]
audio_files_cache_path = [os.path.join(tmpdir, a_file_rel) for a_file_rel in audio_files_relative_path]
ais_data_dir = 'ais://test'
ais_manifest_file = os.path.join(ais_data_dir, 'manifest.json')
# Context manager to create dummy files
@contextmanager
def create_files(paths):
# Create files
for a_file in paths:
Path(a_file).touch()
yield
# Remove files
for a_file in paths:
Path(a_file).unlink()
# Simulate caching in local tmpdir
def datastore_path_to_cache_path_in_tmpdir(path):
rel_path = os.path.relpath(path, start=os.path.dirname(ais_manifest_file))
if rel_path in audio_files_relative_path:
idx = audio_files_relative_path.index(rel_path)
return audio_files_cache_path[idx]
else:
raise ValueError(f'Unexpected path {path}')
with mock.patch(
'nemo.collections.common.parts.preprocessing.manifest.datastore_path_to_local_path',
datastore_path_to_cache_path_in_tmpdir,
):
# Test with relative paths and existing cached files.
# We expect to return the absolute path in the local cache.
with create_files(audio_files_cache_path):
# - single file
for n in range(num_files):
assert (
get_full_path(audio_files_relative_path[n], manifest_file=ais_manifest_file)
== audio_files_cache_path[n]
)
assert (
get_full_path(audio_files_relative_path[n], data_dir=ais_data_dir) == audio_files_cache_path[n]
)
# - all files in a list
assert (
get_full_path(audio_files_relative_path, manifest_file=ais_manifest_file) == audio_files_cache_path
)
assert get_full_path(audio_files_relative_path, data_dir=ais_data_dir) == audio_files_cache_path
@pytest.mark.unit
def test_get_full_path_audio_file_len_limit(self):
"""Test with audio_file_len_limit.
Currently, get_full_path will always return the input path when the length
        is over audio_file_len_limit, independent of whether the file exists.
"""
# Create a few files
num_examples = 10
rand_chars = list(string.ascii_uppercase + string.ascii_lowercase + string.digits + os.sep)
rand_name = lambda n: ''.join(np.random.choice(rand_chars, size=n))
for audio_file_len_limit in [255, 300]:
for n in range(num_examples):
path_length = np.random.randint(low=audio_file_len_limit, high=350)
audio_file_path = str(Path(rand_name(path_length)))
assert (
get_full_path(audio_file_path, audio_file_len_limit=audio_file_len_limit) == audio_file_path
), f'Limit {audio_file_len_limit}: expected {audio_file_path} to be returned.'
audio_file_path_with_user = os.path.join('~', audio_file_path)
audio_file_path_with_user_expected = os.path.expanduser(audio_file_path_with_user)
assert (
get_full_path(audio_file_path_with_user, audio_file_len_limit=audio_file_len_limit)
== audio_file_path_with_user_expected
), f'Limit {audio_file_len_limit}: expected {audio_file_path_with_user_expected} to be returned.'
@pytest.mark.unit
def test_get_full_path_invalid_type(self):
"""Make sure exceptions are raised when audio_file is not a string or a list of strings.
"""
with pytest.raises(ValueError, match="Unexpected audio_file type"):
get_full_path(1)
with pytest.raises(ValueError, match="Unexpected audio_file type"):
get_full_path(('a', 'b', 'c'))
with pytest.raises(ValueError, match="Unexpected audio_file type"):
get_full_path({'a': 1, 'b': 2, 'c': 3})
with pytest.raises(ValueError, match="Unexpected audio_file type"):
get_full_path([1, 2, 3])
@pytest.mark.unit
    def test_get_full_path_invalid_arguments(self):
        """Make sure exceptions are raised when manifest_file and data_dir are used incorrectly.
"""
with pytest.raises(ValueError, match="Use either manifest_file or data_dir"):
# Using a relative path without manifest_file or data_dir is not allowed
get_full_path('relative/path')
with pytest.raises(ValueError, match="Parameters manifest_file and data_dir cannot be used simultaneously."):
            # Using both manifest_file and data_dir at the same time is not allowed
get_full_path('relative/path', manifest_file='/manifest_dir/file.json', data_dir='/data/dir')
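# Illustrative sketch (assumption, not part of the original tests): a simplified version of the
# resolution rules exercised above. `_resolve_sketch` is hypothetical and ignores datastore
# caching and the audio_file_len_limit handling.
def _resolve_sketch(audio_file: str, base_dir: str) -> str:
    if os.path.isabs(audio_file):
        return audio_file
    candidate = os.path.join(base_dir, audio_file)
    # relative paths are expanded only when the expanded file actually exists (e.g. not tarred)
    return candidate if os.path.isfile(candidate) else audio_file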
| NeMo-main | tests/collections/common/test_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
from typing import Any, Dict, Union
import pytest
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.types import STEP_OUTPUT
from nemo.collections.common.callbacks import EMA
from nemo.collections.common.callbacks.ema import EMAOptimizer
from nemo.core import ModelPT
from nemo.utils.exp_manager import exp_manager
DEVICE_CAPABILITY = None
if torch.cuda.is_available():
DEVICE_CAPABILITY = torch.cuda.get_device_capability()
def extract_ema_weights(pl_module, trainer):
ema_callback = [x for x in trainer.callbacks if isinstance(x, EMA)][0]
ema_callback.swap_model_weights(trainer)
weights = extract_weights(pl_module)
ema_callback.swap_model_weights(trainer)
return weights
def extract_weights(pl_module):
return [w.detach().clone() for w in pl_module.parameters()]
class RandomDataset(torch.utils.data.Dataset):
def __init__(self, size, length):
self.len = length
self.data = torch.randn(length, size)
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return self.len
class ExampleModel(ModelPT):
def __init__(self, *args, **kwargs):
cfg = OmegaConf.structured({})
super().__init__(cfg)
self.l1 = torch.nn.modules.Linear(in_features=32, out_features=32)
self.bn = torch.nn.BatchNorm1d(32)
def train_dataloader(self):
dataset = RandomDataset(32, 16)
return torch.utils.data.DataLoader(dataset, batch_size=2)
def val_dataloader(self):
dataset = RandomDataset(32, 16)
return torch.utils.data.DataLoader(dataset, batch_size=2)
def test_dataloader(self):
dataset = RandomDataset(32, 16)
dl = torch.utils.data.DataLoader(dataset, batch_size=2)
self._test_names = ['test_{}_'.format(idx) for idx in range(len(dl))]
return dl
def forward(self, batch):
return self.l1(self.bn(batch)).sum()
def training_step(self, batch, batch_idx):
return self(batch)
def validation_step(self, batch, batch_idx):
loss = self(batch)
self.validation_step_outputs.append(loss)
return loss
def test_step(self, batch, batch_idx):
loss = self(batch)
self.test_step_outputs.append(loss)
return loss
def configure_optimizers(self):
return torch.optim.SGD(self.parameters(), lr=1e-3)
def list_available_models(self):
pass
def setup_training_data(self, train_data_config: Union[DictConfig, Dict]):
pass
def setup_validation_data(self, val_data_config: Union[DictConfig, Dict]):
pass
def setup_test_data(self, val_data_config: Union[DictConfig, Dict]):
pass
def on_validation_epoch_end(self):
self.log("val_loss", torch.stack(self.validation_step_outputs).mean())
self.validation_step_outputs.clear() # free memory
class TestEMAConfig:
@pytest.mark.unit
def test_ema_value(self):
with pytest.raises(MisconfigurationException, match="between 0 and 1"):
EMA(decay=2)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_ema_saved_state(self, tmpdir, caplog):
"""Test to ensure that when we re-load the EMA callback, it loads the EMA weights correctly."""
temp_path = os.path.join(tmpdir, 'saved_state')
class TerminateCallback(Callback):
def on_train_epoch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
self.saved_ema_weights = extract_ema_weights(pl_module, trainer)
self.pl_module_weights = extract_weights(pl_module)
raise SystemExit
model = ExampleModel()
terminate_callback = TerminateCallback()
trainer = Trainer(
max_epochs=2,
limit_val_batches=1,
limit_train_batches=16,
logger=False,
val_check_interval=0.5,
enable_checkpointing=False,
accelerator='gpu',
devices=1,
callbacks=[terminate_callback],
)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(temp_path),
"checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"},
},
)
with pytest.raises(SystemExit):
trainer.fit(model=model)
resume_path = os.path.join(temp_path, 'checkpoints/epoch=0-step=8.ckpt')
model = ExampleModel()
class CheckStateCallback(Callback):
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
weights = extract_weights(pl_module)
for x, y in zip(weights, terminate_callback.pl_module_weights):
assert torch.allclose(x.cpu(), y.cpu())
current_ema_weights = extract_ema_weights(pl_module, trainer)
for x, y in zip(current_ema_weights, terminate_callback.saved_ema_weights):
assert torch.allclose(x.cpu(), y.cpu())
for optimizer in trainer.optimizers:
assert isinstance(optimizer, EMAOptimizer)
assert optimizer.current_step == 8
trainer = Trainer(
max_epochs=2,
limit_val_batches=0,
limit_train_batches=16,
logger=False,
enable_checkpointing=False,
accelerator='gpu',
devices=1,
)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(temp_path),
"checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"},
},
)
# add the callback after the exp manager has made modifications.
trainer.callbacks.append(CheckStateCallback())
trainer.fit(model, ckpt_path=resume_path)
# ensure we can resume from the EMA weights
ema_path = os.path.join(temp_path, 'checkpoints/epoch=0-step=8-EMA.ckpt')
trainer = Trainer(
max_epochs=1,
limit_val_batches=0,
limit_train_batches=1,
logger=False,
enable_checkpointing=False,
accelerator='gpu',
devices=1,
)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(temp_path),
"checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"},
},
)
trainer.fit(model, ckpt_path=ema_path)
        # ensure that an error is raised when the EMA weights do not exist
os.remove(ema_path)
trainer = Trainer(
max_epochs=1,
limit_val_batches=0,
limit_train_batches=1,
logger=False,
enable_checkpointing=False,
accelerator='gpu',
devices=1,
)
exp_manager(
trainer,
{
"ema": {"enable": True, "validate_original_weights": True},
"explicit_log_dir": str(temp_path),
"checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"},
},
)
with pytest.raises(
MisconfigurationException, match="Unable to find the associated EMA weights when re-loading"
):
trainer.fit(model, ckpt_path=resume_path)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_exp_manager_ema_weights(self, tmpdir):
"""Test to ensure that the exp manager adds the EMA callback, and we save an additional EMA checkpoint."""
tmp_path = tmpdir / "exp_manager_test"
model = ExampleModel()
trainer = Trainer(max_epochs=1, enable_checkpointing=False, logger=False, accelerator='gpu', devices=1)
exp_manager(
trainer,
{
"ema": {"enable": True, "validate_original_weights": True},
"explicit_log_dir": str(tmp_path),
"checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"},
},
)
assert any(isinstance(callback, EMA) for callback in trainer.callbacks)
trainer.fit(model)
ema_weights = extract_ema_weights(model, trainer)
assert os.path.exists(tmp_path / "checkpoints/epoch=0-step=8.ckpt")
ema_path = tmp_path / "checkpoints/epoch=0-step=8-EMA.ckpt"
assert os.path.exists(ema_path)
duplicate_model = ExampleModel.load_from_checkpoint(str(ema_path))
for saved_weight, ema_weight in zip(duplicate_model.state_dict().values(), ema_weights):
assert torch.allclose(saved_weight.cpu(), ema_weight.cpu())
@pytest.mark.unit
def test_exp_manager_ema_weights_topk(self, tmpdir):
"""Test to ensure that EMA correctly ensures we only keep topk checkpoints."""
tmp_path = tmpdir / "exp_manager_test"
model = ExampleModel()
save_top_k = 3
trainer = Trainer(max_epochs=10, enable_checkpointing=False, logger=False, devices=1)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(tmp_path),
"checkpoint_callback_params": {"save_top_k": save_top_k},
},
)
trainer.fit(model)
        # we save 3 checkpoints for the model, 3 accompanying EMA weights, the last checkpoint, and the nemo model.
assert len(os.listdir(tmp_path / "checkpoints/")) == (save_top_k + 1) * 2 + 1
@pytest.mark.unit
def test_exp_manager_ema_weights_topk_resume(self, tmpdir):
"""Test to ensure that we always keep top_k checkpoints, even after resuming."""
tmp_path = tmpdir / "exp_manager_test"
model = ExampleModel()
save_top_k = 3
trainer = Trainer(max_epochs=10, enable_checkpointing=False, logger=False, devices=1)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(tmp_path),
"checkpoint_callback_params": {"save_top_k": save_top_k},
},
)
trainer.fit(model)
        # we save 3 checkpoints for the model, 3 accompanying EMA weights, the last checkpoint, and the nemo model.
assert len(os.listdir(tmp_path / "checkpoints/")) == (save_top_k + 1) * 2 + 1
        # reduce the top_k number when resuming; we should see only 2 top_k checkpoints now (one is deleted).
save_top_k = 2
trainer = Trainer(max_epochs=10, enable_checkpointing=False, logger=False, devices=1)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(tmp_path),
"resume_if_exists": True,
"checkpoint_callback_params": {"save_top_k": save_top_k},
},
)
trainer.fit(model)
        # we save 2 checkpoints for the model, 2 accompanying EMA weights, the last checkpoint, and the nemo model.
assert len(os.listdir(tmp_path / "checkpoints/")) == (save_top_k + 1) * 2 + 1
class TestEMATrain:
@pytest.mark.unit
@pytest.mark.parametrize(
"precision",
[
32,
16,
pytest.param(
"bf16",
marks=pytest.mark.skipif(
not DEVICE_CAPABILITY or DEVICE_CAPABILITY[0] < 8,
reason='bfloat16 is not supported on this device',
),
),
],
)
@pytest.mark.parametrize("accumulate_grad_batches", [1, 2])
@pytest.mark.parametrize("validate_original_weights", [True, False])
@pytest.mark.run_only_on('GPU')
def test_ema_run_cuda(
self, test_data_dir, precision, accumulate_grad_batches, validate_original_weights, tmpdir,
):
self.run_training_test(
accumulate_grad_batches=accumulate_grad_batches,
validate_original_weights=validate_original_weights,
accelerator='gpu',
precision=precision,
tmpdir=tmpdir,
)
@pytest.mark.unit
@pytest.mark.parametrize("accumulate_grad_batches", [1, 2])
@pytest.mark.parametrize("validate_original_weights", [True, False])
def test_ema_run_cpu(self, test_data_dir, accumulate_grad_batches, validate_original_weights, tmpdir):
self.run_training_test(
accumulate_grad_batches=accumulate_grad_batches,
validate_original_weights=validate_original_weights,
accelerator='cpu',
precision=32,
tmpdir=tmpdir,
)
def run_training_test(self, accumulate_grad_batches, validate_original_weights, accelerator, precision, tmpdir):
pl.seed_everything(123)
model = ExampleModel()
trainer = Trainer(
max_epochs=1,
precision=precision,
limit_train_batches=10,
limit_val_batches=10,
logger=False,
accumulate_grad_batches=accumulate_grad_batches,
num_sanity_val_steps=0,
enable_model_summary=False,
enable_checkpointing=False,
accelerator=accelerator,
devices=1,
)
exp_manager(
trainer,
{
"ema": {"enable": True, "validate_original_weights": validate_original_weights, "decay": 0.999},
"explicit_log_dir": str(tmpdir),
"checkpoint_callback_params": {"filename": f"{{epoch}}-{{step}}"},
},
)
# add the check callback after the exp manager has made modifications.
trainer.callbacks.append(EMAAssertCallback())
trainer.callbacks.insert(0, EMAValidationAssertCallback())
trainer.fit(model=model, val_dataloaders=model.train_dataloader())
@pytest.mark.unit
def test_ema_run_with_save_best_model(self, tmpdir):
"""Test to ensure that we save the model correctly when save best model is set to True."""
tmp_path = tmpdir / "exp_manager_test"
model = ExampleModel()
trainer = Trainer(max_epochs=1, enable_checkpointing=False, logger=False, devices=1, limit_train_batches=1)
exp_manager(
trainer,
{
"ema": {"enable": True},
"explicit_log_dir": str(tmp_path),
"checkpoint_callback_params": {"save_best_model": True},
},
)
trainer.fit(model)
class EMAAssertCallback(Callback):
def on_train_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
model_weights = extract_weights(pl_module)
self.ema_weights = extract_ema_weights(pl_module, trainer)
for x, y in zip(model_weights, self.ema_weights):
assert torch.allclose(x, y)
def on_train_batch_end(
self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: STEP_OUTPUT, batch: Any, batch_idx: int
) -> None:
if (batch_idx + 1) % trainer.accumulate_grad_batches != 0:
# skip assertion as ema weights are not updated.
return
ema_callback = [x for x in trainer.callbacks if isinstance(x, EMA)][0]
decay = ema_callback.decay
expected_ema_weights = []
new_weights = extract_weights(pl_module)
for ema_weight, new_weight in zip(self.ema_weights, new_weights):
expected_ema_weight = ema_weight * decay
expected_ema_weight += new_weight * (1 - decay)
expected_ema_weights.append(expected_ema_weight)
ema_weights = extract_ema_weights(pl_module, trainer)
for actual_ema_weight, expected_ema_weight in zip(ema_weights, expected_ema_weights):
assert torch.allclose(actual_ema_weight, expected_ema_weight)
self.ema_weights = expected_ema_weights
class EMAValidationAssertCallback(Callback):
def on_validation_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
ema_callback = [x for x in trainer.callbacks if isinstance(x, EMA)][0]
self._original_weights = extract_weights(pl_module)
self._ema_weights = extract_ema_weights(pl_module, trainer)
# call original EMA function
super().on_validation_start(trainer, pl_module)
if not ema_callback.validate_original_weights:
if ema_callback._ema_initialized:
# check model weights are now EMA weights
for ema_weights, module_weights in zip(self._ema_weights, extract_weights(pl_module)):
                    assert torch.allclose(ema_weights, module_weights)
def on_validation_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
ema_callback = [x for x in trainer.callbacks if isinstance(x, EMA)][0]
if not ema_callback.validate_original_weights:
model_weights = extract_weights(pl_module)
if ema_callback._ema_initialized:
for orig_weights, module_weights in zip(self._original_weights, model_weights):
                    assert torch.allclose(orig_weights.cpu(), module_weights.cpu())
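# Illustrative numeric sketch (assumption, not used by the tests above): a single EMA update with
# decay=0.999, matching the formula asserted in EMAAssertCallback:
# ema_new = ema * decay + weight * (1 - decay).
_decay = 0.999
_ema, _weight = torch.tensor(1.0), torch.tensor(0.0)
assert torch.isclose(_ema * _decay + _weight * (1 - _decay), torch.tensor(0.999))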
| NeMo-main | tests/collections/common/test_ema.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.common.metrics.classification_accuracy import TopKClassificationAccuracy
from .loss_inputs import ALL_NUM_MEASUREMENTS_ARE_ZERO, NO_ZERO_NUM_MEASUREMENTS, SOME_NUM_MEASUREMENTS_ARE_ZERO
from .perplexity_inputs import NO_PROBS_NO_LOGITS, ONLY_LOGITS1, ONLY_LOGITS100, ONLY_PROBS, PROBS_AND_LOGITS
from .pl_utils import LossTester, PerplexityTester
class TestCommonMetrics:
    top_k_logits = torch.tensor(
        [[0.1, 0.3, 0.2, 0.0], [0.9, 0.6, 0.2, 0.3], [0.2, 0.1, 0.4, 0.3]]
    )  # per-row argmax: 1, 0, 2
@pytest.mark.unit
def test_top_1_accuracy(self):
labels = torch.tensor([0, 0, 2], dtype=torch.long)
accuracy = TopKClassificationAccuracy(top_k=None)
acc = accuracy(logits=self.top_k_logits, labels=labels)
assert accuracy.correct_counts_k.shape == torch.Size([1])
assert accuracy.total_counts_k.shape == torch.Size([1])
assert abs(acc[0] - 0.667) < 1e-3
@pytest.mark.unit
def test_top_1_2_accuracy(self):
labels = torch.tensor([0, 1, 0], dtype=torch.long)
accuracy = TopKClassificationAccuracy(top_k=[1, 2])
top1_acc, top2_acc = accuracy(logits=self.top_k_logits, labels=labels)
assert accuracy.correct_counts_k.shape == torch.Size([2])
assert accuracy.total_counts_k.shape == torch.Size([2])
assert abs(top1_acc - 0.0) < 1e-3
assert abs(top2_acc - 0.333) < 1e-3
@pytest.mark.unit
def test_top_1_accuracy_distributed(self):
# Simulate test on 2 process DDP execution
labels = torch.tensor([[0, 0, 2], [2, 0, 0]], dtype=torch.long)
accuracy = TopKClassificationAccuracy(top_k=None)
proc1_acc = accuracy(logits=self.top_k_logits, labels=labels[0])
correct1, total1 = accuracy.correct_counts_k, accuracy.total_counts_k
accuracy.reset()
proc2_acc = accuracy(logits=torch.flip(self.top_k_logits, dims=[1]), labels=labels[1]) # reverse logits
correct2, total2 = accuracy.correct_counts_k, accuracy.total_counts_k
correct = torch.stack([correct1, correct2])
total = torch.stack([total1, total2])
assert correct.shape == torch.Size([2, 1])
assert total.shape == torch.Size([2, 1])
assert abs(proc1_acc[0] - 0.667) < 1e-3 # 2/3
assert abs(proc2_acc[0] - 0.333) < 1e-3 # 1/3
accuracy.reset()
accuracy.correct_counts_k = torch.tensor([correct.sum()])
accuracy.total_counts_k = torch.tensor([total.sum()])
acc_topk = accuracy.compute()
acc_top1 = acc_topk[0]
assert abs(acc_top1 - 0.5) < 1e-3 # 3/6
@pytest.mark.unit
def test_top_1_accuracy_distributed_uneven_batch(self):
# Simulate test on 2 process DDP execution
accuracy = TopKClassificationAccuracy(top_k=None)
proc1_acc = accuracy(logits=self.top_k_logits, labels=torch.tensor([0, 0, 2]))
correct1, total1 = accuracy.correct_counts_k, accuracy.total_counts_k
proc2_acc = accuracy(
logits=torch.flip(self.top_k_logits, dims=[1])[:2, :], # reverse logits, select first 2 samples
            labels=torch.tensor([2, 0]),  # reduce number of labels
        )
correct2, total2 = accuracy.correct_counts_k, accuracy.total_counts_k
correct = torch.stack([correct1, correct2])
total = torch.stack([total1, total2])
assert correct.shape == torch.Size([2, 1])
assert total.shape == torch.Size([2, 1])
assert abs(proc1_acc[0] - 0.667) < 1e-3 # 2/3
assert abs(proc2_acc[0] - 0.500) < 1e-3 # 1/2
accuracy.correct_counts_k = torch.tensor([correct.sum()])
accuracy.total_counts_k = torch.tensor([total.sum()])
acc_topk = accuracy.compute()
acc_top1 = acc_topk[0]
assert abs(acc_top1 - 0.6) < 1e-3 # 3/5
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
@pytest.mark.parametrize(
"probs, logits",
[
(ONLY_PROBS.probs, ONLY_PROBS.logits),
(ONLY_LOGITS1.probs, ONLY_LOGITS1.logits),
(ONLY_LOGITS100.probs, ONLY_LOGITS100.logits),
(PROBS_AND_LOGITS.probs, PROBS_AND_LOGITS.logits),
(NO_PROBS_NO_LOGITS.probs, NO_PROBS_NO_LOGITS.logits),
],
)
class TestPerplexity(PerplexityTester):
def test_perplexity(self, ddp, dist_sync_on_step, probs, logits):
self.run_class_perplexity_test(
ddp=ddp, probs=probs, logits=logits, dist_sync_on_step=dist_sync_on_step,
)
@pytest.mark.parametrize("ddp", [True, False])
@pytest.mark.parametrize("dist_sync_on_step", [True, False])
@pytest.mark.parametrize("take_avg_loss", [True, False])
@pytest.mark.parametrize(
"loss_sum_or_avg, num_measurements",
[
(NO_ZERO_NUM_MEASUREMENTS.loss_sum_or_avg, NO_ZERO_NUM_MEASUREMENTS.num_measurements),
(SOME_NUM_MEASUREMENTS_ARE_ZERO.loss_sum_or_avg, SOME_NUM_MEASUREMENTS_ARE_ZERO.num_measurements),
(ALL_NUM_MEASUREMENTS_ARE_ZERO.loss_sum_or_avg, ALL_NUM_MEASUREMENTS_ARE_ZERO.num_measurements),
],
)
class TestLoss(LossTester):
def test_loss(self, ddp, dist_sync_on_step, loss_sum_or_avg, num_measurements, take_avg_loss):
self.run_class_loss_test(
ddp=ddp,
loss_sum_or_avg=loss_sum_or_avg,
num_measurements=num_measurements,
dist_sync_on_step=dist_sync_on_step,
take_avg_loss=take_avg_loss,
)
| NeMo-main | tests/collections/common/test_metrics.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
from nemo.collections.common.tokenizers.youtokentome_tokenizer import YouTokenToMeTokenizer
MODEL_SPECIAL_TOKENS = {
'unk_token': '[UNK]',
'sep_token': '[SEP]',
'pad_token': '[PAD]',
'bos_token': '[CLS]',
'mask_token': '[MASK]',
'eos_token': '[SEP]',
'cls_token': '[CLS]',
}
class TestSentencePieceTokenizerLegacy:
model_name = "/m_common.model"
@pytest.mark.unit
def test_add_special_tokens(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
special_tokens = MODEL_SPECIAL_TOKENS
tokenizer.add_special_tokens(special_tokens)
assert tokenizer.vocab_size == tokenizer.original_vocab_size + len(set(special_tokens.values()))
@pytest.mark.unit
def test_text_to_tokens(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
special_tokens = MODEL_SPECIAL_TOKENS
tokenizer.add_special_tokens(special_tokens)
text = "[CLS] a b c [MASK] e f [SEP] g h i [SEP]"
tokens = tokenizer.text_to_tokens(text)
assert len(tokens) == len(text.split())
assert tokens.count("[CLS]") == 1
assert tokens.count("[MASK]") == 1
assert tokens.count("[SEP]") == 2
@pytest.mark.unit
def test_tokens_to_text(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
text = "[CLS] a b c [MASK] e f [SEP] g h i [SEP]"
tokens = tokenizer.text_to_tokens(text)
result = tokenizer.tokens_to_text(tokens)
assert text == result
@pytest.mark.unit
def test_text_to_ids(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
special_tokens = MODEL_SPECIAL_TOKENS
tokenizer.add_special_tokens(special_tokens)
text = "[CLS] a b c [MASK] e f [SEP] g h i [SEP]"
ids = tokenizer.text_to_ids(text)
assert len(ids) == len(text.split())
assert ids.count(tokenizer.token_to_id("[CLS]")) == 1
assert ids.count(tokenizer.token_to_id("[MASK]")) == 1
assert ids.count(tokenizer.token_to_id("[SEP]")) == 2
@pytest.mark.unit
def test_ids_to_text(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
special_tokens = MODEL_SPECIAL_TOKENS
tokenizer.add_special_tokens(special_tokens)
text = "[CLS] a b c [MASK] e f [SEP] g h i [SEP]"
ids = tokenizer.text_to_ids(text)
result = tokenizer.ids_to_text(ids)
assert text == result
@pytest.mark.unit
def test_tokens_to_ids(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
special_tokens = MODEL_SPECIAL_TOKENS
tokenizer.add_special_tokens(special_tokens)
text = "[CLS] a b c [MASK] e f [SEP] g h i [SEP]"
tokens = tokenizer.text_to_tokens(text)
ids = tokenizer.tokens_to_ids(tokens)
assert len(ids) == len(tokens)
assert ids.count(tokenizer.token_to_id("[CLS]")) == 1
assert ids.count(tokenizer.token_to_id("[MASK]")) == 1
assert ids.count(tokenizer.token_to_id("[SEP]")) == 2
@pytest.mark.unit
def test_ids_to_tokens(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name, legacy=True)
special_tokens = MODEL_SPECIAL_TOKENS
tokenizer.add_special_tokens(special_tokens)
text = "[CLS] a b c [MASK] e f [SEP] g h i [SEP]"
tokens = tokenizer.text_to_tokens(text)
ids = tokenizer.tokens_to_ids(tokens)
result = tokenizer.ids_to_tokens(ids)
assert len(result) == len(tokens)
for i in range(len(result)):
assert result[i] == tokens[i]
class TestSentencePieceTokenizer:
model_name = "/m_new.model"
@pytest.mark.unit
def test_text_to_tokens(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name)
# <cls> is user_defined_symbol in the test tokenizer model
# <unk>, <sep>, <s>, and </s> are control symbols
text = "<cls> a b c <sep> e f g h i </s>"
tokens = tokenizer.text_to_tokens(text)
assert tokens.count("<cls>") == 1
assert tokens.count("<sep>") == 0
assert tokens.count("</s>") == 0
@pytest.mark.unit
def test_tokens_to_text(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name)
# <cls> is user_defined_symbol in the test tokenizer model
text = "<cls> a b c e f g h i"
tokens = tokenizer.text_to_tokens(text)
result = tokenizer.tokens_to_text(tokens)
assert text == result
@pytest.mark.unit
def test_text_to_ids(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name)
# <cls> is user_defined_symbol in the test tokenizer model
# <unk>, <sep>, <s>, and </s> are control symbols
text = "<cls> a b c <sep> e f g h i </s>"
tokens = tokenizer.text_to_ids(text)
assert tokens.count(tokenizer.token_to_id("<cls>")) == 1
assert tokens.count(tokenizer.token_to_id("<sep>")) == 0
assert tokens.count(tokenizer.token_to_id("</s>")) == 0
@pytest.mark.unit
def test_ids_to_text(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name)
text = "<cls> a b c <sep> e f g h i </s>"
ids = tokenizer.text_to_ids(text)
result = tokenizer.ids_to_text(ids)
assert text == result
@pytest.mark.unit
def test_tokens_to_ids(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name)
tokens = ["<cls>", "a", "b", "c", "<sep>", "e", "f", "<sep>", "g", "h", "i", "</s>"]
ids = tokenizer.tokens_to_ids(tokens)
assert len(ids) == len(tokens)
assert ids.count(tokenizer.token_to_id("<cls>")) == 1
assert ids.count(tokenizer.token_to_id("</s>")) == 1
assert ids.count(tokenizer.token_to_id("<sep>")) == 2
@pytest.mark.unit
def test_ids_to_tokens(self, test_data_dir):
tokenizer = SentencePieceTokenizer(test_data_dir + self.model_name)
tokens = ["<cls>", "a", "b", "c", "<sep>", "e", "f", "<sep>", "g", "h", "i", "</s>"]
ids = tokenizer.tokens_to_ids(tokens)
result = tokenizer.ids_to_tokens(ids)
assert len(result) == len(tokens)
for i in range(len(result)):
assert result[i] == tokens[i]
class TestYouTokenToMeTokenizer:
model_name = "/yttm.4096.en-de.model"
@pytest.mark.unit
def test_text_to_tokens(self, test_data_dir):
tokenizer = YouTokenToMeTokenizer(test_data_dir + self.model_name)
text = "<BOS> a b c e <UNK> f g h i <EOS>"
tokens = tokenizer.text_to_tokens(text)
assert tokens.count("<BOS>") == 0
assert tokens.count("<UNK>") == 0
assert tokens.count("<EOS>") == 0
@pytest.mark.unit
def test_tokens_to_text(self, test_data_dir):
tokenizer = YouTokenToMeTokenizer(test_data_dir + self.model_name)
text = "a b c e f g h i"
tokens = tokenizer.text_to_tokens(text)
result = tokenizer.tokens_to_text(tokens)
assert text == result
@pytest.mark.unit
def test_text_to_ids(self, test_data_dir):
tokenizer = YouTokenToMeTokenizer(test_data_dir + self.model_name)
text = "<BOS> a b c <UNK> e f g h i <EOS>"
tokens = tokenizer.text_to_ids(text)
assert tokens.count(tokenizer.bos_id) == 0
assert tokens.count(tokenizer.unk_id) == 0
assert tokens.count(tokenizer.eos_id) == 0
@pytest.mark.unit
def test_ids_to_text(self, test_data_dir):
tokenizer = YouTokenToMeTokenizer(test_data_dir + self.model_name)
text = "a b c e f g h i"
ids = tokenizer.text_to_ids(text)
result = tokenizer.ids_to_text(ids)
assert text == result
@pytest.mark.unit
def test_tokens_to_ids(self, test_data_dir):
tokenizer = YouTokenToMeTokenizer(test_data_dir + self.model_name)
tokens = ["<BOS>", "a", "b", "c", "<UNK>", "e", "f", "<UNK>", "g", "h", "i", "<EOS>"]
ids = tokenizer.tokens_to_ids(tokens)
assert len(ids) == len(tokens)
assert ids.count(tokenizer.bos_id) == 1
assert ids.count(tokenizer.eos_id) == 1
assert ids.count(tokenizer.unk_id) == 2
@pytest.mark.unit
def test_ids_to_tokens(self, test_data_dir):
tokenizer = YouTokenToMeTokenizer(test_data_dir + self.model_name)
tokens = ["<BOS>", "a", "b", "c", "<UNK>", "e", "f", "<UNK>", "g", "h", "i", "<EOS>"]
ids = tokenizer.tokens_to_ids(tokens)
result = tokenizer.ids_to_tokens(ids)
assert len(result) == len(tokens)
for i in range(len(result)):
assert result[i] == tokens[i]
| NeMo-main | tests/collections/common/test_spc_tokenizer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018-2020 William Falcon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import sys
from functools import partial
from typing import Callable, Optional
import numpy as np
import pytest
import torch
from scipy.stats import entropy
from torch.distributions.utils import logits_to_probs
from torch.multiprocessing import Pool, set_start_method
from torchmetrics import Metric
from nemo.collections.common.metrics import GlobalAverageLossMetric, Perplexity
NUM_PROCESSES = 2
NUM_BATCHES = 10
BATCH_SIZE = 16
NUM_CLASSES = 5
EXTRA_DIM = 3
THRESHOLD = 0.5
def setup_ddp(rank, world_size):
""" Setup ddp enviroment """
os.environ["MASTER_ADDR"] = 'localhost'
os.environ['MASTER_PORT'] = '8088'
if torch.distributed.is_available() and sys.platform not in ['win32', 'cygwin']:
torch.distributed.init_process_group("gloo", rank=rank, world_size=world_size)
def _class_test(
rank: int,
worldsize: int,
preds: torch.Tensor,
target: torch.Tensor,
metric_class: Metric,
sk_metric: Callable,
dist_sync_on_step: bool,
metric_args: dict = {},
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
atol: float = 1e-8,
):
""" Utility function doing the actual comparison between lightning class metric
and reference metric.
Args:
rank: rank of current process
worldsize: number of processes
preds: torch tensor with predictions
target: torch tensor with targets
metric_class: lightning metric class that should be tested
sk_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
"""
    # Instantiate lightning metric
metric = metric_class(dist_sync_on_step=dist_sync_on_step, **metric_args)
# verify metrics work after being loaded from pickled state
pickled_metric = pickle.dumps(metric)
metric = pickle.loads(pickled_metric)
for i in range(rank, NUM_BATCHES, worldsize):
batch_result = metric(preds[i], target[i])
if metric.dist_sync_on_step:
if rank == 0:
ddp_preds = torch.stack([preds[i + r] for r in range(worldsize)])
ddp_target = torch.stack([target[i + r] for r in range(worldsize)])
sk_batch_result = sk_metric(ddp_preds, ddp_target)
# assert for dist_sync_on_step
if check_dist_sync_on_step:
assert np.allclose(batch_result.numpy(), sk_batch_result, atol=atol)
else:
sk_batch_result = sk_metric(preds[i], target[i])
# assert for batch
if check_batch:
assert np.allclose(batch_result.numpy(), sk_batch_result, atol=atol)
# check on all batches on all ranks
result = metric.compute()
assert isinstance(result, torch.Tensor)
total_preds = torch.stack([preds[i] for i in range(NUM_BATCHES)])
total_target = torch.stack([target[i] for i in range(NUM_BATCHES)])
sk_result = sk_metric(total_preds, total_target)
# assert after aggregation
assert np.allclose(result.numpy(), sk_result, atol=atol)
def _functional_test(
preds: torch.Tensor,
target: torch.Tensor,
metric_functional: Callable,
sk_metric: Callable,
metric_args: dict = {},
atol: float = 1e-8,
):
""" Utility function doing the actual comparison between lightning functional metric
and reference metric.
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_functional: lightning metric functional that should be tested
sk_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
"""
metric = partial(metric_functional, **metric_args)
for i in range(NUM_BATCHES):
lightning_result = metric(preds[i], target[i])
sk_result = sk_metric(preds[i], target[i])
        # assert it's the same
assert np.allclose(lightning_result.numpy(), sk_result, atol=atol)
class MetricTester:
""" Class used for efficiently run alot of parametrized tests in ddp mode.
Makes sure that ddp is only setup once and that pool of processes are
used for all tests.
All tests should subclass from this and implement a new method called
`test_metric_name`
where the method `self.run_metric_test` is called inside.
"""
atol = 1e-8
def setup_class(self):
""" Setup the metric class. This will spawn the pool of workers that are
used for metric testing and setup_ddp
"""
try:
set_start_method('spawn')
except RuntimeError:
pass
self.poolSize = NUM_PROCESSES
self.pool = Pool(processes=self.poolSize)
self.pool.starmap(setup_ddp, [(rank, self.poolSize) for rank in range(self.poolSize)])
def teardown_class(self):
""" Close pool of workers """
self.pool.close()
self.pool.join()
def run_functional_metric_test(
self,
preds: torch.Tensor,
target: torch.Tensor,
metric_functional: Callable,
sk_metric: Callable,
metric_args: dict = {},
):
""" Main method that should be used for testing functions. Call this inside
testing method
Args:
preds: torch tensor with predictions
target: torch tensor with targets
metric_functional: lightning metric class that should be tested
sk_metric: callable function that is used for comparison
metric_args: dict with additional arguments used for class initialization
"""
_functional_test(
preds=preds,
target=target,
metric_functional=metric_functional,
sk_metric=sk_metric,
metric_args=metric_args,
atol=self.atol,
)
def run_class_metric_test(
self,
ddp: bool,
preds: torch.Tensor,
target: torch.Tensor,
metric_class: Metric,
sk_metric: Callable,
dist_sync_on_step: bool,
metric_args: dict = {},
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
):
""" Main method that should be used for testing class. Call this inside testing
methods.
Args:
ddp: bool, if running in ddp mode or not
preds: torch tensor with predictions
target: torch tensor with targets
metric_class: lightning metric class that should be tested
sk_metric: callable function that is used for comparison
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
"""
if ddp:
if sys.platform == "win32":
pytest.skip("DDP not supported on windows")
self.pool.starmap(
partial(
_class_test,
preds=preds,
target=target,
metric_class=metric_class,
sk_metric=sk_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
),
[(rank, self.poolSize) for rank in range(self.poolSize)],
)
else:
_class_test(
0,
1,
preds=preds,
target=target,
metric_class=metric_class,
sk_metric=sk_metric,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
)
def reference_perplexity_func(probs):
ent = entropy(probs, axis=-1)
ppl = np.exp(ent)
return ppl.mean()
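# Illustrative sketch (assumption): for a uniform distribution over K classes the entropy is
# ln(K), so the reference perplexity equals K; checked here for K = 4.
_uniform_probs = np.full((2, 4), 0.25)
assert np.isclose(reference_perplexity_func(_uniform_probs), 4.0)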
def _perplexity_class_test(
rank: int,
worldsize: int,
probs: Optional[torch.Tensor],
logits: Optional[torch.Tensor],
dist_sync_on_step: bool,
metric_args: dict = {},
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
atol: float = 1e-8,
):
""" Utility function doing the actual comparison between lightning class metric
and reference metric.
Args:
rank: rank of current process
worldsize: number of processes
probs: torch tensor with probabilities
        logits: torch tensor with logits. The function checks that ``probs`` and ``logits`` are mutually exclusive
            for the ``Perplexity`` metric.
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
"""
    # Instantiate lightning metric
perplexity = Perplexity(dist_sync_on_step=dist_sync_on_step, **metric_args)
if (probs is None) == (logits is None):
with pytest.raises(ValueError):
perplexity(probs, logits)
return
# verify perplexity works after being loaded from pickled state
pickled_metric = pickle.dumps(perplexity)
perplexity = pickle.loads(pickled_metric)
for i in range(rank, NUM_BATCHES, worldsize):
batch_result = perplexity(None if probs is None else probs[i], None if logits is None else logits[i])
if perplexity.dist_sync_on_step:
if rank == 0:
if probs is not None:
ddp_probs = torch.stack([probs[i + r] for r in range(worldsize)])
else:
ddp_logits = torch.stack([logits[i + r] for r in range(worldsize)])
ddp_probs = logits_to_probs(ddp_logits, is_binary=False)
sk_batch_result = reference_perplexity_func(ddp_probs)
# assert for dist_sync_on_step
if check_dist_sync_on_step:
assert np.allclose(batch_result.numpy(), sk_batch_result, atol=atol)
else:
if probs is None:
p = logits_to_probs(logits[i], is_binary=False)
else:
p = probs[i]
sk_batch_result = reference_perplexity_func(p)
# assert for batch
if check_batch:
assert np.allclose(batch_result.numpy(), sk_batch_result, atol=atol)
assert (probs is None) != (logits is None)
# check on all batches on all ranks
result = perplexity.compute()
assert isinstance(result, torch.Tensor)
if probs is None:
probs = logits_to_probs(logits, is_binary=False)
sk_result = reference_perplexity_func(probs)
# assert after aggregation
assert np.allclose(result.numpy(), sk_result, atol=atol)
class PerplexityTester(MetricTester):
def run_class_perplexity_test(
self,
ddp: bool,
probs: Optional[torch.Tensor],
logits: Optional[torch.Tensor],
dist_sync_on_step: bool,
metric_args: dict = {},
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
):
""" Main method that should be used for testing class. Call this inside testing
methods.
Args:
ddp: bool, if running in ddp mode or not
probs: torch tensor with probabilities.
logits: torch tensor with logits. This test checks that probs and logits are mutually exclusive for
``Perplexity`` metric.
dist_sync_on_step: bool, if true will synchronize metric state across
processes at each ``forward()``
metric_args: dict with additional arguments used for class initialization
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
"""
if ddp:
if sys.platform == "win32":
pytest.skip("DDP not supported on windows")
self.pool.starmap(
partial(
_perplexity_class_test,
probs=probs,
logits=logits,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
),
[(rank, self.poolSize) for rank in range(self.poolSize)],
)
else:
_perplexity_class_test(
0,
1,
probs=probs,
logits=logits,
dist_sync_on_step=dist_sync_on_step,
metric_args=metric_args,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
)
def reference_loss_func(loss_sum_or_avg: torch.Tensor, num_measurements: torch.Tensor, take_avg_loss: bool):
"""
    Returns the average loss for data from ``loss_sum_or_avg``. This function sums all losses in ``loss_sum_or_avg``
    and divides the sum by the sum of the ``num_measurements`` elements.
    If ``take_avg_loss`` is ``True``, then each ``loss_sum_or_avg[i]`` element is the mean of ``num_measurements[i]``
    losses. In that case, before summing the losses, each element of ``loss_sum_or_avg`` is multiplied by the
    corresponding element of ``num_measurements``.
    If the sum of ``num_measurements`` is zero, the function returns a NaN tensor.
    The function is used for testing the ``nemo.collections.common.metrics.GlobalAverageLossMetric`` class.
    Args:
        loss_sum_or_avg: a one dimensional float ``torch.Tensor``. Sums or mean values of losses.
        num_measurements: a one dimensional integer ``torch.Tensor``. Number of values on which the sums or means in
            ``loss_sum_or_avg`` are calculated.
        take_avg_loss: if ``True`` then ``loss_sum_or_avg`` contains mean losses, otherwise ``loss_sum_or_avg``
            contains sums of losses.
"""
loss_sum_or_avg = loss_sum_or_avg.clone().detach()
if take_avg_loss:
loss_sum_or_avg *= num_measurements
nm_sum = num_measurements.sum()
if nm_sum.eq(0):
return torch.tensor(float('nan'))
return loss_sum_or_avg.sum() / nm_sum
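# Illustrative examples for the reference function above (hypothetical values, not part of the
# original tests):
#     reference_loss_func(torch.tensor([4.0, 6.0]), torch.tensor([2, 3]), take_avg_loss=False)
#     # -> tensor(2.)   i.e. (4.0 + 6.0) / (2 + 3)
#     reference_loss_func(torch.tensor([1.0, 3.0]), torch.tensor([2, 3]), take_avg_loss=True)
#     # -> tensor(2.2)  i.e. (1.0 * 2 + 3.0 * 3) / (2 + 3)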
def _loss_class_test(
rank: int,
worldsize: int,
loss_sum_or_avg: Optional[torch.Tensor],
num_measurements: Optional[torch.Tensor],
dist_sync_on_step: bool,
take_avg_loss: bool,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
atol: float = 1e-8,
):
""" Utility function doing the actual comparison between lightning class metric
and reference metric.
Args:
rank: rank of current process
worldsize: number of processes
loss_sum_or_avg: a one dimensional float torch tensor with loss sums or means.
num_measurements: a one dimensional integer torch tensor with number of values on which sums or means from
``loss_sum_or_avg`` were computed.
dist_sync_on_step: bool, if true will synchronize metric state across processes at each call of the
method :meth:`forward()`
        take_avg_loss: bool, if ``True`` then ``loss_sum_or_avg`` contains mean losses, otherwise it contains
            sums of losses
check_dist_sync_on_step: bool, if true will check if the metric is also correctly
calculated per batch per device (and not just at the end)
check_batch: bool, if true will check if the metric is also correctly
calculated across devices for each batch (and not just at the end)
"""
# Instantiate lightning metric
loss_metric = GlobalAverageLossMetric(dist_sync_on_step=dist_sync_on_step, take_avg_loss=take_avg_loss)
# verify loss works after being loaded from pickled state
pickled_metric = pickle.dumps(loss_metric)
loss_metric = pickle.loads(pickled_metric)
for i in range(rank, NUM_BATCHES, worldsize):
batch_result = loss_metric(loss_sum_or_avg[i], num_measurements[i])
if loss_metric.dist_sync_on_step:
if rank == 0:
ddp_loss_sum_or_avg = torch.stack([loss_sum_or_avg[i + r] for r in range(worldsize)])
ddp_num_measurements = torch.stack([num_measurements[i + r] for r in range(worldsize)])
sk_batch_result = reference_loss_func(ddp_loss_sum_or_avg, ddp_num_measurements, take_avg_loss)
# assert for dist_sync_on_step
if check_dist_sync_on_step:
if sk_batch_result.isnan():
assert batch_result.isnan()
else:
assert np.allclose(
batch_result.numpy(), sk_batch_result, atol=atol
), f"batch_result = {batch_result.numpy()}, sk_batch_result = {sk_batch_result}, i = {i}"
else:
ls = loss_sum_or_avg[i : i + 1]
nm = num_measurements[i : i + 1]
sk_batch_result = reference_loss_func(ls, nm, take_avg_loss)
# assert for batch
if check_batch:
if sk_batch_result.isnan():
assert batch_result.isnan()
else:
assert np.allclose(
batch_result.numpy(), sk_batch_result, atol=atol
), f"batch_result = {batch_result.numpy()}, sk_batch_result = {sk_batch_result}, i = {i}"
# check on all batches on all ranks
result = loss_metric.compute()
assert isinstance(result, torch.Tensor)
sk_result = reference_loss_func(loss_sum_or_avg, num_measurements, take_avg_loss)
# assert after aggregation
if sk_result.isnan():
assert result.isnan()
else:
assert np.allclose(result.numpy(), sk_result, atol=atol), f"result = {result.numpy()}, sk_result = {sk_result}"
class LossTester(MetricTester):
def run_class_loss_test(
self,
ddp: bool,
loss_sum_or_avg: torch.Tensor,
num_measurements: torch.Tensor,
dist_sync_on_step: bool,
take_avg_loss: bool,
check_dist_sync_on_step: bool = True,
check_batch: bool = True,
):
if ddp:
if sys.platform == "win32":
pytest.skip("DDP not supported on windows")
self.pool.starmap(
partial(
_loss_class_test,
loss_sum_or_avg=loss_sum_or_avg,
num_measurements=num_measurements,
dist_sync_on_step=dist_sync_on_step,
take_avg_loss=take_avg_loss,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
),
[(rank, self.poolSize) for rank in range(self.poolSize)],
)
else:
_loss_class_test(
0,
1,
loss_sum_or_avg=loss_sum_or_avg,
num_measurements=num_measurements,
dist_sync_on_step=dist_sync_on_step,
take_avg_loss=take_avg_loss,
check_dist_sync_on_step=check_dist_sync_on_step,
check_batch=check_batch,
atol=self.atol,
)
| NeMo-main | tests/collections/common/pl_utils.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | tests/collections/common/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2018-2020 William Falcon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
import torch
from .pl_utils import BATCH_SIZE, NUM_BATCHES, NUM_CLASSES
Input = namedtuple('Input', ["probs", "logits"])
ONLY_PROBS = Input(probs=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES), logits=None)
ONLY_LOGITS1 = Input(probs=None, logits=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES))
ONLY_LOGITS100 = Input(probs=None, logits=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES) * 200 - 100)
PROBS_AND_LOGITS = Input(
probs=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES),
logits=torch.rand(NUM_BATCHES, BATCH_SIZE, NUM_CLASSES) * 200 - 100,
)
NO_PROBS_NO_LOGITS = Input(probs=None, logits=None)
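# Note (descriptive, added for clarity): every non-None field above has shape
# (NUM_BATCHES, BATCH_SIZE, NUM_CLASSES). ONLY_LOGITS100 and PROBS_AND_LOGITS scale their raw
# logit scores into roughly [-100, 100) to also exercise numerically large logits.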
| NeMo-main | tests/collections/common/perplexity_inputs.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers import (
EnglishCharsTokenizer,
GermanCharsTokenizer,
IPATokenizer,
SpanishCharsTokenizer,
)
from nemo.collections.tts.g2p.models.i18n_ipa import IpaG2p
class TestTTSTokenizers:
PHONEME_DICT_DE = {
"HALLO": ["hˈaloː"],
"WELT": ["vˈɛlt"],
}
PHONEME_DICT_EN = {"HELLO": ["həˈɫoʊ"], "WORLD": ["ˈwɝɫd"], "CAFE": ["kəˈfeɪ"]}
PHONEME_DICT_ES = {
"BUENOS": ["bwˈenos"],
"DÍAS": ["dˈias"],
}
@staticmethod
def _parse_text(tokenizer, text):
tokens = tokenizer.encode(text)
chars = tokenizer.decode(tokens)
chars = chars.replace('|', '')
return chars, tokens
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_chars_tokenizer(self):
input_text = "Hello world!"
expected_output = "hello world!"
tokenizer = EnglishCharsTokenizer()
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
assert len(tokens) == len(input_text)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_chars_tokenizer_unknown_token(self):
input_text = "Hey 🙂 there"
expected_output = "hey there"
tokenizer = EnglishCharsTokenizer()
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
assert len(tokens) == len(expected_output)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_chars_tokenizer_accented_character(self):
input_text = "Let's drink at the café."
expected_output = "let's drink at the cafe."
tokenizer = EnglishCharsTokenizer()
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
assert len(tokens) == len(input_text)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_german_chars_tokenizer(self):
input_text = "Was ist dein Lieblingsgetränk?"
expected_output = "Was ist dein Lieblingsgetränk?"
tokenizer = GermanCharsTokenizer()
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
assert len(tokens) == len(input_text)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_spanish_chars_tokenizer(self):
input_text = "¿Cuál es su nombre?"
expected_output = "¿cuál es su nombre?"
tokenizer = SpanishCharsTokenizer()
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
assert len(tokens) == len(input_text)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_ipa_tokenizer(self):
input_text = "Hello world!"
expected_output = " həˈɫoʊ ˈwɝɫd! "
g2p = IpaG2p(phoneme_dict=self.PHONEME_DICT_EN)
tokenizer = IPATokenizer(g2p=g2p, locale=None, pad_with_space=True)
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_ipa_tokenizer_unsupported_locale(self):
g2p = IpaG2p(phoneme_dict=self.PHONEME_DICT_EN)
with pytest.raises(ValueError, match="Unsupported locale"):
IPATokenizer(g2p=g2p, locale="asdf")
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_ipa_tokenizer_de_de(self):
input_text = "Hallo welt"
expected_output = "hˈaloː vˈɛlt"
g2p = IpaG2p(phoneme_dict=self.PHONEME_DICT_DE, locale="de-DE")
tokenizer = IPATokenizer(g2p=g2p, locale="de-DE")
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_ipa_tokenizer_en_us(self):
input_text = "Hello café."
expected_output = "həˈɫoʊ kəˈfeɪ."
g2p = IpaG2p(phoneme_dict=self.PHONEME_DICT_EN)
tokenizer = IPATokenizer(g2p=g2p, locale="en-US")
tokenizer.tokens.extend("CAFE")
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_ipa_tokenizer_es_es(self):
input_text = "¡Buenos días!"
expected_output = "¡bwˈenos dˈias!"
g2p = IpaG2p(phoneme_dict=self.PHONEME_DICT_ES, locale="es-ES")
tokenizer = IPATokenizer(g2p=g2p, locale="es-ES")
chars, tokens = self._parse_text(tokenizer, input_text)
assert chars == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_ipa_tokenizer_fixed_vocab(self):
phoneme_dict = self.PHONEME_DICT_EN
phoneme_dict["WOUND"] = ["ˈwaʊnd", "ˈwund"]
g2p = IpaG2p(phoneme_dict=phoneme_dict)
assert "WOUND" in g2p.phoneme_dict
# fmt: off
symbol_vocab = {
'H', 'E', 'L', 'L', 'O',
'W', 'O', 'R', 'L', 'D',
'C', 'A', 'F', 'E',
'W', 'O', 'U', 'N', 'D',
'h', 'ə', 'ˈ', 'ɫ', 'o', 'ʊ',
'ˈ', 'w', 'ɝ', 'ɫ', 'd',
'k', 'ə', 'ˈ', 'f', 'e', 'ɪ',
'ˈ', 'w', 'a', 'ʊ', 'n', 'd',
'ˈ', 'w', 'u', 'n', 'd',
}
# fmt: on
fixed_vocab = symbol_vocab - {'ʊ', 'F'}
tokenizer = IPATokenizer(g2p=g2p, locale="en-US", fixed_vocab=fixed_vocab)
# Make sure phoneme_dict has been updated properly
assert "HELLO" not in tokenizer.g2p.phoneme_dict
assert "WORLD" in tokenizer.g2p.phoneme_dict
assert "CAFE" not in tokenizer.g2p.phoneme_dict
assert len(tokenizer.g2p.phoneme_dict["WOUND"]) == 1
assert tokenizer.g2p.phoneme_dict["WOUND"][0] == list("ˈwund")
chars, tokens = self._parse_text(tokenizer, "Hello, wound")
expected_output = "HELLO, ˈwund"
assert chars == expected_output
| NeMo-main | tests/collections/common/tokenizers/text_to_speech/test_tts_tokenizers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo.collections.common.tokenizers.text_to_speech.tokenizer_utils import (
any_locale_word_tokenize,
english_word_tokenize,
)
class TestTokenizerUtils:
@staticmethod
def _create_expected_output(words):
return [([word], False) for word in words]
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_word_tokenize(self):
input_text = "apple banana pear"
expected_output = self._create_expected_output(["apple", " ", "banana", " ", "pear"])
output = english_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_word_tokenize_with_punctuation(self):
input_text = "Hello, world!"
expected_output = self._create_expected_output(["hello", ", ", "world", "!"])
output = english_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_word_tokenize_with_contractions(self):
input_text = "It's a c'ntr'ction."
expected_output = self._create_expected_output(["it's", " ", "a", " ", "c'ntr'ction", "."])
output = english_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_word_tokenize_with_compound_words(self):
input_text = "Forty-two is no run-off-the-mill number."
expected_output = self._create_expected_output(
["forty-two", " ", "is", " ", "no", " ", "run-off-the-mill", " ", "number", "."]
)
output = english_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_english_word_tokenize_with_escaped(self):
input_text = "Leave |this part UNCHANGED|."
expected_output = [(["leave"], False), ([" "], False), (["this", "part", "UNCHANGED"], True), (["."], False)]
output = english_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_any_locale_word_tokenize(self):
input_text = "apple banana pear"
expected_output = self._create_expected_output(["apple", " ", "banana", " ", "pear"])
output = any_locale_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_any_locale_word_tokenize_with_accents(self):
input_text = "The naïve piñata at the café..."
expected_output = self._create_expected_output(
["The", " ", "naïve", " ", "piñata", " ", "at", " ", "the", " ", "café", "..."]
)
output = any_locale_word_tokenize(input_text)
assert output == expected_output
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_any_locale_word_tokenize_with_numbers(self):
input_text = r"Three times× four^teen ÷divided by [movies] on \slash."
expected_output = self._create_expected_output(
[
"Three",
" ",
"times",
"× ",
"four",
"^",
"teen",
" ÷",
"divided",
" ",
"by",
" [",
"movies",
"] ",
"on",
" \\",
"slash",
".",
]
)
output = any_locale_word_tokenize(input_text)
assert output == expected_output
| NeMo-main | tests/collections/common/tokenizers/text_to_speech/test_tokenizer_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.common.parts import adapter_modules
from nemo.core.classes.mixins import adapter_mixin_strategies
from nemo.utils import config_utils
class TestAdapterModules:
@pytest.mark.unit
def test_linear_adapter_config(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_modules.LinearAdapter, adapter_modules.LinearAdapterConfig, ignore_args=IGNORED_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_linear_adapter_init(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5)
with torch.no_grad():
assert adapter.module[-1].weight.sum() == 0
if hasattr(adapter.module[-1], 'bias') and adapter.module[-1].bias is not None:
assert adapter.module[-1].bias.sum() == 0
out = adapter(x)
assert out.sum().abs() <= 1e-8
@pytest.mark.unit
def test_linear_adapter_dropout(self):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5, dropout=0.5)
with torch.no_grad():
assert adapter.module[-1].weight.sum() == 0
if hasattr(adapter.module[-1], 'bias') and adapter.module[-1].bias is not None:
assert adapter.module[-1].bias.sum() == 0
out = adapter(x)
assert out.sum().abs() <= 1e-8
@pytest.mark.unit
@pytest.mark.parametrize('norm_position', ['pre', 'post'])
def test_linear_adapter_norm_position(self, norm_position):
torch.random.manual_seed(0)
x = torch.randn(2, 50)
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5, norm_position=norm_position)
with torch.no_grad():
assert adapter.module[-1].weight.sum() == 0
if hasattr(adapter.module[-1], 'bias') and adapter.module[-1].bias is not None:
assert adapter.module[-1].bias.sum() == 0
out = adapter(x)
assert out.sum().abs() <= 1e-8
@pytest.mark.unit
def test_linear_adapter_strategy(self):
adapter = adapter_modules.LinearAdapter(in_features=50, dim=5)
assert hasattr(adapter, 'adapter_strategy')
assert adapter.adapter_strategy is not None
# assert default strategy is set
assert isinstance(adapter.adapter_strategy, adapter_mixin_strategies.ResidualAddAdapterStrategy)
| NeMo-main | tests/collections/common/mixins/test_adapter_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
from typing import Tuple
import pytest
import torch
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from nemo.collections.common.parts.adapter_modules import LinearAdapter
from nemo.core import ModelPT, NeuralModule
from nemo.core.classes.mixins import adapter_mixin_strategies, adapter_mixins
from nemo.core.classes.mixins.adapter_mixins import AdapterModelPTMixin, AdapterModuleMixin
from nemo.utils import logging, logging_mode
class MockLinearAdapter1(LinearAdapter):
pass
class MockLinearAdapter2(LinearAdapter):
pass
class CommonModule(NeuralModule):
""" Define a default neural module (without adapter support)"""
def __init__(self):
super().__init__()
self.fc = torch.nn.Linear(50, 50)
self.bn = torch.nn.BatchNorm1d(50)
def forward(self, x):
x = self.fc(x)
x = self.bn(x)
out = x
return out
def num_params(self):
num: int = 0
for p in self.parameters():
if p.requires_grad:
num += p.numel()
return num
class CommonModuleAdapter(CommonModule, AdapterModuleMixin):
""" Subclass the DefaultModule, adding adapter module support"""
def forward(self, x):
x = super().forward(x)
if self.is_adapter_available():
# For testing purposes, cache the adapter names
self._adapter_names = self.get_enabled_adapters()
# call forward over model adapters, summing them up
x = self.forward_enabled_adapters(x)
return x
def get_accepted_adapter_types(self,) -> 'Set[type]':
types = super().get_accepted_adapter_types()
if len(types) == 0:
self.set_accepted_adapter_types(['nemo.collections.common.parts.adapter_modules.LinearAdapter'])
types = self.get_accepted_adapter_types()
return types
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre'):
cfg = {
'_target_': 'nemo.collections.common.parts.adapter_modules.LinearAdapter',
'in_features': in_features,
'dim': dim,
'norm_position': norm_pos,
}
return cfg
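# The dict returned above is a Hydra-style config: `_target_` is the class path to instantiate
# and the remaining keys are its constructor arguments. A minimal usage sketch (mirroring the
# tests below; the adapter name and dim are arbitrary example values):
#     module = CommonModuleAdapter()
#     module.add_adapter(name='adapter_0', cfg=get_adapter_cfg(in_features=50, dim=8))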
def get_classpath(cls):
return f'{cls.__module__}.{cls.__name__}'
if adapter_mixins.get_registered_adapter(CommonModule) is None:
adapter_mixins.register_adapter(CommonModule, CommonModuleAdapter)
class TestCommonAdapterModuleMixin:
@pytest.mark.unit
def test_get_accepted_adapter_types(self):
model = CommonModuleAdapter()
original_num_params = model.num_weights
assert not hasattr(model, '_accepted_adapter_types')
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
# Adding adapter will implicitly try to get accepted adapters, initializing the set
assert hasattr(model, '_accepted_adapter_types')
types = model.get_accepted_adapter_types()
types = list(types)
assert len(types) == 1
assert types[0].__name__ == 'LinearAdapter'
@pytest.mark.unit
def test_set_accepted_adapter_types_reset_types(self):
model = CommonModuleAdapter()
original_num_params = model.num_weights
assert not hasattr(model, '_accepted_adapter_types')
# Implicitly sets some types
model.get_accepted_adapter_types()
# Adding adapter will implicitly try to get accepted adapters, initializing the set
assert hasattr(model, '_accepted_adapter_types')
types = model.get_accepted_adapter_types()
types = list(types)
assert len(types) == 1
assert types[0].__name__ == 'LinearAdapter'
# Reset type now
model.set_accepted_adapter_types([])
assert hasattr(model, '_accepted_adapter_types')
types = model._accepted_adapter_types
assert len(types) == 0
# Since types are empty, get_types will set the default types
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_set_accepted_adapter_types_invalid_class(self):
model = CommonModuleAdapter()
original_num_params = model.num_weights
assert not hasattr(model, '_accepted_adapter_types')
# Explicitly set the accepted types to be the subclasses
model.set_accepted_adapter_types(
[
get_classpath(MockLinearAdapter1), # Pass string class path
MockLinearAdapter2, # Pass actual class itself
]
)
# Should throw error because the base class is now no longer in accepted list
# and the get_types method does not fill in the default
with pytest.raises(ValueError):
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
| NeMo-main | tests/collections/common/mixins/test_adapter_common_model_mixin.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.asr.parts.submodules import jasper
class TestJasperBlock:
@staticmethod
def jasper_base_config(**kwargs):
base = dict(
inplanes=16,
planes=8,
kernel_size=[11],
repeat=1,
stride=[1],
dilation=[1],
activation="relu",
conv_mask=True,
separable=False,
se=False,
)
base.update(kwargs)
return base
def check_module_exists(self, module, cls):
global _MODULE_EXISTS
_MODULE_EXISTS = 0
def _traverse(m):
if isinstance(m, cls):
global _MODULE_EXISTS
_MODULE_EXISTS += 1
module.apply(_traverse)
assert _MODULE_EXISTS > 0
@pytest.mark.unit
def test_basic_block(self):
config = self.jasper_base_config(residual=False)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
@pytest.mark.unit
def test_residual_block(self):
config = self.jasper_base_config(residual=True)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
@pytest.mark.unit
def test_basic_block_repeat(self):
config = self.jasper_base_config(residual=False, repeat=3)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
assert len(block.mconv) == 3 * 3 + 1 # (3 repeats x {1 conv + 1 norm + 1 dropout} + final conv)
@pytest.mark.unit
def test_basic_block_repeat_stride(self):
config = self.jasper_base_config(residual=False, repeat=3, stride=[2])
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 17]) # 131 // (stride ^ repeats)
assert ylen[0] == 17 # 131 // (stride ^ repeats)
assert len(block.mconv) == 3 * 3 + 1 # (3 repeats x {1 conv + 1 norm + 1 dropout} + final conv)
@pytest.mark.unit
def test_basic_block_repeat_stride_last(self):
config = self.jasper_base_config(residual=False, repeat=3, stride=[2], stride_last=True)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 66]) # 131 // stride
assert ylen[0] == 66 # 131 // stride
assert len(block.mconv) == 3 * 3 + 1 # (3 repeats x {1 conv + 1 norm + 1 dropout} + final conv)
@pytest.mark.unit
def test_basic_block_repeat_separable(self):
config = self.jasper_base_config(residual=False, repeat=3, separable=True)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
assert len(block.mconv) == 3 * 4 + 1 # (3 repeats x {1 dconv + 1 pconv + 1 norm + 1 dropout} + final conv)
@pytest.mark.unit
def test_basic_block_stride(self):
config = self.jasper_base_config(stride=[2], residual=False)
act = jasper.jasper_activations.get(config.pop('activation'))()
print(config)
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 66])
assert ylen[0] == 66
@pytest.mark.unit
def test_residual_block_stride(self):
config = self.jasper_base_config(stride=[2], residual=True, residual_mode='stride_add')
act = jasper.jasper_activations.get(config.pop('activation'))()
print(config)
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 66])
assert ylen[0] == 66
@pytest.mark.unit
def test_residual_block_activations(self):
for activation in jasper.jasper_activations.keys():
config = self.jasper_base_config(activation=activation)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
self.check_module_exists(block, act.__class__)
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
@pytest.mark.unit
def test_residual_block_normalizations(self):
NORMALIZATIONS = ["batch", "layer", "group"]
for normalization in NORMALIZATIONS:
config = self.jasper_base_config(normalization=normalization)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
@pytest.mark.unit
def test_residual_block_se(self):
config = self.jasper_base_config(se=True, se_reduction_ratio=8)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
self.check_module_exists(block, jasper.SqueezeExcite)
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
@pytest.mark.unit
def test_residual_block_asymmetric_pad_future_contexts(self):
# test future contexts at various values
# 0 = no future context
# 2 = limited future context
# 5 = symmetric context
# 8 = excess future context (more future context than present or past context)
future_contexts = [0, 2, 5, 8]
for future_context in future_contexts:
print(future_context)
config = self.jasper_base_config(future_context=future_context)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
self.check_module_exists(block, torch.nn.ConstantPad1d)
self.check_module_exists(block, jasper.MaskedConv1d)
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
assert block.mconv[0].pad_layer is not None
assert block.mconv[0]._padding == (config['kernel_size'][0] - 1 - future_context, future_context)
@pytest.mark.unit
def test_residual_block_asymmetric_pad_future_context_fallback(self):
# test future contexts at various values
# 15 = K < FC; fall back to symmetric context
future_context = 15
print(future_context)
config = self.jasper_base_config(future_context=future_context)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
x = torch.randn(1, 16, 131)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
self.check_module_exists(block, jasper.MaskedConv1d)
assert isinstance(block, jasper.JasperBlock)
assert y[0].shape == torch.Size([1, config['planes'], 131])
assert ylen[0] == 131
assert block.mconv[0].pad_layer is None
assert block.mconv[0]._padding == config['kernel_size'][0] // 2
@pytest.mark.unit
def test_padding_size_conv1d(self):
input_channels = 1
output_channels = 1
kernel_sizes = [3, 7, 11]
dilation_sizes = [2, 3, 4]
stride = 1
inp = torch.rand(2, 1, 40)
for kernel_size in kernel_sizes:
for dilation_size in dilation_sizes:
padding = jasper.get_same_padding(kernel_size, stride, dilation_size)
conv = torch.nn.Conv1d(
input_channels, output_channels, kernel_size=kernel_size, dilation=dilation_size, padding=padding
)
out = conv(inp)
assert out.shape == inp.shape
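        # Why this holds (illustrative note): for stride 1, a 1-D convolution produces
        #     L_out = L_in + 2 * padding - dilation * (kernel_size - 1)
        # so choosing padding = dilation * (kernel_size - 1) // 2 (with odd kernels) gives
        # L_out == L_in, which is exactly what the shape assertion above checks.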
class TestParallelBlock:
@staticmethod
    def construct_jasper_block(**config_kwargs):
config = TestJasperBlock.jasper_base_config(**config_kwargs)
act = jasper.jasper_activations.get(config.pop('activation'))()
block = jasper.JasperBlock(**config, activation=act)
return block
@pytest.mark.unit
def test_blocks_with_same_input_output_channels_sum_residual(self):
blocks = []
in_planes = 8
out_planes = 8
for _ in range(2):
            blocks.append(self.construct_jasper_block(inplanes=in_planes, planes=out_planes))
block = jasper.ParallelBlock(blocks, residual_mode='sum')
x = torch.randn(1, in_planes, 140)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert y[0].shape == torch.Size([1, out_planes, 140])
assert ylen[0] == 131
@pytest.mark.unit
def test_blocks_with_different_input_output_channels_sum_residual(self):
blocks = []
in_planes = 8
out_planes = 16
for _ in range(2):
            blocks.append(self.construct_jasper_block(inplanes=in_planes, planes=out_planes))
block = jasper.ParallelBlock(blocks, residual_mode='sum')
x = torch.randn(1, in_planes, 140)
xlen = torch.tensor([131])
with pytest.raises(RuntimeError):
block(([x], xlen))
@pytest.mark.unit
def test_blocks_with_same_input_output_channels_conv_residual(self):
blocks = []
in_planes = 8
out_planes = 8
for _ in range(2):
            blocks.append(self.construct_jasper_block(inplanes=in_planes, planes=out_planes))
block = jasper.ParallelBlock(blocks, residual_mode='conv', in_filters=in_planes, out_filters=out_planes)
x = torch.randn(1, in_planes, 140)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert y[0].shape == torch.Size([1, out_planes, 140])
assert ylen[0] == 131
@pytest.mark.unit
def test_blocks_with_different_input_output_channels_conv_residual(self):
blocks = []
in_planes = 8
out_planes = 16
for _ in range(2):
            blocks.append(self.construct_jasper_block(inplanes=in_planes, planes=out_planes))
block = jasper.ParallelBlock(blocks, residual_mode='conv', in_filters=in_planes, out_filters=out_planes)
x = torch.randn(1, in_planes, 140)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert y[0].shape == torch.Size([1, out_planes, 140])
assert ylen[0] == 131
@pytest.mark.unit
def test_single_block(self):
in_planes = 8
out_planes = 16
        blocks = [self.construct_jasper_block(inplanes=in_planes, planes=out_planes)]
block = jasper.ParallelBlock(blocks)
x = torch.randn(1, in_planes, 140)
xlen = torch.tensor([131])
y, ylen = block(([x], xlen))
assert y[0].shape == torch.Size([1, out_planes, 140])
assert ylen[0] == 131
@pytest.mark.unit
def test_tower_dropout(self):
blocks = []
in_planes = 8
out_planes = 8
for _ in range(2):
            blocks.append(self.construct_jasper_block(inplanes=in_planes, planes=out_planes))
block = jasper.ParallelBlock(blocks, aggregation_mode='dropout', block_dropout_prob=1.0)
x = torch.randn(1, in_planes, 140)
xlen = torch.tensor([131])
y, _ = block(([x], xlen))
# Tower dropout is 1.0, meaning that all towers have to be dropped, so only residual remains.
torch.testing.assert_close(y[0], x)
| NeMo-main | tests/collections/asr/test_jasper_block.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import numpy as np
import pytest
import torch
from nemo.collections.asr.modules.audio_preprocessing import AudioToSpectrogram, SpectrogramToAudio
try:
importlib.import_module('torchaudio')
HAVE_TORCHAUDIO = True
except ModuleNotFoundError:
HAVE_TORCHAUDIO = False
class TestAudioSpectrogram:
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_TORCHAUDIO, reason="Modules in this test require torchaudio")
@pytest.mark.parametrize('fft_length', [64, 512])
@pytest.mark.parametrize('num_channels', [1, 3])
def test_audio_to_spec(self, fft_length: int, num_channels: int):
"""Test output length for audio to spectrogram.
        Create signals of arbitrary length and check that the output
        length matches the actual transform length.
"""
hop_lengths = [fft_length // 2, fft_length // 3, fft_length // 4]
batch_size = 4
num_examples = 20
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_examples):
# Generate time-domain examples with different length
input_length = _rng.integers(low=fft_length, high=100 * fft_length, size=batch_size) # in samples
x = _rng.normal(size=(batch_size, num_channels, np.max(input_length)))
x = torch.tensor(x)
for b in range(batch_size):
x[b, :, input_length[b] :] = 0
for hop_length in hop_lengths:
# Prepare transform
audio2spec = AudioToSpectrogram(fft_length=fft_length, hop_length=hop_length)
# Transform the whole batch
batch_spec, batch_spec_len = audio2spec(input=x, input_length=torch.tensor(input_length))
for b in range(batch_size):
# Transform just the current example
b_spec, b_spec_len = audio2spec(input=x[b : b + 1, :, : input_length[b]])
actual_len = b_spec.size(-1)
# Check lengths
assert (
actual_len == b_spec_len
), f'Output length not matching for example ({n}, {b}) with length {input_length[n]} (hop_length={hop_length}): true {actual_len} vs calculated {b_spec_len}.'
assert (
actual_len == batch_spec_len[b]
), f'Output length not matching for example ({n}, {b}) with length {input_length[n]} (hop_length={hop_length}): true {actual_len} vs calculated batch len {batch_spec_len[b]}.'
# Make sure transforming a batch is the same as transforming individual examples
assert torch.allclose(
batch_spec[b, ..., :actual_len], b_spec, atol=atol
), f'Spectrograms not matching for example ({n}, {b}) with length {input_length[b]} (hop_length={hop_length})'
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_TORCHAUDIO, reason="Modules in this test require torchaudio")
@pytest.mark.parametrize('fft_length', [64, 512])
@pytest.mark.parametrize('num_channels', [1, 3])
def test_spec_to_audio(self, fft_length: int, num_channels: int):
"""Test output length for spectrogram to audio.
        Create signals of arbitrary length and check that the output
        length matches the actual transform length.
"""
hop_lengths = [fft_length // 2, fft_length // 3, fft_length // 4]
batch_size = 4
num_examples = 20
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_examples):
# Generate spectrogram examples with different lengths
input_length = _rng.integers(low=10, high=100, size=batch_size) # in frames
input_shape = (batch_size, num_channels, fft_length // 2 + 1, np.max(input_length))
spec = _rng.normal(size=input_shape) + 1j * _rng.normal(size=input_shape)
spec = torch.tensor(spec)
spec[..., 0, :] = spec[..., 0, :].real
spec[..., -1, :] = spec[..., -1, :].real
for b in range(batch_size):
spec[b, ..., input_length[b] :] = 0
for hop_length in hop_lengths:
# Prepare transform
spec2audio = SpectrogramToAudio(fft_length=fft_length, hop_length=hop_length)
# Transform the whole batch
batch_x, batch_x_len = spec2audio(input=spec, input_length=torch.tensor(input_length))
for b in range(batch_size):
# Transform just the current example
b_x, b_x_len = spec2audio(input=spec[b : b + 1, ..., : input_length[b]])
actual_len = b_x.size(-1)
# Check lengths
assert (
b_x_len == actual_len
), f'Output length not matching for example ({n}, {b}) with {input_length[b]} frames (hop_length={hop_length}): true {actual_len} vs calculated {b_x_len}.'
assert (
batch_x_len[b] == actual_len
), f'Output length not matching for example ({n}, {b}) with {input_length[b]} frames (hop_length={hop_length}): true {actual_len} vs calculated batch {batch_x_len[b]}.'
# Make sure transforming a batch is the same as transforming individual examples
if input_length[b] < spec.size(-1):
# Discard the last bit of the signal which differs due to number of frames in batch (with zero padded frames) vs individual (only valid frames).
# The reason for this difference is normalization with `window_sumsquare` of the inverse STFT. More specifically,
# batched and non-batched transform are using on a different number of frames.
tail_length = max(fft_length // 2 - hop_length, 0)
else:
tail_length = 0
valid_len = actual_len - tail_length
batch_x_valid = batch_x[b, :, :valid_len]
b_x_valid = b_x[..., :valid_len]
assert torch.allclose(
batch_x_valid, b_x_valid, atol=atol
), f'Signals not matching for example ({n}, {b}) with length {input_length[b]} (hop_length={hop_length}): max abs diff {torch.max(torch.abs(batch_x_valid-b_x_valid))} at {torch.argmax(torch.abs(batch_x_valid-b_x_valid))}'
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_TORCHAUDIO, reason="Modules in this test require torchaudio")
@pytest.mark.parametrize('fft_length', [128, 1024])
@pytest.mark.parametrize('num_channels', [1, 4])
def test_audio_to_spectrogram_reconstruction(self, fft_length: int, num_channels: int):
"""Test analysis and synthesis transform result in a perfect reconstruction.
"""
batch_size = 4
num_samples = fft_length * 50
num_examples = 25
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
hop_lengths = [fft_length // 2, fft_length // 4]
for hop_length in hop_lengths:
audio2spec = AudioToSpectrogram(fft_length=fft_length, hop_length=hop_length)
spec2audio = SpectrogramToAudio(fft_length=fft_length, hop_length=hop_length)
for n in range(num_examples):
x = _rng.normal(size=(batch_size, num_channels, num_samples))
x_spec, x_spec_length = audio2spec(input=torch.Tensor(x))
x_hat, x_hat_length = spec2audio(input=x_spec, input_length=x_spec_length)
assert np.allclose(
x_hat.cpu().detach().numpy(), x, atol=atol
), f'Reconstructed not matching for example {n} (hop length {hop_length})'
| NeMo-main | tests/collections/asr/test_audio_preprocessing.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import permutations
import pytest
import torch
from nemo.collections.asr.metrics.der import (
calculate_session_cpWER,
calculate_session_cpWER_bruteforce,
get_online_DER_stats,
get_partial_ref_labels,
)
def word_count(spk_transcript):
return sum([len(w.split()) for w in spk_transcript])
def calculate_wer_count(_ins, _del, _sub, ref_word_count):
return (_ins + _del + _sub) / ref_word_count
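# Illustrative example (hypothetical counts, not part of the original tests): one insertion,
# zero deletions and two substitutions against a 10-word reference give
#     calculate_wer_count(1, 0, 2, 10)  # -> 0.3
# and word_count(["aa bb cc", "dd ee"]) counts the reference words, here 5.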
def permuted_input_test(hyp, ref, calculated):
"""
    Randomly permute the input to check that the evaluation result stays the same.
"""
for hyp_permed in permutations(hyp):
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp_permed, spk_reference=ref)
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
class TestConcatMinPermWordErrorRate:
"""
Tests for cpWER calculation.
"""
@pytest.mark.unit
def test_cpwer_oneword(self):
hyp = ["oneword"]
ref = ["oneword"]
_ins, _del, _sub = 0, 0, 0
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp, spk_reference=ref)
ref_word_count = word_count(ref)
calculated = calculate_wer_count(_ins, _del, _sub, ref_word_count)
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
permuted_input_test(hyp, ref, calculated)
cpWER_perm, hyp_min_perm, ref_str = calculate_session_cpWER_bruteforce(spk_hypothesis=hyp, spk_reference=ref)
diff = torch.abs(torch.tensor(cpWER_perm - cpWER))
assert diff <= 1e-6
# Test with a substitution
hyp = ["wrongword"]
_ins, _del, _sub = 0, 0, 1
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp, spk_reference=ref)
calculated = calculate_wer_count(_ins, _del, _sub, ref_word_count)
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
permuted_input_test(hyp, ref, calculated)
cpWER_perm, hyp_min_perm, ref_str = calculate_session_cpWER_bruteforce(spk_hypothesis=hyp, spk_reference=ref)
diff = torch.abs(torch.tensor(cpWER_perm - cpWER))
assert diff <= 1e-6
@pytest.mark.unit
def test_cpwer_perfect(self):
hyp = ["ff", "aa bb cc", "dd ee"]
ref = ["aa bb cc", "dd ee", "ff"]
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp, spk_reference=ref)
calculated = 0
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
permuted_input_test(hyp, ref, calculated)
@pytest.mark.unit
    def test_cpwer_spk_confusion_and_asr_error(self):
hyp = ["aa bb c ff", "dd e ii jj kk", "hi"]
ref = ["aa bb cc ff", "dd ee gg jj kk", "hh ii"]
_ins, _del, _sub = 0, 1, 4
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp, spk_reference=ref)
ref_word_count = word_count(ref)
calculated = calculate_wer_count(_ins, _del, _sub, ref_word_count)
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
permuted_input_test(hyp, ref, calculated)
cpWER_perm, hyp_min_perm, ref_str = calculate_session_cpWER_bruteforce(spk_hypothesis=hyp, spk_reference=ref)
diff = torch.abs(torch.tensor(cpWER_perm - cpWER))
assert diff <= 1e-6
@pytest.mark.unit
def test_cpwer_undercount(self):
hyp = ["aa bb cc", "dd ee gg", "hh ii", "jj kk"]
ref = ["aa bb cc", "dd ee", "ff", "gg", "hh ii", "jj kk"]
_ins, _del, _sub = 0, 1, 0
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp, spk_reference=ref)
ref_word_count = word_count(ref)
calculated = calculate_wer_count(_ins, _del, _sub, ref_word_count)
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
cpWER_perm, hyp_min_perm, ref_str = calculate_session_cpWER_bruteforce(spk_hypothesis=hyp, spk_reference=ref)
diff = torch.abs(torch.tensor(cpWER_perm - cpWER))
assert diff <= 1e-6
@pytest.mark.unit
def test_cpwer_overcount(self):
hyp = ["aa bb cc", "dd ee gg hh", "ii jj kk"]
ref = ["aa bb cc", "dd ee ff gg hh ii jj kk"]
_ins, _del, _sub = 0, 1, 0
cpWER, hyp_min, ref_str = calculate_session_cpWER(spk_hypothesis=hyp, spk_reference=ref)
ref_word_count = word_count(ref)
calculated = calculate_wer_count(_ins, _del, _sub, ref_word_count)
diff = torch.abs(torch.tensor(calculated - cpWER))
assert diff <= 1e-6
cpWER_perm, hyp_min_perm, ref_str = calculate_session_cpWER_bruteforce(spk_hypothesis=hyp, spk_reference=ref)
diff = torch.abs(torch.tensor(cpWER_perm - cpWER))
assert diff <= 1e-6
@pytest.mark.parametrize(
"pred_labels, ref_labels, expected_output",
[
([], [], []),
(["0.0 1.0 speaker1"], [], []),
(["0.0 1.0 speaker1"], ["0.0 1.5 speaker1"], ["0.0 1.0 speaker1"]),
(["0.1 0.4 speaker1", "0.5 1.0 speaker2"], ["0.0 1.5 speaker1"], ["0.0 1.0 speaker1"]),
(
["0.5 1.0 speaker2", "0.1 0.4 speaker1"],
["0.0 1.5 speaker1"],
["0.0 1.0 speaker1"],
), # Order of prediction does not matter
(
["0.1 1.4 speaker1", "0.5 1.0 speaker2"],
["0.0 1.5 speaker1"],
["0.0 1.4 speaker1"],
), # Overlapping prediction
(
["0.1 0.6 speaker1", "0.2 1.5 speaker2"],
["0.5 1.0 speaker1", "1.01 2.0 speaker2"],
["0.5 1.0 speaker1", "1.01 1.5 speaker2"],
),
(
["0.0 2.0 speaker1"],
["0.0 2.0 speaker1", "1.0 3.0 speaker2", "0.0 5.0 speaker3"],
["0.0 2.0 speaker1", "1.0 2.0 speaker2", "0.0 2.0 speaker3"],
),
],
)
def test_get_partial_ref_labels(self, pred_labels, ref_labels, expected_output):
assert get_partial_ref_labels(pred_labels, ref_labels) == expected_output
@pytest.mark.parametrize(
"DER, CER, FA, MISS, diar_eval_count, der_stat_dict, deci, expected_der_dict, expected_der_stat_dict",
[
(
0.3,
0.1,
0.05,
0.15,
1,
{"cum_DER": 0, "cum_CER": 0, "avg_DER": 0, "avg_CER": 0, "max_DER": 0, "max_CER": 0},
3,
{"DER": 30.0, "CER": 10.0, "FA": 5.0, "MISS": 15.0},
{"cum_DER": 0.3, "cum_CER": 0.1, "avg_DER": 30.0, "avg_CER": 10.0, "max_DER": 30.0, "max_CER": 10.0},
),
(
0.1,
0.2,
0.03,
0.07,
2,
{"cum_DER": 0.3, "cum_CER": 0.3, "avg_DER": 15.0, "avg_CER": 15.0, "max_DER": 30.0, "max_CER": 10.0},
2,
{"DER": 10.0, "CER": 20.0, "FA": 3.0, "MISS": 7.0},
{"cum_DER": 0.4, "cum_CER": 0.5, "avg_DER": 20.0, "avg_CER": 25.0, "max_DER": 30.0, "max_CER": 20.0},
),
],
)
def test_get_online_DER_stats(
self, DER, CER, FA, MISS, diar_eval_count, der_stat_dict, deci, expected_der_dict, expected_der_stat_dict
):
actual_der_dict, actual_der_stat_dict = get_online_DER_stats(
DER, CER, FA, MISS, diar_eval_count, der_stat_dict, deci
)
assert actual_der_dict == expected_der_dict
assert actual_der_stat_dict == expected_der_stat_dict
| NeMo-main | tests/collections/asr/test_diar_metrics.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import multiprocessing
import os
from dataclasses import dataclass
from pathlib import Path
import pytest
from hydra.utils import instantiate
from nemo_text_processing.text_normalization.normalize import Normalizer
from omegaconf import OmegaConf
from nemo.collections.asr.data.text_to_text import TextToTextDataset, TextToTextItem, TextToTextIterableDataset
from nemo.collections.common import tokenizers
BASE_DIR = Path(__file__).parent.parent.parent.parent
@pytest.fixture(scope="module")
def set_multiprocessing_method():
"""
Try to set 'fork' multiprocessing method to avoid problems with multiprocessing in PyTest on MacOS
"""
if multiprocessing.get_start_method(allow_none=True) != "fork":
multiprocessing.set_start_method("fork", force=True)
@pytest.fixture(scope="module")
def speakers_path(tmp_path_factory):
path = tmp_path_factory.mktemp("textonly") / "speakers.txt"
with open(path, "w", encoding="utf-8") as f:
for speaker in [1, 2, 3]:
print(f"{speaker}", file=f)
return path
@pytest.fixture(scope="module")
def textonly_manifest_path(tmp_path_factory):
path = tmp_path_factory.mktemp("textonly") / "manifest.json"
texts = [
"lorem ipsum dolor sit amet consectetur adipiscing elit",
"nullam rhoncus sapien eros eu mollis sem euismod non",
]
with open(path, "w", encoding="utf-8") as f:
for text in texts:
print(json.dumps(dict(text=text, tts_text_normalized=text)), file=f)
return path
@pytest.fixture(scope="module")
def textonly_unnormalized_manifest_path(tmp_path_factory):
path = tmp_path_factory.mktemp("textonly") / "manifest_nonorm.json"
texts = [
(
"lorem ipsum dolor sit amet consectetur adipiscing elit",
"Lorem ipsum dolor sit amet, consectetur adipiscing elit.",
),
(
"nullam rhoncus sapien eros eu mollis sem euismod non nineteen",
"Nullam rhoncus sapien eros, eu mollis sem euismod non 19.",
),
]
with open(path, "w", encoding="utf-8") as f:
for asr_text, tts_text in texts:
print(json.dumps(dict(text=asr_text, tts_text=tts_text)), file=f)
return path
@pytest.fixture(scope="module")
def tts_normalizer():
normalizer = Normalizer(lang="en", input_case="cased", overwrite_cache=True, cache_dir=None,)
return normalizer
@pytest.fixture(scope="module")
def asr_tokenizer(test_data_dir):
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
return tokenizer
@pytest.fixture(scope="module")
def tts_tokenizer():
@dataclass
class G2PConfig:
_target_: str = "nemo.collections.tts.g2p.models.en_us_arpabet.EnglishG2p"
phoneme_dict: str = str(BASE_DIR / "scripts/tts_dataset_files/cmudict-0.7b_nv22.10")
heteronyms: str = str(BASE_DIR / "scripts/tts_dataset_files/heteronyms-052722")
phoneme_probability: float = 0.5
@dataclass
class TextTokenizerCfg:
_target_: str = "nemo.collections.common.tokenizers.text_to_speech.tts_tokenizers.EnglishPhonemesTokenizer"
punct: bool = True
stresses: bool = True
chars: bool = True
apostrophe: bool = True
pad_with_space: bool = True
add_blank_at: bool = True
g2p: G2PConfig = G2PConfig()
config = OmegaConf.create(OmegaConf.to_yaml(TextTokenizerCfg()))
return instantiate(config)
class TestTextToTextDataset:
@pytest.mark.unit
@pytest.mark.parametrize("tokenizer_workers", [1, 2])
def test_text_to_text_dataset(
self,
textonly_manifest_path,
tokenizer_workers,
speakers_path,
asr_tokenizer,
tts_tokenizer,
tts_normalizer,
set_multiprocessing_method,
):
"""
Test map-style text-to-text dataset with ASR and TTS tokenizers with normalized text
"""
dataset = TextToTextDataset(
manifest_filepath=textonly_manifest_path,
speakers_filepath=speakers_path,
asr_tokenizer=asr_tokenizer,
asr_use_start_end_token=False,
tts_parser=tts_tokenizer,
tts_text_pad_id=0,
tts_text_normalizer=tts_normalizer,
tts_text_normalizer_call_kwargs=dict(),
tokenizer_workers=tokenizer_workers,
)
assert len(dataset) == 2
item = dataset[0]
assert isinstance(item, TextToTextItem)
@pytest.mark.unit
def test_text_to_text_dataset_unnormalized(
self, textonly_unnormalized_manifest_path, speakers_path, asr_tokenizer, tts_tokenizer, tts_normalizer
):
"""
Test TextToTextDataset with ASR and TTS tokenizers with non-normalized text
"""
dataset = TextToTextDataset(
manifest_filepath=textonly_unnormalized_manifest_path,
speakers_filepath=speakers_path,
asr_tokenizer=asr_tokenizer,
asr_use_start_end_token=False,
tts_parser=tts_tokenizer,
tts_text_pad_id=0,
tts_text_normalizer=tts_normalizer,
tts_text_normalizer_call_kwargs=dict(),
)
assert len(dataset) == 2
@pytest.mark.unit
@pytest.mark.parametrize("tokenizer_workers", [1, 2])
def test_text_to_text_iterable_dataset(
self,
textonly_manifest_path,
tokenizer_workers,
speakers_path,
asr_tokenizer,
tts_tokenizer,
tts_normalizer,
set_multiprocessing_method,
):
"""
Test the iterable text-to-text dataset with ASR and TTS tokenizers on normalized text.
"""
dataset = TextToTextIterableDataset(
manifest_filepath=textonly_manifest_path,
speakers_filepath=speakers_path,
asr_tokenizer=asr_tokenizer,
asr_use_start_end_token=False,
tts_parser=tts_tokenizer,
tts_text_pad_id=0,
tts_text_normalizer=tts_normalizer,
tts_text_normalizer_call_kwargs=dict(),
tokenizer_workers=tokenizer_workers,
)
assert len(dataset) == 2
item = next(iter(dataset))
assert isinstance(item, TextToTextItem)
| NeMo-main | tests/collections/asr/test_text_to_text_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Type
import numpy as np
import pytest
import torch
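# Shared helpers for RNNT loss tests: RNNTTestHelper runs a loss function forward and backward
# and returns per-sample costs plus gradients w.r.t. the activations, while RnntLossSampleData
# bundles inputs together with (optional) expected costs and gradients.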
class RNNTTestHelper:
@staticmethod
def wrap_and_call(fn, acts, labels, device, input_lengths=None, target_lengths=None):
if not torch.is_tensor(acts):
acts = torch.FloatTensor(acts)
if 'cuda' in device:
acts = acts.cuda()
if not acts.requires_grad:
acts.requires_grad = True
labels = torch.LongTensor(labels)
if input_lengths is None:
lengths = [acts.shape[1]] * acts.shape[0]
lengths = torch.LongTensor(lengths)
else:
lengths = input_lengths
if target_lengths is None:
label_lengths = [len(l) for l in labels]
label_lengths = torch.LongTensor(label_lengths)
else:
label_lengths = target_lengths
if 'cuda' in device:
labels = labels.cuda()
lengths = lengths.cuda()
label_lengths = label_lengths.cuda()
costs = fn(acts, labels, lengths, label_lengths)
cost = torch.sum(costs)
cost.backward()
if 'cuda' in device:
torch.cuda.synchronize()
if acts.grad is not None:
grad = acts.grad.data.cpu().numpy()
else:
grad = None
return costs.data.cpu().numpy(), grad
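# Container for one RNNT loss test case. Logits follow the joint-network layout
# [batch, T, U + 1, classes]; expected_cost / expected_grads are left unset for the random samples.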
@dataclass
class RnntLossSampleData:
vocab_size: int
blank_id: int
logits: torch.Tensor
targets: torch.Tensor
input_lengths: torch.Tensor
target_lengths: torch.Tensor
expected_cost: Optional[torch.Tensor] = None
expected_grads: Optional[torch.Tensor] = None
@classmethod
def get_sample_small(cls) -> "RnntLossSampleData":
activations = np.array(
[
[
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.2, 0.1, 0.1], [0.7, 0.1, 0.2, 0.1, 0.1]],
]
]
)
labels = np.asarray([[1, 2]])
expected_cost = [4.495666]
expected_grads = np.array(
[
[
[
[-0.13116688, -0.3999269, 0.17703125, 0.17703125, 0.17703125],
[-0.18572757, 0.12247056, -0.18168412, 0.12247056, 0.12247056],
[-0.32091254, 0.06269141, 0.06928472, 0.12624499, 0.06269141],
],
[
[0.05456069, -0.21824276, 0.05456069, 0.05456069, 0.05456069],
[0.12073959, 0.12073959, -0.48295835, 0.12073959, 0.12073959],
[-0.6925882, 0.16871116, 0.18645467, 0.16871116, 0.16871116],
],
]
]
)
return RnntLossSampleData(
vocab_size=3,
blank_id=0,
logits=torch.from_numpy(activations).to(torch.float32),
targets=torch.from_numpy(labels),
input_lengths=torch.tensor([2]),
target_lengths=torch.tensor([2]),
expected_cost=torch.tensor(expected_cost).to(torch.float32),
expected_grads=torch.from_numpy(expected_grads),
)
@classmethod
def get_sample_small_blank_last(cls) -> "RnntLossSampleData":
activations = np.array(
[
[
[[0.0, 1.0, 3.0], [0.0, 2.0, 3.0], [1.0, 1.0, 3.0], [2.0, 3.0, 2.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [2.0, 2.0, 0.0]],
[[0.0, 2.0, 5.0], [0.0, 3.0, 5.0], [1.0, 2.0, 5.0], [2.0, 4.0, 4.0]],
[[0.0, 3.0, 4.0], [0.0, 4.0, 4.0], [1.0, 3.0, 4.0], [2.0, 5.0, 3.0]],
[[2.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 1.0], [4.0, 4.0, 0.0]],
]
]
)
labels = np.array([[0, 1, 0]])
expected_cost = [6.789285182952881]
expected_grads = np.array(
[
[
[
[-0.03551076725125313, 0.11419519782066345, -0.07868456840515137],
[0.0027224558871239424, 0.00704305712133646, -0.009765520691871643],
[0.0013856772566214204, 0.0013924005907028913, -0.0027780719101428986],
[1.4249643527364242e-06, 3.873454716085689e-06, -5.298420546751004e-06],
],
[
[-0.1934257447719574, 0.19551163911819458, -0.0020859241485595703],
[0.07043898105621338, 0.05738453567028046, -0.12782356142997742],
[0.061031512916088104, 0.02286236733198166, -0.08389391005039215],
[0.0005252412520349026, 0.0005252412520349026, -0.0010504829697310925],
],
[
[-0.007841046899557114, 0.025142310187220573, -0.017301201820373535],
[0.0019501042552292347, 0.0005148053169250488, -0.0024650096893310547],
[0.0027856370434165, 0.008609085343778133, -0.01139475405216217],
[9.526080975774676e-05, 0.0007038871408440173, -0.000799147819634527],
],
[
[-0.01533521432429552, 0.1386115401983261, -0.12327653169631958],
[0.002850571647286415, -0.006693005561828613, 0.003842458128929138],
[0.009236274287104607, 0.08995233476161957, -0.0991886705160141],
[0.0001865450612967834, 0.0037468576338142157, -0.003933403175324202],
],
[
[-0.2888762652873993, 0.211185485124588, 0.07769080251455307],
[0.15952755510807037, -0.2182144820690155, 0.05868690833449364],
[-0.3332723379135132, 0.2436419129371643, 0.0896308496594429],
[0.4954628646373749, 0.4954628646373749, -0.9909257292747498],
],
]
]
)
return RnntLossSampleData(
vocab_size=3,
blank_id=2,
logits=torch.from_numpy(activations).to(torch.float32),
targets=torch.from_numpy(labels),
input_lengths=torch.tensor([5]),
target_lengths=torch.tensor([3]),
expected_cost=torch.tensor(expected_cost).to(torch.float32),
expected_grads=torch.from_numpy(expected_grads),
)
@classmethod
def get_sample_medium(cls) -> "RnntLossSampleData":
# minibatch x T x U x alphabet_size
activations = [
[
[
[0.06535690384862791, 0.7875301411923206, 0.08159176605666074],
[0.5297155426466327, 0.7506749639230854, 0.7541348379087998],
[0.6097641124736383, 0.8681404965673826, 0.6225318186056529],
],
[
[0.6685222872103057, 0.8580392805336061, 0.16453892311765583],
[0.989779515236694, 0.944298460961015, 0.6031678586829663],
[0.9467833543605416, 0.666202507295747, 0.28688179752461884],
],
[
[0.09418426230195986, 0.3666735970751962, 0.736168049462793],
[0.1666804425271342, 0.7141542198635192, 0.3993997272216727],
[0.5359823524146038, 0.29182076440286386, 0.6126422611507932],
],
[
[0.3242405528768486, 0.8007644367291621, 0.5241057606558068],
[0.779194617063042, 0.18331417220174862, 0.113745182072432],
[0.24022162381327106, 0.3394695622533106, 0.1341595066017014],
],
],
[
[
[0.5055615569388828, 0.051597282072282646, 0.6402903936686337],
[0.43073311517251, 0.8294731834714112, 0.1774668847323424],
[0.3207001991262245, 0.04288308912457006, 0.30280282975568984],
],
[
[0.6751777088333762, 0.569537369330242, 0.5584738347504452],
[0.08313242153985256, 0.06016544344162322, 0.10795752845152584],
[0.7486153608562472, 0.943918041459349, 0.4863558118797222],
],
[
[0.4181986264486809, 0.6524078485043804, 0.024242983423721887],
[0.13458171554507403, 0.3663418070512402, 0.2958297395361563],
[0.9236695822497084, 0.6899291482654177, 0.7418981733448822],
],
[
[0.25000547599982104, 0.6034295486281007, 0.9872887878887768],
[0.5926057265215715, 0.8846724004467684, 0.5434495396894328],
[0.6607698886038497, 0.3771277082495921, 0.3580209022231813],
],
],
]
expected_cost = [4.2806528590890736, 3.9384369822503591]
expected_grads = [
[
[
[-1.86843902e-01, -6.25548810e-02, 2.49398798e-01],
[-2.03376666e-01, 2.02399328e-01, 9.77333169e-04],
[-1.41016081e-01, 7.91234672e-02, 6.18926100e-02],
],
[
[-1.15517676e-02, -8.12802389e-02, 9.28319991e-02],
[-1.54257029e-01, 2.29432687e-01, -7.51756504e-02],
[-2.46593088e-01, 1.46404594e-01, 1.00188486e-01],
],
[
[-1.29182907e-02, -6.15932420e-02, 7.45115355e-02],
[-5.59857301e-02, 2.19830811e-01, -1.63845062e-01],
[-4.97626871e-01, 2.09239945e-01, 2.88386941e-01],
],
[
[1.36048580e-02, -3.02196294e-02, 1.66147724e-02],
[1.13924511e-01, 6.27811998e-02, -1.76705718e-01],
[-6.67078257e-01, 3.67658824e-01, 2.99419403e-01],
],
],
[
[
[-3.56343776e-01, -5.53474613e-02, 4.11691219e-01],
[-9.69219357e-02, 2.94591039e-02, 6.74628317e-02],
[-6.35175705e-02, 2.76544970e-02, 3.58630717e-02],
],
[
[-1.54499024e-01, -7.39420280e-02, 2.28441030e-01],
[-1.66789949e-01, -8.78955179e-05, 1.66877866e-01],
[-1.72369644e-01, 1.05565332e-01, 6.68043196e-02],
],
[
[2.38748826e-02, -1.18255816e-01, 9.43809375e-02],
[-1.04707085e-01, -1.08934477e-01, 2.13641584e-01],
[-3.69844258e-01, 1.80118099e-01, 1.89726159e-01],
],
[
[2.57137045e-02, -7.94617534e-02, 5.37480488e-02],
[1.22328237e-01, -2.38788679e-01, 1.16460443e-01],
[-5.98686993e-01, 3.02203178e-01, 2.96483815e-01],
],
],
]
activations = np.array(activations)
labels = np.array([[1, 2], [1, 1]])
expected_grads = np.array(expected_grads)
return RnntLossSampleData(
vocab_size=3,
blank_id=0,
logits=torch.from_numpy(activations).to(torch.float32),
targets=torch.from_numpy(labels),
input_lengths=torch.tensor([4, 4]),
target_lengths=torch.tensor([2, 2]),
expected_cost=torch.tensor(expected_cost).to(torch.float32),
expected_grads=torch.from_numpy(expected_grads),
)
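# Random-sample factories: no expected_cost / expected_grads are attached, so consuming tests
# must supply their own reference, e.g. by comparing two loss implementations against each other.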
@classmethod
def get_sample_small_random(cls, blank_first: bool, device=torch.device("cpu")) -> "RnntLossSampleData":
vocab_size = 4
blank_id = 0 if blank_first else vocab_size - 1
num_frames = 4
text_len = 2
if blank_first:
text = np.asarray([1, 3])
else:
text = np.asarray([0, 2])
targets = torch.from_numpy(text).unsqueeze(0).to(device)
logits = torch.rand([1, num_frames, text_len + 1, vocab_size], requires_grad=True, device=device)
input_lengths = torch.tensor([num_frames], device=device)
target_lengths = torch.tensor([text_len], device=device)
return RnntLossSampleData(
vocab_size=vocab_size,
blank_id=blank_id,
logits=logits,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
)
@classmethod
def get_sample_medium_random_var_size(cls, blank_first: bool, device=torch.device("cpu")) -> "RnntLossSampleData":
vocab_size = 32
blank_id = 0 if blank_first else vocab_size - 1
num_frames = 32
text_len = 27
min_symbol = 1 if blank_first else 0
max_symbol = vocab_size if blank_first else vocab_size - 1
batch_size = 4
rs = np.random.RandomState(2021)
text = rs.randint(min_symbol, max_symbol, size=(batch_size, text_len))
targets = torch.from_numpy(text).to(device)
logits = torch.rand([batch_size, num_frames, text_len + 1, vocab_size], requires_grad=True, device=device)
input_lengths = torch.tensor([num_frames, num_frames // 2, text_len, text_len // 2], device=device).long()
target_lengths = torch.tensor([text_len, text_len - 1, text_len - 3, text_len - 10], device=device)
return RnntLossSampleData(
vocab_size=vocab_size,
blank_id=blank_id,
logits=logits,
targets=targets,
input_lengths=input_lengths,
target_lengths=target_lengths,
)
@pytest.fixture(scope="session")
def rnnt_test_helper() -> Type[RNNTTestHelper]:
return RNNTTestHelper
@pytest.fixture(scope="session")
def rnn_loss_sample_data() -> Type[RnntLossSampleData]:
return RnntLossSampleData
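# Typical usage in a test (sketch only; `my_loss_fn` is an illustrative placeholder):
#   data = rnn_loss_sample_data.get_sample_small()
#   costs, grads = rnnt_test_helper.wrap_and_call(my_loss_fn, data.logits, data.targets, "cpu")
#   np.testing.assert_allclose(costs, data.expected_cost.numpy(), rtol=1e-5)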
| NeMo-main | tests/collections/asr/conftest.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import io
import random
import string
from copy import deepcopy
from typing import List
from unittest.mock import Mock, patch
import pytest
import torch
from torchmetrics.audio.snr import SignalNoiseRatio
from nemo.collections.asr.metrics.audio import AudioMetricWrapper
from nemo.collections.asr.metrics.rnnt_wer import RNNTWER
from nemo.collections.asr.metrics.rnnt_wer_bpe import RNNTBPEWER
from nemo.collections.asr.metrics.wer import (
WER,
CTCDecoding,
CTCDecodingConfig,
word_error_rate,
word_error_rate_detail,
word_error_rate_per_utt,
)
from nemo.collections.asr.metrics.wer_bpe import WERBPE, CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.collections.common.tokenizers import CharTokenizer
from nemo.utils.config_utils import assert_dataclass_signature_match
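# Helper that builds a CharTokenizer from an in-memory vocabulary by patching Path.open,
# so no tokenizer file needs to exist on disk.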
def build_char_tokenizer_with_vocabulary(vocabulary: List[str]) -> CharTokenizer:
with patch('pathlib.Path.open', Mock(return_value=io.StringIO('\n'.join([repr(char) for char in vocabulary])))):
char_tokenizer = CharTokenizer('a_path_which_will_not_be_used')
# For some reason `WERBPE` takes the vocabulary size from the inner tokenizer, so mock the inner tokenizer.
setattr(char_tokenizer, "tokenizer", Mock(vocab_size=char_tokenizer.vocab_size))
return char_tokenizer
class TestWordErrorRate:
vocabulary = [' '] + list(string.ascii_lowercase) + ["'"]
char_tokenizer = build_char_tokenizer_with_vocabulary(vocabulary)
def __string_to_ctc_tensor(self, txt: str, use_tokenizer: bool, as_logprobs: bool = False) -> torch.Tensor:
# This function emulates what CTC output could look like for txt
if use_tokenizer:
blank_id = self.char_tokenizer.vocab_size
string_in_id_form = self.char_tokenizer.text_to_ids(txt)
else:
blank_id = len(self.vocabulary)
char_to_ind = dict([(self.vocabulary[i], i) for i in range(len(self.vocabulary))])
string_in_id_form = [char_to_ind[c] for c in txt]
ctc_list = []
prev_id = -1
for c in string_in_id_form:
# when a character is repeated we need to insert the CTC blank symbol
if c != prev_id:
ctc_list.append(c)
else:
ctc_list.append(blank_id)
ctc_list.append(c)
prev_id = c
tensor = torch.Tensor(ctc_list).unsqueeze(0)
if not as_logprobs:
return tensor
else:
tensor = tensor.to(torch.int64)
new_tensor = torch.nn.functional.one_hot(tensor[0], num_classes=blank_id)
new_tensor = new_tensor.unsqueeze(0)  # [1, T, V]
return new_tensor
def __reference_string_to_tensor(self, txt: str, use_tokenizer: bool) -> torch.Tensor:
# Reference tensors aren't produced by CTC logic
if use_tokenizer:
string_in_id_form = self.char_tokenizer.text_to_ids(txt)
else:
char_to_ind = dict([(self.vocabulary[i], i) for i in range(len(self.vocabulary))])
string_in_id_form = [char_to_ind[c] for c in txt]
return torch.Tensor(string_in_id_form).unsqueeze(0)
def get_wer(self, wer, prediction: str, reference: str, use_tokenizer: bool):
predictions_tensor = self.__string_to_ctc_tensor(prediction, use_tokenizer)
targets_tensor = self.__reference_string_to_tensor(reference, use_tokenizer)
if wer.batch_dim_index > 0:
targets_tensor.transpose_(0, 1)
predictions_tensor.transpose_(0, 1)
wer(predictions=predictions_tensor, targets=targets_tensor, target_lengths=torch.tensor([len(reference)]))
res, _, _ = wer.compute()
res = res.detach().cpu()
# return res[0] / res[1]
return res.item()
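# WER = (substitutions + deletions + insertions) / number of reference words. For example,
# hyp 'ducati motorcycle' vs ref 'motorcycle' is one insertion over one reference word -> 1.0,
# and hyp 'G P U' vs ref 'GPU' is one substitution plus two insertions over one word -> 3.0.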
@pytest.mark.unit
def test_wer_function(self):
assert word_error_rate(hypotheses=['cat'], references=['cot']) == 1.0
assert word_error_rate(hypotheses=['GPU'], references=['G P U']) == 1.0
assert word_error_rate(hypotheses=['G P U'], references=['GPU']) == 3.0
assert word_error_rate(hypotheses=['ducati motorcycle'], references=['motorcycle']) == 1.0
assert word_error_rate(hypotheses=['ducati motorcycle'], references=['ducuti motorcycle']) == 0.5
assert word_error_rate(hypotheses=['a B c'], references=['a b c']) == 1.0 / 3.0
assert word_error_rate_detail(hypotheses=['cat'], references=['cot'])[0] == 1.0
assert word_error_rate_detail(hypotheses=['GPU'], references=['G P U'])[0] == 1.0
assert word_error_rate_detail(hypotheses=['G P U'], references=['GPU'])[0] == 3.0
assert word_error_rate_detail(hypotheses=['ducati motorcycle'], references=['motorcycle'])[0] == 1.0
assert word_error_rate_detail(hypotheses=['ducati motorcycle'], references=['ducuti motorcycle'])[0] == 0.5
assert word_error_rate_detail(hypotheses=['a B c'], references=['a b c'])[0] == 1.0 / 3.0
assert word_error_rate_detail(hypotheses=['cat'], references=['']) == (
float("inf"),
0,
float("inf"),
float("inf"),
float("inf"),
)
assert word_error_rate_detail(hypotheses=['cat', ''], references=['', 'gpu']) == (2.0, 1, 1.0, 1.0, 0.0,)
assert word_error_rate_detail(hypotheses=['cat'], references=['cot']) == (1.0, 1, 0.0, 0.0, 1.0)
assert word_error_rate_detail(hypotheses=['G P U'], references=['GPU']) == (3.0, 1, 2.0, 0.0, 1.0)
assert word_error_rate_detail(hypotheses=[''], references=['ducuti motorcycle'], use_cer=True) == (
1.0,
17,
0.0,
1.0,
0.0,
)
assert word_error_rate_per_utt(hypotheses=['kat'], references=['cat']) == ([1.0], 1.0)
assert word_error_rate_per_utt(hypotheses=['cat', ''], references=['', 'gpu']) == ([float("inf"), 1.0], 2.0)
assert word_error_rate_per_utt(
hypotheses=['ducuti motorcycle', 'G P U'], references=['ducati motorcycle', 'GPU']
) == ([0.5, 3.0], 4 / 3)
assert word_error_rate_per_utt(
hypotheses=['ducuti motorcycle', 'G P U'], references=['ducati motorcycle', 'GPU'], use_cer=True
) == ([1 / 17, 2 / 3], 0.15)
@pytest.mark.unit
@pytest.mark.parametrize("batch_dim_index", [0, 1])
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_wer_metric_simple(self, batch_dim_index, test_wer_bpe):
assert self.get_wer_ctc('cat', 'cot', test_wer_bpe) == 1.0
assert self.get_wer_ctc('gpu', 'g p u', test_wer_bpe) == 1.0
assert self.get_wer_ctc('g p u', 'gpu', test_wer_bpe) == 3.0
assert self.get_wer_ctc('ducati motorcycle', 'motorcycle', test_wer_bpe) == 1.0
assert self.get_wer_ctc('ducati motorcycle', 'ducuti motorcycle', test_wer_bpe) == 0.5
assert abs(self.get_wer_ctc('a f c', 'a b c', test_wer_bpe) - 1.0 / 3.0) < 1e-6
@pytest.mark.unit
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_wer_metric_randomized(self, test_wer_bpe):
"""This test relies on correctness of word_error_rate function."""
def __random_string(length):
return ''.join(random.choice(''.join(self.vocabulary)) for _ in range(length))
for test_id in range(256):
n1 = random.randint(1, 512)
n2 = random.randint(1, 512)
s1 = __random_string(n1)
s2 = __random_string(n2)
# skip empty strings as reference
if s2.strip():
assert (
abs(
self.get_wer_ctc(prediction=s1, reference=s2, test_wer_bpe=test_wer_bpe)
- word_error_rate(hypotheses=[s1], references=[s2])
)
< 1e-6
)
@pytest.mark.unit
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_wer_metric_decode(self, test_wer_bpe):
decoding_config = {'strategy': 'greedy'}
if test_wer_bpe:
decoding = CTCBPEDecoding(decoding_config, self.char_tokenizer)
wer = WERBPE(decoding, use_cer=False)
else:
decoding = CTCDecoding(decoding_config, self.vocabulary.copy())
wer = WER(decoding, use_cer=False)
tokens = self.__string_to_ctc_tensor('cat', use_tokenizer=test_wer_bpe)[0].int().numpy().tolist()
assert tokens == [3, 1, 20]
tokens_decoded = wer.decoding.decode_ids_to_tokens(tokens)
assert tokens_decoded == ['c', 'a', 't']
str_decoded = wer.decoding.decode_tokens_to_str(tokens)
assert str_decoded == 'cat'
@pytest.mark.unit
@pytest.mark.parametrize("batch_dim_index", [0, 1])
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_wer_metric_return_hypothesis(self, batch_dim_index, test_wer_bpe):
decoding_config = {'strategy': 'greedy', 'batch_dim_index': batch_dim_index}
wer = WER(CTCDecoding(decoding_config, self.vocabulary), use_cer=False)
tensor = self.__string_to_ctc_tensor('cat', test_wer_bpe, as_logprobs=True).int()
if batch_dim_index > 0:
tensor.transpose_(0, 1)
# pass batchsize 1 tensor, get back list of length 1 Hypothesis
wer.decoding.preserve_alignments = True
hyp, _ = wer.decoding.ctc_decoder_predictions_tensor(tensor, return_hypotheses=True)
hyp = hyp[0]
assert isinstance(hyp, Hypothesis)
sample = tensor[0] if batch_dim_index == 0 else tensor[:, 0, :]
assert (hyp.y_sequence - torch.tensor([3, 1, 20])).sum() == 0
assert hyp.score == 3 # sum of number of tokens in one hot representation
assert hyp.text == 'cat'
assert (hyp.alignments[0] == sample).all()
assert hyp.length == 0
length = torch.tensor([tensor.shape[1 - batch_dim_index]], dtype=torch.long)
# pass batchsize 1 tensor, get back list of length 1 Hypothesis [add length info]
hyp, _ = wer.decoding.ctc_decoder_predictions_tensor(tensor, decoder_lengths=length, return_hypotheses=True)
hyp = hyp[0]
assert isinstance(hyp, Hypothesis)
assert hyp.length == 3
@pytest.mark.unit
@pytest.mark.parametrize("batch_dim_index", [0, 1])
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_wer_metric_subword_return_hypothesis(self, batch_dim_index, test_wer_bpe):
decoding_config = {'strategy': 'greedy', 'batch_dim_index': batch_dim_index}
wer = WERBPE(CTCBPEDecoding(decoding_config, self.char_tokenizer), use_cer=False)
tensor = self.__string_to_ctc_tensor('cat', test_wer_bpe, as_logprobs=True).int()
if batch_dim_index > 0:
tensor.transpose_(0, 1)
# pass batchsize 1 tensor, get back list of length 1 Hypothesis
wer.decoding.preserve_alignments = True
hyp, _ = wer.decoding.ctc_decoder_predictions_tensor(tensor, return_hypotheses=True)
hyp = hyp[0]
assert isinstance(hyp, Hypothesis)
sample = tensor[0] if batch_dim_index == 0 else tensor[:, 0, :]
assert (hyp.y_sequence - torch.tensor([3, 1, 20])).sum() == 0
assert hyp.score == 3 # sum of number of tokens in one hot representation
assert hyp.text == 'cat'
assert (hyp.alignments[0] == sample).all()
assert hyp.length == 0
length = torch.tensor([tensor.shape[1 - batch_dim_index]], dtype=torch.long)
# pass batchsize 1 tensor, get back list of length 1 Hypothesis [add length info]
hyp, _ = wer.decoding.ctc_decoder_predictions_tensor(tensor, decoder_lengths=length, return_hypotheses=True)
hyp = hyp[0]
assert isinstance(hyp, Hypothesis)
assert hyp.length == 3
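# get_wer_ctc / get_wer_rnnt below mock the decoding object so the metric's hypothesis/reference
# comparison can be exercised without running a real CTC or RNNT decoder.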
def get_wer_ctc(self, prediction: str, reference: str, test_wer_bpe: bool):
ctc_decoder_predictions_tensor_mock = Mock(return_value=([prediction], None))
if test_wer_bpe:
decoding = Mock(
blank_id=self.char_tokenizer.tokenizer.vocab_size,
tokenizer=deepcopy(self.char_tokenizer),
ctc_decoder_predictions_tensor=ctc_decoder_predictions_tensor_mock,
decode_tokens_to_str=self.char_tokenizer.ids_to_text,
)
wer = WERBPE(decoding, use_cer=False)
else:
decoding = Mock(
blank_id=len(self.vocabulary),
labels_map=self.vocabulary.copy(),
ctc_decoder_predictions_tensor=ctc_decoder_predictions_tensor_mock,
decode_tokens_to_str=self.decode_token_to_str_with_vocabulary_mock,
)
wer = WER(decoding, use_cer=False)
targets_tensor = self.__reference_string_to_tensor(reference, test_wer_bpe)
wer(
predictions=None,
predictions_lengths=None,
targets=targets_tensor,
target_lengths=torch.tensor([len(reference)]),
)
res, _, _ = wer.compute()
res = res.detach().cpu()
# return res[0] / res[1]
return res.item()
def decode_token_to_str_with_vocabulary_mock(self, ids):
return ''.join([self.vocabulary[id_] for id_ in ids])
def get_wer_rnnt(self, prediction: str, reference: str, batch_dim_index: int, test_wer_bpe: bool):
rnnt_decoder_predictions_tensor_mock = Mock(return_value=([prediction], None))
if test_wer_bpe:
decoding = Mock(
blank_id=self.char_tokenizer.tokenizer.vocab_size,
tokenizer=deepcopy(self.char_tokenizer),
rnnt_decoder_predictions_tensor=rnnt_decoder_predictions_tensor_mock,
decode_tokens_to_str=self.char_tokenizer.ids_to_text,
)
wer = RNNTBPEWER(decoding, batch_dim_index=batch_dim_index, use_cer=False)
else:
decoding = Mock(
blank_id=len(self.vocabulary),
labels_map=self.vocabulary.copy(),
rnnt_decoder_predictions_tensor=rnnt_decoder_predictions_tensor_mock,
decode_tokens_to_str=self.decode_token_to_str_with_vocabulary_mock,
)
wer = RNNTWER(decoding, batch_dim_index=batch_dim_index, use_cer=False)
targets_tensor = self.__reference_string_to_tensor(reference, test_wer_bpe)
if wer.batch_dim_index > 0:
targets_tensor.transpose_(0, 1)
wer(
encoder_output=None,
encoded_lengths=None,
targets=targets_tensor,
target_lengths=torch.tensor([len(reference)]),
)
res, _, _ = wer.compute()
res = res.detach().cpu()
# return res[0] / res[1]
return res.item()
@pytest.mark.unit
@pytest.mark.parametrize("batch_dim_index", [0, 1])
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_rnnt_wer_metric_simple(self, batch_dim_index, test_wer_bpe):
assert self.get_wer_rnnt('cat', 'cot', batch_dim_index, test_wer_bpe) == 1.0
assert self.get_wer_rnnt('gpu', 'g p u', batch_dim_index, test_wer_bpe) == 1.0
assert self.get_wer_rnnt('g p u', 'gpu', batch_dim_index, test_wer_bpe) == 3.0
assert self.get_wer_rnnt('ducati motorcycle', 'motorcycle', batch_dim_index, test_wer_bpe) == 1.0
assert self.get_wer_rnnt('ducati motorcycle', 'ducuti motorcycle', batch_dim_index, test_wer_bpe) == 0.5
assert abs(self.get_wer_rnnt('a f c', 'a b c', batch_dim_index, test_wer_bpe) - 1.0 / 3.0) < 1e-6
@pytest.mark.unit
@pytest.mark.parametrize("test_wer_bpe", [False, True])
def test_rnnt_wer_metric_randomized(self, test_wer_bpe):
"""This test relies on correctness of word_error_rate function."""
def __random_string(length):
return ''.join(random.choice(''.join(self.vocabulary)) for _ in range(length))
for test_id in range(256):
n1 = random.randint(1, 512)
n2 = random.randint(1, 512)
s1 = __random_string(n1)
s2 = __random_string(n2)
# skip empty strings as reference
if s2.strip():
assert (
abs(
self.get_wer_rnnt(prediction=s1, reference=s2, batch_dim_index=0, test_wer_bpe=test_wer_bpe)
- word_error_rate(hypotheses=[s1], references=[s2])
)
< 1e-6
)
@pytest.mark.unit
def test_char_decoding_logprobs(self):
B, T, V = 1, 8, len(self.vocabulary)
torch.manual_seed(0)
decoder_outputs = torch.randn(B, T, V, dtype=torch.float32)
decoder_lens = torch.randint(0, T, size=[B], dtype=torch.int32)
decoder_lens[torch.randint(0, B, [1])[0]] = T
decoding_cfg = CTCDecodingConfig()
decoding = CTCDecoding(decoding_cfg, vocabulary=self.vocabulary)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 0
assert hyp.alignments is None
# Preserve timestamps and alignments
decoding_cfg = CTCDecodingConfig(preserve_alignments=True, compute_timestamps=True)
decoding = CTCDecoding(decoding_cfg, vocabulary=self.vocabulary)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 3
assert hyp.alignments is not None
@pytest.mark.unit
def test_subword_decoding_logprobs(self):
B, T, V = 1, 8, self.char_tokenizer.vocab_size
torch.manual_seed(0)
decoder_outputs = torch.randn(B, T, V, dtype=torch.float32)
decoder_lens = torch.randint(0, T, size=[B], dtype=torch.int32)
decoder_lens[torch.randint(0, B, [1])[0]] = T
decoding_cfg = CTCBPEDecodingConfig()
decoding = CTCBPEDecoding(decoding_cfg, tokenizer=self.char_tokenizer)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 0
assert hyp.alignments is None
# Preserve timestamps and alignments
decoding_cfg = CTCBPEDecodingConfig(preserve_alignments=True, compute_timestamps=True)
decoding = CTCBPEDecoding(decoding_cfg, tokenizer=self.char_tokenizer)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 3
assert hyp.alignments is not None
@pytest.mark.unit
def test_char_decoding_labels(self):
B, T, V = 1, 8, len(self.vocabulary)
torch.manual_seed(0)
decoder_outputs = torch.randint(0, V + 1, size=[B, T], dtype=torch.float32)
decoder_lens = torch.randint(0, T, size=[B], dtype=torch.int32)
decoder_lens[torch.randint(0, B, [1])[0]] = T
decoding_cfg = CTCDecodingConfig()
decoding = CTCDecoding(decoding_cfg, vocabulary=self.vocabulary)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 0
assert hyp.alignments is None
# Preserve timestamps and alignments
decoding_cfg = CTCDecodingConfig(preserve_alignments=True, compute_timestamps=True)
decoding = CTCDecoding(decoding_cfg, vocabulary=self.vocabulary)
# Cannot compute alignments from labels
with pytest.raises(ValueError):
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
# Preserve timestamps
decoding_cfg = CTCDecodingConfig(preserve_alignments=False, compute_timestamps=True)
decoding = CTCDecoding(decoding_cfg, vocabulary=self.vocabulary)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 3
assert hyp.alignments is None
@pytest.mark.unit
def test_subword_decoding_labels(self):
B, T, V = 1, 8, self.char_tokenizer.vocab_size
torch.manual_seed(0)
decoder_outputs = torch.randint(0, V + 1, size=[B, T], dtype=torch.float32)
decoder_lens = torch.randint(0, T, size=[B], dtype=torch.int32)
decoder_lens[torch.randint(0, B, [1])[0]] = T
decoding_cfg = CTCBPEDecodingConfig()
decoding = CTCBPEDecoding(decoding_cfg, tokenizer=self.char_tokenizer)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 0
assert hyp.alignments is None
# Preserve timestamps and alignments
decoding_cfg = CTCBPEDecodingConfig(preserve_alignments=True, compute_timestamps=True)
decoding = CTCBPEDecoding(decoding_cfg, tokenizer=self.char_tokenizer)
# Cannot compute alignments from labels
with pytest.raises(ValueError):
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
# Preserve timestamps
decoding_cfg = CTCBPEDecodingConfig(preserve_alignments=False, compute_timestamps=True)
decoding = CTCBPEDecoding(decoding_cfg, tokenizer=self.char_tokenizer)
hyp, _ = decoding.ctc_decoder_predictions_tensor(decoder_outputs, decoder_lens, return_hypotheses=True)
hyp = hyp[0] # type: Hypothesis
assert isinstance(hyp.y_sequence, torch.Tensor)
assert hyp.length == torch.tensor(T, dtype=torch.int32)
assert hyp.text != ''
assert len(hyp.timestep) == 3
assert hyp.alignments is None
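# AudioMetricWrapper is validated against a bare torchmetrics SignalNoiseRatio reference,
# including per-example trimming via input_length and single-channel selection.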
class TestAudioMetricWrapper:
def test_metric_full_batch(self):
"""Test metric on batches where all examples have equal length.
"""
ref_metric = SignalNoiseRatio()
wrapped_metric = AudioMetricWrapper(metric=SignalNoiseRatio())
num_resets = 5
num_batches = 10
batch_size = 8
num_channels = 2
num_samples = 200
batch_shape = (batch_size, num_channels, num_samples)
for nr in range(num_resets):
for nb in range(num_batches):
target = torch.rand(*batch_shape)
preds = target + torch.rand(1) * torch.rand(*batch_shape)
# test forward for a single batch
batch_value_wrapped = wrapped_metric(preds=preds, target=target)
batch_value_ref = ref_metric(preds=preds, target=target)
assert torch.allclose(
batch_value_wrapped, batch_value_ref
), f'Metric forward not matching for batch {nb}, reset {nr}'
# test compute (over num_batches)
assert torch.allclose(
wrapped_metric.compute(), ref_metric.compute()
), f'Metric compute not matching for batch {nb}, reset {nr}'
ref_metric.reset()
wrapped_metric.reset()
def test_input_length(self):
"""Test metric on batches where examples have different length.
"""
ref_metric = SignalNoiseRatio()
wrapped_metric = AudioMetricWrapper(metric=SignalNoiseRatio())
num_resets = 5
num_batches = 10
batch_size = 8
num_channels = 2
num_samples = 200
batch_shape = (batch_size, num_channels, num_samples)
for nr in range(num_resets):
for nb in range(num_batches):
target = torch.rand(*batch_shape)
preds = target + torch.rand(1) * torch.rand(*batch_shape)
input_length = torch.randint(low=num_samples // 2, high=num_samples, size=(batch_size,))
# test forward for a single batch
batch_value_wrapped = wrapped_metric(preds=preds, target=target, input_length=input_length)
# compute reference value, assuming batch reduction using averaging
batch_value_ref = 0
for b_idx, b_len in enumerate(input_length):
batch_value_ref += ref_metric(preds=preds[b_idx, ..., :b_len], target=target[b_idx, ..., :b_len])
batch_value_ref /= batch_size # average
assert torch.allclose(
batch_value_wrapped, batch_value_ref
), f'Metric forward not matching for batch {nb}, reset {nr}'
# test compute (over num_batches)
assert torch.allclose(
wrapped_metric.compute(), ref_metric.compute()
), f'Metric compute not matching for batch {nb}, reset {nr}'
ref_metric.reset()
wrapped_metric.reset()
@pytest.mark.unit
@pytest.mark.parametrize('channel', [0, 1])
def test_channel(self, channel):
"""Test metric on a single channel from a batch.
"""
ref_metric = SignalNoiseRatio()
# select only a single channel
wrapped_metric = AudioMetricWrapper(metric=SignalNoiseRatio(), channel=channel)
num_resets = 5
num_batches = 10
batch_size = 8
num_channels = 2
num_samples = 200
batch_shape = (batch_size, num_channels, num_samples)
for nr in range(num_resets):
for nb in range(num_batches):
target = torch.rand(*batch_shape)
preds = target + torch.rand(1) * torch.rand(*batch_shape)
# varying length
input_length = torch.randint(low=num_samples // 2, high=num_samples, size=(batch_size,))
# test forward for a single batch
batch_value_wrapped = wrapped_metric(preds=preds, target=target, input_length=input_length)
# compute reference value, assuming batch reduction using averaging
batch_value_ref = 0
for b_idx, b_len in enumerate(input_length):
batch_value_ref += ref_metric(
preds=preds[b_idx, channel, :b_len], target=target[b_idx, channel, :b_len]
)
batch_value_ref /= batch_size # average
assert torch.allclose(
batch_value_wrapped, batch_value_ref
), f'Metric forward not matching for batch {nb}, reset {nr}'
# test compute (over num_batches)
assert torch.allclose(
wrapped_metric.compute(), ref_metric.compute()
), f'Metric compute not matching for batch {nb}, reset {nr}'
ref_metric.reset()
wrapped_metric.reset()
| NeMo-main | tests/collections/asr/test_asr_metrics.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import filecmp
import json
import os
import shutil
import tempfile
from unittest import mock
import numpy as np
import pytest
import soundfile as sf
import torch.cuda
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import DataLoader
from nemo.collections.asr.data import audio_to_audio_dataset, audio_to_text_dataset
from nemo.collections.asr.data.audio_to_audio import (
ASRAudioProcessor,
AudioToTargetDataset,
AudioToTargetWithEmbeddingDataset,
AudioToTargetWithReferenceDataset,
_audio_collate_fn,
)
from nemo.collections.asr.data.audio_to_text import (
DataStoreObject,
TarredAudioToBPEDataset,
TarredAudioToCharDataset,
cache_datastore_manifests,
)
from nemo.collections.asr.data.audio_to_text_dali import (
__DALI_MINIMUM_VERSION__,
AudioToBPEDALIDataset,
AudioToCharDALIDataset,
is_dali_supported,
)
from nemo.collections.asr.data.audio_to_text_dataset import inject_dataloader_value_from_model_config
from nemo.collections.asr.data.feature_to_text import FeatureToBPEDataset, FeatureToCharDataset
from nemo.collections.asr.models.ctc_models import EncDecCTCModel
from nemo.collections.asr.parts.utils.audio_utils import get_segment_start
from nemo.collections.asr.parts.utils.manifest_utils import write_manifest
from nemo.collections.common import tokenizers
from nemo.utils import logging
try:
HAVE_DALI = is_dali_supported(__DALI_MINIMUM_VERSION__)
except (ImportError, ModuleNotFoundError):
HAVE_DALI = False
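# Helpers that convert token-id tensors back into text so dataset outputs can be compared
# against the manifest transcripts (character mapping vs. subword tokenizer).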
def decode_chars(tokens, token_length, mapping):
text = []
tokens = tokens.cpu().numpy()
for idx in tokens:
text_token = mapping[idx]
text.append(text_token)
text = text[:token_length]
text = ''.join(text)
return text
def decode_subwords(tokens, token_length, tokenizer: tokenizers.TokenizerSpec):
tokens = tokens.cpu().numpy()
tokens = tokens[:token_length]
text = tokenizer.ids_to_text(tokens)
return text
class TestASRDatasets:
labels = [
" ",
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"'",
]
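# 28-symbol character vocabulary (space, a-z, apostrophe) shared by the char-based datasets below.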
@pytest.mark.unit
def test_tarred_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
ds_braceexpand = TarredAudioToCharDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, sample_rate=16000
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToCharDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, sample_rate=16000
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
@pytest.mark.unit
def test_mismatch_in_model_dataloader_config(self, caplog):
logging._logger.propagate = True
caplog.set_level(logging.WARNING)
model_cfg = OmegaConf.create(dict(labels=OmegaConf.create(["a", "b", "c"])))
dataloader_cfg = OmegaConf.create(dict(labels=copy.deepcopy(self.labels)))
inject_dataloader_value_from_model_config(model_cfg, dataloader_cfg, key='labels')
assert (
"""`labels` is explicitly provided to the data loader, and is different from the `labels` provided at the model level config."""
in caplog.text
)
logging._logger.propagate = False
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_tarred_bpe_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
ds_braceexpand = TarredAudioToBPEDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, tokenizer=tokenizer, sample_rate=16000
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToBPEDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, tokenizer=tokenizer, sample_rate=16000
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
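# The DALI-based tests below run on GPU when available and fall back to CPU otherwise; they are
# skipped entirely when NVIDIA DALI is missing or older than the minimum supported version.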
@pytest.mark.skipif(not HAVE_DALI, reason="NVIDIA DALI is not installed or incompatible version")
@pytest.mark.unit
def test_dali_char_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/an4_val.json'))
num_samples = 10
batch_size = 2
device = 'gpu' if torch.cuda.is_available() else 'cpu'
texts = []
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f:
with open(manifest_path, 'r', encoding='utf-8') as m:
for ix, line in enumerate(m):
if ix >= num_samples:
break
line = line.replace("tests/data/", "tests/.data/").replace("\n", "")
f.write(f"{line}\n")
data = json.loads(line)
texts.append(data['text'])
f.seek(0)
dataset = AudioToCharDALIDataset(
manifest_filepath=f.name,
device=device,
batch_size=batch_size,
labels=self.labels,
max_duration=16.0,
parser='en',
shuffle=False,
)
assert len(dataset) == (num_samples // batch_size) # num batches
count = 0
original_transcripts = []
for batch in dataset:
transcripts = batch[2] # transcript index in DALIOutputs
transcripts_lengths = batch[3] # transcript length index in DALIOutputs
transcripts = [
decode_chars(transcript, transcripts_length, mapping=self.labels)
for transcript, transcripts_length in zip(transcripts, transcripts_lengths)
]
original_transcripts.extend(transcripts)
count += len(transcripts)
assert count == num_samples
# Assert transcripts are correct
for text, og_transcript in zip(texts, original_transcripts):
assert text == og_transcript
# Repeat, now with shuffle enabled
f.seek(0)
dataset = AudioToCharDALIDataset(
manifest_filepath=f.name,
device=device,
batch_size=batch_size,
labels=self.labels,
max_duration=16.0,
parser='en',
shuffle=True,
)
assert len(dataset) == (num_samples // batch_size) # num batches
count = 0
shuffled_transcripts = []
for batch in dataset:
transcripts = batch[2] # transcript index in DALIOutputs
transcripts_lengths = batch[3] # transcript length index in DALIOutputs
transcripts = [
decode_chars(transcript, transcripts_length, mapping=self.labels)
for transcript, transcripts_length in zip(transcripts, transcripts_lengths)
]
shuffled_transcripts.extend(transcripts)
count += len(transcripts)
assert count == num_samples
samples_changed = 0
for orig, shuffled in zip(original_transcripts, shuffled_transcripts):
if orig != shuffled:
samples_changed += 1
assert samples_changed > 1 # expect shuffling to displace more than one sample
for og_transcript, shuffled_transcript in zip(sorted(original_transcripts), sorted(shuffled_transcripts)):
assert og_transcript == shuffled_transcript
@pytest.mark.skipif(not HAVE_DALI, reason="NVIDIA DALI is not installed or incompatible version")
@pytest.mark.unit
def test_dali_bpe_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/an4_val.json'))
num_samples = 10
batch_size = 2
device = 'gpu' if torch.cuda.is_available() else 'cpu'
texts = []
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f:
with open(manifest_path, 'r', encoding='utf-8') as m:
for ix, line in enumerate(m):
if ix >= num_samples:
break
line = line.replace("tests/data/", "tests/.data/").replace("\n", "")
f.write(f"{line}\n")
data = json.loads(line)
texts.append(data['text'])
f.seek(0)
dataset = AudioToBPEDALIDataset(
manifest_filepath=f.name,
tokenizer=tokenizer,
device=device,
batch_size=batch_size,
max_duration=16.0,
shuffle=False,
)
assert len(dataset) == (num_samples // batch_size) # num batches
count = 0
original_transcripts = []
for batch in dataset:
transcripts = batch[2] # transcript index in DALIOutputs
transcripts_lengths = batch[3] # transcript length index in DALIOutputs
transcripts = [
decode_subwords(transcript, transcripts_length, tokenizer=tokenizer)
for transcript, transcripts_length in zip(transcripts, transcripts_lengths)
]
original_transcripts.extend(transcripts)
count += len(transcripts)
assert count == num_samples
# Assert transcripts are correct
for text, og_transcript in zip(texts, original_transcripts):
assert text == og_transcript
# Repeat, now with shuffle enabled
f.seek(0)
dataset = AudioToBPEDALIDataset(
manifest_filepath=f.name,
tokenizer=tokenizer,
device=device,
batch_size=batch_size,
max_duration=16.0,
shuffle=True,
)
assert len(dataset) == (num_samples // batch_size) # num batches
count = 0
shuffled_transcripts = []
for batch in dataset:
transcripts = batch[2] # transcript index in DALIOutputs
transcripts_lengths = batch[3] # transcript length index in DALIOutputs
transcripts = [
decode_subwords(transcript, transcripts_length, tokenizer=tokenizer)
for transcript, transcripts_length in zip(transcripts, transcripts_lengths)
]
shuffled_transcripts.extend(transcripts)
count += len(transcripts)
assert count == num_samples
samples_changed = 0
for orig, shuffled in zip(original_transcripts, shuffled_transcripts):
if orig != shuffled:
samples_changed += 1
assert samples_changed > 1 # expect shuffling to displace more than one sample
for og_transcript, shuffled_transcript in zip(sorted(original_transcripts), sorted(shuffled_transcripts)):
assert og_transcript == shuffled_transcript
@pytest.mark.skipif(not HAVE_DALI, reason="NVIDIA DALI is not installed or incompatible version")
@pytest.mark.unit
def test_dali_char_vs_ref_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/an4_val.json'))
num_samples = 10
batch_size = 1
texts = []
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f:
with open(manifest_path, 'r') as m:
for ix, line in enumerate(m):
if ix >= num_samples:
break
line = line.replace("tests/data/", "tests/.data/").replace("\n", "")
f.write(f"{line}\n")
data = json.loads(line)
texts.append(data['text'])
f.seek(0)
preprocessor = {
'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
'dither': 0.0,
}
preprocessor_cfg = DictConfig(preprocessor)
dataset_cfg = {
'manifest_filepath': f.name,
'sample_rate': 16000,
'labels': self.labels,
'batch_size': batch_size,
'trim_silence': False,
'max_duration': 16.7,
'shuffle': False,
'is_tarred': False,
}
dali_dataset = audio_to_text_dataset.get_dali_char_dataset(
config=dataset_cfg,
shuffle=False,
device_id=0,
global_rank=0,
world_size=1,
preprocessor_cfg=preprocessor_cfg,
)
ref_dataset = audio_to_text_dataset.get_char_dataset(config=dataset_cfg,)
ref_dataloader = DataLoader(
dataset=ref_dataset,
batch_size=batch_size,
collate_fn=ref_dataset.collate_fn,
drop_last=False,
shuffle=False,
num_workers=0,
pin_memory=False,
)
ref_preprocessor = EncDecCTCModel.from_config_dict(preprocessor_cfg)
for ref_data, dali_data in zip(ref_dataloader, dali_dataset):
ref_audio, ref_audio_len, _, _ = ref_data
ref_features, ref_features_len = ref_preprocessor(input_signal=ref_audio, length=ref_audio_len)
dali_features, dali_features_len, _, _ = dali_data
a = ref_features.cpu().numpy()[:, :, :ref_features_len]
b = dali_features.cpu().numpy()[:, :, :dali_features_len]
err = np.abs(a - b)
assert np.mean(err) < 0.0001
assert np.max(err) < 0.01
@pytest.mark.skipif(not HAVE_DALI, reason="NVIDIA DALI is not installed or incompatible version")
@pytest.mark.unit
def test_tarred_dali_char_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
audio_tar_filepaths = [
os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{idx}.tar')) for idx in range(2)
]
audio_tar_index_filepaths = [
os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/dali_index/audio_{idx}.index'))
for idx in range(2)
]
batch_size = 8
device = 'gpu' if torch.cuda.is_available() else 'cpu'
texts = []
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f:
num_samples = 0
with open(manifest_path, 'r') as m:
num_samples = len(m.readlines())
dataset = AudioToCharDALIDataset(
manifest_filepath=manifest_path,
audio_tar_filepaths=audio_tar_filepaths,
audio_tar_index_filepaths=audio_tar_index_filepaths,
device=device,
batch_size=batch_size,
labels=self.labels,
max_duration=16.0,
parser='en',
shuffle=False,
)
assert len(dataset) == (num_samples // batch_size) # num batches
count = 0
original_transcripts = []
for batch in dataset:
transcripts = batch[2] # transcript index in DALIOutputs
transcripts_lengths = batch[3] # transcript length index in DALIOutputs
transcripts = [
decode_chars(transcript, transcripts_length, mapping=self.labels)
for transcript, transcripts_length in zip(transcripts, transcripts_lengths)
]
original_transcripts.extend(transcripts)
count += len(transcripts)
assert count == num_samples
# Assert transcripts are correct
for text, og_transcript in zip(texts, original_transcripts):
assert text == og_transcript
dataset = AudioToCharDALIDataset(
manifest_filepath=manifest_path, # f.name,
audio_tar_filepaths=audio_tar_filepaths,
audio_tar_index_filepaths=audio_tar_index_filepaths,
device=device,
batch_size=batch_size,
labels=self.labels,
max_duration=16.0,
parser='en',
shuffle=True,
)
assert len(dataset) == (num_samples // batch_size) # num batches
count = 0
shuffled_transcripts = []
for batch in dataset:
transcripts = batch[2] # transcript index in DALIOutputs
transcripts_lengths = batch[3] # transcript length index in DALIOutputs
transcripts = [
decode_chars(transcript, transcripts_length, mapping=self.labels)
for transcript, transcripts_length in zip(transcripts, transcripts_lengths)
]
shuffled_transcripts.extend(transcripts)
count += len(transcripts)
assert count == num_samples
samples_changed = 0
for orig, shuffled in zip(original_transcripts, shuffled_transcripts):
if orig != shuffled:
samples_changed += 1
assert samples_changed > 1 # expect shuffling to displace more than one sample
for og_transcript, shuffled_transcript in zip(sorted(original_transcripts), sorted(shuffled_transcripts)):
assert og_transcript == shuffled_transcript
@pytest.mark.skipif(not HAVE_DALI, reason="NVIDIA DALI is not installed or incompatible version")
@pytest.mark.unit
def test_dali_tarred_char_vs_ref_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
audio_tar_filepaths = [
os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{idx}.tar')) for idx in range(2)
]
audio_tar_index_filepaths = [
os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/dali_index/audio_{idx}.index'))
for idx in range(2)
]
batch_size = 8
texts = []
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') as f:
num_samples = 0
with open(manifest_path, 'r') as m:
for ix, line in enumerate(m):
data = json.loads(line)
texts.append(data['text'])
num_samples = ix
preprocessor = {
'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
'dither': 0.0,
}
preprocessor_cfg = DictConfig(preprocessor)
dataset_cfg = {
'manifest_filepath': f.name,
'tarred_audio_filepaths': audio_tar_filepaths,
'tarred_audio_index_filepaths': audio_tar_index_filepaths,
'sample_rate': 16000,
'labels': self.labels,
'batch_size': batch_size,
'trim_silence': False,
'max_duration': 16.7,
'shuffle': False,
'is_tarred': False,
}
dali_dataset = audio_to_text_dataset.get_dali_char_dataset(
config=dataset_cfg,
shuffle=False,
device_id=0,
global_rank=0,
world_size=1,
preprocessor_cfg=preprocessor_cfg,
)
ref_dataset = audio_to_text_dataset.get_tarred_dataset(
config=dataset_cfg, shuffle_n=0, global_rank=0, world_size=1
)
ref_dataloader = DataLoader(
dataset=ref_dataset,
batch_size=batch_size,
collate_fn=ref_dataset.collate_fn,
drop_last=False,
shuffle=False,
num_workers=0,
pin_memory=False,
)
ref_preprocessor = EncDecCTCModel.from_config_dict(preprocessor_cfg)
for ref_data, dali_data in zip(ref_dataloader, dali_dataset):
ref_audio, ref_audio_len, _, _ = ref_data
ref_features, ref_features_len = ref_preprocessor(input_signal=ref_audio, length=ref_audio_len)
dali_features, dali_features_len, _, _ = dali_data
a = ref_features.cpu().numpy()[:, :, :ref_features_len]
b = dali_features.cpu().numpy()[:, :, :dali_features_len]
err = np.abs(a - b)
assert np.mean(err) < 0.0001
assert np.max(err) < 0.01
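# Feature-to-text datasets read precomputed feature tensors (saved with torch.save) instead of
# audio; when use_rttm is set, frames outside the RTTM speech segments are masked.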
@pytest.mark.unit
def test_feature_to_text_char_dataset(self):
num_samples = 5
golden_feat_shape = (80, 5)
with tempfile.TemporaryDirectory() as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest_input.json')
with open(manifest_path, 'w', encoding='utf-8') as fp:
for i in range(num_samples):
feat_file = os.path.join(tmpdir, f"feat_{i}.pt")
torch.save(torch.randn(80, 5), feat_file)
entry = {'audio_filepath': "", 'feature_file': feat_file, 'duration': 100000, "text": "a b c"}
fp.write(json.dumps(entry) + '\n')
dataset = FeatureToCharDataset(manifest_path, labels=self.labels)
cnt = 0
for item in dataset:
cnt += 1
feat = item[0]
token_len = item[3]
assert feat.shape == golden_feat_shape
assert torch.equal(token_len, torch.tensor(5))
assert cnt == num_samples
@pytest.mark.unit
def test_feature_to_text_bpe_dataset(self, test_data_dir):
num_samples = 5
golden_feat_shape = (80, 5)
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
with tempfile.TemporaryDirectory() as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest_input.json')
with open(manifest_path, 'w', encoding='utf-8') as fp:
for i in range(num_samples):
feat_file = os.path.join(tmpdir, f"feat_{i}.pt")
torch.save(torch.randn(80, 5), feat_file)
entry = {'audio_filepath': "", 'feature_file': feat_file, 'duration': 100000, "text": "a b c"}
fp.write(json.dumps(entry) + '\n')
dataset = FeatureToBPEDataset(manifest_path, tokenizer=tokenizer)
cnt = 0
for item in dataset:
cnt += 1
feat = item[0]
token_len = item[3]
assert feat.shape == golden_feat_shape
assert torch.equal(token_len, torch.tensor(5))
assert cnt == num_samples
@pytest.mark.unit
def test_feature_with_rttm_to_text_char_dataset(self):
num_samples = 2
golden_feat_shape = (80, 10)
sample = torch.ones(80, 10)
masked_sample = sample * FeatureToCharDataset.ZERO_LEVEL_SPEC_DB_VAL
with tempfile.TemporaryDirectory() as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest_input.json')
with open(manifest_path, 'w', encoding='utf-8') as fp:
feat_file = os.path.join(tmpdir, f"feat_0.pt")
torch.save(sample, feat_file)
rttm_file = os.path.join(tmpdir, f"rttm_0.rttm")
with open(rttm_file, "w") as fout:
fout.write(f"SPEAKER <NA> 1 0 1 <NA> <NA> speech <NA> <NA>\n")
entry = {
'audio_filepath': "",
'feature_file': feat_file,
'rttm_file': rttm_file,
'duration': 100000,
"text": "a b c",
}
fp.write(json.dumps(entry) + '\n')
# second sample, where the RTTM contains no speech, so all frames are masked
feat_file = os.path.join(tmpdir, f"feat_1.pt")
torch.save(sample, feat_file)
rttm_file = os.path.join(tmpdir, f"rttm_1.rttm")
with open(rttm_file, "w") as fout:
fout.write(f"SPEAKER <NA> 1 0 0 <NA> <NA> speech <NA> <NA>\n")
entry = {
'audio_filepath': "",
'feature_file': feat_file,
'rttm_file': rttm_file,
'duration': 100000,
"text": "a b c",
}
fp.write(json.dumps(entry) + '\n')
dataset = FeatureToCharDataset(manifest_path, labels=self.labels, normalize=None, use_rttm=True)
cnt = 0
for item in dataset:
cnt += 1
feat = item[0]
token_len = item[3]
assert feat.shape == golden_feat_shape
assert torch.equal(token_len, torch.tensor(5))
if cnt == 1:
assert torch.equal(feat, sample)
else:
assert torch.equal(feat, masked_sample)
assert cnt == num_samples
@pytest.mark.unit
def test_feature_with_rttm_to_text_bpe_dataset(self, test_data_dir):
tokenizer_path = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
tokenizer = tokenizers.AutoTokenizer(pretrained_model_name='bert-base-cased', vocab_file=tokenizer_path)
num_samples = 2
golden_feat_shape = (80, 10)
sample = torch.ones(80, 10)
masked_sample = sample * FeatureToCharDataset.ZERO_LEVEL_SPEC_DB_VAL
with tempfile.TemporaryDirectory() as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest_input.json')
with open(manifest_path, 'w', encoding='utf-8') as fp:
feat_file = os.path.join(tmpdir, f"feat_0.pt")
torch.save(sample, feat_file)
rttm_file = os.path.join(tmpdir, f"rttm_0.rttm")
with open(rttm_file, "w") as fout:
fout.write(f"SPEAKER <NA> 1 0 1 <NA> <NA> speech <NA> <NA>\n")
entry = {
'audio_filepath': "",
'feature_file': feat_file,
'rttm_file': rttm_file,
'duration': 100000,
"text": "a b c",
}
fp.write(json.dumps(entry) + '\n')
                # second sample: RTTM has zero speech duration, so all frames get masked
feat_file = os.path.join(tmpdir, f"feat_1.pt")
torch.save(sample, feat_file)
rttm_file = os.path.join(tmpdir, f"rttm_1.rttm")
with open(rttm_file, "w") as fout:
fout.write(f"SPEAKER <NA> 1 0 0 <NA> <NA> speech <NA> <NA>\n")
entry = {
'audio_filepath': "",
'feature_file': feat_file,
'rttm_file': rttm_file,
'duration': 100000,
"text": "a b c",
}
fp.write(json.dumps(entry) + '\n')
dataset = FeatureToBPEDataset(manifest_path, tokenizer=tokenizer, normalize=None, use_rttm=True)
cnt = 0
for item in dataset:
cnt += 1
feat = item[0]
token_len = item[3]
assert feat.shape == golden_feat_shape
assert torch.equal(token_len, torch.tensor(5))
if cnt == 1:
assert torch.equal(feat, sample)
else:
assert torch.equal(feat, masked_sample)
assert cnt == num_samples
class TestAudioDatasets:
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 2])
@pytest.mark.parametrize('num_targets', [1, 3])
def test_list_to_multichannel(self, num_channels, num_targets):
"""Test conversion of a list of arrays into
"""
random_seed = 42
num_samples = 1000
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Multi-channel signal
golden_target = _rng.normal(size=(num_channels * num_targets, num_samples))
# Create a list of num_targets signals with num_channels channels
target_list = [golden_target[n * num_channels : (n + 1) * num_channels, :] for n in range(num_targets)]
# Check the original signal is not modified
assert (ASRAudioProcessor.list_to_multichannel(golden_target) == golden_target).all()
# Check the list is converted back to the original signal
assert (ASRAudioProcessor.list_to_multichannel(target_list) == golden_target).all()
@pytest.mark.unit
def test_audio_collate_fn(self):
"""Test `_audio_collate_fn`
"""
batch_size = 16
random_seed = 42
atol = 1e-5
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
signal_to_channels = {
'input_signal': 2,
'target_signal': 1,
'reference_signal': 1,
}
signal_to_length = {
'input_signal': _rng.integers(low=5, high=25, size=batch_size),
'target_signal': _rng.integers(low=5, high=25, size=batch_size),
'reference_signal': _rng.integers(low=5, high=25, size=batch_size),
}
# Generate batch
batch = []
for n in range(batch_size):
item = dict()
for signal, num_channels in signal_to_channels.items():
random_signal = _rng.normal(size=(num_channels, signal_to_length[signal][n]))
                random_signal = np.squeeze(random_signal)  # drop the channel dimension for single-channel signals
item[signal] = torch.tensor(random_signal)
batch.append(item)
# Run UUT
batched = _audio_collate_fn(batch)
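        # _audio_collate_fn returns a flat tuple of zero-padded (signal, length) pairs,
        # ordered as input_signal, target_signal, reference_signal.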
batched_signals = {
'input_signal': batched[0].cpu().detach().numpy(),
'target_signal': batched[2].cpu().detach().numpy(),
'reference_signal': batched[4].cpu().detach().numpy(),
}
batched_lengths = {
'input_signal': batched[1].cpu().detach().numpy(),
'target_signal': batched[3].cpu().detach().numpy(),
'reference_signal': batched[5].cpu().detach().numpy(),
}
# Check outputs
for signal, b_signal in batched_signals.items():
for n in range(batch_size):
# Check length
uut_length = batched_lengths[signal][n]
golden_length = signal_to_length[signal][n]
assert (
uut_length == golden_length
), f'Example {n} signal {signal} length mismatch: batched ({uut_length}) != golden ({golden_length})'
uut_signal = b_signal[n][:uut_length, ...]
golden_signal = batch[n][signal][:uut_length, ...].cpu().detach().numpy()
assert np.allclose(
uut_signal, golden_signal, atol=atol
), f'Example {n} signal {signal} value mismatch.'
@pytest.mark.unit
def test_audio_to_target_dataset(self):
"""Test AudioWithTargetDataset in different configurations.
Test below cover the following:
1) no constraints
2) filtering based on signal duration
3) use with channel selector
4) use with fixed audio duration and random subsegments
5) collate a batch of items
In this use case, each line of the manifest file has the following format:
```
{
'input_filepath': 'path/to/input.wav',
'target_filepath': 'path/to/path_to_target.wav',
'duration': duration_of_input,
}
```
"""
# Data setup
random_seed = 42
sample_rate = 16000
num_examples = 25
data_num_channels = {
'input_signal': 4,
'target_signal': 2,
}
data_min_duration = 2.0
data_max_duration = 8.0
data_key = {
'input_signal': 'input_filepath',
'target_signal': 'target_filepath',
}
# Tolerance
atol = 1e-6
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Input and target signals have the same duration
data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)
data_duration_samples = np.floor(data_duration * sample_rate).astype(int)
data = dict()
for signal, num_channels in data_num_channels.items():
data[signal] = []
for n in range(num_examples):
if num_channels == 1:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples[n]))
else:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_duration_samples[n]))
data[signal].append(random_signal)
with tempfile.TemporaryDirectory() as test_dir:
# Build metadata for manifest
metadata = []
for n in range(num_examples):
meta = dict()
for signal in data:
# filenames
signal_filename = f'{signal}_{n:02d}.wav'
# write audio files
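                    # sf.write expects data shaped (num_samples, num_channels), hence the transpose
                    # of the (channels, samples) signal generated above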
sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')
# update metadata
meta[data_key[signal]] = signal_filename
meta['duration'] = data_duration[n]
metadata.append(meta)
# Save manifest
manifest_filepath = os.path.join(test_dir, 'manifest.json')
write_manifest(manifest_filepath, metadata)
# Test 1
# - No constraints on channels or duration
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
sample_rate=sample_rate,
)
# Also test the corresponding factory
config = {
'manifest_filepath': manifest_filepath,
'input_key': data_key['input_signal'],
'target_key': data_key['target_signal'],
'sample_rate': sample_rate,
}
dataset_factory = audio_to_audio_dataset.get_audio_to_target_dataset(config)
# Test number of channels
for signal in data:
assert data_num_channels[signal] == dataset.num_channels(
signal
), f'Num channels not correct for signal {signal}'
assert data_num_channels[signal] == dataset_factory.num_channels(
signal
), f'Num channels not correct for signal {signal}'
# Test returned examples
for n in range(num_examples):
item = dataset.__getitem__(n)
item_factory = dataset_factory.__getitem__(n)
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'
item_factory_signal = item_factory[signal].cpu().detach().numpy()
assert np.allclose(
item_factory_signal, golden_signal, atol=atol
), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'
# Test 2
# - Filtering based on signal duration
min_duration = 3.5
max_duration = 7.5
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
min_duration=min_duration,
max_duration=max_duration,
sample_rate=sample_rate,
)
filtered_examples = [n for n, val in enumerate(data_duration) if min_duration <= val <= max_duration]
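            # The dataset drops examples outside [min_duration, max_duration], so index n of the
            # filtered dataset maps back to the original example via filtered_examples[n].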
for n in range(len(dataset)):
item = dataset.__getitem__(n)
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][filtered_examples[n]]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 2: Failed for example {n}, signal {signal} (random seed {random_seed})'
# Test 3
# - Use channel selector
channel_selector = {
'input_signal': [0, 2],
'target_signal': 1,
}
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
input_channel_selector=channel_selector['input_signal'],
target_channel_selector=channel_selector['target_signal'],
sample_rate=sample_rate,
)
for n in range(len(dataset)):
item = dataset.__getitem__(n)
for signal in data:
cs = channel_selector[signal]
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n][cs, ...]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 3: Failed for example {n}, signal {signal} (random seed {random_seed})'
# Test 4
# - Use fixed duration (random segment selection)
audio_duration = 4.0
audio_duration_samples = int(np.floor(audio_duration * sample_rate))
filtered_examples = [n for n, val in enumerate(data_duration) if val >= audio_duration]
for random_offset in [True, False]:
# Test subsegments with the default fixed offset and a random offset
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
sample_rate=sample_rate,
min_duration=audio_duration,
audio_duration=audio_duration,
random_offset=random_offset, # random offset when selecting subsegment
)
for n in range(len(dataset)):
item = dataset.__getitem__(n)
golden_start = golden_end = None
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
full_golden_signal = data[signal][filtered_examples[n]]
# Find random segment using correlation on the first channel
# of the first signal, and then use it fixed for other signals
if golden_start is None:
golden_start = get_segment_start(
signal=full_golden_signal[0, :], segment=item_signal[0, :]
)
if not random_offset:
assert (
golden_start == 0
), f'Expecting the signal to start at 0 when random_offset is False'
golden_end = golden_start + audio_duration_samples
golden_signal = full_golden_signal[..., golden_start:golden_end]
# Test length is correct
assert (
item_signal.shape[-1] == audio_duration_samples
), f'Test 4: Signal length ({item_signal.shape[-1]}) not matching the expected length ({audio_duration_samples})'
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
# Test signal values
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 4: Failed for example {n}, signal {signal} (random seed {random_seed})'
# Test 5:
# - Test collate_fn
batch_size = 16
batch = [dataset.__getitem__(n) for n in range(batch_size)]
batched = dataset.collate_fn(batch)
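            # collate_fn packs the batch as consecutive (signal, signal_length) pairs per key,
            # so batched[2 * n] is the n-th signal tensor and batched[2 * n + 1] its lengths.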
for n, signal in enumerate(data.keys()):
signal_shape = batched[2 * n].shape
signal_len = batched[2 * n + 1]
assert signal_shape == (
batch_size,
data_num_channels[signal],
audio_duration_samples,
), f'Test 5: Unexpected signal {signal} shape {signal_shape}'
assert len(signal_len) == batch_size, f'Test 5: Unexpected length of signal_len ({len(signal_len)})'
assert all(signal_len == audio_duration_samples), f'Test 5: Unexpected signal_len {signal_len}'
@pytest.mark.unit
def test_audio_to_target_dataset_with_target_list(self):
"""Test AudioWithTargetDataset when the input manifest has a list
of audio files in the target key.
In this use case, each line of the manifest file has the following format:
```
{
'input_filepath': 'path/to/input.wav',
'target_filepath': ['path/to/path_to_target_ch0.wav', 'path/to/path_to_target_ch1.wav'],
'duration': duration_of_input,
}
```
"""
# Data setup
random_seed = 42
sample_rate = 16000
num_examples = 25
data_num_channels = {
'input_signal': 4,
'target_signal': 2,
}
data_min_duration = 2.0
data_max_duration = 8.0
data_key = {
'input_signal': 'input_filepath',
'target_signal': 'target_filepath',
}
# Tolerance
atol = 1e-6
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Input and target signals have the same duration
data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)
data_duration_samples = np.floor(data_duration * sample_rate).astype(int)
data = dict()
for signal, num_channels in data_num_channels.items():
data[signal] = []
for n in range(num_examples):
if num_channels == 1:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples[n]))
else:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_duration_samples[n]))
data[signal].append(random_signal)
with tempfile.TemporaryDirectory() as test_dir:
# Build metadata for manifest
metadata = []
for n in range(num_examples):
meta = dict()
for signal in data:
if signal == 'target_signal':
# Save targets as individual files
signal_filename = []
for ch in range(data_num_channels[signal]):
# add current filename
signal_filename.append(f'{signal}_{n:02d}_ch_{ch}.wav')
# write audio file
sf.write(
os.path.join(test_dir, signal_filename[-1]),
data[signal][n][ch, :],
sample_rate,
'float',
)
else:
# single file
signal_filename = f'{signal}_{n:02d}.wav'
# write audio files
sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')
# update metadata
meta[data_key[signal]] = signal_filename
meta['duration'] = data_duration[n]
metadata.append(meta)
# Save manifest
manifest_filepath = os.path.join(test_dir, 'manifest.json')
write_manifest(manifest_filepath, metadata)
# Test 1
# - No constraints on channels or duration
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
sample_rate=sample_rate,
)
config = {
'manifest_filepath': manifest_filepath,
'input_key': data_key['input_signal'],
'target_key': data_key['target_signal'],
'sample_rate': sample_rate,
}
dataset_factory = audio_to_audio_dataset.get_audio_to_target_dataset(config)
for n in range(num_examples):
item = dataset.__getitem__(n)
item_factory = dataset_factory.__getitem__(n)
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'
item_factory_signal = item_factory[signal].cpu().detach().numpy()
assert np.allclose(
item_factory_signal, golden_signal, atol=atol
), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'
# Test 2
# Set target as the first channel of input_filepath and all files listed in target_filepath.
# In this case, the target will have 3 channels.
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=[data_key['input_signal'], data_key['target_signal']],
target_channel_selector=0,
sample_rate=sample_rate,
)
for n in range(num_examples):
item = dataset.__getitem__(n)
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n]
if signal == 'target_signal':
# add the first channel of the input
golden_signal = np.concatenate([data['input_signal'][n][0:1, ...], golden_signal], axis=0)
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 2: Failed for example {n}, signal {signal} (random seed {random_seed})'
@pytest.mark.unit
def test_audio_to_target_dataset_for_inference(self):
"""Test AudioWithTargetDataset when target_key is
not set, i.e., it is `None`. This is the case, e.g., when
running inference, and a target is not available.
In this use case, each line of the manifest file has the following format:
```
{
'input_filepath': 'path/to/input.wav',
'duration': duration_of_input,
}
```
"""
# Data setup
random_seed = 42
sample_rate = 16000
num_examples = 25
data_num_channels = {
'input_signal': 4,
}
data_min_duration = 2.0
data_max_duration = 8.0
data_key = {
'input_signal': 'input_filepath',
}
# Tolerance
atol = 1e-6
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Input and target signals have the same duration
data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)
data_duration_samples = np.floor(data_duration * sample_rate).astype(int)
data = dict()
for signal, num_channels in data_num_channels.items():
data[signal] = []
for n in range(num_examples):
if num_channels == 1:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples[n]))
else:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_duration_samples[n]))
data[signal].append(random_signal)
with tempfile.TemporaryDirectory() as test_dir:
# Build metadata for manifest
metadata = []
for n in range(num_examples):
meta = dict()
for signal in data:
# filenames
signal_filename = f'{signal}_{n:02d}.wav'
# write audio files
sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')
# update metadata
meta[data_key[signal]] = signal_filename
meta['duration'] = data_duration[n]
metadata.append(meta)
# Save manifest
manifest_filepath = os.path.join(test_dir, 'manifest.json')
write_manifest(manifest_filepath, metadata)
# Test 1
# - No constraints on channels or duration
dataset = AudioToTargetDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=None, # target_signal will be empty
sample_rate=sample_rate,
)
# Also test the corresponding factory
config = {
'manifest_filepath': manifest_filepath,
'input_key': data_key['input_signal'],
'target_key': None,
'sample_rate': sample_rate,
}
dataset_factory = audio_to_audio_dataset.get_audio_to_target_dataset(config)
for n in range(num_examples):
item = dataset.__getitem__(n)
item_factory = dataset_factory.__getitem__(n)
# Check target is None
assert item['target_signal'].numel() == 0, 'target_signal is expected to be empty.'
assert item_factory['target_signal'].numel() == 0, 'target_signal is expected to be empty.'
# Check valid signals
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'
item_factory_signal = item_factory[signal].cpu().detach().numpy()
assert np.allclose(
item_factory_signal, golden_signal, atol=atol
), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'
@pytest.mark.unit
def test_audio_to_target_with_reference_dataset(self):
"""Test AudioWithTargetWithReferenceDataset in different configurations.
1) reference synchronized with input and target
2) reference not synchronized
In this use case, each line of the manifest file has the following format:
```
{
'input_filepath': 'path/to/input.wav',
'target_filepath': 'path/to/path_to_target.wav',
'reference_filepath': 'path/to/path_to_reference.wav',
'duration': duration_of_input,
}
```
"""
# Data setup
random_seed = 42
sample_rate = 16000
num_examples = 25
data_num_channels = {
'input_signal': 4,
'target_signal': 2,
'reference_signal': 1,
}
data_min_duration = 2.0
data_max_duration = 8.0
data_key = {
'input_signal': 'input_filepath',
'target_signal': 'target_filepath',
'reference_signal': 'reference_filepath',
}
# Tolerance
atol = 1e-6
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Input and target signals have the same duration
data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)
data_duration_samples = np.floor(data_duration * sample_rate).astype(int)
data = dict()
for signal, num_channels in data_num_channels.items():
data[signal] = []
for n in range(num_examples):
if num_channels == 1:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples[n]))
else:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_duration_samples[n]))
data[signal].append(random_signal)
with tempfile.TemporaryDirectory() as test_dir:
# Build metadata for manifest
metadata = []
for n in range(num_examples):
meta = dict()
for signal in data:
# filenames
signal_filename = f'{signal}_{n:02d}.wav'
# write audio files
sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')
# update metadata
meta[data_key[signal]] = signal_filename
meta['duration'] = data_duration[n]
metadata.append(meta)
# Save manifest
manifest_filepath = os.path.join(test_dir, 'manifest.json')
write_manifest(manifest_filepath, metadata)
# Test 1
# - No constraints on channels or duration
            # - Reference is not synchronized with input and target, so the whole reference signal will be loaded
dataset = AudioToTargetWithReferenceDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
reference_key=data_key['reference_signal'],
reference_is_synchronized=False,
sample_rate=sample_rate,
)
# Also test the corresponding factory
config = {
'manifest_filepath': manifest_filepath,
'input_key': data_key['input_signal'],
'target_key': data_key['target_signal'],
'reference_key': data_key['reference_signal'],
'reference_is_synchronized': False,
'sample_rate': sample_rate,
}
dataset_factory = audio_to_audio_dataset.get_audio_to_target_with_reference_dataset(config)
for n in range(num_examples):
item = dataset.__getitem__(n)
item_factory = dataset_factory.__getitem__(n)
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'
item_factory_signal = item_factory[signal].cpu().detach().numpy()
assert np.allclose(
item_factory_signal, golden_signal, atol=atol
), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'
# Test 2
# - Use fixed duration (random segment selection)
            # - Reference is synchronized with input and target, so the same segment of the reference signal will be loaded
audio_duration = 4.0
audio_duration_samples = int(np.floor(audio_duration * sample_rate))
dataset = AudioToTargetWithReferenceDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
reference_key=data_key['reference_signal'],
reference_is_synchronized=True,
sample_rate=sample_rate,
min_duration=audio_duration,
audio_duration=audio_duration,
random_offset=True,
)
filtered_examples = [n for n, val in enumerate(data_duration) if val >= audio_duration]
for n in range(len(dataset)):
item = dataset.__getitem__(n)
golden_start = golden_end = None
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
full_golden_signal = data[signal][filtered_examples[n]]
# Find random segment using correlation on the first channel
# of the first signal, and then use it fixed for other signals
if golden_start is None:
golden_start = get_segment_start(signal=full_golden_signal[0, :], segment=item_signal[0, :])
golden_end = golden_start + audio_duration_samples
golden_signal = full_golden_signal[..., golden_start:golden_end]
# Test length is correct
assert (
item_signal.shape[-1] == audio_duration_samples
), f'Test 2: Signal {signal} length ({item_signal.shape[-1]}) not matching the expected length ({audio_duration_samples})'
# Test signal values
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 2: Failed for example {n}, signal {signal} (random seed {random_seed})'
# Test 3
# - Use fixed duration (random segment selection)
            # - Reference is not synchronized with input and target, so the whole reference signal will be loaded
audio_duration = 4.0
audio_duration_samples = int(np.floor(audio_duration * sample_rate))
dataset = AudioToTargetWithReferenceDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
reference_key=data_key['reference_signal'],
reference_is_synchronized=False,
sample_rate=sample_rate,
min_duration=audio_duration,
audio_duration=audio_duration,
random_offset=True,
)
filtered_examples = [n for n, val in enumerate(data_duration) if val >= audio_duration]
for n in range(len(dataset)):
item = dataset.__getitem__(n)
golden_start = golden_end = None
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
full_golden_signal = data[signal][filtered_examples[n]]
if signal == 'reference_signal':
# Complete signal is loaded for reference
golden_signal = full_golden_signal
else:
# Find random segment using correlation on the first channel
# of the first signal, and then use it fixed for other signals
if golden_start is None:
golden_start = get_segment_start(
signal=full_golden_signal[0, :], segment=item_signal[0, :]
)
golden_end = golden_start + audio_duration_samples
golden_signal = full_golden_signal[..., golden_start:golden_end]
# Test length is correct
assert (
item_signal.shape[-1] == audio_duration_samples
), f'Test 3: Signal {signal} length ({item_signal.shape[-1]}) not matching the expected length ({audio_duration_samples})'
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
# Test signal values
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 3: Failed for example {n}, signal {signal} (random seed {random_seed})'
# Test 4:
# - Test collate_fn
batch_size = 16
batch = [dataset.__getitem__(n) for n in range(batch_size)]
_ = dataset.collate_fn(batch)
@pytest.mark.unit
def test_audio_to_target_with_embedding_dataset(self):
"""Test AudioWithTargetWithEmbeddingDataset.
In this use case, each line of the manifest file has the following format:
```
{
'input_filepath': 'path/to/input.wav',
'target_filepath': 'path/to/path_to_target.wav',
'embedding_filepath': 'path/to/path_to_embedding.npy',
'duration': duration_of_input,
}
```
"""
# Data setup
random_seed = 42
sample_rate = 16000
num_examples = 25
data_num_channels = {
'input_signal': 4,
'target_signal': 2,
'embedding_vector': 1,
}
data_min_duration = 2.0
data_max_duration = 8.0
embedding_length = 64 # 64-dimensional embedding vector
data_key = {
'input_signal': 'input_filepath',
'target_signal': 'target_filepath',
'embedding_vector': 'embedding_filepath',
}
# Tolerance
atol = 1e-6
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Input and target signals have the same duration
data_duration = np.round(_rng.uniform(low=data_min_duration, high=data_max_duration, size=num_examples), 3)
data_duration_samples = np.floor(data_duration * sample_rate).astype(int)
data = dict()
for signal, num_channels in data_num_channels.items():
data[signal] = []
for n in range(num_examples):
data_length = embedding_length if signal == 'embedding_vector' else data_duration_samples[n]
if num_channels == 1:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(data_length))
else:
random_signal = _rng.uniform(low=-0.5, high=0.5, size=(num_channels, data_length))
data[signal].append(random_signal)
with tempfile.TemporaryDirectory() as test_dir:
# Build metadata for manifest
metadata = []
for n in range(num_examples):
meta = dict()
for signal in data:
if signal == 'embedding_vector':
signal_filename = f'{signal}_{n:02d}.npy'
np.save(os.path.join(test_dir, signal_filename), data[signal][n])
else:
# filenames
signal_filename = f'{signal}_{n:02d}.wav'
# write audio files
sf.write(os.path.join(test_dir, signal_filename), data[signal][n].T, sample_rate, 'float')
# update metadata
meta[data_key[signal]] = signal_filename
meta['duration'] = data_duration[n]
metadata.append(meta)
# Save manifest
manifest_filepath = os.path.join(test_dir, 'manifest.json')
write_manifest(manifest_filepath, metadata)
# Test 1
# - No constraints on channels or duration
dataset = AudioToTargetWithEmbeddingDataset(
manifest_filepath=manifest_filepath,
input_key=data_key['input_signal'],
target_key=data_key['target_signal'],
embedding_key=data_key['embedding_vector'],
sample_rate=sample_rate,
)
# Also test the corresponding factory
config = {
'manifest_filepath': manifest_filepath,
'input_key': data_key['input_signal'],
'target_key': data_key['target_signal'],
'embedding_key': data_key['embedding_vector'],
'sample_rate': sample_rate,
}
dataset_factory = audio_to_audio_dataset.get_audio_to_target_with_embedding_dataset(config)
for n in range(num_examples):
item = dataset.__getitem__(n)
item_factory = dataset_factory.__getitem__(n)
for signal in data:
item_signal = item[signal].cpu().detach().numpy()
golden_signal = data[signal][n]
assert (
item_signal.shape == golden_signal.shape
), f'Signal {signal}: item shape {item_signal.shape} not matching reference shape {golden_signal.shape}'
assert np.allclose(
item_signal, golden_signal, atol=atol
), f'Test 1: Failed for example {n}, signal {signal} (random seed {random_seed})'
item_factory_signal = item_factory[signal].cpu().detach().numpy()
assert np.allclose(
item_factory_signal, golden_signal, atol=atol
), f'Test 1: Failed for factory example {n}, signal {signal} (random seed {random_seed})'
# Test 2:
# - Test collate_fn
batch_size = 16
batch = [dataset.__getitem__(n) for n in range(batch_size)]
_ = dataset.collate_fn(batch)
class TestUtilityFunctions:
@pytest.mark.unit
@pytest.mark.parametrize('cache_audio', [False, True])
def test_cache_datastore_manifests(self, cache_audio: bool):
"""Test caching of manifest and audio files.
"""
# Data setup
random_seed = 42
sample_rate = 16000
num_examples = 10
num_manifests = 2
data_duration = 1.0
# Generate random signals
_rng = np.random.default_rng(seed=random_seed)
# Input and target signals have the same duration
data_duration_samples = int(data_duration * sample_rate)
with tempfile.TemporaryDirectory() as test_dir:
test_store_dir = os.path.join(test_dir, 'store')
os.mkdir(test_store_dir)
# Prepare metadata and audio files
manifest_filepaths = []
audio_files = []
for m in range(num_manifests):
manifest_dir = os.path.join(test_store_dir, f'manifest_{m}')
os.mkdir(manifest_dir)
manifest_filepath = os.path.join(manifest_dir, 'manifest.json')
metadata = []
data = _rng.uniform(low=-0.5, high=0.5, size=(data_duration_samples, num_examples))
for n in range(num_examples):
audio_filepath = f'manifest_{m}_audio_{n:02d}.wav'
audio_file = os.path.join(manifest_dir, audio_filepath)
# Write audio file
sf.write(audio_file, data[:, n], sample_rate, 'float')
# Update metadata
metadata.append(
{
'audio_filepath': audio_filepath,
'duration': data_duration,
'text': f'text for example {n:02d}',
}
)
# Update audio files
audio_files.append(audio_file)
# Save manifest
write_manifest(manifest_filepath, metadata)
manifest_filepaths.append(manifest_filepath)
# Cache location
test_cache_dir = os.path.join(test_dir, 'cache')
# Instead of using AIS, copy object from store dir to cache dir
def fake_get(self):
# Object path relative to store path
object_path = os.path.relpath(self.store_path, start=test_store_dir)
# Copy to fake local path
self._local_path = os.path.join(test_cache_dir, object_path)
os.makedirs(os.path.dirname(self.local_path), exist_ok=True)
shutil.copy(self.store_path, self.local_path)
# Return path as in the original get
return self.local_path
with mock.patch(
'nemo.collections.asr.data.audio_to_text.is_datastore_path', lambda x: True
), mock.patch.object(DataStoreObject, 'get', fake_get):
# Use a single worker for this test to avoid failure with mock & multiprocessing (#5607)
cache_datastore_manifests(manifest_filepaths, cache_audio=cache_audio, num_workers=1)
# Manifests need to be compared
store_files_to_compare = manifest_filepaths
if cache_audio:
# Audio needs to be compared
store_files_to_compare += audio_files
# Compare files
for f_store in store_files_to_compare:
f_cache = os.path.join(test_cache_dir, os.path.relpath(f_store, test_store_dir))
assert filecmp.cmp(f_store, f_cache, shallow=False), f'Files {f_store} and {f_cache} do not match.'
| NeMo-main | tests/collections/asr/test_asr_datasets.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pytest
import torch
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.models import EncDecRNNTModel
from nemo.collections.asr.modules import HATJoint, RNNTDecoder, RNNTJoint, SampledRNNTJoint, StatelessTransducerDecoder
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils.config_utils import assert_dataclass_signature_match
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
@pytest.fixture()
def asr_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
# fmt: off
labels = [' ', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', "'",
]
# fmt: on
model_defaults = {'enc_hidden': 1024, 'pred_hidden': 64}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': model_defaults['pred_hidden'], 'pred_rnn_layers': 1},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 32, 'activation': 'relu'},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 30}}
loss = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
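    # fastemit_lambda adds FastEmit regularization to the Numba RNNT loss (0.0 disables it)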
modelConfig = DictConfig(
{
'labels': ListConfig(labels),
'preprocessor': DictConfig(preprocessor),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'decoding': DictConfig(decoding),
'loss': DictConfig(loss),
}
)
model_instance = EncDecRNNTModel(cfg=modelConfig)
return model_instance
class TestEncDecRNNTModel:
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_constructor(self, asr_model):
asr_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecRNNTModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecRNNTModel)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_forward(self, asr_model):
asr_model = asr_model.eval()
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
asr_model.compute_eval_loss = False
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins, _ = asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
logprobs_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch, _ = asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logprobs_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_vocab_change(self, asr_model):
old_vocab = copy.deepcopy(asr_model.joint.vocabulary)
nw1 = asr_model.num_weights
asr_model.change_vocabulary(new_vocabulary=old_vocab)
# No change
assert nw1 == asr_model.num_weights
new_vocab = copy.deepcopy(old_vocab)
new_vocab.append('!')
new_vocab.append('$')
new_vocab.append('@')
asr_model.change_vocabulary(new_vocabulary=new_vocab)
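        # Adding 3 tokens grows the prediction-net embedding by 3 * pred_hidden weights and the
        # joint output layer by 3 * (joint_hidden + 1) weights (output row + bias per token).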
# fully connected + bias
# rnn embedding + joint + bias
pred_embedding = 3 * (asr_model.decoder.pred_hidden)
joint_joint = 3 * (asr_model.joint.joint_hidden + 1)
assert asr_model.num_weights == (nw1 + (pred_embedding + joint_joint))
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_change_conv_asr_se_context_window(self, asr_model):
old_cfg = copy.deepcopy(asr_model.cfg)
asr_model.change_conv_asr_se_context_window(context_window=32) # 32 * 0.01s context
new_config = asr_model.cfg
assert old_cfg.encoder.jasper[0].se_context_size == -1
assert new_config.encoder.jasper[0].se_context_size == 32
for name, m in asr_model.encoder.named_modules():
            if m.__class__.__name__ == 'SqueezeExcite':
assert m.context_window == 32
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_change_conv_asr_se_context_window_no_config_update(self, asr_model):
old_cfg = copy.deepcopy(asr_model.cfg)
asr_model.change_conv_asr_se_context_window(context_window=32, update_config=False) # 32 * 0.01s context
new_config = asr_model.cfg
assert old_cfg.encoder.jasper[0].se_context_size == -1
assert new_config.encoder.jasper[0].se_context_size == -1 # no change
for name, m in asr_model.encoder.named_modules():
            if m.__class__.__name__ == 'SqueezeExcite':
assert m.context_window == 32
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_decoding_change(self, asr_model):
assert isinstance(asr_model.decoding.decoding, greedy_decode.GreedyBatchedRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'greedy'
new_strategy.greedy = DictConfig({'max_symbols': 10})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, greedy_decode.GreedyRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 1})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 2})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'tsd'
new_strategy.beam = DictConfig({'beam_size': 2})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "tsd"
new_strategy = DictConfig({})
new_strategy.strategy = 'alsd'
new_strategy.beam = DictConfig({'beam_size': 2})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "alsd"
@pytest.mark.unit
def test_GreedyRNNTInferConfig(self):
# confidence_method_cfg is deprecated
IGNORE_ARGS = ['decoder_model', 'joint_model', 'blank_index', 'confidence_method_cfg']
result = assert_dataclass_signature_match(
greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyRNNTInferConfig, ignore_args=IGNORE_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_GreedyBatchedRNNTInferConfig(self):
# confidence_method_cfg is deprecated
IGNORE_ARGS = ['decoder_model', 'joint_model', 'blank_index', 'confidence_method_cfg']
result = assert_dataclass_signature_match(
greedy_decode.GreedyBatchedRNNTInfer, greedy_decode.GreedyBatchedRNNTInferConfig, ignore_args=IGNORE_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_BeamRNNTInferConfig(self):
IGNORE_ARGS = ['decoder_model', 'joint_model', 'blank_index']
result = assert_dataclass_signature_match(
beam_decode.BeamRNNTInfer, beam_decode.BeamRNNTInferConfig, ignore_args=IGNORE_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyMultiblankRNNTInfer, greedy_decode.GreedyBatchedMultiblankRNNTInfer],
)
def test_multiblank_rnnt_greedy_decoding(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
big_blank_durations = [2, 4]
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(
jointnet_cfg, vocab_size, vocabulary=token_list, num_extra_outputs=len(big_blank_durations)
)
greedy = greedy_class(
decoder,
joint_net,
blank_index=len(token_list),
big_blank_durations=big_blank_durations,
max_symbols_per_step=5,
)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer],
)
def test_greedy_multi_decoding(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
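                # First pass produces hypotheses for the batch; decoding is then resumed by
                # passing the first element back in as partial_hypotheses.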
(partial_hyp) = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
partial_hyp = partial_hyp[0]
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len, partial_hypotheses=partial_hyp)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding_stateless_decoder(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = StatelessTransducerDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer],
)
def test_greedy_multi_decoding_stateless_decoder(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = StatelessTransducerDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
(partial_hyp) = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
partial_hyp = partial_hyp[0]
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len, partial_hypotheses=partial_hyp)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding_preserve_alignment(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(
decoder, joint_net, blank_index=len(token_list) - 1, preserve_alignments=True, max_symbols_per_step=5
)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
hyp = greedy(encoder_output=enc_out, encoded_lengths=enc_len)[0][0] # type: rnnt_utils.Hypothesis
assert hyp.alignments is not None
for t in range(len(hyp.alignments)):
for u in range(len(hyp.alignments[t])):
logp, label = hyp.alignments[t][u]
assert torch.is_tensor(logp)
assert torch.is_tensor(label)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[
{"search_type": "greedy"},
{"search_type": "default", "score_norm": False, "return_best_hypothesis": False},
{"search_type": "alsd", "alsd_max_target_len": 20, "return_best_hypothesis": False},
{"search_type": "tsd", "tsd_max_sym_exp_per_step": 3, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 2, "maes_expansion_beta": 2, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 3, "maes_expansion_beta": 1, "return_best_hypothesis": False},
],
)
def test_beam_decoding(self, beam_config):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
beam_size = 1 if beam_config["search_type"] == "greedy" else 2
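        # The 'greedy' search type degenerates beam search to a beam of 1; other search types use a small beam of 2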
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
beam = beam_decode.BeamRNNTInfer(decoder, joint_net, beam_size=beam_size, **beam_config,)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = beam(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[{"search_type": "greedy"}, {"search_type": "default", "score_norm": False, "return_best_hypothesis": False},],
)
def test_beam_decoding_preserve_alignments(self, beam_config):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
beam_size = 1 if beam_config["search_type"] == "greedy" else 2
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
for joint_type in [RNNTJoint, HATJoint]:
joint_net = joint_type(jointnet_cfg, vocab_size, vocabulary=token_list)
beam = beam_decode.BeamRNNTInfer(
decoder, joint_net, beam_size=beam_size, **beam_config, preserve_alignments=True
)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
hyp = beam(encoder_output=enc_out, encoded_lengths=enc_len)[0][0] # type: rnnt_utils.Hypothesis
if isinstance(hyp, rnnt_utils.NBestHypotheses):
hyp = hyp.n_best_hypotheses[0] # select top hypothesis only
assert hyp.alignments is not None
for t in range(len(hyp.alignments)):
for u in range(len(hyp.alignments[t])):
logp, label = hyp.alignments[t][u]
assert torch.is_tensor(logp)
assert torch.is_tensor(label)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding_SampledRNNTJoint(self, greedy_class):
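# SampledRNNTJoint is used as a drop-in joint variant (n_samples=2 below); greedy decoding
# is expected to run unchanged with it.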
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = SampledRNNTJoint(jointnet_cfg, vocab_size, n_samples=2, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[
{"search_type": "greedy"},
{"search_type": "default", "score_norm": False, "return_best_hypothesis": False},
{"search_type": "alsd", "alsd_max_target_len": 20, "return_best_hypothesis": False},
{"search_type": "tsd", "tsd_max_sym_exp_per_step": 3, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 2, "maes_expansion_beta": 2, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 3, "maes_expansion_beta": 1, "return_best_hypothesis": False},
],
)
def test_beam_decoding_SampledRNNTJoint(self, beam_config):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
beam_size = 1 if beam_config["search_type"] == "greedy" else 2
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = SampledRNNTJoint(jointnet_cfg, vocab_size, n_samples=2, vocabulary=token_list)
beam = beam_decode.BeamRNNTInfer(decoder, joint_net, beam_size=beam_size, **beam_config,)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = beam(encoder_output=enc_out, encoded_lengths=enc_len)
| NeMo-main | tests/collections/asr/test_asr_rnnt_encdec_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import pytest
import torch
import torch.nn as nn
from nemo.collections.asr.parts.submodules.batchnorm import (
FusedBatchNorm1d,
replace_bn_with_fused_bn,
replace_bn_with_fused_bn_all,
)
class TestFusedBatchNorm1d:
@pytest.mark.unit
def test_constructor(self):
num_features = 10
fused_bn = FusedBatchNorm1d(num_features=num_features)
assert fused_bn.weight.shape[0] == num_features
assert fused_bn.bias.shape[0] == num_features
# check initialization: weight is ones, bias is zeros (identity)
assert torch.allclose(fused_bn.weight, torch.ones(num_features))
assert torch.allclose(fused_bn.bias, torch.zeros(num_features))
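# FusedBatchNorm1d.from_batchnorm is expected to fold the running statistics of a trained
# BatchNorm1d into a single affine (weight, bias) transform, so eval-mode outputs should match
# the original layer for both 2D and 3D inputs.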
@pytest.mark.unit
def test_from_batchnorm(self):
num_features = 10
# construct batchnorm
bn = nn.BatchNorm1d(num_features=num_features)
# update bn stats
bn.train()
batch_size = 4
for _ in range(10):
_ = bn(torch.rand(batch_size, num_features))
# test eval mode is equivalent
fused_bn = FusedBatchNorm1d.from_batchnorm(bn)
bn.eval()
sample_2d = torch.rand(batch_size, num_features)
assert torch.allclose(bn(sample_2d), fused_bn(sample_2d))
sample_3d = torch.rand(batch_size, num_features, 5)
assert torch.allclose(bn(sample_3d), fused_bn(sample_3d))
class TestReplaceBNWithFusedBN:
@pytest.mark.unit
def test_replace_bn_with_fused_bn(self):
model = nn.Sequential(
OrderedDict(
[
("linear1", nn.Linear(1, 10)),
("bn1", nn.BatchNorm1d(10)),
("relu1", nn.ReLU()),
("linear2", nn.Linear(10, 11)),
("bn2", nn.BatchNorm1d(11)),
(
"submodule1",
nn.Sequential(OrderedDict([("linear3", nn.Linear(11, 12)), ("bn3", nn.BatchNorm1d(12))])),
),
]
)
)
replace_bn_with_fused_bn(model, "submodule1.bn3")
assert isinstance(model.bn1, nn.BatchNorm1d)
assert isinstance(model.bn2, nn.BatchNorm1d)
assert isinstance(model.submodule1.bn3, FusedBatchNorm1d)
@pytest.mark.unit
def test_replace_bn_with_fused_bn_all(self):
model = nn.Sequential(
OrderedDict(
[
("linear1", nn.Linear(1, 10)),
("bn1", nn.BatchNorm1d(10)),
("relu1", nn.ReLU()),
("linear2", nn.Linear(10, 11)),
("bn2", nn.BatchNorm1d(11)),
(
"submodule1",
nn.Sequential(OrderedDict([("linear3", nn.Linear(11, 12)), ("bn3", nn.BatchNorm1d(12))])),
),
]
)
)
replace_bn_with_fused_bn_all(model)
assert isinstance(model.bn1, FusedBatchNorm1d)
assert isinstance(model.bn2, FusedBatchNorm1d)
assert isinstance(model.submodule1.bn3, FusedBatchNorm1d)
| NeMo-main | tests/collections/asr/test_asr_parts_submodules_batchnorm.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import torch
from scipy.optimize import linear_sum_assignment as scipy_linear_sum_assignment
from nemo.collections.asr.data.audio_to_label import repeat_signal
from nemo.collections.asr.parts.utils.offline_clustering import (
SpeakerClustering,
get_scale_interpolated_embs,
getCosAffinityMatrix,
getKneighborsConnections,
split_input_data,
)
from nemo.collections.asr.parts.utils.online_clustering import (
OnlineSpeakerClustering,
get_closest_embeddings,
get_merge_quantity,
get_minimal_indices,
merge_vectors,
run_reducer,
stitch_cluster_labels,
)
from nemo.collections.asr.parts.utils.optimization_utils import LinearSumAssignmentSolver
from nemo.collections.asr.parts.utils.optimization_utils import linear_sum_assignment as nemo_linear_sum_assignment
from nemo.collections.asr.parts.utils.speaker_utils import (
OnlineSegmentor,
check_ranges,
fl2int,
get_new_cursor_for_update,
get_online_segments_from_slices,
get_online_subsegments_from_buffer,
get_speech_labels_for_update,
get_sub_range_list,
get_subsegments,
get_target_sig,
int2fl,
is_overlap,
merge_float_intervals,
merge_int_intervals,
tensor_to_list,
)
def check_range_values(target, source):
bool_list = []
for tgt, src in zip(target, source):
for x, y in zip(src, tgt):
bool_list.append(abs(x - y) < 1e-6)
return all(bool_list)
def check_labels(target, source):
bool_list = []
for x, y in zip(target, source):
bool_list.append(abs(x - y) < 1e-6)
return all(bool_list)
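# Small helper: wrap a python list as a torch tensor (default) or a numpy array for parametrized tests.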
def matrix(mat, use_tensor=True, dtype=torch.long):
if use_tensor:
mat = torch.Tensor(mat).to(dtype)
else:
mat = np.array(mat)
return mat
def generate_orthogonal_embs(total_spks, perturb_sigma, emb_dim):
"""Generate a set of artificial orthogonal embedding vectors from random numbers
"""
gaus = torch.randn(emb_dim, emb_dim)
_svd = torch.linalg.svd(gaus)
orth = _svd[0] @ _svd[2]
orth_embs = orth[:total_spks]
# Assert orthogonality
assert torch.abs(getCosAffinityMatrix(orth_embs) - torch.diag(torch.ones(total_spks))).sum() < 1e-4
return orth_embs
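# Builds a toy multiscale diarization dataset: each speaker owns one contiguous block of time and
# every segment embedding is a lightly perturbed copy of that speaker's orthogonal centroid.
# Ground-truth labels are returned for the base (finest) scale only.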
def generate_toy_data(
n_spks=2,
spk_dur=3,
emb_dim=192,
perturb_sigma=0.0,
ms_window=[1.5, 1.0, 0.5],
ms_shift=[0.75, 0.5, 0.25],
torch_seed=0,
):
torch.manual_seed(torch_seed)
spk_timestamps = [(spk_dur * k, spk_dur) for k in range(n_spks)]
emb_list, seg_list = [], []
multiscale_segment_counts = [0 for _ in range(len(ms_window))]
ground_truth = []
random_orthogonal_embs = generate_orthogonal_embs(n_spks, perturb_sigma, emb_dim)
for scale_idx, (window, shift) in enumerate(zip(ms_window, ms_shift)):
for spk_idx, (offset, dur) in enumerate(spk_timestamps):
segments_stt_dur = get_subsegments(offset=offset, window=window, shift=shift, duration=dur)
segments = [[x[0], x[0] + x[1]] for x in segments_stt_dur]
emb_cent = random_orthogonal_embs[spk_idx, :]
emb = emb_cent.tile((len(segments), 1)) + 0.1 * torch.rand(len(segments), emb_dim)
seg_list.extend(segments)
emb_list.append(emb)
multiscale_segment_counts[scale_idx] += emb.shape[0]
if scale_idx == len(multiscale_segment_counts) - 1:
ground_truth.extend([spk_idx] * emb.shape[0])
emb_tensor = torch.concat(emb_list)
multiscale_segment_counts = torch.tensor(multiscale_segment_counts)
segm_tensor = torch.tensor(seg_list)
multiscale_weights = torch.ones(len(ms_window)).unsqueeze(0)
ground_truth = torch.tensor(ground_truth)
return emb_tensor, segm_tensor, multiscale_segment_counts, multiscale_weights, spk_timestamps, ground_truth
class TestDiarizationSequenceUtilFunctions:
"""Tests diarization and speaker-task related utils.
"""
@pytest.mark.unit
@pytest.mark.parametrize("Y", [[3, 3, 3, 4, 4, 5], [100, 100, 100, 104, 104, 1005]])
@pytest.mark.parametrize("target", [[0, 0, 0, 1, 1, 2]])
@pytest.mark.parametrize("offset", [1, 10])
def test_minimal_index_ex1(self, Y, target, offset):
Y = torch.tensor(Y)
target = torch.tensor(target)
min_Y = get_minimal_indices(Y)
assert check_labels(target, min_Y)
min_Y = get_minimal_indices(Y + offset)
assert check_labels(target, min_Y)
@pytest.mark.parametrize("Y", [[4, 0, 0, 5, 4, 5], [14, 12, 12, 19, 14, 19]])
@pytest.mark.parametrize("target", [[1, 0, 0, 2, 1, 2]])
@pytest.mark.parametrize("offset", [1, 10])
def test_minimal_index_ex2(self, Y, target, offset):
Y = torch.tensor(Y)
target = torch.tensor(target)
min_Y = get_minimal_indices(Y)
assert check_labels(target, min_Y)
min_Y = get_minimal_indices(Y + offset)
assert check_labels(target, min_Y)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 4, 16, 64])
def test_minimal_index_same(self, N):
Y = matrix([0] * N + [1] * N + [2] * N)
min_Y = get_minimal_indices(Y)
target = matrix([0] * N + [1] * N + [2] * N)
assert check_labels(target, min_Y)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 4, 16, 64])
def test_stitch_cluster_labels_label_switch(self, N):
Y_old = matrix([0] * N)
Y_new = matrix([0] * N) + 1
target = matrix([0] * N)
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 4, 16, 64])
def test_stitch_cluster_labels_label_many_to_one(self, N):
Y_old = matrix(np.arange(N).tolist())
Y_new = matrix([0] * N)
target = matrix([0] * N)
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 4, 16, 64])
def test_stitch_cluster_labels_label_one_to_many(self, N):
Y_old = matrix(np.arange(N).tolist())
Y_new = matrix([k for k in range(N)])
target = matrix([k for k in range(N)])
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 4, 16, 64])
def test_stitch_cluster_labels_one_label_replaced(self, N):
Y_old = matrix([0] * N + [1] * N + [2] * N)
Y_new = matrix([1] * N + [2] * N + [3] * N)
target = matrix([0] * N + [1] * N + [2] * N)
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 4, 16, 64])
def test_stitch_cluster_labels_confusion_error(self, N):
Y_old = matrix([0] * N + [1] * (N - 1) + [2] * (N + 1))
Y_new = matrix([1] * N + [2] * N + [3] * N)
target = matrix([0] * N + [1] * N + [2] * N)
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 256])
def test_stitch_cluster_labels_speaker_more_speakers(self, N):
Y_old = matrix([0] * N + [1] * (N - 1) + [2] * (N + 1) + [0, 0, 0])
Y_new = matrix([1] * N + [0] * N + [2] * N + [4, 5, 6])
target = matrix([0] * N + [1] * N + [2] * N + [3, 4, 5])
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("N", [2, 256])
def test_stitch_cluster_labels_speaker_longer_sequence(self, N):
Y_old = matrix([0] * N + [1] * N + [2] * N + [0, 0, 0] * N)
Y_new = matrix([1] * N + [2] * N + [0] * N + [1, 2, 3, 1, 2, 3] * N)
target = matrix([0] * N + [1] * N + [2] * N + [0, 1, 3, 0, 1, 3] * N)
result = stitch_cluster_labels(Y_old, Y_new)
assert check_labels(target, result)
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [2, 3, 4, 5])
@pytest.mark.parametrize("merge_quantity", [2, 3])
def test_embedding_merger(self, n_spks, merge_quantity):
em, ts, mc, mw, spk_ts, gt = generate_toy_data(n_spks, spk_dur=5, perturb_sigma=10)
em_s, ts_s = split_input_data(em, ts, mc)
target_speaker_index = 0
pre_clus_labels = gt
ndx = torch.where(pre_clus_labels == target_speaker_index)[0]
pre_embs = em_s[-1]
affinity_mat = getCosAffinityMatrix(pre_embs)
cmat = affinity_mat[:, ndx][ndx, :]
# Check the dimension of the selected affinity values
assert cmat.shape[0] == cmat.shape[1] == torch.sum(pre_clus_labels == target_speaker_index).item()
index_2d, rest_inds = get_closest_embeddings(cmat, merge_quantity)
# Check that the selected index has the largest total affinity (the closest embedding)
assert torch.max(cmat.sum(0)) == cmat.sum(0)[index_2d[0]]
spk_cluster_labels, emb_ndx = pre_clus_labels[ndx], pre_embs[ndx]
merged_embs, merged_clus_labels = merge_vectors(index_2d, emb_ndx, spk_cluster_labels)
# Check the number of merged embeddings and labels
assert (torch.sum(gt == target_speaker_index).item() - merge_quantity) == merged_clus_labels.shape[0]
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [1, 8])
@pytest.mark.parametrize("spk_dur", [0.2, 0.25, 0.5, 1, 10])
def test_cosine_affinity_calculation(self, n_spks, spk_dur):
em, ts, mc, mw, spk_ts, gt = generate_toy_data(n_spks=n_spks, spk_dur=spk_dur)
em_s, ts_s = split_input_data(em, ts, mc)
affinity_mat = getCosAffinityMatrix(em_s[-1])
# affinity_mat should not contain any nan element
assert torch.any(torch.isnan(affinity_mat)) == False
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [1, 8])
@pytest.mark.parametrize("spk_dur", [0.2, 0.25, 0.5, 1, 10])
def test_cosine_affinity_calculation_scale_interpol(self, n_spks, spk_dur):
em, ts, mc, mw, spk_ts, gt = generate_toy_data(n_spks=n_spks, spk_dur=spk_dur)
em_s, ts_s = split_input_data(em, ts, mc)
embs, _ = get_scale_interpolated_embs(mw, em_s, ts_s)
affinity_mat = getCosAffinityMatrix(embs)
# affinity_mat should not contain any nan element
assert torch.any(torch.isnan(affinity_mat)) == False
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [4, 5, 6])
@pytest.mark.parametrize("target_speaker_index", [0, 1, 2])
@pytest.mark.parametrize("merge_quantity", [2, 3])
def test_embedding_reducer(self, n_spks, target_speaker_index, merge_quantity):
em, ts, mc, mw, spk_ts, gt = generate_toy_data(n_spks=n_spks, spk_dur=10)
em_s, ts_s = split_input_data(em, ts, mc)
merged_embs, merged_clus_labels, _ = run_reducer(
pre_embs=em_s[-1], target_spk_idx=target_speaker_index, merge_quantity=merge_quantity, pre_clus_labels=gt,
)
assert (torch.sum(gt == target_speaker_index).item() - merge_quantity) == merged_clus_labels.shape[0]
@pytest.mark.unit
@pytest.mark.parametrize("ntbr", [3])
@pytest.mark.parametrize("pcl", [torch.tensor([0] * 70 + [1] * 32)])
@pytest.mark.parametrize("mspb", [25])
def test_merge_scheduler_2clus(self, ntbr, pcl, mspb):
class_target_vol = get_merge_quantity(num_to_be_removed=ntbr, pre_clus_labels=pcl, min_count_per_cluster=mspb,)
assert all(class_target_vol == torch.tensor([3, 0]))
@pytest.mark.unit
@pytest.mark.parametrize("ntbr", [3])
@pytest.mark.parametrize("pcl", [torch.tensor([0] * 80 + [1] * 35 + [2] * 32)])
@pytest.mark.parametrize("mspb", [0, 25])
def test_merge_scheduler_3clus(self, ntbr, pcl, mspb):
class_target_vol = get_merge_quantity(num_to_be_removed=ntbr, pre_clus_labels=pcl, min_count_per_cluster=mspb,)
assert all(class_target_vol == torch.tensor([3, 0, 0]))
@pytest.mark.unit
@pytest.mark.parametrize("ntbr", [132 - 45])
@pytest.mark.parametrize("pcl", [torch.tensor([2] * 70 + [0] * 32 + [1] * 27 + [3] * 3)])
@pytest.mark.parametrize("mspb", [3, 10])
def test_merge_scheduler_4clus_shuff(self, ntbr, pcl, mspb):
class_target_vol = get_merge_quantity(num_to_be_removed=ntbr, pre_clus_labels=pcl, min_count_per_cluster=mspb,)
assert all(class_target_vol == torch.tensor([18, 13, 56, 0]))
@pytest.mark.unit
@pytest.mark.parametrize("ntbr", [3])
@pytest.mark.parametrize("pcl", [torch.tensor([0] * 5 + [1] * 4 + [2] * 3)])
@pytest.mark.parametrize("mspb", [0, 2])
def test_merge_scheduler_3clus_small(self, ntbr, pcl, mspb):
class_target_vol = get_merge_quantity(num_to_be_removed=ntbr, pre_clus_labels=pcl, min_count_per_cluster=mspb,)
assert all(class_target_vol == torch.tensor([2, 1, 0]))
@pytest.mark.unit
@pytest.mark.parametrize("ntbr", [2])
@pytest.mark.parametrize("pcl", [torch.tensor([0] * 7 + [1] * 5 + [2] * 3 + [3] * 5)])
@pytest.mark.parametrize("mspb", [2])
def test_merge_scheduler_3clus_repeat(self, ntbr, pcl, mspb):
class_target_vol = get_merge_quantity(num_to_be_removed=ntbr, pre_clus_labels=pcl, min_count_per_cluster=mspb,)
assert all(class_target_vol == torch.tensor([2, 0, 0, 0]))
class TestClassExport:
@pytest.mark.unit
def test_online_segmentor_class_export(self):
_OnlineSegmentor = torch.jit.script(OnlineSegmentor)
online_segmentor = _OnlineSegmentor(sample_rate=16000)
assert isinstance(online_segmentor, OnlineSegmentor)
@pytest.mark.unit
def test_online_segmentor_instance_export(self):
online_segmentor = OnlineSegmentor(sample_rate=16000)
online_segmentor = torch.jit.script(online_segmentor)
assert isinstance(online_segmentor, torch.jit._script.RecursiveScriptClass)
@pytest.mark.unit
def test_online_speaker_clustering_instance_export(self):
online_clus = OnlineSpeakerClustering(
max_num_speakers=8,
max_rp_threshold=0.15,
sparse_search_volume=30,
history_buffer_size=150,
current_buffer_size=150,
cuda=True,
)
online_clus = torch.jit.script(online_clus)
assert isinstance(online_clus, torch.jit._script.RecursiveScriptClass)
@pytest.mark.unit
def test_offline_speaker_clustering_instance_export(self):
offline_speaker_clustering = SpeakerClustering(maj_vote_spk_count=False, min_samples_for_nmesc=0, cuda=True)
offline_speaker_clustering = torch.jit.script(offline_speaker_clustering)
assert isinstance(offline_speaker_clustering, torch.jit._script.RecursiveScriptClass)
class TestDiarizationSegmentationUtils:
"""
Test segmentation util functions
"""
@pytest.mark.unit
@pytest.mark.parametrize(
"intervals",
[
[[1, 4], [2, 6], [8, 10], [15, 18]],
[[8, 10], [15, 18], [2, 6], [1, 3]],
[[8, 10], [15, 18], [2, 6], [1, 3], [3, 5]],
[[8, 10], [8, 8], [15, 18], [2, 6], [1, 6], [2, 4]],
],
)
@pytest.mark.parametrize("target", [[[1, 6], [8, 10], [15, 18]]])
def test_merge_int_intervals_ex1(self, intervals, target):
merged = merge_int_intervals(intervals)
assert check_range_values(target, merged)
@pytest.mark.unit
@pytest.mark.parametrize(
"intervals",
[
[[6, 8], [0, 9], [2, 4], [4, 7]],
[[0, 9], [6, 8], [4, 7], [2, 4]],
[[0, 4], [0, 0], [4, 9], [2, 4]],
[[6, 8], [2, 8], [0, 3], [3, 4], [4, 5], [5, 9]],
],
)
@pytest.mark.parametrize("target", [[[0, 9]]])
def test_merge_int_intervals_ex2(self, intervals, target):
merged = merge_int_intervals(intervals)
assert check_range_values(target, merged)
@pytest.mark.unit
@pytest.mark.parametrize("intervals", [[[0, 1], [1, 9]], [[0, 0], [0, 9]], [[0, 9], [0, 9]]])
@pytest.mark.parametrize("target", [[[0, 9]]])
def test_merge_int_intervals_edge_test(self, intervals, target):
merged = merge_int_intervals(intervals)
assert check_range_values(target, merged)
@pytest.mark.unit
@pytest.mark.parametrize("rangeA", [[1.0, 2.0]])
@pytest.mark.parametrize("rangeB", [[0.5, 1.5], [0.9999, 1.0001]])
def test_is_overlap_true(self, rangeA, rangeB):
assert is_overlap(rangeA, rangeB)
@pytest.mark.unit
@pytest.mark.parametrize("rangeA", [[1.0, 2.0]])
@pytest.mark.parametrize("rangeB", [[2.0, 2.5], [-1.0, 1.00]])
def test_is_overlap_false(self, rangeA, rangeB):
assert not is_overlap(rangeA, rangeB)
@pytest.mark.unit
@pytest.mark.parametrize("x", [1.0, 2.3456])
@pytest.mark.parametrize("decimals", [1, 2, 3, 4])
def test_fl2int(self, x, decimals):
assert fl2int(x, decimals) == round(x * 10 ** decimals, 0)
@pytest.mark.unit
@pytest.mark.parametrize("x", [1234])
@pytest.mark.parametrize("decimals", [1, 2, 3, 4,])
def test_int2fl(self, x, decimals):
assert abs(int2fl(x, decimals) - round(x / (10 ** decimals), decimals)) < (10 ** -(decimals + 1))
@pytest.mark.unit
def test_merge_float_intervals_edge_margin_test(self):
intervals = [[0.0, 1.0], [1.0, 2.0]]
target_0 = [[0.0, 2.0]]
merged_0 = merge_float_intervals(intervals, margin=0)
assert check_range_values(target_0, merged_0)
target_1 = [[0.0, 1.0], [1.0, 2.0]]
merged_1 = merge_float_intervals(intervals, margin=1)
assert check_range_values(target_1, merged_1)
target_2 = [[0.0, 1.0], [1.0, 2.0]]
merged_2 = merge_float_intervals(intervals, margin=2)
assert check_range_values(target_2, merged_2)
@pytest.mark.unit
@pytest.mark.parametrize(
"intervals",
[
[[0.25, 1.7], [1.5, 3.0], [2.8, 5.0], [5.5, 10.0]],
[[0.25, 5.0], [5.5, 10.0], [1.5, 3.5]],
[[5.5, 8.05], [8.0, 10.0], [0.25, 5.0]],
[[0.25, 3.0], [1.5, 3.0], [5.5, 10.0], [2.8, 5.0]],
[[0.25, 1.7], [1.5, 3.0], [2.8, 5.0], [5.5, 10.0]],
],
)
@pytest.mark.parametrize("target", [[[0.25, 5.0], [5.5, 10.0]]])
def test_merge_float_overlaps(self, intervals, target):
merged = merge_float_intervals(intervals)
assert check_range_values(target, merged)
@pytest.mark.unit
def test_get_speech_labels_for_update(self):
frame_start = 3.0
buffer_end = 6.0
cumulative_speech_labels = torch.tensor([[0.0000, 3.7600]])
vad_timestamps = torch.tensor([[0.9600, 4.8400]])
cursor_for_old_segments = 1.0
speech_labels_for_update, cumulative_speech_labels = get_speech_labels_for_update(
frame_start, buffer_end, cumulative_speech_labels, vad_timestamps, cursor_for_old_segments,
)
assert (speech_labels_for_update - torch.tensor([[1.0000, 3.7600]])).sum() < 1e-8
assert (cumulative_speech_labels - torch.tensor([[0.9600, 4.8400]])).sum() < 1e-8
# Check that the ranges do not contain faulty values
assert check_ranges(speech_labels_for_update)
assert check_ranges(cumulative_speech_labels)
@pytest.mark.unit
def test_get_online_subsegments_from_buffer(self):
torch.manual_seed(0)
sample_rate = 16000
speech_labels_for_update = torch.Tensor([[0.0000, 3.7600]])
audio_buffer = torch.randn(5 * sample_rate)
segment_indexes = []
window = 2.0
shift = 1.0
slice_length = int(window * sample_rate)
range_target = [[0.0, 2.0], [1.0, 3.0], [2.0, 3.76]]
sigs_list, sig_rangel_list, sig_indexes = get_online_subsegments_from_buffer(
buffer_start=0.0,
buffer_end=5.0,
sample_rate=sample_rate,
speech_labels_for_update=speech_labels_for_update,
audio_buffer=audio_buffer,
segment_indexes=segment_indexes,
window=window,
shift=shift,
)
assert check_range_values(target=range_target, source=sig_rangel_list)
for k, rg in enumerate(sig_rangel_list):
signal = get_target_sig(audio_buffer, rg[0], rg[1], slice_length, sample_rate)
if len(signal) < int(window * sample_rate):
signal = repeat_signal(signal, len(signal), slice_length)
assert len(signal) == int(slice_length), "Length mismatch"
assert (np.abs(signal - sigs_list[k])).sum() < 1e-8, "Audio stream mismatch"
assert (torch.tensor(sig_indexes) - torch.arange(len(range_target))).sum() < 1e-8, "Segment index mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("frame_start", [3.0])
@pytest.mark.parametrize("segment_range_ts", [[[0.0, 2.0]]])
@pytest.mark.parametrize("gt_cursor_for_old_segments", [3.0])
@pytest.mark.parametrize("gt_cursor_index", [1])
def test_get_new_cursor_for_update_mulsegs_ex1(
self, frame_start, segment_range_ts, gt_cursor_for_old_segments, gt_cursor_index
):
cursor_for_old_segments, cursor_index = get_new_cursor_for_update(frame_start, segment_range_ts)
assert cursor_for_old_segments == gt_cursor_for_old_segments
assert cursor_index == gt_cursor_index
@pytest.mark.unit
@pytest.mark.parametrize("target_range", [[1.0, 4.0]])
@pytest.mark.parametrize(
"source_range_list", [[[2.0, 3.0], [3.0, 4.0]], [[0.0, 2.0], [3.0, 5.0]], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]]
)
def get_sub_range_list(self, target_range, source_range_list):
sub_range_list = get_sub_range_list(target_range, source_range_list)
assert sub_range_list == [[2.0, 3.0], [3.0, 4.0]]
@pytest.mark.unit
@pytest.mark.parametrize("source_range_list", [[[0.0, 2.0]], [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]])
def test_tensor_to_list(self, source_range_list):
a_range_tensor = torch.tensor(source_range_list)
converted_list = tensor_to_list(a_range_tensor)
assert source_range_list == converted_list
@pytest.mark.unit
@pytest.mark.parametrize(
"buffer_start, buffer_end, subsegments, ind_offset, window, sample_rate",
[(0.0, 2.0, [[0.5, 1.0], [1.5, 2.0]], 0, 0.1, 16000), (0.0, 5.0, [[0.5, 2.5], [2.7, 5.0]], 0, 1.0, 16000),],
)
def test_get_online_segments_from_slices(
self, buffer_start, buffer_end, subsegments, ind_offset, window, sample_rate
):
sig = torch.randn(int(sample_rate * buffer_end))
ind_offset, sigs_list, sig_rangel_list, sig_indexes = get_online_segments_from_slices(
sig, buffer_start, buffer_end, subsegments, ind_offset, window, sample_rate
)
assert ind_offset == 2
assert len(sigs_list) == 2
assert len(sig_rangel_list) == 2
assert len(sig_indexes) == 2
class TestClusteringUtilFunctions:
@pytest.mark.parametrize("p_value", [1, 5, 9])
@pytest.mark.parametrize("N", [9, 20])
@pytest.mark.parametrize("mask_method", ['binary', 'sigmoid', 'drop'])
def test_get_k_neighbors_connections(self, p_value: int, N: int, mask_method: str, seed=0):
torch.manual_seed(seed)
random_mat = torch.rand(N, N)
affinity_mat = 0.5 * (random_mat + random_mat.T)
affinity_mat = affinity_mat / torch.max(affinity_mat)
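# Per the assertions below: 'binary' masking leaves exactly p_value unit entries per column,
# while 'sigmoid' and 'drop' keep the per-column sum at or below p_value.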
binarized_affinity_mat = getKneighborsConnections(affinity_mat, p_value, mask_method)
if mask_method == 'binary':
assert all(binarized_affinity_mat.sum(dim=0) == float(p_value))
elif mask_method == 'sigmoid':
assert all(binarized_affinity_mat.sum(dim=0) <= float(p_value))
elif mask_method == 'drop':
assert all(binarized_affinity_mat.sum(dim=0) <= float(p_value))
class TestSpeakerClustering:
"""
Test speaker clustering module
"""
@pytest.mark.unit
@pytest.mark.parametrize("cuda", [True, False])
def test_offline_clus_script_save_load(self, cuda):
exported_filename = 'speaker_clustering_script.pt'
speaker_clustering_python = SpeakerClustering(maj_vote_spk_count=False, cuda=cuda)
speaker_clustering_scripted_source = torch.jit.script(speaker_clustering_python)
torch.jit.save(speaker_clustering_scripted_source, exported_filename)
assert os.path.exists(exported_filename)
os.remove(exported_filename)
assert not os.path.exists(exported_filename)
@pytest.mark.unit
@pytest.mark.parametrize("cuda", [True, False])
def test_online_clus_script_save_load(self, cuda):
exported_filename = 'speaker_clustering_script.pt'
speaker_clustering_python = OnlineSpeakerClustering(
max_num_speakers=8,
max_rp_threshold=0.15,
sparse_search_volume=30,
history_buffer_size=150,
current_buffer_size=150,
cuda=cuda,
)
speaker_clustering_scripted_source = torch.jit.script(speaker_clustering_python)
torch.jit.save(speaker_clustering_scripted_source, exported_filename)
assert os.path.exists(exported_filename)
os.remove(exported_filename)
assert not os.path.exists(exported_filename)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [1, 2, 3, 4, 5, 6, 7])
@pytest.mark.parametrize("total_sec, SSV, perturb_sigma, seed", [(30, 10, 0.1, 0)])
@pytest.mark.parametrize("jit_script", [False, True])
def test_offline_speaker_clustering(self, n_spks, total_sec, SSV, perturb_sigma, seed, jit_script, cuda=True):
spk_dur = total_sec / n_spks
em, ts, mc, mw, spk_ts, gt = generate_toy_data(
n_spks=n_spks, spk_dur=spk_dur, perturb_sigma=perturb_sigma, torch_seed=seed
)
offline_speaker_clustering = SpeakerClustering(maj_vote_spk_count=False, cuda=cuda)
assert isinstance(offline_speaker_clustering, SpeakerClustering)
if jit_script:
offline_speaker_clustering = torch.jit.script(offline_speaker_clustering)
Y_out = offline_speaker_clustering.forward_infer(
embeddings_in_scales=em,
timestamps_in_scales=ts,
multiscale_segment_counts=mc,
multiscale_weights=mw,
oracle_num_speakers=-1,
max_num_speakers=8,
enhanced_count_thres=40,
sparse_search_volume=SSV,
max_rp_threshold=0.15,
fixed_thres=-1.0,
)
permuted_Y = stitch_cluster_labels(Y_old=gt, Y_new=Y_out)
permuted_Y = permuted_Y.to(gt.device)
# mc[-1] is the number of base scale segments
assert len(set(permuted_Y.tolist())) == n_spks
assert Y_out.shape[0] == mc[-1]
assert all(permuted_Y == gt)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [1, 2, 3, 4, 5, 6, 7])
@pytest.mark.parametrize("total_sec, SSV, perturb_sigma, seed", [(30, 10, 0.1, 0)])
@pytest.mark.parametrize("jit_script", [False, True])
def test_offline_speaker_clustering_cpu(self, n_spks, total_sec, SSV, perturb_sigma, seed, jit_script, cuda=False):
self.test_offline_speaker_clustering(n_spks, total_sec, SSV, perturb_sigma, seed, jit_script, cuda=cuda)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [1])
@pytest.mark.parametrize("spk_dur", [0.25, 0.5, 0.75, 1, 1.5, 2])
@pytest.mark.parametrize("SSV, enhanced_count_thres, min_samples_for_nmesc", [(5, 40, 6)])
@pytest.mark.parametrize("seed", [0])
def test_offline_speaker_clustering_very_short_cpu(
self, n_spks, spk_dur, SSV, enhanced_count_thres, min_samples_for_nmesc, seed,
):
em, ts, mc, mw, spk_ts, gt = generate_toy_data(
n_spks=n_spks, spk_dur=spk_dur, perturb_sigma=0.1, torch_seed=seed
)
offline_speaker_clustering = SpeakerClustering(maj_vote_spk_count=False, min_samples_for_nmesc=0, cuda=False)
assert isinstance(offline_speaker_clustering, SpeakerClustering)
Y_out = offline_speaker_clustering.forward_infer(
embeddings_in_scales=em,
timestamps_in_scales=ts,
multiscale_segment_counts=mc,
multiscale_weights=mw,
oracle_num_speakers=-1,
max_num_speakers=8,
enhanced_count_thres=enhanced_count_thres,
sparse_search_volume=SSV,
max_rp_threshold=0.15,
fixed_thres=-1.0,
)
permuted_Y = stitch_cluster_labels(Y_old=gt, Y_new=Y_out)
permuted_Y = permuted_Y.to(gt.device)
# mc[-1] is the number of base scale segments
assert len(set(permuted_Y.tolist())) == n_spks
assert Y_out.shape[0] == mc[-1]
assert all(permuted_Y == gt)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
@pytest.mark.parametrize("spk_dur", [0.25, 0.5, 0.75, 1, 2, 4])
@pytest.mark.parametrize("n_spks, SSV, enhanced_count_thres, min_samples_for_nmesc", [(1, 5, 40, 6)])
@pytest.mark.parametrize("seed", [0])
def test_offline_speaker_clustering_very_short_gpu(
self, n_spks, spk_dur, SSV, enhanced_count_thres, min_samples_for_nmesc, seed
):
em, ts, mc, mw, spk_ts, gt = generate_toy_data(
n_spks=n_spks, spk_dur=spk_dur, perturb_sigma=0.1, torch_seed=seed
)
offline_speaker_clustering = SpeakerClustering(maj_vote_spk_count=False, min_samples_for_nmesc=0, cuda=True)
assert isinstance(offline_speaker_clustering, SpeakerClustering)
Y_out = offline_speaker_clustering.forward_infer(
embeddings_in_scales=em,
timestamps_in_scales=ts,
multiscale_segment_counts=mc,
multiscale_weights=mw,
oracle_num_speakers=-1,
max_num_speakers=8,
enhanced_count_thres=enhanced_count_thres,
sparse_search_volume=SSV,
max_rp_threshold=0.15,
fixed_thres=-1.0,
)
permuted_Y = stitch_cluster_labels(Y_old=gt, Y_new=Y_out)
permuted_Y = permuted_Y.to(gt.device)
# mc[-1] is the number of base scale segments
assert Y_out.shape[0] == mc[-1]
assert all(permuted_Y == gt)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
@pytest.mark.parametrize("n_spks", [1, 2, 3])
@pytest.mark.parametrize("total_sec, buffer_size, sigma", [(30, 30, 0.1)])
@pytest.mark.parametrize("seed", [0])
@pytest.mark.parametrize("jit_script", [False, True])
def test_online_speaker_clustering(self, n_spks, total_sec, buffer_size, sigma, seed, jit_script, cuda=True):
step_per_frame = 2
spk_dur = total_sec / n_spks
em, ts, mc, _, _, gt = generate_toy_data(n_spks, spk_dur=spk_dur, perturb_sigma=sigma, torch_seed=seed)
em_s, ts_s = split_input_data(em, ts, mc)
emb_gen = em_s[-1]
segment_indexes = ts_s[-1]
if cuda:
device = torch.cuda.current_device()
emb_gen, segment_indexes = emb_gen.to(device), segment_indexes.to(device)
history_buffer_size = buffer_size
current_buffer_size = buffer_size
online_clus = OnlineSpeakerClustering(
max_num_speakers=8,
max_rp_threshold=0.15,
sparse_search_volume=30,
history_buffer_size=history_buffer_size,
current_buffer_size=current_buffer_size,
cuda=cuda,
)
if jit_script:
online_clus = torch.jit.script(online_clus)
n_frames = int(emb_gen.shape[0] / step_per_frame)
evaluation_list = []
# Simulate online speaker clustering
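# Each iteration appends step_per_frame new base-scale embeddings and re-clusters the whole
# buffer, mimicking streaming input.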
for frame_index in range(n_frames):
curr_emb = emb_gen[0 : (frame_index + 1) * step_per_frame]
base_segment_indexes = torch.arange(curr_emb.shape[0]).to(curr_emb.device)
# Check history_buffer_size and history labels
assert (
online_clus.history_embedding_buffer_emb.shape[0] <= history_buffer_size
), "History buffer size error"
assert (
online_clus.history_embedding_buffer_emb.shape[0]
== online_clus.history_embedding_buffer_label.shape[0]
)
# Call clustering function
merged_clus_labels = online_clus.forward_infer(
curr_emb=curr_emb, base_segment_indexes=base_segment_indexes, frame_index=frame_index, cuda=cuda
)
# Check that cluster labels cover every segment seen so far
assert len(merged_clus_labels) == (frame_index + 1) * step_per_frame
# Resolve permutation issue by using stitch_cluster_labels function
merged_clus_labels = merged_clus_labels.cpu()
merged_clus_labels = stitch_cluster_labels(Y_old=gt[: len(merged_clus_labels)], Y_new=merged_clus_labels)
evaluation_list.extend(list(merged_clus_labels == gt[: len(merged_clus_labels)]))
assert online_clus.is_online
cumul_label_acc = sum(evaluation_list) / len(evaluation_list)
assert cumul_label_acc > 0.9
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
@pytest.mark.parametrize("n_spks, total_sec, buffer_size, sigma, seed", [(3, 30, 30, 0.1, 0)])
@pytest.mark.parametrize("jit_script", [False, True])
def test_online_speaker_clustering_cpu(self, n_spks, total_sec, buffer_size, sigma, seed, jit_script, cuda=False):
self.test_online_speaker_clustering(n_spks, total_sec, buffer_size, sigma, seed, jit_script, cuda)
class TestLinearSumAssignmentAlgorithm:
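# Linear sum assignment: pick one entry per row and column of a cost matrix so the total cost
# is minimized. The torch-scriptable NeMo solver is compared against scipy's reference below.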
@pytest.mark.unit
def test_lsa_solver_export_test(self):
cost_matrix = torch.randint(0, 10, (3, 3))
solver = LinearSumAssignmentSolver(cost_matrix)
solver = torch.jit.script(solver)
assert isinstance(solver, torch.jit._script.RecursiveScriptClass)
@pytest.mark.unit
@pytest.mark.parametrize(
"cost_matrix",
[torch.tensor([[7, 6, 2, 9, 2], [6, 2, 1, 3, 9], [5, 6, 8, 9, 5], [6, 8, 5, 8, 6], [9, 5, 6, 4, 7]])],
)
def test_linear_sum_assignment_algorithm_cost_matrix(self, cost_matrix):
"""
Test the linear sum assignment algorithm with a cost matrix
Compare with the scipy implementation and make sure the final cost is the same.
NOTE: There could be multiple solutions with the same cost in linear sum assignment problem.
This test only checks if the cost is the same.
"""
row_ind_nm, col_ind_nm = nemo_linear_sum_assignment(cost_matrix)
row_ind_sc, col_ind_sc = scipy_linear_sum_assignment(cost_matrix.cpu().numpy())
cost_nm = sum(cost_matrix[row_ind_nm, col_ind_nm])
cost_sc = sum(cost_matrix[row_ind_sc, col_ind_sc])
assert cost_nm == cost_sc
@pytest.mark.unit
@pytest.mark.parametrize("seed", [0, 1])
@pytest.mark.parametrize("mat_size", [1, 2, 4, 8])
def test_linear_sum_assignment_algorithm_random_matrix(self, seed, mat_size):
torch.manual_seed(seed)
cost_matrix = torch.randint(0, 10, (mat_size, mat_size))
self.test_linear_sum_assignment_algorithm_cost_matrix(cost_matrix)
| NeMo-main | tests/collections/asr/test_diar_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
import pytest
import torch.nn as nn
from omegaconf import DictConfig
from nemo.collections.asr.models import ASRModel, EncDecCTCModelBPE, EncDecRNNTBPEModel
from nemo.collections.asr.models.hybrid_asr_tts_models import ASRWithTTSModel
from nemo.collections.asr.parts.submodules.batchnorm import FusedBatchNorm1d
from nemo.collections.tts.models import FastPitchModel
@pytest.fixture(scope="module")
def fastpitch_model():
model = FastPitchModel.from_pretrained(model_name="tts_en_fastpitch_multispeaker")
return model
@pytest.fixture(scope="module")
def fastpitch_model_path(fastpitch_model, tmp_path_factory):
path = tmp_path_factory.mktemp("tts_models") / "fastpitch.nemo"
fastpitch_model.save_to(path)
return path
@pytest.fixture(scope="module")
def conformer_ctc_bpe_bn_model():
model = EncDecCTCModelBPE.from_pretrained(model_name="stt_en_conformer_ctc_small")
return model
@pytest.fixture(scope="module")
def conformer_ctc_bpe_bn_model_path(conformer_ctc_bpe_bn_model, tmp_path_factory):
path = tmp_path_factory.mktemp("asr_models") / "conformer-ctc-bpe-bn.nemo"
conformer_ctc_bpe_bn_model.save_to(path)
return path
@pytest.fixture(scope="module")
def conformer_rnnt_bpe_bn_model():
model = EncDecRNNTBPEModel.from_pretrained(model_name="stt_en_conformer_transducer_small")
return model
@pytest.fixture(scope="module")
def conformer_rnnt_bpe_bn_model_path(conformer_rnnt_bpe_bn_model, tmp_path_factory):
path = tmp_path_factory.mktemp("asr_models") / "conformer-rnnt-bpe.nemo"
conformer_rnnt_bpe_bn_model.save_to(path)
return path
@pytest.fixture
def asr_model_ctc_bpe_config(test_data_dir):
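# Minimal CTC-BPE config: a single-block ConvASR encoder, a ConvASR decoder, and a WPE tokenizer
# taken from the shared test data directory.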
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 1024,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 1024,
'num_classes': -1,
'vocabulary': None,
}
tokenizer = {'dir': str(Path(test_data_dir) / "asr/tokenizers/an4_wpe_128"), 'type': 'wpe'}
model_config = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'tokenizer': DictConfig(tokenizer),
}
)
return model_config
@pytest.fixture
def asr_tts_ctc_bpe_model(asr_model_ctc_bpe_config, fastpitch_model_path):
model = ASRWithTTSModel.from_asr_config(
asr_cfg=asr_model_ctc_bpe_config, asr_model_type="ctc_bpe", tts_model_path=fastpitch_model_path,
)
return model
class TestASRWithTTSModel:
@pytest.mark.with_downloads
@pytest.mark.unit
def test_from_pretrained_ctc_model(self, fastpitch_model_path, conformer_ctc_bpe_bn_model_path):
model = ASRWithTTSModel.from_pretrained_models(
asr_model_path=conformer_ctc_bpe_bn_model_path, tts_model_path=fastpitch_model_path
)
assert isinstance(model.tts_model, FastPitchModel)
assert isinstance(model.asr_model, EncDecCTCModelBPE)
@pytest.mark.with_downloads
@pytest.mark.unit
def test_from_pretrained_rnnt_model(self, fastpitch_model_path, conformer_rnnt_bpe_bn_model_path):
model = ASRWithTTSModel.from_pretrained_models(
asr_model_path=conformer_rnnt_bpe_bn_model_path, tts_model_path=fastpitch_model_path
)
assert isinstance(model.tts_model, FastPitchModel)
assert isinstance(model.asr_model, EncDecRNNTBPEModel)
@pytest.mark.with_downloads
@pytest.mark.unit
def test_from_asr_config(self, asr_model_ctc_bpe_config, fastpitch_model_path):
model = ASRWithTTSModel.from_asr_config(
asr_cfg=asr_model_ctc_bpe_config, asr_model_type="ctc_bpe", tts_model_path=fastpitch_model_path,
)
assert isinstance(model.tts_model, FastPitchModel)
assert isinstance(model.asr_model, EncDecCTCModelBPE)
@pytest.mark.with_downloads
@pytest.mark.unit
def test_save_restore(self, asr_tts_ctc_bpe_model):
with tempfile.TemporaryDirectory() as tmpdir:
save_path = str(Path(tmpdir) / "model.nemo")
asr_tts_ctc_bpe_model.train()
asr_tts_ctc_bpe_model.save_to(save_path)
restored_model = ASRModel.restore_from(save_path)
assert isinstance(restored_model, ASRWithTTSModel)
assert isinstance(restored_model.tts_model, FastPitchModel)
assert isinstance(restored_model.asr_model, EncDecCTCModelBPE)
@pytest.mark.with_downloads
@pytest.mark.unit
def test_save_restore_asr(self, asr_tts_ctc_bpe_model):
with tempfile.TemporaryDirectory() as tmpdir:
save_path = str(Path(tmpdir) / "asr_model.nemo")
asr_tts_ctc_bpe_model.save_asr_model_to(save_path)
restored_model = ASRModel.restore_from(save_path)
assert isinstance(restored_model, EncDecCTCModelBPE)
@pytest.mark.with_downloads
@pytest.mark.unit
def test_from_pretrained_ctc_model_fused_bn(self, fastpitch_model_path, conformer_ctc_bpe_bn_model_path):
model = ASRWithTTSModel.from_pretrained_models(
asr_model_path=conformer_ctc_bpe_bn_model_path,
tts_model_path=fastpitch_model_path,
asr_model_fuse_bn=True,
)
assert isinstance(model.tts_model, FastPitchModel)
assert isinstance(model.asr_model, EncDecCTCModelBPE)
assert model.asr_model.cfg.encoder.conv_norm_type == "fused_batch_norm"
# test model has fused BatchNorm
has_fused_bn = False
for name, module in model.asr_model.named_modules():
assert not isinstance(module, nn.BatchNorm1d)
has_fused_bn = has_fused_bn or isinstance(module, FusedBatchNorm1d)
assert has_fused_bn, "Fused BatchNorm not found model"
with tempfile.TemporaryDirectory() as tmpdir:
save_path = str(Path(tmpdir) / "asr_tts_model.nemo")
model.save_to(save_path)
# check restored model has fused batchnorm
model = ASRWithTTSModel.restore_from(save_path)
assert model.asr_model.cfg.encoder.conv_norm_type == "fused_batch_norm"
has_fused_bn = False
for name, module in model.asr_model.named_modules():
assert not isinstance(module, nn.BatchNorm1d)
has_fused_bn = has_fused_bn or isinstance(module, FusedBatchNorm1d)
assert has_fused_bn, "Fused BatchNorm not found model"
| NeMo-main | tests/collections/asr/test_hybrid_asr_tts_models.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pytest
import torch
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.metrics.wer import CTCDecoding, CTCDecodingConfig
from nemo.collections.asr.models import EncDecHybridRNNTCTCModel
from nemo.collections.asr.modules import RNNTDecoder, RNNTJoint, SampledRNNTJoint, StatelessTransducerDecoder
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils.config_utils import assert_dataclass_signature_match
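# The Numba-based RNNT loss is usable when either the CPU or the CUDA Numba backend satisfies the
# minimum supported version; Numba-dependent tests are skipped otherwise.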
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
@pytest.fixture()
def hybrid_asr_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
# fmt: off
labels = [' ', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', "'",
]
# fmt: on
model_defaults = {'enc_hidden': 1024, 'pred_hidden': 64}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': model_defaults['pred_hidden'], 'pred_rnn_layers': 1},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 32, 'activation': 'relu'},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 30}}
loss = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
aux_ctc = {
'ctc_loss_weight': 0.3,
'use_cer': False,
'ctc_reduction': 'mean_batch',
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 1024,
'num_classes': len(labels),
'vocabulary': labels,
},
'decoding': DictConfig(CTCDecodingConfig),
}
modelConfig = DictConfig(
{
'labels': ListConfig(labels),
'preprocessor': DictConfig(preprocessor),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'decoding': DictConfig(decoding),
'loss': DictConfig(loss),
'aux_ctc': DictConfig(aux_ctc),
}
)
model_instance = EncDecHybridRNNTCTCModel(cfg=modelConfig)
return model_instance
class TestEncDecHybridRNNTCTCModel:
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_constructor(self, hybrid_asr_model):
hybrid_asr_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = hybrid_asr_model.to_config_dict()
instance2 = EncDecHybridRNNTCTCModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecHybridRNNTCTCModel)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_forward(self, hybrid_asr_model):
hybrid_asr_model = hybrid_asr_model.eval()
hybrid_asr_model.preprocessor.featurizer.dither = 0.0
hybrid_asr_model.preprocessor.featurizer.pad_to = 0
hybrid_asr_model.compute_eval_loss = False
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins, _ = hybrid_asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
logprobs_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch, _ = hybrid_asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logprobs_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_vocab_change(self, hybrid_asr_model):
old_vocab = copy.deepcopy(hybrid_asr_model.joint.vocabulary)
nw1 = hybrid_asr_model.num_weights
hybrid_asr_model.change_vocabulary(new_vocabulary=old_vocab)
# No change
assert nw1 == hybrid_asr_model.num_weights
new_vocab = copy.deepcopy(old_vocab)
new_vocab.append('!')
new_vocab.append('$')
new_vocab.append('@')
hybrid_asr_model.change_vocabulary(new_vocabulary=new_vocab)
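# Three new tokens ('!', '$', '@') were added, so every vocabulary-dependent layer grows by 3 output rows: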
# fully connected + bias
# rnn embedding + joint + bias
pred_embedding = 3 * (hybrid_asr_model.decoder.pred_hidden)
joint_joint = 3 * (hybrid_asr_model.joint.joint_hidden + 1)
ctc_decoder = 3 * (hybrid_asr_model.ctc_decoder._feat_in + 1)
assert hybrid_asr_model.num_weights == (nw1 + (pred_embedding + joint_joint) + ctc_decoder)
assert hybrid_asr_model.ctc_decoder.vocabulary == hybrid_asr_model.joint.vocabulary
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_decoding_change(self, hybrid_asr_model):
assert isinstance(hybrid_asr_model.decoding.decoding, greedy_decode.GreedyBatchedRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'greedy'
new_strategy.greedy = DictConfig({'max_symbols': 10})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, greedy_decode.GreedyRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 1})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 2})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'tsd'
new_strategy.beam = DictConfig({'beam_size': 2})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "tsd"
new_strategy = DictConfig({})
new_strategy.strategy = 'alsd'
new_strategy.beam = DictConfig({'beam_size': 2})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "alsd"
assert hybrid_asr_model.ctc_decoding is not None
assert isinstance(hybrid_asr_model.ctc_decoding, CTCDecoding)
assert hybrid_asr_model.ctc_decoding.cfg.strategy == "greedy"
assert hybrid_asr_model.ctc_decoding.preserve_alignments is False
assert hybrid_asr_model.ctc_decoding.compute_timestamps is False
cfg = CTCDecodingConfig(preserve_alignments=True, compute_timestamps=True)
hybrid_asr_model.change_decoding_strategy(cfg, decoder_type="ctc")
assert hybrid_asr_model.ctc_decoding.preserve_alignments is True
assert hybrid_asr_model.ctc_decoding.compute_timestamps is True
@pytest.mark.unit
def test_GreedyRNNTInferConfig(self):
# confidence_method_cfg is deprecated
IGNORE_ARGS = ['decoder_model', 'joint_model', 'blank_index', 'confidence_method_cfg']
result = assert_dataclass_signature_match(
greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyRNNTInferConfig, ignore_args=IGNORE_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_GreedyBatchedRNNTInferConfig(self):
# confidence_method_cfg is deprecated
IGNORE_ARGS = ['decoder_model', 'joint_model', 'blank_index', 'confidence_method_cfg']
result = assert_dataclass_signature_match(
greedy_decode.GreedyBatchedRNNTInfer, greedy_decode.GreedyBatchedRNNTInferConfig, ignore_args=IGNORE_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_BeamRNNTInferConfig(self):
IGNORE_ARGS = ['decoder_model', 'joint_model', 'blank_index']
result = assert_dataclass_signature_match(
beam_decode.BeamRNNTInfer, beam_decode.BeamRNNTInferConfig, ignore_args=IGNORE_ARGS
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer],
)
def test_greedy_multi_decoding(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
partial_hyp = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
partial_hyp = partial_hyp[0]
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len, partial_hypotheses=partial_hyp)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding_stateless_decoder(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = StatelessTransducerDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer],
)
def test_greedy_multi_decoding_stateless_decoder(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = StatelessTransducerDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
partial_hyp = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
partial_hyp = partial_hyp[0]
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len, partial_hypotheses=partial_hyp)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding_preserve_alignment(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
greedy = greedy_class(
decoder, joint_net, blank_index=len(token_list) - 1, preserve_alignments=True, max_symbols_per_step=5
)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
hyp = greedy(encoder_output=enc_out, encoded_lengths=enc_len)[0][0] # type: rnnt_utils.Hypothesis
assert hyp.alignments is not None
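# With preserve_alignments=True, alignments[t] holds the (log-prob, label) tensor pairs for every
# symbol emitted at encoder frame t, ending with the blank that advances to the next frame.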
for t in range(len(hyp.alignments)):
for u in range(len(hyp.alignments[t])):
logp, label = hyp.alignments[t][u]
assert torch.is_tensor(logp)
assert torch.is_tensor(label)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[
{"search_type": "greedy"},
{"search_type": "default", "score_norm": False, "return_best_hypothesis": False},
{"search_type": "alsd", "alsd_max_target_len": 20, "return_best_hypothesis": False},
{"search_type": "tsd", "tsd_max_sym_exp_per_step": 3, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 2, "maes_expansion_beta": 2, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 3, "maes_expansion_beta": 1, "return_best_hypothesis": False},
],
)
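# Beam search variants exercised here (per NeMo's BeamRNNTInfer): 'default' is label-synchronous beam
# search, 'alsd' is alignment-length synchronous decoding, 'tsd' is time-synchronous decoding, and
# 'maes' is modified adaptive expansion search; 'greedy' degenerates to beam search with beam_size=1.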
def test_beam_decoding(self, beam_config):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
beam_size = 1 if beam_config["search_type"] == "greedy" else 2
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
beam = beam_decode.BeamRNNTInfer(decoder, joint_net, beam_size=beam_size, **beam_config,)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = beam(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[{"search_type": "greedy"}, {"search_type": "default", "score_norm": False, "return_best_hypothesis": False},],
)
def test_beam_decoding_preserve_alignments(self, beam_config):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
beam_size = 1 if beam_config["search_type"] == "greedy" else 2
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=token_list)
beam = beam_decode.BeamRNNTInfer(
decoder, joint_net, beam_size=beam_size, **beam_config, preserve_alignments=True
)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
hyp = beam(encoder_output=enc_out, encoded_lengths=enc_len)[0][0] # type: rnnt_utils.Hypothesis
if isinstance(hyp, rnnt_utils.NBestHypotheses):
hyp = hyp.n_best_hypotheses[0] # select top hypothesis only
assert hyp.alignments is not None
for t in range(len(hyp.alignments)):
for u in range(len(hyp.alignments[t])):
logp, label = hyp.alignments[t][u]
assert torch.is_tensor(logp)
assert torch.is_tensor(label)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"greedy_class", [greedy_decode.GreedyRNNTInfer, greedy_decode.GreedyBatchedRNNTInfer],
)
def test_greedy_decoding_SampledRNNTJoint(self, greedy_class):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = SampledRNNTJoint(jointnet_cfg, vocab_size, n_samples=2, vocabulary=token_list)
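# SampledRNNTJoint restricts the joint computation to n_samples sampled tokens (plus the targets)
# to save memory during training; at inference it should behave like the full joint, so the usual
# greedy decoding path is exercised unchanged here.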
greedy = greedy_class(decoder, joint_net, blank_index=len(token_list) - 1, max_symbols_per_step=5)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = greedy(encoder_output=enc_out, encoded_lengths=enc_len)
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[
{"search_type": "greedy"},
{"search_type": "default", "score_norm": False, "return_best_hypothesis": False},
{"search_type": "alsd", "alsd_max_target_len": 20, "return_best_hypothesis": False},
{"search_type": "tsd", "tsd_max_sym_exp_per_step": 3, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 2, "maes_expansion_beta": 2, "return_best_hypothesis": False},
{"search_type": "maes", "maes_num_steps": 3, "maes_expansion_beta": 1, "return_best_hypothesis": False},
],
)
def test_beam_decoding_SampledRNNTJoint(self, beam_config):
token_list = [" ", "a", "b", "c"]
vocab_size = len(token_list)
beam_size = 1 if beam_config["search_type"] == "greedy" else 2
encoder_output_size = 4
decoder_output_size = 4
joint_output_shape = 4
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
decoder = RNNTDecoder(prednet_cfg, vocab_size)
joint_net = SampledRNNTJoint(jointnet_cfg, vocab_size, n_samples=2, vocabulary=token_list)
beam = beam_decode.BeamRNNTInfer(decoder, joint_net, beam_size=beam_size, **beam_config,)
# (B, D, T)
enc_out = torch.randn(1, encoder_output_size, 30)
enc_len = torch.tensor([30], dtype=torch.int32)
with torch.no_grad():
_ = beam(encoder_output=enc_out, encoded_lengths=enc_len)
| NeMo-main | tests/collections/asr/test_asr_hybrid_rnnt_ctc_model_char.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig
from nemo.collections.asr.models import ASRModel, EncDecCTCModel
def getattr2(obj, attr):
if '.' not in attr:
return getattr(obj, attr)
else:
arr = attr.split('.')
return getattr2(getattr(obj, arr[0]), '.'.join(arr[1:]))
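# e.g. getattr2(model, 'encoder.att_context_size') resolves the dotted path recursively and is
# equivalent to model.encoder.att_context_size.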
class TestASRLocalAttention:
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_forward(self):
asr_model = ASRModel.from_pretrained("stt_en_conformer_ctc_small")
asr_model = asr_model.eval()
audio_len = 16000 * 60 * 30 # 30 minutes at 16 kHz; OOMs without local attention
input_signal_long = torch.randn(size=(1, audio_len), device=asr_model.device)
length_long = torch.tensor([audio_len], device=asr_model.device)
# switch to local attn
asr_model.change_attention_model(self_attention_model="rel_pos_local_attn", att_context_size=(64, 64))
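# With local (windowed) attention each frame attends only to a fixed (left, right) context, so memory
# grows roughly linearly with audio length instead of quadratically as with full self-attention.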
with torch.no_grad():
asr_model.forward(input_signal=input_signal_long, input_signal_length=length_long)
# switch context size only (keep local)
asr_model.change_attention_model(att_context_size=(192, 192))
with torch.no_grad():
asr_model.forward(input_signal=input_signal_long, input_signal_length=length_long)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_change_save_restore(self):
model = ASRModel.from_pretrained("stt_en_conformer_ctc_small")
model.change_attention_model(self_attention_model="rel_pos_local_attn", att_context_size=(64, 64))
attr_for_eq_check = ["encoder.self_attention_model", "encoder.att_context_size"]
with tempfile.TemporaryDirectory() as restore_folder:
with tempfile.TemporaryDirectory() as save_folder:
save_folder_path = save_folder
# Where model will be saved
model_save_path = os.path.join(save_folder, f"{model.__class__.__name__}.nemo")
model.save_to(save_path=model_save_path)
# Where model will be restored from
model_restore_path = os.path.join(restore_folder, f"{model.__class__.__name__}.nemo")
shutil.copy(model_save_path, model_restore_path)
# at this point save_folder should not exist
assert save_folder_path is not None and not os.path.exists(save_folder_path)
assert not os.path.exists(model_save_path)
assert os.path.exists(model_restore_path)
# attempt to restore
model_copy = model.__class__.restore_from(
restore_path=model_restore_path,
map_location=None,
strict=True,
return_config=False,
override_config_path=None,
)
assert model.num_weights == model_copy.num_weights
if attr_for_eq_check is not None and len(attr_for_eq_check) > 0:
for attr in attr_for_eq_check:
assert getattr2(model, attr) == getattr2(model_copy, attr)
@pytest.mark.unit
@pytest.mark.parametrize(
"global_tokens", [0, 1, 4],
)
@pytest.mark.parametrize(
"global_tokens_spacing", [1, 4],
)
def test_train(self, global_tokens, global_tokens_spacing):
preprocessor_config = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
vocabulary = [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
]
encoder_config = {
'_target_': 'nemo.collections.asr.modules.ConformerEncoder',
'feat_in': 64,
'n_layers': 8,
'd_model': 4,
'self_attention_model': 'rel_pos_local_attn',
'att_context_size': [128, 128],
'global_tokens': global_tokens,
'global_tokens_spacing': global_tokens_spacing,
}
decoder_config = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': None,
'num_classes': len(vocabulary),
'vocabulary': vocabulary,
}
model_config = DictConfig(
{
'preprocessor': DictConfig(preprocessor_config),
'encoder': DictConfig(encoder_config),
'decoder': DictConfig(decoder_config),
'optim': {'name': 'adamw'},
}
)
class DummyDataset(torch.utils.data.Dataset):
"""Simply returns a single set of values."""
def __init__(self, values):
self.values = values
def __len__(self):
return 1
def __getitem__(self, idx):
return self.values
input_signal = torch.randn(size=(1, 960000))
input_length = torch.tensor([960000])
target = torch.randint(size=(1, 280), low=0, high=28)
target_length = torch.tensor([280])
asr_model = EncDecCTCModel(cfg=model_config)
asr_model.train()
_ = asr_model.forward(input_signal=input_signal, input_signal_length=input_length)
## Explicitly pass accelerator as cpu, since the default value in PTL >= 2.0 is auto and it picks cuda
## which further causes an error in all reduce at: https://github.com/NVIDIA/NeMo/blob/v1.18.1/nemo/collections/asr/modules/conformer_encoder.py#L462
## and in ConvASREncoder, SqueezeformerEncoder where device is CPU
trainer = pl.Trainer(max_epochs=1, accelerator='cpu')
trainer.fit(
asr_model,
train_dataloaders=torch.utils.data.DataLoader(
DummyDataset([input_signal, input_length, target, target_length]), collate_fn=lambda x: x[0],
),
val_dataloaders=torch.utils.data.DataLoader(
DummyDataset([input_signal, input_length, target, target_length]), collate_fn=lambda x: x[0],
),
)
trainer.test(
asr_model,
dataloaders=torch.utils.data.DataLoader(
DummyDataset([input_signal, input_length, target, target_length]), collate_fn=lambda x: x[0],
),
)
| NeMo-main | tests/collections/asr/test_asr_local_attn.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pytest
import torch
from omegaconf import DictConfig, OmegaConf, open_dict
import nemo.collections.asr as nemo_asr
from nemo.collections.asr.data import audio_to_text
from nemo.collections.asr.metrics.wer import CTCDecoding, CTCDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel, configs
from nemo.utils.config_utils import assert_dataclass_signature_match, update_model_config
@pytest.fixture()
def asr_model():
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 1024,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 1024,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
model_instance = EncDecCTCModel(cfg=modelConfig)
return model_instance
class TestEncDecCTCModel:
@pytest.mark.unit
def test_constructor(self, asr_model):
asr_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecCTCModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecCTCModel)
@pytest.mark.unit
def test_forward(self, asr_model):
asr_model = asr_model.eval()
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
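# Sanity check that the model is batch-size invariant: the same utterances run one-by-one and as a
# batch of 4 should yield numerically identical log-probs.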
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins, _, _ = asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
print(len(logprobs_ins))
logprobs_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch, _, _ = asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logprobs_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.unit
def test_vocab_change(self, asr_model):
old_vocab = copy.deepcopy(asr_model.decoder.vocabulary)
nw1 = asr_model.num_weights
asr_model.change_vocabulary(new_vocabulary=old_vocab)
# No change
assert nw1 == asr_model.num_weights
new_vocab = copy.deepcopy(old_vocab)
new_vocab.append('!')
new_vocab.append('$')
new_vocab.append('@')
asr_model.change_vocabulary(new_vocabulary=new_vocab)
# fully connected + bias
assert asr_model.num_weights == nw1 + 3 * (asr_model.decoder._feat_in + 1)
@pytest.mark.unit
def test_decoding_change(self, asr_model):
assert asr_model.decoding is not None
assert isinstance(asr_model.decoding, CTCDecoding)
assert asr_model.decoding.cfg.strategy == "greedy"
assert asr_model.decoding.preserve_alignments is False
assert asr_model.decoding.compute_timestamps is False
cfg = CTCDecodingConfig(preserve_alignments=True, compute_timestamps=True)
asr_model.change_decoding_strategy(cfg)
assert asr_model.decoding.preserve_alignments is True
assert asr_model.decoding.compute_timestamps is True
@pytest.mark.unit
def test_change_conv_asr_se_context_window(self, asr_model):
old_cfg = copy.deepcopy(asr_model.cfg)
asr_model.change_conv_asr_se_context_window(context_window=32) # 32 * 0.01s context
new_config = asr_model.cfg
assert old_cfg.encoder.jasper[0].se_context_size == -1
assert new_config.encoder.jasper[0].se_context_size == 32
for name, m in asr_model.encoder.named_modules():
if type(m).__name__ == 'SqueezeExcite':
assert m.context_window == 32
@pytest.mark.unit
def test_change_conv_asr_se_context_window_no_config_update(self, asr_model):
old_cfg = copy.deepcopy(asr_model.cfg)
asr_model.change_conv_asr_se_context_window(context_window=32, update_config=False) # 32 * 0.01s context
new_config = asr_model.cfg
assert old_cfg.encoder.jasper[0].se_context_size == -1
assert new_config.encoder.jasper[0].se_context_size == -1 # no change
for name, m in asr_model.encoder.named_modules():
if type(m).__name__ == 'SqueezeExcite':
assert m.context_window == 32
@pytest.mark.unit
def test_dataclass_instantiation(self, asr_model):
model_cfg = configs.EncDecCTCModelConfig()
# Update mandatory values
vocabulary = asr_model.decoder.vocabulary
model_cfg.model.labels = vocabulary
# Update encoder
model_cfg.model.encoder.activation = 'relu'
model_cfg.model.encoder.feat_in = 64
model_cfg.model.encoder.jasper = [
nemo_asr.modules.conv_asr.JasperEncoderConfig(
filters=1024,
repeat=1,
kernel=[1],
stride=[1],
dilation=[1],
dropout=0.0,
residual=False,
se=True,
se_context_size=-1,
)
]
# Update decoder
model_cfg.model.decoder.feat_in = 1024
model_cfg.model.decoder.num_classes = 28
model_cfg.model.decoder.vocabulary = vocabulary
# Construct the model
asr_cfg = OmegaConf.create({'model': asr_model.cfg})
model_cfg_v1 = update_model_config(model_cfg, asr_cfg)
new_model = EncDecCTCModel(cfg=model_cfg_v1.model)
assert new_model.num_weights == asr_model.num_weights
# trainer and exp manager should be there
# assert 'trainer' in model_cfg_v1
# assert 'exp_manager' in model_cfg_v1
# datasets and optim/sched should not be there after ModelPT.update_model_dataclass()
assert 'train_ds' not in model_cfg_v1.model
assert 'validation_ds' not in model_cfg_v1.model
assert 'test_ds' not in model_cfg_v1.model
assert 'optim' not in model_cfg_v1.model
# Construct the model, without dropping additional keys
asr_cfg = OmegaConf.create({'model': asr_model.cfg})
model_cfg_v2 = update_model_config(model_cfg, asr_cfg, drop_missing_subconfigs=False)
# Assert all components are in config
# assert 'trainer' in model_cfg_v2
# assert 'exp_manager' in model_cfg_v2
assert 'train_ds' in model_cfg_v2.model
assert 'validation_ds' in model_cfg_v2.model
assert 'test_ds' in model_cfg_v2.model
assert 'optim' in model_cfg_v2.model
# Remove extra components (optim and sched can be kept without issue)
with open_dict(model_cfg_v2.model):
model_cfg_v2.model.pop('train_ds')
model_cfg_v2.model.pop('validation_ds')
model_cfg_v2.model.pop('test_ds')
new_model = EncDecCTCModel(cfg=model_cfg_v2.model)
assert new_model.num_weights == asr_model.num_weights
# trainer and exp manager should be there
@pytest.mark.unit
def test_ASRDatasetConfig_for_AudioToCharDataset(self):
# ignore some additional arguments as dataclass is generic
IGNORE_ARGS = [
'is_tarred',
'num_workers',
'batch_size',
'tarred_audio_filepaths',
'shuffle',
'pin_memory',
'drop_last',
'tarred_shard_strategy',
'shard_manifests',
'shuffle_n',
'use_start_end_token',
'bucketing_batch_size',
'bucketing_strategy',
'bucketing_weights',
'channel_selector',
]
REMAP_ARGS = {'trim_silence': 'trim'}
result = assert_dataclass_signature_match(
audio_to_text.AudioToCharDataset, configs.ASRDatasetConfig, ignore_args=IGNORE_ARGS, remap_args=REMAP_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_ASRDatasetConfig_for_TarredAudioToCharDataset(self):
# ignore some additional arguments as dataclass is generic
IGNORE_ARGS = [
'is_tarred',
'num_workers',
'batch_size',
'shuffle',
'pin_memory',
'drop_last',
'global_rank',
'world_size',
'use_start_end_token',
'bucketing_batch_size',
'bucketing_strategy',
'bucketing_weights',
'max_utts',
]
REMAP_ARGS = {
'trim_silence': 'trim',
'tarred_audio_filepaths': 'audio_tar_filepaths',
'tarred_shard_strategy': 'shard_strategy',
'shuffle_n': 'shuffle',
}
result = assert_dataclass_signature_match(
audio_to_text.TarredAudioToCharDataset,
configs.ASRDatasetConfig,
ignore_args=IGNORE_ARGS,
remap_args=REMAP_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
| NeMo-main | tests/collections/asr/test_asr_ctcencdec_model.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from nemo.collections.asr.models.msdd_models import NeuralDiarizer
class TestNeuralDiarizerInference:
@pytest.mark.unit
@pytest.mark.parametrize(
"device",
[
torch.device("cpu"),
pytest.param(
torch.device("cuda"),
marks=pytest.mark.skipif(not torch.cuda.is_available(), reason='CUDA required for test.',),
),
],
)
@pytest.mark.parametrize("num_speakers", [None, 1])
@pytest.mark.parametrize("max_num_speakers", [4])
def test_diar_inference(self, tmpdir, test_data_dir, device, num_speakers, max_num_speakers):
"""
Test to ensure diarization inference works correctly.
- Ensures multiple audio files can be diarized sequentially
- Ensures both CPU/CUDA is supported
- Ensures that max_speakers and num_speakers are set correctly
- Ensures temporary directory is emptied at the end of diarization
- Sanity check to ensure outputs from diarization are reasonable
"""
audio_filenames = ['an22-flrp-b.wav', 'an90-fbbh-b.wav']
audio_paths = [os.path.join(test_data_dir, "asr", "train", "an4", "wav", fp) for fp in audio_filenames]
diarizer = NeuralDiarizer.from_pretrained(model_name='diar_msdd_telephonic').to(device)
out_dir = os.path.join(tmpdir, 'diarize_inference/')
assert diarizer.msdd_model.device.type == device.type
assert diarizer._speaker_model.device.type == device.type
for audio_path in audio_paths:
annotation = diarizer(
audio_path, num_speakers=num_speakers, max_speakers=max_num_speakers, out_dir=out_dir
)
# assert max speakers has been set up correctly
assert diarizer.clustering_embedding.clus_diar_model._cluster_params.max_num_speakers == max_num_speakers
if num_speakers:
assert diarizer._cfg.diarizer.clustering.parameters.oracle_num_speakers
# assert all temporary files are cleaned up
assert len(os.listdir(out_dir)) == 0
# assert only 1 speaker & segment
assert len(annotation.labels()) == 1
assert len(list(annotation.itersegments())) == 1
| NeMo-main | tests/collections/asr/test_diar_neural_inference.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
from typing import List, Type, Union
import numpy as np
import pytest
from numpy.random import default_rng
from nemo.collections.asr.data.data_simulation import (
ArrayGeometry,
check_angle,
convert_placement_to_range,
convert_rir_to_multichannel,
simulate_room_mix,
wrap_to_180,
)
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
class TestDataSimulationUtils:
@pytest.mark.unit
def test_check_angle(self):
"""Test angle checks.
"""
num_examples = 100
random = default_rng()
assert check_angle('azimuth', random.uniform(low=-180, high=180, size=num_examples)) == True
assert check_angle('elevation', random.uniform(low=-90, high=90, size=num_examples)) == True
assert check_angle('yaw', random.uniform(low=-180, high=180, size=num_examples)) == True
assert check_angle('pitch', random.uniform(low=-90, high=90, size=num_examples)) == True
assert check_angle('roll', random.uniform(low=-180, high=180, size=num_examples)) == True
with pytest.raises(ValueError):
check_angle('azimuth', [-200, 200])
with pytest.raises(ValueError):
check_angle('elevation', [-100, 100])
with pytest.raises(ValueError):
check_angle('yaw', [-200, 200])
with pytest.raises(ValueError):
check_angle('pitch', [-200, 200])
with pytest.raises(ValueError):
check_angle('roll', [-200, 200])
@pytest.mark.unit
def test_wrap_to_180(self):
"""Test wrap.
"""
test_cases = []
test_cases.append({'angle': 0, 'wrapped': 0})
test_cases.append({'angle': 45, 'wrapped': 45})
test_cases.append({'angle': -30, 'wrapped': -30})
test_cases.append({'angle': 179, 'wrapped': 179})
test_cases.append({'angle': -179, 'wrapped': -179})
test_cases.append({'angle': 181, 'wrapped': -179})
test_cases.append({'angle': -181, 'wrapped': 179})
test_cases.append({'angle': 270, 'wrapped': -90})
test_cases.append({'angle': -270, 'wrapped': 90})
test_cases.append({'angle': 359, 'wrapped': -1})
test_cases.append({'angle': 360, 'wrapped': 0})
for test_case in test_cases:
assert wrap_to_180(test_case['angle']) == test_case['wrapped']
@pytest.mark.unit
def test_placement_range(self):
"""Test placement range conversion.
"""
# Setup 1:
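# For each axis the admissible placement range is [min_to_wall + radius, dim - min_to_wall - radius],
# further intersected with any explicit x/y/height constraint given in `placement`.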
test_cases = []
test_cases.append(
{
'room_dim': [3, 4, 5],
'placement': {'x': None, 'y': None, 'height': None, 'min_to_wall': 0},
'object_radius': 0,
'expected_range': np.array([[0, 3], [0, 4], [0, 5]]),
}
)
test_cases.append(
{
'room_dim': [3, 4, 5],
'placement': {'x': None, 'y': None, 'height': None, 'min_to_wall': 0},
'object_radius': 0.1,
'expected_range': np.array([[0.1, 2.9], [0.1, 3.9], [0.1, 4.9]]),
}
)
test_cases.append(
{
'room_dim': [3, 4, 5],
'placement': {'x': None, 'y': None, 'height': None, 'min_to_wall': 0.5},
'object_radius': 0.1,
'expected_range': np.array([[0.6, 2.4], [0.6, 3.4], [0.6, 4.4]]),
}
)
test_cases.append(
{
'room_dim': [3, 4, 5],
'placement': {'x': [1, 3], 'y': [0.3, 3.0], 'height': [1.5, 1.8], 'min_to_wall': 0.5},
'object_radius': 0.1,
'expected_range': np.array([[1, 2.4], [0.6, 3.0], [1.5, 1.8]]),
}
)
test_cases.append(
{
'room_dim': [3, 4, 5],
'placement': {'x': 2, 'y': 3, 'height': [1.5, 1.8], 'min_to_wall': 0.5},
'object_radius': 0.1,
'expected_range': np.array([[2, 2], [3, 3], [1.5, 1.8]]),
}
)
for test_case in test_cases:
placement_range = convert_placement_to_range(
test_case['placement'], test_case['room_dim'], test_case['object_radius']
)
assert np.all(placement_range == test_case['expected_range'])
with pytest.raises(ValueError):
# fail because of negative x
convert_placement_to_range(
**{
'room_dim': [3, 4, 5],
'placement': {'x': -1, 'y': None, 'height': None, 'min_to_wall': 0},
'object_radius': 0.1,
}
)
with pytest.raises(ValueError):
# fail because of negative min_to_wall
convert_placement_to_range(
**{
'room_dim': [3, 4, 5],
'placement': {'x': None, 'y': None, 'height': None, 'min_to_wall': -1},
'object_radius': 0.1,
}
)
with pytest.raises(ValueError):
# fail because height range doesn't have exactly two elements
convert_placement_to_range(
**{
'room_dim': [3, 4, 5],
'placement': {'x': None, 'y': None, 'height': [1], 'min_to_wall': 0},
'object_radius': 0.1,
}
)
with pytest.raises(ValueError):
# fail because the room is too small for constraint
convert_placement_to_range(
**{
'room_dim': [1, 2, 3],
'placement': {'x': None, 'y': None, 'height': None, 'min_to_wall': 1},
'object_radius': 0.1,
}
)
@pytest.mark.unit
@pytest.mark.parametrize("num_mics", [2, 4])
@pytest.mark.parametrize("num_sources", [1, 3])
def test_convert_rir_to_mc(self, num_mics: int, num_sources: int):
"""Test conversion of a RIR from list of lists to multichannel array.
"""
len_range = [50, 1000]
random = default_rng()
rir = []
rir_len = []
# Golden reference
for n_mic in range(num_mics):
this_rir = []
this_len = []
for n_source in range(num_sources):
random_len = np.random.randint(low=len_range[0], high=len_range[1])
this_rir.append(np.random.rand(random_len))
this_len.append(random_len)
rir.append(this_rir)
rir_len.append(this_len)
# UUT
mc_rir = convert_rir_to_multichannel(rir)
# Compare
for n_source in range(num_sources):
for n_mic in range(num_mics):
# check RIR
diff_len = rir_len[n_mic][n_source]
diff = mc_rir[n_source][:diff_len, n_mic] - rir[n_mic][n_source]
assert np.all(diff == 0.0), f'Original RIR not matching: source={n_source}, channel={n_mic}'
# check padding
pad = mc_rir[n_source][diff_len:, n_mic]
assert np.all(pad == 0.0), f'RIR zero-padding not matching: source={n_source}, channel={n_mic}'
class TestArrayGeometry:
@pytest.mark.unit
@pytest.mark.parametrize('mic_spacing', [0.05])
@pytest.mark.parametrize("num_mics", [2, 4])
@pytest.mark.parametrize("axis", [0, 1, 2])
def test_array_geometry(self, mic_spacing: float, num_mics: int, axis: int):
max_abs_tol = 1e-8
random = default_rng()
# assume linear arrray along axis
mic_positions = np.zeros((num_mics, 3))
mic_positions[:, axis] = mic_spacing * np.arange(num_mics)
center = np.mean(mic_positions, axis=0)
mic_positions_centered = mic_positions - center
uut = ArrayGeometry(mic_positions)
# test initialization
assert np.max(np.abs(uut.center - center)) < max_abs_tol
assert np.max(np.abs(uut.centered_positions - mic_positions_centered)) < max_abs_tol
assert np.max(np.abs(uut.positions - mic_positions)) < max_abs_tol
# test translation
center = random.uniform(low=-10, high=10, size=3)
mic_positions = mic_positions_centered + center
uut.translate(to=center)
assert np.max(np.abs(uut.center - center)) < max_abs_tol
assert np.max(np.abs(uut.centered_positions - mic_positions_centered)) < max_abs_tol
assert np.max(np.abs(uut.positions - mic_positions)) < max_abs_tol
# test rotation
center = uut.center
centered_positions = uut.centered_positions
test_cases = []
test_cases.append(
{
'orientation': {'yaw': 90},
'new_positions': np.vstack(
(-centered_positions[:, 1], centered_positions[:, 0], centered_positions[:, 2])
).T,
}
)
test_cases.append(
{
'orientation': {'pitch': 90},
'new_positions': np.vstack(
(centered_positions[:, 2], centered_positions[:, 1], -centered_positions[:, 0])
).T,
}
)
test_cases.append(
{
'orientation': {'roll': 90},
'new_positions': np.vstack(
(centered_positions[:, 0], -centered_positions[:, 2], centered_positions[:, 1])
).T,
}
)
for test_case in test_cases:
new_array = uut.new_rotated_array(**test_case['orientation'])
assert np.max(np.abs(new_array.center - center)) < max_abs_tol
assert np.max(np.abs(new_array.centered_positions - test_case['new_positions'])) < max_abs_tol
# test radius
assert np.max(np.abs(uut.radius - (num_mics - 1) / 2 * mic_spacing)) < max_abs_tol
# test conversion to spherical
# point on x axis
point = np.array([1, 0, 0])
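# Spherical coordinates are taken relative to the array center: azimuth in the x-y plane
# (atan2(y, x)) and elevation measured from the x-y plane toward +z, as the cases below exercise.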
test_cases = []
test_cases.append({'center': 0, 'dist': np.linalg.norm(point - 0), 'azim': 0, 'elev': 0})
test_cases.append(
{
'center': np.array([2, 0, 0]),
'dist': np.linalg.norm(point - np.array([2, 0, 0])),
'azim': -180,
'elev': 0,
}
)
test_cases.append(
{
'center': np.array([1, 1, 1]),
'dist': np.linalg.norm(point - np.array([1, 1, 1])),
'azim': -90,
'elev': -45,
}
)
test_cases.append(
{
'center': np.array([1, 2, -2]),
'dist': np.linalg.norm(point - np.array([1, 2, -2])),
'azim': -90,
'elev': 45,
}
)
for test_case in test_cases:
uut.translate(to=test_case['center'])
dist, azim, elev = uut.spherical_relative_to_array(point)
assert abs(dist - test_case['dist']) < max_abs_tol
assert abs(wrap_to_180(azim - test_case['azim'])) < max_abs_tol
assert abs(elev - test_case['elev']) < max_abs_tol
class TestRoomSimulation:
max_diff_tol = 1e-5
@pytest.mark.unit
def test_simulate_room_mix(self, test_data_dir):
"""Test room simulation for fixed parameters.
"""
# Test setup
data_dir = os.path.join(test_data_dir, 'asr', 'data_simulation')
# Minimal configuration
sample_rate = 16000
target_cfg = {
'room_filepath': os.path.join(data_dir, 'test_room.h5'),
'mic_positions': np.random.rand(6, 3), # random positions
'selected_mics': [0, 1, 2, 3, 4, 5],
'source': 0,
'audio_filepath': os.path.join(data_dir, 'target.wav'),
'duration': 1.5,
}
interference_cfg = [{'source': 1, 'selected_mics': target_cfg['selected_mics']}]
audio_metadata = {
'target': [{'audio_filepath': 'target.wav', 'duration': 1.5, 'offset': 0.8}],
'target_dir': data_dir,
'noise': [{'audio_filepath': 'noise.wav', 'duration': 2.3}],
'noise_dir': data_dir,
'interference': [
{'audio_filepath': 'interference_1.wav', 'duration': 0.8},
{'audio_filepath': 'interference_2.wav', 'duration': 0.75},
],
'interference_dir': data_dir,
}
mix_cfg = {'rsnr': 10, 'rsir': 15, 'ref_mic': 0, 'ref_mic_rms': -30, 'min_duration': None, 'save': {}}
with tempfile.TemporaryDirectory() as output_dir:
# Mix
base_output_filepath = os.path.join(output_dir, 'test_output')
simulate_room_mix(
sample_rate=sample_rate,
target_cfg=target_cfg,
interference_cfg=interference_cfg,
mix_cfg=mix_cfg,
audio_metadata=audio_metadata,
base_output_filepath=base_output_filepath,
)
# Check target + noise + interference = mix
mix_from_parts = 0
for suffix in ['_target_reverberant.wav', '_noise.wav', '_interference.wav']:
mix_from_parts += AudioSegment.from_file(base_output_filepath + suffix).samples
mix_uut = AudioSegment.from_file(base_output_filepath + '_mic.wav')
mix_uut_samples = mix_uut.samples
# Compare UUT to sum of parts
max_diff = np.max(np.abs(mix_uut_samples - mix_from_parts))
assert max_diff < self.max_diff_tol
# Compare the UUT to golden reference
golden_mix_filepath = os.path.join(data_dir, 'test_output_mic.wav')
mix_golden = AudioSegment.from_file(golden_mix_filepath)
assert mix_uut.num_samples == mix_golden.num_samples
assert mix_uut.num_channels == mix_golden.num_channels
assert mix_uut.sample_rate == mix_golden.sample_rate
assert mix_uut.duration == mix_golden.duration
max_diff = np.max(np.abs(mix_uut_samples - mix_golden.samples))
assert max_diff < self.max_diff_tol
| NeMo-main | tests/collections/asr/test_asr_data_simulation.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
from unittest import TestCase
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.models import EncDecSpeakerLabelModel
class EncDecSpeechLabelModelTest(TestCase):
@pytest.mark.unit
def test_constructor(self):
preprocessor = {
'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 512,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': False,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.SpeakerDecoder',
'feat_in': 512,
'num_classes': 2,
'pool_mode': 'xvector',
'emb_sizes': [1024],
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
},
)
speaker_model = EncDecSpeakerLabelModel(cfg=modelConfig)
speaker_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = speaker_model.to_config_dict()
instance2 = EncDecSpeakerLabelModel.from_config_dict(confdict)
self.assertTrue(isinstance(instance2, EncDecSpeakerLabelModel))
@pytest.mark.unit
def test_ecapa_enc_dec(self):
preprocessor = {
'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
}
encoder = {
'_target_': 'nemo.collections.asr.modules.ECAPAEncoder',
'feat_in': 80,
'filters': [4, 4, 4, 4, 3],
'kernel_sizes': [5, 3, 3, 3, 1],
'dilations': [1, 1, 1, 1, 1],
'scale': 2,
}
decoder = {
'_target_': 'nemo.collections.asr.modules.SpeakerDecoder',
'feat_in': 3,
'num_classes': 2,
'pool_mode': 'attention',
'emb_sizes': 192,
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder),}
)
speaker_model = EncDecSpeakerLabelModel(cfg=modelConfig)
speaker_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = speaker_model.to_config_dict()
instance2 = EncDecSpeakerLabelModel.from_config_dict(confdict)
self.assertTrue(isinstance(instance2, EncDecSpeakerLabelModel))
@pytest.mark.unit
def test_titanet_enc_dec(self):
preprocessor = {
'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 256,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.SpeakerDecoder',
'feat_in': 256,
'num_classes': 2,
'pool_mode': 'attention',
'emb_sizes': [1024],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder),}
)
speaker_model = EncDecSpeakerLabelModel(cfg=modelConfig)
speaker_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = speaker_model.to_config_dict()
instance2 = EncDecSpeakerLabelModel.from_config_dict(confdict)
self.assertTrue(isinstance(instance2, EncDecSpeakerLabelModel))
class TestEncDecSpeechLabelModel:
@pytest.mark.unit
def test_pretrained_titanet_embeddings(self, test_data_dir):
model_name = 'titanet_large'
speaker_model = EncDecSpeakerLabelModel.from_pretrained(model_name)
assert isinstance(speaker_model, EncDecSpeakerLabelModel)
relative_filepath = "an4_speaker/an4/wav/an4_clstk/fash/an251-fash-b.wav"
filename = os.path.join(test_data_dir, relative_filepath)
emb, logits = speaker_model.infer_file(filename)
class_id = logits.argmax(axis=-1)
emb_sum = emb.sum()
assert 11144 == class_id
assert abs(emb_sum + 0.2575) <= 1e-2
@pytest.mark.unit
def test_pretrained_ambernet_logits(self, test_data_dir):
model_name = 'langid_ambernet'
lang_model = EncDecSpeakerLabelModel.from_pretrained(model_name)
assert isinstance(lang_model, EncDecSpeakerLabelModel)
relative_filepath = "an4_speaker/an4/wav/an4_clstk/fash/an255-fash-b.wav"
filename = os.path.join(test_data_dir, relative_filepath)
label = lang_model.get_label(filename)
assert label == "en"
@pytest.mark.unit
def test_pretrained_ambernet_logits_batched(self, test_data_dir):
model_name = 'langid_ambernet'
lang_model = EncDecSpeakerLabelModel.from_pretrained(model_name)
relative_filepath = "an4_speaker/an4/wav/an4_clstk/fash/an255-fash-b.wav"
filename = os.path.join(test_data_dir, relative_filepath)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
with tempfile.TemporaryDirectory() as tmpdir:
temp_manifest = os.path.join(tmpdir, 'manifest.json')
with open(temp_manifest, 'w', encoding='utf-8') as fp:
entry = {"audio_filepath": filename, "duration": 4.5, "label": 'en'}
fp.write(json.dumps(entry) + '\n')
entry = {
"audio_filepath": filename,
"duration": 4.5,
"label": 'test',
} # test sample outside of training set
fp.write(json.dumps(entry) + '\n')
embs, logits, gt_labels, trained_labels = lang_model.batch_inference(temp_manifest, device=device)
pred_label = trained_labels[logits.argmax(axis=-1)[0]]
true_label = gt_labels[0]
assert pred_label == true_label
assert gt_labels[1] == 'test'
| NeMo-main | tests/collections/asr/test_speaker_label_models.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.asr.modules.conformer_encoder import ConformerEncoder
class TestStochasticDepth:
"""Testing stochastic depth functionality."""
def test_stochastic_depth_model_creation(self):
"""Testing basic model creation and the drop probs are correctly assigned."""
n_layers = 4
model = ConformerEncoder(feat_in=10, n_layers=n_layers, d_model=4, feat_out=8)
# checking that by default SD is disabled
assert model.layer_drop_probs == [0.0] * n_layers
# linear mode
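# In linear mode the drop probability ramps from drop_prob/L at the first affected layer up to
# drop_prob at the last layer, where L = n_layers - start_layer, matching the assert below.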
for drop_prob in [0.3, 0.5, 0.9]:
for start_layer in [1, 3]:
model = ConformerEncoder(
feat_in=10,
n_layers=n_layers,
d_model=4,
feat_out=8,
stochastic_depth_drop_prob=drop_prob,
stochastic_depth_start_layer=start_layer,
)
L = n_layers - start_layer
assert model.layer_drop_probs == [0.0] * start_layer + [drop_prob * l / L for l in range(1, L + 1)]
# uniform mode
for drop_prob in [0.3, 0.5, 0.9]:
model = ConformerEncoder(
feat_in=10,
n_layers=n_layers,
d_model=4,
feat_out=8,
stochastic_depth_drop_prob=drop_prob,
stochastic_depth_mode="uniform",
stochastic_depth_start_layer=start_layer,
)
L = n_layers - start_layer
assert model.layer_drop_probs == [0.0] * start_layer + [drop_prob] * L
# checking for errors
for drop_prob in [-1.0, 1.0]:
with pytest.raises(ValueError, match="stochastic_depth_drop_prob has to be in"):
ConformerEncoder(
feat_in=10,
n_layers=n_layers,
d_model=4,
feat_out=8,
stochastic_depth_drop_prob=drop_prob,
stochastic_depth_mode="uniform",
)
with pytest.raises(ValueError, match="stochastic_depth_mode has to be one of"):
ConformerEncoder(feat_in=10, n_layers=n_layers, d_model=4, feat_out=8, stochastic_depth_mode="weird")
for start_layer in [-1, 0, 5]:
with pytest.raises(ValueError, match="stochastic_depth_start_layer has to be in"):
ConformerEncoder(
feat_in=10, n_layers=n_layers, d_model=4, feat_out=8, stochastic_depth_start_layer=start_layer,
)
def test_stochastic_depth_forward(self):
"""Testing that forward works and we get randomness during training, but not during eval."""
random_input = torch.rand((1, 2, 2))
random_length = torch.tensor([2, 2], dtype=torch.int64)
model = ConformerEncoder(
feat_in=2,
n_layers=3,
d_model=4,
feat_out=4,
stochastic_depth_drop_prob=0.8,
dropout=0.0,
dropout_pre_encoder=0.0,
dropout_emb=0.0,
conv_norm_type="layer_norm",
conv_kernel_size=3,
)
model.train()
outputs = [None] * 5
for i in range(5):
outputs[i] = model(audio_signal=random_input, length=random_length)[0]
# checking that not all outputs are the same
num_diff = 0
for i in range(1, 5):
if not torch.allclose(outputs[i], outputs[0]):
num_diff += 1
assert num_diff > 0
model.eval()
outputs = [None] * 5
for i in range(5):
outputs[i] = model(audio_signal=random_input, length=random_length)[0]
# checking that all outputs are identical in eval mode (stochastic depth disabled)
num_diff = 0
for i in range(1, 5):
if not torch.allclose(outputs[i], outputs[0]):
num_diff += 1
assert num_diff == 0
| NeMo-main | tests/collections/asr/test_conformer_encoder.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import pytest
import torch
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.data import audio_to_label
from nemo.collections.asr.models import EncDecClassificationModel, EncDecFrameClassificationModel, configs
from nemo.utils.config_utils import assert_dataclass_signature_match
@pytest.fixture()
def speech_classification_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 32,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoderClassification',
'params': {'feat_in': 32, 'num_classes': 30,},
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'labels': ListConfig(["dummy_cls_{}".format(i + 1) for i in range(30)]),
}
)
model = EncDecClassificationModel(cfg=modelConfig)
return model
@pytest.fixture()
def frame_classification_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 32,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.common.parts.MultiLayerPerceptron',
'params': {'hidden_size': 32, 'num_classes': 5,},
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'labels': ListConfig(["dummy_cls_{}".format(i + 1) for i in range(5)]),
}
)
model = EncDecFrameClassificationModel(cfg=modelConfig)
return model
class TestEncDecClassificationModel:
@pytest.mark.unit
def test_constructor(self, speech_classification_model):
asr_model = speech_classification_model.train()
conv_cnt = (64 * 32 * 1 + 32) + (64 * 1 * 1 + 32) # separable kernel + bias + pointwise kernel + bias
bn_cnt = (4 * 32) * 2 # 2 * moving averages
dec_cnt = 32 * 30 + 30 # fc + bias
param_count = conv_cnt + bn_cnt + dec_cnt
assert asr_model.num_weights == param_count
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecClassificationModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecClassificationModel)
@pytest.mark.unit
def test_forward(self, speech_classification_model):
asr_model = speech_classification_model.eval()
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins = asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
logprobs_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch = asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logprobs_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.unit
def test_vocab_change(self, speech_classification_model):
asr_model = speech_classification_model.train()
old_labels = copy.deepcopy(asr_model._cfg.labels)
nw1 = asr_model.num_weights
asr_model.change_labels(new_labels=old_labels)
# No change
assert nw1 == asr_model.num_weights
new_labels = copy.deepcopy(old_labels)
new_labels.append('dummy_cls_31')
new_labels.append('dummy_cls_32')
new_labels.append('dummy_cls_33')
asr_model.change_labels(new_labels=new_labels)
# fully connected + bias
assert asr_model.num_weights == nw1 + 3 * (asr_model.decoder._feat_in + 1)
@pytest.mark.unit
def test_transcription(self, speech_classification_model, test_data_dir):
# Ground truth labels = ["yes", "no"]
audio_filenames = ['an22-flrp-b.wav', 'an90-fbbh-b.wav']
audio_paths = [os.path.join(test_data_dir, "asr", "train", "an4", "wav", fp) for fp in audio_filenames]
model = speech_classification_model.eval()
# Test Top 1 classification transcription
results = model.transcribe(audio_paths, batch_size=2)
assert len(results) == 2
assert results[0].shape == torch.Size([1])
# Test Top 5 classification transcription
model._accuracy.top_k = [5] # set top k to 5 for accuracy calculation
results = model.transcribe(audio_paths, batch_size=2)
assert len(results) == 2
assert results[0].shape == torch.Size([5])
# Test Top 1 and Top 5 classification transcription
model._accuracy.top_k = [1, 5]
results = model.transcribe(audio_paths, batch_size=2)
assert len(results) == 2
assert results[0].shape == torch.Size([2, 1])
assert results[1].shape == torch.Size([2, 5])
assert model._accuracy.top_k == [1, 5]
# Test log probs extraction
model._accuracy.top_k = [1]
results = model.transcribe(audio_paths, batch_size=2, logprobs=True)
assert len(results) == 2
assert results[0].shape == torch.Size([len(model.cfg.labels)])
# Test log probs extraction remains same for any top_k
model._accuracy.top_k = [5]
results = model.transcribe(audio_paths, batch_size=2, logprobs=True)
assert len(results) == 2
assert results[0].shape == torch.Size([len(model.cfg.labels)])
@pytest.mark.unit
def test_EncDecClassificationDatasetConfig_for_AudioToSpeechLabelDataset(self):
# ignore some additional arguments as dataclass is generic
IGNORE_ARGS = [
'is_tarred',
'num_workers',
'batch_size',
'tarred_audio_filepaths',
'shuffle',
'pin_memory',
'drop_last',
'tarred_shard_strategy',
'shuffle_n',
# `featurizer` is supplied at runtime
'featurizer',
# additional ignored arguments
'vad_stream',
'int_values',
'sample_rate',
'normalize_audio',
'augmentor',
'bucketing_batch_size',
'bucketing_strategy',
'bucketing_weights',
]
REMAP_ARGS = {'trim_silence': 'trim'}
result = assert_dataclass_signature_match(
audio_to_label.AudioToSpeechLabelDataset,
configs.EncDecClassificationDatasetConfig,
ignore_args=IGNORE_ARGS,
remap_args=REMAP_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
class TestEncDecFrameClassificationModel(TestEncDecClassificationModel):
@pytest.mark.parametrize(["logits_len", "labels_len"], [(20, 10), (21, 10), (19, 10), (20, 9), (20, 11)])
@pytest.mark.unit
def test_reshape_labels(self, frame_classification_model, logits_len, labels_len):
model = frame_classification_model.eval()
logits = torch.ones(4, logits_len, 2)
labels = torch.ones(4, labels_len)
logits_len = torch.tensor([6, 7, 8, 9])
labels_len = torch.tensor([5, 6, 7, 8])
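# reshape_labels is expected to stretch/pad the frame-level labels so their time dimension matches
# the logits (20 frames here) and to map each example's label length onto the corresponding logit
# length, which the asserts below check.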
labels_new, labels_len_new = model.reshape_labels(
logits=logits, labels=labels, logits_len=logits_len, labels_len=labels_len
)
assert labels_new.size(1) == logits.size(1)
assert torch.equal(labels_len_new, torch.tensor([6, 7, 8, 9]))
@pytest.mark.unit
def test_EncDecClassificationDatasetConfig_for_AudioToMultiSpeechLabelDataset(self):
# ignore some additional arguments as dataclass is generic
IGNORE_ARGS = [
'is_tarred',
'num_workers',
'batch_size',
'tarred_audio_filepaths',
'shuffle',
'pin_memory',
'drop_last',
'tarred_shard_strategy',
'shuffle_n',
# `featurizer` is supplied at runtime
'featurizer',
# additional ignored arguments
'vad_stream',
'int_values',
'sample_rate',
'normalize_audio',
'augmentor',
'bucketing_batch_size',
'bucketing_strategy',
'bucketing_weights',
'delimiter',
'normalize_audio_db',
'normalize_audio_db_target',
'window_length_in_sec',
'shift_length_in_sec',
]
REMAP_ARGS = {'trim_silence': 'trim'}
result = assert_dataclass_signature_match(
audio_to_label.AudioToMultiLabelDataset,
configs.EncDecClassificationDatasetConfig,
ignore_args=IGNORE_ARGS,
remap_args=REMAP_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
| NeMo-main | tests/collections/asr/test_asr_classification_model.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import os
import shutil
import tempfile
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.data import audio_to_text
from nemo.collections.asr.metrics.wer_bpe import CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models import configs
from nemo.collections.asr.models.ctc_bpe_models import EncDecCTCModelBPE
from nemo.collections.common import tokenizers
from nemo.utils.config_utils import assert_dataclass_signature_match
@pytest.fixture()
def asr_model(test_data_dir):
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 1024,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 1024,
'num_classes': -1,
'vocabulary': None,
}
tokenizer = {'dir': os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128"), 'type': 'wpe'}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'tokenizer': DictConfig(tokenizer),
}
)
model_instance = EncDecCTCModelBPE(cfg=modelConfig)
return model_instance
class TestEncDecCTCModel:
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor(self, asr_model):
asr_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecCTCModelBPE.from_config_dict(confdict)
assert isinstance(instance2, EncDecCTCModelBPE)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_forward(self, asr_model):
asr_model = asr_model.eval()
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
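# dithering and feature padding are disabled above so that single-sample and batched inference produce numerically comparable outputs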
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins, _, _ = asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
print(len(logprobs_ins))
logprobs_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch, _, _ = asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logprobs_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logprobs_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_save_restore_artifact(self, asr_model):
with tempfile.TemporaryDirectory() as tmpdir:
save_path = os.path.join(tmpdir, 'ctc_bpe.nemo')
asr_model.train()
asr_model.save_to(save_path)
new_model = EncDecCTCModelBPE.restore_from(save_path)
assert isinstance(new_model, type(asr_model))
assert new_model.vocab_path.endswith('_vocab.txt')
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 128
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_save_restore_artifact_spe(self, asr_model, test_data_dir):
with tempfile.TemporaryDirectory() as tmpdir:
tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_spe_128")
asr_model.change_vocabulary(new_tokenizer_dir=tokenizer_dir, new_tokenizer_type='bpe')
save_path = os.path.join(tmpdir, 'ctc_bpe.nemo')
asr_model.train()
asr_model.save_to(save_path)
new_model = EncDecCTCModelBPE.restore_from(save_path)
assert isinstance(new_model, type(asr_model))
assert isinstance(new_model.tokenizer, tokenizers.SentencePieceTokenizer)
assert new_model.model_path.endswith('_tokenizer.model')
assert new_model.vocab_path.endswith('_vocab.txt')
assert new_model.spe_vocab_path.endswith('_tokenizer.vocab')
assert new_model.tokenizer.tokenizer.vocab_size == 128
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 128
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_save_restore_artifact_agg(self, asr_model, test_data_dir):
tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_spe_128")
tok_en = {"dir": tokenizer_dir, "type": "wpe"}
# the tokenizer below is really an English tokenizer, but we pretend it is Spanish
tok_es = {"dir": tokenizer_dir, "type": "wpe"}
tcfg = DictConfig({"type": "agg", "langs": {"en": tok_en, "es": tok_es}})
with tempfile.TemporaryDirectory() as tmpdir:
asr_model.change_vocabulary(new_tokenizer_dir=tcfg, new_tokenizer_type="agg")
save_path = os.path.join(tmpdir, "ctc_agg.nemo")
asr_model.train()
asr_model.save_to(save_path)
new_model = EncDecCTCModelBPE.restore_from(save_path)
assert isinstance(new_model, type(asr_model))
assert isinstance(new_model.tokenizer, tokenizers.AggregateTokenizer)
# should be double
assert new_model.tokenizer.tokenizer.vocab_size == 254
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 254
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_vocab_change(self, test_data_dir, asr_model):
old_vocab = copy.deepcopy(asr_model.decoder.vocabulary)
with tempfile.TemporaryDirectory() as save_dir:
save_path = os.path.join(save_dir, 'temp.nemo')
with tempfile.TemporaryDirectory() as tmpdir:
old_tmpdir_path = tmpdir
old_tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
new_tokenizer_dir = os.path.join(tmpdir, 'tokenizer')
os.makedirs(new_tokenizer_dir, exist_ok=True)
shutil.copy2(old_tokenizer_dir, new_tokenizer_dir)
nw1 = asr_model.num_weights
asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')
# No change
assert nw1 == asr_model.num_weights
with open(os.path.join(new_tokenizer_dir, 'vocab.txt'), 'a+') as f:
f.write("!\n")
f.write('$\n')
f.write('@\n')
asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')
# fully connected + bias
assert asr_model.num_weights == nw1 + 3 * (asr_model.decoder._feat_in + 1)
new_vocab = copy.deepcopy(asr_model.decoder.vocabulary)
assert len(old_vocab) != len(new_vocab)
# save the model (after change of vocabulary)
asr_model.save_to(save_path)
assert os.path.exists(save_path)
# delete copied version of the vocabulary from nested tmpdir (by scope)
# assert copied vocab no longer exists
assert not os.path.exists(os.path.join(old_tmpdir_path, 'tokenizer', 'vocab.txt'))
# make a copy of the tokenizer before renaming
try:
os.rename(old_tokenizer_dir, old_tokenizer_dir + '.bkp')
assert not os.path.exists(old_tokenizer_dir)
# restore model from .nemo
asr_model2 = EncDecCTCModelBPE.restore_from(save_path)
assert isinstance(asr_model2, EncDecCTCModelBPE)
# Check if vocabulary size is same
assert asr_model.tokenizer.tokenizer.vocab_size == asr_model2.tokenizer.tokenizer.vocab_size
# Make a copy of the tokenizer
new_tokenizer_dir = os.path.join(save_dir, 'tokenizer')
os.makedirs(new_tokenizer_dir, exist_ok=True)
new_tokenizer_path = os.path.join(new_tokenizer_dir, 'vocab.txt')
with open(new_tokenizer_path, 'w') as f:
for v in asr_model2.tokenizer.tokenizer.get_vocab():
f.write(f"{v}\n")
# Add some new tokens too
f.write("^\n")
f.write("^^\n")
f.write("^^^\n")
assert os.path.exists(new_tokenizer_path)
# change vocabulary
asr_model2.change_vocabulary(new_tokenizer_dir, new_tokenizer_type='wpe')
assert asr_model.tokenizer.vocab_size != asr_model2.tokenizer.vocab_size
new_save_path = os.path.join(save_dir, 'temp2.nemo')
asr_model2.save_to(new_save_path)
asr_model3 = EncDecCTCModelBPE.restore_from(new_save_path)
assert isinstance(asr_model3, EncDecCTCModelBPE)
# Check if vocabulary size is same
assert asr_model2.tokenizer.tokenizer.vocab_size == asr_model3.tokenizer.tokenizer.vocab_size
assert asr_model2.vocab_path != asr_model3.vocab_path
# Model PT level checks
assert len(asr_model2.artifacts) == 1
finally:
os.rename(old_tokenizer_dir + '.bkp', old_tokenizer_dir)
@pytest.mark.unit
def test_decoding_change(self, asr_model):
assert asr_model.decoding is not None
assert isinstance(asr_model.decoding, CTCBPEDecoding)
assert asr_model.decoding.cfg.strategy == "greedy"
assert asr_model.decoding.preserve_alignments is False
assert asr_model.decoding.compute_timestamps is False
cfg = CTCBPEDecodingConfig(preserve_alignments=True, compute_timestamps=True)
asr_model.change_decoding_strategy(cfg)
assert asr_model.decoding.preserve_alignments is True
assert asr_model.decoding.compute_timestamps is True
@pytest.mark.unit
def test_ASRDatasetConfig_for_AudioToBPEDataset(self):
# ignore some additional arguments as dataclass is generic
IGNORE_ARGS = [
'is_tarred',
'num_workers',
'batch_size',
'tarred_audio_filepaths',
'shuffle',
'pin_memory',
'drop_last',
'tarred_shard_strategy',
'shard_manifests',
'shuffle_n',
'parser',
'normalize',
'unk_index',
'pad_id',
'bos_id',
'eos_id',
'blank_index',
'bucketing_batch_size',
'bucketing_strategy',
'bucketing_weights',
'channel_selector',
]
REMAP_ARGS = {'trim_silence': 'trim', 'labels': 'tokenizer'}
result = assert_dataclass_signature_match(
audio_to_text.AudioToBPEDataset, configs.ASRDatasetConfig, ignore_args=IGNORE_ARGS, remap_args=REMAP_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_ASRDatasetConfig_for_TarredAudioToBPEDataset(self):
# ignore some additional arguments as dataclass is generic
IGNORE_ARGS = [
'is_tarred',
'num_workers',
'batch_size',
'shuffle',
'pin_memory',
'drop_last',
'parser',
'normalize',
'unk_index',
'pad_id',
'bos_id',
'eos_id',
'blank_index',
'global_rank',
'world_size',
'bucketing_batch_size',
'bucketing_strategy',
'bucketing_weights',
'max_utts',
]
REMAP_ARGS = {
'trim_silence': 'trim',
'tarred_audio_filepaths': 'audio_tar_filepaths',
'tarred_shard_strategy': 'shard_strategy',
'shuffle_n': 'shuffle',
'labels': 'tokenizer',
}
result = assert_dataclass_signature_match(
audio_to_text.TarredAudioToBPEDataset,
configs.ASRDatasetConfig,
ignore_args=IGNORE_ARGS,
remap_args=REMAP_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
| NeMo-main | tests/collections/asr/test_asr_ctc_encoder_model_bpe.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
from typing import List, Type, Union
import numpy as np
import pytest
import soundfile as sf
from nemo.collections.asr.parts.preprocessing.perturb import NoisePerturbation, SilencePerturbation
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.asr.parts.utils.audio_utils import select_channels
class TestAudioSegment:
sample_rate = 16000
signal_duration_sec = 2
max_diff_tol = 1e-9
@property
def num_samples(self):
return self.sample_rate * self.signal_duration_sec
@pytest.mark.unit
@pytest.mark.parametrize("num_channels", [1, 4])
@pytest.mark.parametrize("channel_selector", [None, 'average', 0, 1, [0, 1]])
def test_init_single_channel(self, num_channels: int, channel_selector: Type[Union[str, int, List[int]]]):
"""Test the constructor directly.
"""
if num_channels == 1:
# samples is a one-dimensional vector for single-channel signal
samples = np.random.rand(self.num_samples)
else:
samples = np.random.rand(self.num_samples, num_channels)
if (isinstance(channel_selector, int) and channel_selector >= num_channels) or (
isinstance(channel_selector, list) and max(channel_selector) >= num_channels
):
# Expect a failure if the channel selector is out of range for the given number of channels
with pytest.raises(ValueError):
# Construct UUT
uut = AudioSegment(samples=samples, sample_rate=self.sample_rate, channel_selector=channel_selector)
else:
# Construct UUT
uut = AudioSegment(samples=samples, sample_rate=self.sample_rate, channel_selector=channel_selector)
# Create golden reference
# Note: AudioSegment converts input samples to float32
golden_samples = select_channels(samples.astype('float32'), channel_selector)
expected_num_channels = 1 if golden_samples.ndim == 1 else golden_samples.shape[1]
# Test UUT
assert uut.num_channels == expected_num_channels
assert uut.num_samples == self.num_samples
assert uut.sample_rate == self.sample_rate
assert uut.duration == self.signal_duration_sec
max_diff = np.max(np.abs(uut.samples - golden_samples))
assert max_diff < self.max_diff_tol
# Test zero padding
pad_length = 42
uut.pad(pad_length, symmetric=False)
# compare to golden references
assert uut.num_samples == self.num_samples + pad_length
assert np.all(uut.samples[-pad_length:] == 0.0)
max_diff = np.max(np.abs(uut.samples[:-pad_length] - golden_samples))
assert max_diff < self.max_diff_tol
# Test subsegment
start_time = 0.2 * self.signal_duration_sec
end_time = 0.5 * self.signal_duration_sec
uut.subsegment(start_time=start_time, end_time=end_time)
# compare to golden references
start_sample = int(round(start_time * self.sample_rate))
end_sample = int(round(end_time * self.sample_rate))
max_diff = np.max(np.abs(uut.samples - golden_samples[start_sample:end_sample]))
assert max_diff < self.max_diff_tol
@pytest.mark.unit
@pytest.mark.parametrize("num_channels", [1, 4])
@pytest.mark.parametrize("channel_selector", [None, 'average', 0])
def test_from_file(self, num_channels, channel_selector):
"""Test loading a signal from a file.
"""
with tempfile.TemporaryDirectory() as test_dir:
# Prepare a wav file
audio_file = os.path.join(test_dir, 'audio.wav')
if num_channels == 1:
# samples is a one-dimensional vector for single-channel signal
samples = np.random.rand(self.num_samples)
else:
samples = np.random.rand(self.num_samples, num_channels)
sf.write(audio_file, samples, self.sample_rate, 'float')
# Create UUT
uut = AudioSegment.from_file(audio_file, channel_selector=channel_selector)
# Create golden reference
# Note: AudioSegment converts input samples to float32
golden_samples = select_channels(samples.astype('float32'), channel_selector)
expected_num_channels = 1 if golden_samples.ndim == 1 else golden_samples.shape[1]
# Test UUT
assert uut.num_channels == expected_num_channels
assert uut.num_samples == self.num_samples
assert uut.sample_rate == self.sample_rate
assert uut.duration == self.signal_duration_sec
max_diff = np.max(np.abs(uut.samples - golden_samples))
assert max_diff < self.max_diff_tol
@pytest.mark.unit
@pytest.mark.parametrize("data_channels", [1, 4])
@pytest.mark.parametrize("noise_channels", [1, 4])
def test_noise_perturb_channels(self, data_channels, noise_channels):
"""Test noise perturbation with different numbers of data and noise channels.
"""
with tempfile.TemporaryDirectory() as test_dir:
# Prepare a wav file
audio_file = os.path.join(test_dir, 'audio.wav')
if data_channels == 1:
# samples is a one-dimensional vector for single-channel signal
samples = np.random.rand(self.num_samples)
else:
samples = np.random.rand(self.num_samples, data_channels)
sf.write(audio_file, samples, self.sample_rate, 'float')
noise_file = os.path.join(test_dir, 'noise.wav')
if noise_channels == 1:
# samples is a one-dimensional vector for single-channel signal
samples = np.random.rand(self.num_samples)
else:
samples = np.random.rand(self.num_samples, noise_channels)
sf.write(noise_file, samples, self.sample_rate, 'float')
manifest_file = os.path.join(test_dir, 'noise_manifest.json')
with open(manifest_file, 'w') as fout:
item = {'audio_filepath': os.path.abspath(noise_file), 'label': '-', 'duration': 0.1, 'offset': 0.0}
fout.write(f'{json.dumps(item)}\n')
perturber = NoisePerturbation(manifest_file)
audio = AudioSegment.from_file(audio_file)
noise = AudioSegment.from_file(noise_file)
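# matching channel counts should succeed for a valid ref_mic and raise for an out-of-range one; mismatched channel counts should always raise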
if data_channels == noise_channels:
try:
_ = perturber.perturb_with_input_noise(audio, noise, ref_mic=0)
except ValueError:
assert False, "perturb_with_input_noise failed with ref_mic=0"
with pytest.raises(ValueError):
_ = perturber.perturb_with_input_noise(audio, noise, ref_mic=data_channels)
try:
_ = perturber.perturb_with_foreground_noise(audio, noise, ref_mic=0)
except ValueError:
assert False, "perturb_with_foreground_noise failed with ref_mic=0"
with pytest.raises(ValueError):
_ = perturber.perturb_with_foreground_noise(audio, noise, ref_mic=data_channels)
else:
with pytest.raises(ValueError):
_ = perturber.perturb_with_input_noise(audio, noise)
with pytest.raises(ValueError):
_ = perturber.perturb_with_foreground_noise(audio, noise)
def test_silence_perturb(self):
"""Test loading a signal from a file and applying silence perturbation.
"""
with tempfile.TemporaryDirectory() as test_dir:
# Prepare a wav file
audio_file = os.path.join(test_dir, 'audio.wav')
# samples is a one-dimensional vector for single-channel signal
samples = np.random.rand(self.num_samples)
sf.write(audio_file, samples, self.sample_rate, 'float')
dur = 2
perturber = SilencePerturbation(
min_start_silence_secs=dur,
max_start_silence_secs=dur,
min_end_silence_secs=dur,
max_end_silence_secs=dur,
)
audio = AudioSegment.from_file(audio_file)
ori_audio_len = len(audio._samples)
_ = perturber.perturb(audio)
assert len(audio._samples) == ori_audio_len + 2 * dur * self.sample_rate
| NeMo-main | tests/collections/asr/test_preprocessing_segment.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
from typing import Optional
import numpy as np
import pytest
import torch
from nemo.collections.asr.modules.audio_modules import (
MaskBasedDereverbWPE,
MaskReferenceChannel,
SpectrogramToMultichannelFeatures,
WPEFilter,
)
from nemo.collections.asr.modules.audio_preprocessing import AudioToSpectrogram
from nemo.collections.asr.parts.utils.audio_utils import convmtx_mc_numpy
try:
importlib.import_module('torchaudio')
HAVE_TORCHAUDIO = True
except ModuleNotFoundError:
HAVE_TORCHAUDIO = False
class TestSpectrogramToMultichannelFeatures:
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_TORCHAUDIO, reason="Modules in this test require torchaudio")
@pytest.mark.parametrize('fft_length', [256])
@pytest.mark.parametrize('num_channels', [1, 4])
@pytest.mark.parametrize('mag_reduction', [None, 'rms', 'abs_mean', 'mean_abs'])
def test_magnitude(self, fft_length: int, num_channels: int, mag_reduction: Optional[str]):
"""Test calculation of spatial features for multi-channel audio.
"""
atol = 1e-6
batch_size = 8
num_samples = fft_length * 50
num_examples = 25
random_seed = 42
_rng = np.random.default_rng(seed=random_seed)
hop_length = fft_length // 4
audio2spec = AudioToSpectrogram(fft_length=fft_length, hop_length=hop_length)
spec2feat = SpectrogramToMultichannelFeatures(
num_subbands=audio2spec.num_subbands, mag_reduction=mag_reduction, use_ipd=False, mag_normalization=None,
)
for n in range(num_examples):
x = _rng.normal(size=(batch_size, num_channels, num_samples))
spec, spec_len = audio2spec(input=torch.Tensor(x), input_length=torch.Tensor([num_samples] * batch_size))
# UUT output
feat, _ = spec2feat(input=spec, input_length=spec_len)
feat_np = feat.cpu().detach().numpy()
# Golden output
spec_np = spec.cpu().detach().numpy()
if mag_reduction is None:
feat_golden = np.abs(spec_np)
elif mag_reduction == 'rms':
feat_golden = np.sqrt(np.mean(np.abs(spec_np) ** 2, axis=1, keepdims=True))
elif mag_reduction == 'mean_abs':
feat_golden = np.mean(np.abs(spec_np), axis=1, keepdims=True)
elif mag_reduction == 'abs_mean':
feat_golden = np.abs(np.mean(spec_np, axis=1, keepdims=True))
else:
raise NotImplementedError()
# Compare shape
assert feat_np.shape == feat_golden.shape, f'Feature shape not matching for example {n}'
# Compare values
assert np.allclose(feat_np, feat_golden, atol=atol), f'Features not matching for example {n}'
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_TORCHAUDIO, reason="Modules in this test require torchaudio")
@pytest.mark.parametrize('fft_length', [256])
@pytest.mark.parametrize('num_channels', [1, 4])
def test_ipd(self, fft_length: int, num_channels: int):
"""Test calculation of IPD spatial features for multi-channel audio.
"""
atol = 1e-5
batch_size = 8
num_samples = fft_length * 50
num_examples = 10
random_seed = 42
_rng = np.random.default_rng(seed=random_seed)
hop_length = fft_length // 4
audio2spec = AudioToSpectrogram(fft_length=fft_length, hop_length=hop_length)
spec2feat = SpectrogramToMultichannelFeatures(
num_subbands=audio2spec.num_subbands,
mag_reduction='rms',
use_ipd=True,
mag_normalization=None,
ipd_normalization=None,
)
for n in range(num_examples):
x = _rng.normal(size=(batch_size, num_channels, num_samples))
spec, spec_len = audio2spec(input=torch.Tensor(x), input_length=torch.Tensor([num_samples] * batch_size))
# UUT output
feat, _ = spec2feat(input=spec, input_length=spec_len)
feat_np = feat.cpu().detach().numpy()
ipd = feat_np[..., audio2spec.num_subbands :, :]
# Golden output
spec_np = spec.cpu().detach().numpy()
spec_mean = np.mean(spec_np, axis=1, keepdims=True)
ipd_golden = np.angle(spec_np) - np.angle(spec_mean)
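# wrap the phase difference into the interval [-pi, pi)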
ipd_golden = np.remainder(ipd_golden + np.pi, 2 * np.pi) - np.pi
# Compare shape
assert ipd.shape == ipd_golden.shape, f'Feature shape not matching for example {n}'
# Compare values
assert np.allclose(ipd, ipd_golden, atol=atol), f'Features not matching for example {n}'
class TestMaskBasedProcessor:
@pytest.mark.unit
@pytest.mark.skipif(not HAVE_TORCHAUDIO, reason="Modules in this test require torchaudio")
@pytest.mark.parametrize('fft_length', [256])
@pytest.mark.parametrize('num_channels', [1, 4])
@pytest.mark.parametrize('num_masks', [1, 2])
def test_mask_reference_channel(self, fft_length: int, num_channels: int, num_masks: int):
"""Test masking of the reference channel.
"""
if num_channels == 1:
# Only one channel available
ref_channels = [0]
else:
# Use first or last channel for MC signals
ref_channels = [0, num_channels - 1]
atol = 1e-6
batch_size = 8
num_samples = fft_length * 50
num_examples = 10
random_seed = 42
_rng = np.random.default_rng(seed=random_seed)
hop_length = fft_length // 4
audio2spec = AudioToSpectrogram(fft_length=fft_length, hop_length=hop_length)
for ref_channel in ref_channels:
mask_processor = MaskReferenceChannel(ref_channel=ref_channel)
for n in range(num_examples):
x = _rng.normal(size=(batch_size, num_channels, num_samples))
spec, spec_len = audio2spec(
input=torch.Tensor(x), input_length=torch.Tensor([num_samples] * batch_size)
)
# Randomly-generated mask
mask = _rng.uniform(
low=0.0, high=1.0, size=(batch_size, num_masks, audio2spec.num_subbands, spec.shape[-1])
)
# UUT output
out, _ = mask_processor(input=spec, input_length=spec_len, mask=torch.tensor(mask))
out_np = out.cpu().detach().numpy()
# Golden output
spec_np = spec.cpu().detach().numpy()
out_golden = np.zeros_like(mask, dtype=spec_np.dtype)
for m in range(num_masks):
out_golden[:, m, ...] = spec_np[:, ref_channel, ...] * mask[:, m, ...]
# Compare shape
assert out_np.shape == out_golden.shape, f'Output shape not matching for example {n}'
# Compare values
assert np.allclose(out_np, out_golden, atol=atol), f'Output not matching for example {n}'
class TestMaskBasedDereverb:
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 3])
@pytest.mark.parametrize('filter_length', [10])
@pytest.mark.parametrize('delay', [0, 5])
def test_wpe_convtensor(self, num_channels: int, filter_length: int, delay: int):
"""Test construction of convolutional tensor in WPE. Compare against
reference implementation convmtx_mc.
"""
atol = 1e-6
random_seed = 42
num_examples = 10
batch_size = 8
num_subbands = 15
num_frames = 21
_rng = np.random.default_rng(seed=random_seed)
input_size = (batch_size, num_channels, num_subbands, num_frames)
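# The golden reference below builds, per (batch, subband), a multichannel convolution matrix of shape
# (num_frames, num_channels * filter_length) via convmtx_mc_numpy; the UUT returns the same values in a
# tensor layout with permuted columns, hence permute_convtensor is applied before the comparison.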
for n in range(num_examples):
X = _rng.normal(size=input_size) + 1j * _rng.normal(size=input_size)
# Reference
tilde_X_ref = np.zeros((batch_size, num_subbands, num_frames, num_channels * filter_length), dtype=X.dtype)
for b in range(batch_size):
for f in range(num_subbands):
tilde_X_ref[b, f, :, :] = convmtx_mc_numpy(
X[b, :, f, :].transpose(), filter_length=filter_length, delay=delay
)
# UUT
tilde_X_uut = WPEFilter.convtensor(torch.tensor(X), filter_length=filter_length, delay=delay)
# UUT has vectors arranged in a tensor shape with permuted columns
# Reorganize to match the shape and column permutation
tilde_X_uut = WPEFilter.permute_convtensor(tilde_X_uut)
tilde_X_uut = tilde_X_uut.cpu().detach().numpy()
assert np.allclose(tilde_X_uut, tilde_X_ref, atol=atol), f'Example {n}: comparison failed'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 3])
@pytest.mark.parametrize('filter_length', [10])
@pytest.mark.parametrize('delay', [0, 5])
def test_wpe_filter(self, num_channels: int, filter_length: int, delay: int):
"""Test estimation of correlation matrices, filter and filtering.
"""
atol = 1e-6
random_seed = 42
num_examples = 10
batch_size = 4
num_subbands = 15
num_frames = 50
wpe_filter = WPEFilter(filter_length=filter_length, prediction_delay=delay, diag_reg=None)
_rng = np.random.default_rng(seed=random_seed)
input_size = (batch_size, num_channels, num_subbands, num_frames)
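# Sketch of the relations verified by the golden reference below (per batch and subband, with weight w):
#   Q = tilde_X^H diag(w) tilde_X   (weighted correlation of the stacked/delayed input)
#   R = tilde_X^H diag(w) X         (weighted cross-correlation with the current frames)
#   G = Q^{-1} R                    (WPE filter estimate)
#   U = tilde_X G                   (estimate of the undesired, late-reverberant component)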
for n in range(num_examples):
X = torch.tensor(_rng.normal(size=input_size) + 1j * _rng.normal(size=input_size))
weight = torch.tensor(_rng.uniform(size=(batch_size, num_subbands, num_frames)))
# Create convtensor (B, C, F, N, filter_length)
tilde_X = wpe_filter.convtensor(X, filter_length=filter_length, delay=delay)
# Test 1:
# estimate_correlation
# Reference
# move channels to back
X_golden = X.permute(0, 2, 3, 1)
# move channels to back and reshape to (B, F, N, C*filter_length)
tilde_X_golden = tilde_X.permute(0, 2, 3, 1, 4).reshape(
batch_size, num_subbands, num_frames, num_channels * filter_length
)
# (B, F, C * filter_length, C * filter_length)
Q_golden = torch.matmul(tilde_X_golden.transpose(-1, -2).conj(), weight[..., None] * tilde_X_golden)
# (B, F, C * filter_length, C)
R_golden = torch.matmul(tilde_X_golden.transpose(-1, -2).conj(), weight[..., None] * X_golden)
# UUT
Q_uut, R_uut = wpe_filter.estimate_correlations(input=X, weight=weight, tilde_input=tilde_X)
# Flatten (B, F, C, filter_length, C, filter_length) into (B, F, C*filter_length, C*filter_length)
Q_uut_flattened = Q_uut.flatten(start_dim=-2, end_dim=-1).flatten(start_dim=-3, end_dim=-2)
# Flatten (B, F, C, filter_length, C, filter_length) into (B, F, C*filter_length, C*filter_length)
R_uut_flattened = R_uut.flatten(start_dim=-3, end_dim=-2)
assert torch.allclose(Q_uut_flattened, Q_golden, atol=atol), f'Example {n}: comparison failed for Q'
assert torch.allclose(R_uut_flattened, R_golden, atol=atol), f'Example {n}: comparison failed for R'
# Test 2:
# estimate_filter
# Reference
G_golden = torch.linalg.solve(Q_golden, R_golden)
# UUT
G_uut = wpe_filter.estimate_filter(Q_uut, R_uut)
# Flatten and move output channels to back
G_uut_flattened = G_uut.reshape(batch_size, num_channels, num_subbands, -1).permute(0, 2, 3, 1)
assert torch.allclose(G_uut_flattened, G_golden, atol=atol), f'Example {n}: comparison failed for G'
# Test 3:
# apply_filter
# Reference
U_golden = torch.matmul(tilde_X_golden, G_golden)
# UUT
U_uut = wpe_filter.apply_filter(filter=G_uut, tilde_input=tilde_X)
U_uut_ref = U_uut.permute(0, 2, 3, 1)
assert torch.allclose(
U_uut_ref, U_golden, atol=atol
), f'Example {n}: comparison failed for undesired output U'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [3])
@pytest.mark.parametrize('filter_length', [5])
@pytest.mark.parametrize('delay', [0, 2])
def test_mask_based_dereverb_init(self, num_channels: int, filter_length: int, delay: int):
"""Test that dereverb can be initialized and can process audio.
"""
num_examples = 10
batch_size = 8
num_subbands = 15
num_frames = 21
num_iterations = 2
input_size = (batch_size, num_subbands, num_frames, num_channels)
dereverb = MaskBasedDereverbWPE(
filter_length=filter_length, prediction_delay=delay, num_iterations=num_iterations
)
for n in range(num_examples):
# multi-channel input
x = torch.randn(input_size) + 1j * torch.randn(input_size)
# random input_length
x_length = torch.randint(1, num_frames, (batch_size,))
# multi-channel mask
mask = torch.rand(input_size)
# UUT
y, y_length = dereverb(input=x, input_length=x_length, mask=mask)
assert y.shape == x.shape, f'Output shape not matching, example {n}'
assert torch.equal(y_length, x_length), f'Length not matching, example {n}'
| NeMo-main | tests/collections/asr/test_audio_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.asr.models import ASRModel
class TestASRSubsamplingConvChunking:
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_forward(self):
asr_model = ASRModel.from_pretrained("stt_en_fastconformer_ctc_large")
asr_model = asr_model.eval()
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
audio_len = 512
input_signal_batch1 = torch.randn(size=(1, audio_len), device=asr_model.device)
length_batch1 = torch.randint(low=161, high=500, size=[1], device=asr_model.device)
input_signal_batch4 = torch.randn(size=(4, audio_len), device=asr_model.device)
length_batch4 = torch.randint(low=161, high=500, size=[4], device=asr_model.device)
with torch.no_grad():
# regular inference
logprobs_batch1_nosplit, _, _ = asr_model.forward(
input_signal=input_signal_batch1, input_signal_length=length_batch1
)
logprobs_batch4_nosplit, _, _ = asr_model.forward(
input_signal=input_signal_batch4, input_signal_length=length_batch4
)
# force chunking to 2
asr_model.change_subsampling_conv_chunking_factor(subsampling_conv_chunking_factor=2)
# chunked inference by channels as batch is 1
logprobs_batch1_split, _, _ = asr_model.forward(
input_signal=input_signal_batch1, input_signal_length=length_batch1
)
# chunked inference by batch as it is 4 [> 1]
logprobs_batch4_split, _, _ = asr_model.forward(
input_signal=input_signal_batch4, input_signal_length=length_batch4
)
diff = torch.mean(torch.abs(logprobs_batch1_split - logprobs_batch1_nosplit))
assert diff <= 1e-6
diff = torch.max(torch.abs(logprobs_batch4_split - logprobs_batch4_nosplit))
assert diff <= 1e-6
| NeMo-main | tests/collections/asr/test_asr_subsampling.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import tempfile
import numpy as np
import pytest
import soundfile as sf
import torch
from nemo.collections.asr.data.audio_to_label import AudioToMultiLabelDataset, TarredAudioToClassificationLabelDataset
from nemo.collections.asr.data.feature_to_label import FeatureToLabelDataset, FeatureToSeqSpeakerLabelDataset
from nemo.collections.asr.parts.preprocessing.feature_loader import ExternalFeatureLoader
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
class TestASRDatasets:
labels = ["fash", "fbbh", "fclc"]
unique_labels_in_seq = ['0', '1', '2', '3', "zero", "one", "two", "three"]
@pytest.mark.unit
def test_tarred_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/tarred_audio_manifest.json'))
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
featurizer = WaveformFeaturizer(sample_rate=16000, int_values=False, augmentor=None)
ds_braceexpand = TarredAudioToClassificationLabelDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, featurizer=featurizer
)
assert len(ds_braceexpand) == 32
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 32
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToClassificationLabelDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, featurizer=featurizer
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 32
@pytest.mark.unit
def test_tarred_dataset_duplicate_name(self, test_data_dir):
manifest_path = os.path.abspath(
os.path.join(test_data_dir, 'asr/tarred_an4/tarred_duplicate_audio_manifest.json')
)
# Test braceexpand loading
tarpath = os.path.abspath(os.path.join(test_data_dir, 'asr/tarred_an4/audio_{0..1}.tar'))
featurizer = WaveformFeaturizer(sample_rate=16000, int_values=False, augmentor=None)
ds_braceexpand = TarredAudioToClassificationLabelDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, featurizer=featurizer
)
assert len(ds_braceexpand) == 6
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 6
# Test loading via list
tarpath = [os.path.abspath(os.path.join(test_data_dir, f'asr/tarred_an4/audio_{i}.tar')) for i in range(2)]
ds_list_load = TarredAudioToClassificationLabelDataset(
audio_tar_filepaths=tarpath, manifest_filepath=manifest_path, labels=self.labels, featurizer=featurizer
)
count = 0
for _ in ds_list_load:
count += 1
assert count == 6
@pytest.mark.unit
def test_feat_seqlabel_dataset(self, test_data_dir):
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/feat/emb.json'))
feature_loader = ExternalFeatureLoader(augmentor=None)
ds_braceexpand = FeatureToSeqSpeakerLabelDataset(
manifest_filepath=manifest_path, labels=self.unique_labels_in_seq, feature_loader=feature_loader
)
# fmt: off
correct_label = torch.tensor(
[0.0, 1.0, 2.0, 2.0, 1.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0, 2.0, 3.0, 1.0, 2.0, 2.0, 2.0, 0.0, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0, 2.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 2.0, 0.0, 2.0, 2.0, 2.0, 1.0, 2.0, 1.0, 1.0, 0.0, 1.0, 1.0, 0.0, 2.0, 1.0, 2.0, 1.0,]
)
# fmt: on
correct_label_length = torch.tensor(50)
assert ds_braceexpand[0][0].shape == (50, 32)
assert torch.equal(ds_braceexpand[0][2], correct_label)
assert torch.equal(ds_braceexpand[0][3], correct_label_length)
count = 0
for _ in ds_braceexpand:
count += 1
assert count == 2
@pytest.mark.unit
def test_feat_label_dataset(self):
with tempfile.TemporaryDirectory() as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest_input.json')
with open(manifest_path, 'w', encoding='utf-8') as fp:
for i in range(2):
feat_file = os.path.join(tmpdir, f"feat_{i}.pt")
torch.save(torch.randn(80, 5), feat_file)
entry = {'feature_file': feat_file, 'duration': 100000, 'label': '0'}
fp.write(json.dumps(entry) + '\n')
dataset = FeatureToLabelDataset(manifest_filepath=manifest_path, labels=self.unique_labels_in_seq)
correct_label = torch.tensor(self.unique_labels_in_seq.index('0'))
correct_label_length = torch.tensor(1)
assert dataset[0][0].shape == (80, 5)
assert torch.equal(dataset[0][2], correct_label)
assert torch.equal(dataset[0][3], correct_label_length)
count = 0
for _ in dataset:
count += 1
assert count == 2
@pytest.mark.unit
def test_audio_multilabel_dataset(self):
with tempfile.TemporaryDirectory() as tmpdir:
manifest_path = os.path.join(tmpdir, 'manifest_input.json')
with open(manifest_path, 'w', encoding='utf-8') as fp:
for i in range(2):
audio_file = os.path.join(tmpdir, f"audio_{i}.wav")
data = np.random.normal(0, 1, 16000 * 10)
sf.write(audio_file, data, 16000)
entry = {'audio_filepath': audio_file, 'duration': 10, 'label': '0 1 0 1'}
fp.write(json.dumps(entry) + '\n')
dataset = AudioToMultiLabelDataset(manifest_filepath=manifest_path, sample_rate=16000, labels=['0', '1'])
correct_label = torch.tensor([0, 1, 0, 1])
correct_label_length = torch.tensor(4)
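# 10 seconds of audio at 16 kHz -> 160000 samples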
assert dataset[0][0].shape == torch.tensor([0.1] * 160000).shape
assert torch.equal(dataset[0][2], correct_label)
assert torch.equal(dataset[0][3], correct_label_length)
count = 0
for _ in dataset:
count += 1
assert count == 2
| NeMo-main | tests/collections/asr/test_label_datasets.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import joblib
import pytest
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel, EncDecHybridRNNTCTCModel, EncDecRNNTModel
from nemo.collections.asr.models.confidence_ensemble import ConfidenceEnsembleModel
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig, ConfidenceMeasureConfig
def get_model_config(model_class):
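# Build a minimal config for the given ASR model class: CTC models get a ConvASRDecoder head,
# while RNNT/hybrid models get decoder/joint/decoding/loss blocks plus an aux_ctc section
# (consumed by the hybrid model).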
preprocessor_config = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
vocabulary = [' ', "'", 'a', 'b', 'c'] # does not matter, so keeping small
encoder_config = {
'_target_': 'nemo.collections.asr.modules.ConformerEncoder',
'feat_in': 64,
'n_layers': 8,
'd_model': 4,
}
if model_class is EncDecCTCModel:
decoder_config = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': None,
'num_classes': len(vocabulary),
'vocabulary': vocabulary,
}
model_config = DictConfig(
{
'compute_eval_loss': True, # will be ignored by the model
'preprocessor': DictConfig(preprocessor_config),
'encoder': DictConfig(encoder_config),
'decoder': DictConfig(decoder_config),
}
)
else:
decoder_config = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': 4, 'pred_rnn_layers': 1},
}
joint_config = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 4, 'activation': 'relu'},
}
decoding_config = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 30}}
loss_config = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
model_config = DictConfig(
{
'compute_eval_loss': True,
'labels': ListConfig(vocabulary),
'preprocessor': DictConfig(preprocessor_config),
'model_defaults': DictConfig({'enc_hidden': 4, 'pred_hidden': 4}),
'encoder': DictConfig(encoder_config),
'decoder': DictConfig(decoder_config),
'joint': DictConfig(joint_config),
'decoding': DictConfig(decoding_config),
'loss': DictConfig(loss_config),
'optim': {'name': 'adamw'},
'aux_ctc': {
'ctc_loss_weight': 0.3,
'use_cer': False,
'ctc_reduction': 'mean_batch',
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': None,
'num_classes': len(vocabulary),
'vocabulary': vocabulary,
},
'decoding': DictConfig(CTCDecodingConfig),
},
}
)
model_config['target'] = f'{model_class.__module__}.{model_class.__name__}'
return model_config
class TestConfidenceEnsembles:
"""Only basic tests that are very fast to run.
There are much more extensive integration tests available in
scripts/confidence_ensembles/test_confidence_ensembles.py
"""
@pytest.mark.unit
@pytest.mark.parametrize(
"model_class0", [EncDecCTCModel, EncDecRNNTModel, EncDecHybridRNNTCTCModel],
)
@pytest.mark.parametrize(
"model_class1", [EncDecCTCModel, EncDecRNNTModel, EncDecHybridRNNTCTCModel],
)
def test_model_creation_2models(self, tmp_path, model_class0, model_class1):
"""Basic test to check that ensemble of 2 models can be created."""
model_config0 = get_model_config(model_class0)
model_config1 = get_model_config(model_class1)
# dummy pickle file for the model selection block
joblib.dump({}, tmp_path / 'dummy.pkl')
# default confidence
confidence_config = ConfidenceConfig(
# we keep frame confidences and apply aggregation manually to get full-utterance confidence
preserve_frame_confidence=True,
exclude_blank=True,
aggregation="mean",
measure_cfg=ConfidenceMeasureConfig(name="entropy", entropy_type="renyi", alpha=0.25, entropy_norm="lin",),
)
# just checking that no errors are raised when creating the model
ConfidenceEnsembleModel(
cfg=DictConfig(
{
'model_selection_block': str(tmp_path / 'dummy.pkl'),
'confidence': confidence_config,
'temperature': 1.0,
'num_models': 2,
'model0': model_config0,
'model1': model_config1,
}
),
trainer=None,
)
@pytest.mark.unit
def test_model_creation_5models(self, tmp_path):
"""Basic test to check that ensemble of 5 models can be created."""
model_configs = [get_model_config(EncDecCTCModel) for _ in range(5)]
# dummy pickle file for the model selection block
joblib.dump({}, tmp_path / 'dummy.pkl')
# default confidence
confidence_config = ConfidenceConfig(
# we keep frame confidences and apply aggregation manually to get full-utterance confidence
preserve_frame_confidence=True,
exclude_blank=True,
aggregation="mean",
measure_cfg=ConfidenceMeasureConfig(name="entropy", entropy_type="renyi", alpha=0.25, entropy_norm="lin",),
)
# just checking that no errors are raised when creating the model
ConfidenceEnsembleModel(
cfg=DictConfig(
{
'model_selection_block': str(tmp_path / 'dummy.pkl'),
'confidence': confidence_config,
'temperature': 1.0,
'num_models': 5,
'model0': model_configs[0],
'model1': model_configs[1],
'model2': model_configs[2],
'model3': model_configs[3],
'model4': model_configs[4],
}
),
trainer=None,
)
| NeMo-main | tests/collections/asr/test_confidence_ensembles.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from omegaconf import DictConfig
from nemo.collections.asr.models.classification_models import EncDecRegressionModel
@pytest.fixture()
def speech_regression_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 32,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.conv_asr.ConvASRDecoderClassification',
'params': {'feat_in': 32, 'return_logits': True, 'num_classes': 1},
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'labels': None,
'is_regression_task': True,
}
)
model = EncDecRegressionModel(cfg=modelConfig)
return model
class TestEncDecRegressionModel:
@pytest.mark.unit
def test_constructor(self, speech_regression_model):
asr_model = speech_regression_model.train()
conv_cnt = (64 * 32 * 1 + 32) + (64 * 1 * 1 + 32) # separable kernel + bias + pointwise kernel + bias
bn_cnt = (4 * 32) * 2 # 2 * moving averages
dec_cnt = 32 * 1 + 1 # fc + bias
param_count = conv_cnt + bn_cnt + dec_cnt
assert asr_model.num_weights == param_count
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecRegressionModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecRegressionModel)
@pytest.mark.unit
def test_transcription(self, speech_regression_model, test_data_dir):
audio_filenames = ['an22-flrp-b.wav', 'an90-fbbh-b.wav']
audio_paths = [os.path.join(test_data_dir, "asr", "train", "an4", "wav", fp) for fp in audio_filenames]
model = speech_regression_model.eval()
# Test Top 1 classification transcription
results = model.transcribe(audio_paths, batch_size=2)
assert len(results) == 2
| NeMo-main | tests/collections/asr/test_asr_regression_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.metrics.wer_bpe import CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.models.hybrid_rnnt_ctc_bpe_models import EncDecHybridRNNTCTCBPEModel
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.common import tokenizers
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
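# the RNNT loss needs Numba support on CPU or CUDA; the tests below are skipped when neither is available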
@pytest.fixture()
def hybrid_asr_model(test_data_dir):
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
model_defaults = {'enc_hidden': 1024, 'pred_hidden': 64}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': model_defaults['pred_hidden'], 'pred_rnn_layers': 1,},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 32, 'activation': 'relu',},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 30}}
tokenizer = {'dir': os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128"), 'type': 'wpe'}
loss = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
aux_ctc = {
'ctc_loss_weight': 0.3,
'use_cer': False,
'ctc_reduction': 'mean_batch',
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 1024,
'num_classes': -2,
'vocabulary': None,
},
'decoding': DictConfig(CTCBPEDecodingConfig),
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'tokenizer': DictConfig(tokenizer),
'decoding': DictConfig(decoding),
'loss': DictConfig(loss),
'aux_ctc': DictConfig(aux_ctc),
}
)
model_instance = EncDecHybridRNNTCTCBPEModel(cfg=modelConfig)
return model_instance
class TestEncDecHybridRNNTCTCBPEModel:
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor(self, hybrid_asr_model):
hybrid_asr_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = hybrid_asr_model.to_config_dict()
instance2 = EncDecHybridRNNTCTCBPEModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecHybridRNNTCTCBPEModel)
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_forward(self, hybrid_asr_model):
hybrid_asr_model = hybrid_asr_model.eval()
hybrid_asr_model.preprocessor.featurizer.dither = 0.0
hybrid_asr_model.preprocessor.featurizer.pad_to = 0
hybrid_asr_model.compute_eval_loss = False
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins, _ = hybrid_asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
logits_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch, _ = hybrid_asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logits_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logits_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logits_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_save_restore_artifact(self, hybrid_asr_model):
hybrid_asr_model.train()
with tempfile.TemporaryDirectory() as tmp_dir:
path = os.path.join(tmp_dir, 'rnnt_bpe.nemo')
hybrid_asr_model.save_to(path)
new_model = EncDecHybridRNNTCTCBPEModel.restore_from(path)
assert isinstance(new_model, type(hybrid_asr_model))
assert new_model.vocab_path.endswith('_vocab.txt')
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 128
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_save_restore_artifact_spe(self, hybrid_asr_model, test_data_dir):
hybrid_asr_model.train()
with tempfile.TemporaryDirectory() as tmpdir:
tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_spe_128")
hybrid_asr_model.change_vocabulary(new_tokenizer_dir=tokenizer_dir, new_tokenizer_type='bpe')
save_path = os.path.join(tmpdir, 'ctc_bpe.nemo')
hybrid_asr_model.train()
hybrid_asr_model.save_to(save_path)
new_model = EncDecHybridRNNTCTCBPEModel.restore_from(save_path)
assert isinstance(new_model, type(hybrid_asr_model))
assert isinstance(new_model.tokenizer, tokenizers.SentencePieceTokenizer)
assert new_model.model_path.endswith('_tokenizer.model')
assert new_model.vocab_path.endswith('_vocab.txt')
assert new_model.spe_vocab_path.endswith('_tokenizer.vocab')
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_save_restore_artifact_agg(self, hybrid_asr_model, test_data_dir):
tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_spe_128")
tok_en = {"dir": tokenizer_dir, "type": "wpe"}
# the tokenizer below is really an English tokenizer, but we pretend it is Spanish
tok_es = {"dir": tokenizer_dir, "type": "wpe"}
tcfg = DictConfig({"type": "agg", "langs": {"en": tok_en, "es": tok_es}})
with tempfile.TemporaryDirectory() as tmpdir:
hybrid_asr_model.change_vocabulary(new_tokenizer_dir=tcfg, new_tokenizer_type="agg")
save_path = os.path.join(tmpdir, "ctc_agg.nemo")
hybrid_asr_model.train()
hybrid_asr_model.save_to(save_path)
new_model = EncDecHybridRNNTCTCBPEModel.restore_from(save_path)
assert isinstance(new_model, type(hybrid_asr_model))
assert isinstance(new_model.tokenizer, tokenizers.AggregateTokenizer)
# should be double
assert new_model.tokenizer.tokenizer.vocab_size == 254
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 254
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_vocab_change(self, test_data_dir, hybrid_asr_model):
with tempfile.TemporaryDirectory() as tmpdir:
old_tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
new_tokenizer_dir = os.path.join(tmpdir, 'tokenizer')
os.makedirs(new_tokenizer_dir, exist_ok=True)
shutil.copy2(old_tokenizer_dir, new_tokenizer_dir)
nw1 = hybrid_asr_model.num_weights
hybrid_asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')
# No change
assert nw1 == hybrid_asr_model.num_weights
with open(os.path.join(new_tokenizer_dir, 'vocab.txt'), 'a+') as f:
f.write("!\n")
f.write('$\n')
f.write('@\n')
hybrid_asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')
# 3 new tokens add: prediction-net embedding rows, joint output rows (+ bias), and CTC decoder rows (+ bias)
pred_embedding = 3 * (hybrid_asr_model.decoder.pred_hidden)
joint_joint = 3 * (hybrid_asr_model.joint.joint_hidden + 1)
ctc_decoder = 3 * (hybrid_asr_model.ctc_decoder._feat_in + 1)
assert hybrid_asr_model.num_weights == (nw1 + (pred_embedding + joint_joint) + ctc_decoder)
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_decoding_change(self, hybrid_asr_model):
assert isinstance(hybrid_asr_model.decoding.decoding, greedy_decode.GreedyBatchedRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'greedy'
new_strategy.greedy = DictConfig({'max_symbols': 10})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, greedy_decode.GreedyRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 1})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 2})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'tsd'
new_strategy.beam = DictConfig({'beam_size': 2})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "tsd"
new_strategy = DictConfig({})
new_strategy.strategy = 'alsd'
new_strategy.beam = DictConfig({'beam_size': 2})
hybrid_asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(hybrid_asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert hybrid_asr_model.decoding.decoding.search_type == "alsd"
assert hybrid_asr_model.ctc_decoding is not None
assert isinstance(hybrid_asr_model.ctc_decoding, CTCBPEDecoding)
assert hybrid_asr_model.ctc_decoding.cfg.strategy == "greedy"
assert hybrid_asr_model.ctc_decoding.preserve_alignments is False
assert hybrid_asr_model.ctc_decoding.compute_timestamps is False
cfg = CTCBPEDecodingConfig(preserve_alignments=True, compute_timestamps=True)
hybrid_asr_model.change_decoding_strategy(cfg, decoder_type="ctc")
assert hybrid_asr_model.ctc_decoding.preserve_alignments is True
assert hybrid_asr_model.ctc_decoding.compute_timestamps is True
assert hybrid_asr_model.cur_decoder == "ctc"
| NeMo-main | tests/collections/asr/test_asr_hybrid_rnnt_ctc_model_bpe.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from nemo.collections.asr.losses.audio_losses import (
SDRLoss,
calculate_sdr_batch,
convolution_invariant_target,
scale_invariant_target,
)
from nemo.collections.asr.parts.utils.audio_utils import (
calculate_sdr_numpy,
convolution_invariant_target_numpy,
scale_invariant_target_numpy,
)
class TestAudioLosses:
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 4])
def test_sdr(self, num_channels: int):
"""Test SDR calculation
"""
test_eps = [0, 1e-16, 1e-1]
batch_size = 8
num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
for remove_mean in [True, False]:
for eps in test_eps:
sdr_loss = SDRLoss(eps=eps, remove_mean=remove_mean)
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.01, high=1) * _rng.normal(size=(batch_size, num_channels, num_samples))
# Estimate
estimate = target + noise
# DC bias for both
target += _rng.uniform(low=-1, high=1)
estimate += _rng.uniform(low=-1, high=1)
# Tensors for testing the loss
tensor_estimate = torch.tensor(estimate)
tensor_target = torch.tensor(target)
# Reference SDR
golden_sdr = np.zeros((batch_size, num_channels))
for b in range(batch_size):
for m in range(num_channels):
golden_sdr[b, m] = calculate_sdr_numpy(
estimate=estimate[b, m, :], target=target[b, m, :], remove_mean=remove_mean, eps=eps,
)
# Calculate SDR in torch
uut_sdr = calculate_sdr_batch(
estimate=tensor_estimate, target=tensor_target, remove_mean=remove_mean, eps=eps,
)
# Calculate SDR loss
uut_sdr_loss = sdr_loss(estimate=tensor_estimate, target=tensor_target)
# Compare torch SDR vs numpy
assert np.allclose(
uut_sdr.cpu().detach().numpy(), golden_sdr, atol=atol
), f'SDR not matching for example {n}, eps={eps}, remove_mean={remove_mean}'
# Compare SDR loss vs average of torch SDR
assert np.isclose(
uut_sdr_loss, -uut_sdr.mean(), atol=atol
), f'SDRLoss not matching for example {n}, eps={eps}, remove_mean={remove_mean}'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 4])
def test_sdr_weighted(self, num_channels: int):
"""Test SDR calculation with weighting for channels
"""
batch_size = 8
num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
channel_weight = _rng.uniform(low=0.01, high=1.0, size=num_channels)
channel_weight = channel_weight / np.sum(channel_weight)
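        # The weights are normalized to sum to one, so the weighted per-channel SDR below is a convex combination across channels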
sdr_loss = SDRLoss(weight=channel_weight)
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Tensors for testing the loss
tensor_estimate = torch.tensor(estimate)
tensor_target = torch.tensor(target)
# Reference SDR
golden_sdr = 0
for b in range(batch_size):
sdr = [
calculate_sdr_numpy(estimate=estimate[b, m, :], target=target[b, m, :])
for m in range(num_channels)
]
# weighted sum
sdr = np.sum(np.array(sdr) * channel_weight)
golden_sdr += sdr
golden_sdr /= batch_size # average over batch
# Calculate SDR
uut_sdr_loss = sdr_loss(estimate=tensor_estimate, target=tensor_target)
# Compare
assert np.allclose(
uut_sdr_loss.cpu().detach().numpy(), -golden_sdr, atol=atol
), f'SDRLoss not matching for example {n}'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 4])
def test_sdr_input_length(self, num_channels):
"""Test SDR calculation with input length.
"""
batch_size = 8
max_num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
sdr_loss = SDRLoss()
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, max_num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Limit calculation to random input_length samples
input_length = _rng.integers(low=1, high=max_num_samples, size=batch_size)
# Tensors for testing the loss
tensor_estimate = torch.tensor(estimate)
tensor_target = torch.tensor(target)
tensor_input_length = torch.tensor(input_length)
# Reference SDR
golden_sdr = 0
for b, b_len in enumerate(input_length):
sdr = [
calculate_sdr_numpy(estimate=estimate[b, m, :b_len], target=target[b, m, :b_len])
for m in range(num_channels)
]
sdr = np.mean(np.array(sdr))
golden_sdr += sdr
golden_sdr /= batch_size # average over batch
# Calculate SDR
uut_sdr_loss = sdr_loss(estimate=tensor_estimate, target=tensor_target, input_length=tensor_input_length)
# Compare
assert np.allclose(
uut_sdr_loss.cpu().detach().numpy(), -golden_sdr, atol=atol
), f'SDRLoss not matching for example {n}'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 4])
def test_sdr_scale_invariant(self, num_channels: int):
"""Test SDR calculation with scale invariant option.
"""
batch_size = 8
max_num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
sdr_loss = SDRLoss(scale_invariant=True)
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, max_num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Limit calculation to random input_length samples
input_length = _rng.integers(low=1, high=max_num_samples, size=batch_size)
# Tensors for testing the loss
tensor_estimate = torch.tensor(estimate)
tensor_target = torch.tensor(target)
tensor_input_length = torch.tensor(input_length)
# Reference SDR
golden_sdr = 0
for b, b_len in enumerate(input_length):
sdr = [
calculate_sdr_numpy(
estimate=estimate[b, m, :b_len], target=target[b, m, :b_len], scale_invariant=True
)
for m in range(num_channels)
]
sdr = np.mean(np.array(sdr))
golden_sdr += sdr
golden_sdr /= batch_size # average over batch
# Calculate SDR loss
uut_sdr_loss = sdr_loss(estimate=tensor_estimate, target=tensor_target, input_length=tensor_input_length)
# Compare
assert np.allclose(
uut_sdr_loss.cpu().detach().numpy(), -golden_sdr, atol=atol
), f'SDRLoss not matching for example {n}'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 4])
def test_sdr_binary_mask(self, num_channels):
"""Test SDR calculation with temporal mask.
"""
batch_size = 8
max_num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
sdr_loss = SDRLoss()
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, max_num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Limit calculation to masked samples
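            # numpy's integers() excludes the high endpoint, so the mask entries are drawn from {0, 1}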
mask = _rng.integers(low=0, high=2, size=(batch_size, max_num_samples))
# Tensors for testing the loss
tensor_estimate = torch.tensor(estimate)
tensor_target = torch.tensor(target)
tensor_mask = torch.tensor(mask)
# Reference SDR
golden_sdr = 0
for b in range(batch_size):
sdr = [
calculate_sdr_numpy(estimate=estimate[b, m, mask[b, :] > 0], target=target[b, m, mask[b, :] > 0])
for m in range(num_channels)
]
sdr = np.mean(np.array(sdr))
golden_sdr += sdr
golden_sdr /= batch_size # average over batch
# Calculate SDR loss
uut_sdr_loss = sdr_loss(estimate=tensor_estimate, target=tensor_target, mask=tensor_mask)
# Compare
assert np.allclose(
uut_sdr_loss.cpu().detach().numpy(), -golden_sdr, atol=atol
), f'SDRLoss not matching for example {n}'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1])
@pytest.mark.parametrize('sdr_max', [10, 0])
def test_sdr_max(self, num_channels: int, sdr_max: float):
"""Test SDR calculation with soft max threshold.
"""
batch_size = 8
max_num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
sdr_loss = SDRLoss(sdr_max=sdr_max)
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, max_num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Limit calculation to random input_length samples
input_length = _rng.integers(low=1, high=max_num_samples, size=batch_size)
# Tensors for testing the loss
tensor_estimate = torch.tensor(estimate)
tensor_target = torch.tensor(target)
tensor_input_length = torch.tensor(input_length)
# Reference SDR
golden_sdr = 0
for b, b_len in enumerate(input_length):
sdr = [
calculate_sdr_numpy(estimate=estimate[b, m, :b_len], target=target[b, m, :b_len], sdr_max=sdr_max)
for m in range(num_channels)
]
sdr = np.mean(np.array(sdr))
golden_sdr += sdr
golden_sdr /= batch_size # average over batch
# Calculate SDR loss
uut_sdr_loss = sdr_loss(estimate=tensor_estimate, target=tensor_target, input_length=tensor_input_length)
# Compare
assert np.allclose(
uut_sdr_loss.cpu().detach().numpy(), -golden_sdr, atol=atol
), f'SDRLoss not matching for example {n}'
@pytest.mark.unit
@pytest.mark.parametrize('filter_length', [1, 32])
@pytest.mark.parametrize('num_channels', [1, 4])
def test_target_calculation(self, num_channels: int, filter_length: int):
"""Test target calculation with scale and convolution invariance.
"""
batch_size = 8
max_num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, max_num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Limit calculation to random input_length samples
input_length = _rng.integers(low=filter_length, high=max_num_samples, size=batch_size)
# UUT
si_target = scale_invariant_target(
estimate=torch.tensor(estimate),
target=torch.tensor(target),
input_length=torch.tensor(input_length),
mask=None,
)
ci_target = convolution_invariant_target(
estimate=torch.tensor(estimate),
target=torch.tensor(target),
input_length=torch.tensor(input_length),
mask=None,
filter_length=filter_length,
)
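            # A length-1 filter is just a per-channel scalar gain, so the convolution-invariant target must reduce to the scale-invariant one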
if filter_length == 1:
                assert torch.allclose(ci_target, si_target), 'SI and CI should match for filter_length=1'
# Compare against numpy
for b, b_len in enumerate(input_length):
for m in range(num_channels):
# Scale invariant reference
si_target_ref = scale_invariant_target_numpy(
estimate=estimate[b, m, :b_len], target=target[b, m, :b_len]
)
assert np.allclose(
si_target[b, m, :b_len].cpu().detach().numpy(), si_target_ref, atol=atol
), f'SI not matching for example {n}, channel {m}'
# Convolution invariant reference
ci_target_ref = convolution_invariant_target_numpy(
estimate=estimate[b, m, :b_len], target=target[b, m, :b_len], filter_length=filter_length
)
assert np.allclose(
ci_target[b, m, :b_len].cpu().detach().numpy(), ci_target_ref, atol=atol
), f'CI not matching for example {n}, channel {m}'
@pytest.mark.unit
@pytest.mark.parametrize('filter_length', [1, 32])
@pytest.mark.parametrize('num_channels', [1, 4])
def test_sdr_convolution_invariant(self, num_channels: int, filter_length: int):
"""Test SDR calculation with convolution invariant option.
"""
batch_size = 8
max_num_samples = 50
num_batches = 10
random_seed = 42
atol = 1e-6
_rng = np.random.default_rng(seed=random_seed)
sdr_loss = SDRLoss(convolution_invariant=True, convolution_filter_length=filter_length)
for n in range(num_batches):
# Generate random signal
target = _rng.normal(size=(batch_size, num_channels, max_num_samples))
# Random noise + scaling
noise = _rng.uniform(low=0.001, high=10) * _rng.normal(size=target.shape)
# Estimate
estimate = target + noise
# Limit calculation to random input_length samples
input_length = _rng.integers(low=filter_length, high=max_num_samples, size=batch_size)
# Calculate SDR loss
uut_sdr_loss = sdr_loss(
estimate=torch.tensor(estimate), target=torch.tensor(target), input_length=torch.tensor(input_length)
)
# Reference SDR
golden_sdr = 0
for b, b_len in enumerate(input_length):
sdr = [
calculate_sdr_numpy(
estimate=estimate[b, m, :b_len],
target=target[b, m, :b_len],
convolution_invariant=True,
convolution_filter_length=filter_length,
)
for m in range(num_channels)
]
sdr = np.mean(np.array(sdr))
golden_sdr += sdr
golden_sdr /= batch_size # average over batch
# Compare
assert np.allclose(
uut_sdr_loss.cpu().detach().numpy(), -golden_sdr, atol=atol
), f'SDRLoss not matching for example {n}'
| NeMo-main | tests/collections/asr/test_asr_losses.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import shutil
import tempfile
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.models.rnnt_bpe_models import EncDecRNNTBPEModel
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.common import tokenizers
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
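# The Numba RNNT loss can run on either backend, so a supported Numba install on CPU or CUDA is enough to enable these tests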
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
@pytest.fixture()
def asr_model(test_data_dir):
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
model_defaults = {'enc_hidden': 1024, 'pred_hidden': 64}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': model_defaults['pred_hidden'], 'pred_rnn_layers': 1,},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 32, 'activation': 'relu',},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 30}}
tokenizer = {'dir': os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128"), 'type': 'wpe'}
loss = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'tokenizer': DictConfig(tokenizer),
'decoding': DictConfig(decoding),
'loss': DictConfig(loss),
}
)
model_instance = EncDecRNNTBPEModel(cfg=modelConfig)
return model_instance
class NestedRNNTModel(ASRModel):
def __init__(self, cfg: DictConfig, trainer: 'Trainer' = None):
super().__init__(cfg=cfg, trainer=trainer)
if 'inner_model' in self.cfg:
self.register_nemo_submodule(
"inner_model", config_field="inner_model", model=EncDecRNNTBPEModel(self.cfg.inner_model)
)
else:
# Restore a model from pretrained checkpoint
self.register_nemo_submodule(
"inner_model",
config_field="inner_model",
model=ASRModel.from_pretrained('stt_en_conformer_transducer_small', map_location='cpu'),
)
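        # vocab_size + 1 accounts for the RNNT blank token appended after the tokenizer vocabulary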
self.linear = torch.nn.Linear(
self.inner_model.tokenizer.vocab_size + 1, self.inner_model.tokenizer.vocab_size + 1
)
self.inner_model.freeze()
setup_training_data = lambda *args, **kwargs: None
setup_validation_data = lambda *args, **kwargs: None
transcribe = lambda *args, **kwargs: []
class TestEncDecRNNTBPEModel:
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor(self, asr_model):
asr_model.train()
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = asr_model.to_config_dict()
instance2 = EncDecRNNTBPEModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecRNNTBPEModel)
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_forward(self, asr_model):
asr_model = asr_model.eval()
asr_model.preprocessor.featurizer.dither = 0.0
asr_model.preprocessor.featurizer.pad_to = 0
asr_model.compute_eval_loss = False
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
logprobs_instance = []
for i in range(input_signal.size(0)):
logprobs_ins, _ = asr_model.forward(
input_signal=input_signal[i : i + 1], input_signal_length=length[i : i + 1]
)
logprobs_instance.append(logprobs_ins)
logits_instance = torch.cat(logprobs_instance, 0)
# batch size 4
logprobs_batch, _ = asr_model.forward(input_signal=input_signal, input_signal_length=length)
assert logits_instance.shape == logprobs_batch.shape
diff = torch.mean(torch.abs(logits_instance - logprobs_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(logits_instance - logprobs_batch))
assert diff <= 1e-6
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_save_restore_artifact(self, asr_model):
asr_model.train()
with tempfile.TemporaryDirectory() as tmp_dir:
path = os.path.join(tmp_dir, 'rnnt_bpe.nemo')
asr_model.save_to(path)
new_model = EncDecRNNTBPEModel.restore_from(path)
assert isinstance(new_model, type(asr_model))
assert new_model.vocab_path.endswith('_vocab.txt')
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 128
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_save_restore_artifact_spe(self, asr_model, test_data_dir):
asr_model.train()
with tempfile.TemporaryDirectory() as tmpdir:
tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_spe_128")
asr_model.change_vocabulary(new_tokenizer_dir=tokenizer_dir, new_tokenizer_type='bpe')
save_path = os.path.join(tmpdir, 'ctc_bpe.nemo')
asr_model.train()
asr_model.save_to(save_path)
new_model = EncDecRNNTBPEModel.restore_from(save_path)
assert isinstance(new_model, type(asr_model))
assert isinstance(new_model.tokenizer, tokenizers.SentencePieceTokenizer)
assert new_model.model_path.endswith('_tokenizer.model')
assert new_model.vocab_path.endswith('_vocab.txt')
assert new_model.spe_vocab_path.endswith('_tokenizer.vocab')
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_save_restore_artifact_agg(self, asr_model, test_data_dir):
tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_spe_128")
tok_en = {"dir": tokenizer_dir, "type": "wpe"}
        # the below is really an English tokenizer but we pretend it is Spanish
tok_es = {"dir": tokenizer_dir, "type": "wpe"}
tcfg = DictConfig({"type": "agg", "langs": {"en": tok_en, "es": tok_es}})
with tempfile.TemporaryDirectory() as tmpdir:
asr_model.change_vocabulary(new_tokenizer_dir=tcfg, new_tokenizer_type="agg")
save_path = os.path.join(tmpdir, "ctc_agg.nemo")
asr_model.train()
asr_model.save_to(save_path)
new_model = EncDecRNNTBPEModel.restore_from(save_path)
assert isinstance(new_model, type(asr_model))
assert isinstance(new_model.tokenizer, tokenizers.AggregateTokenizer)
# should be double
assert new_model.tokenizer.tokenizer.vocab_size == 254
assert len(new_model.tokenizer.tokenizer.get_vocab()) == 254
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_vocab_change(self, test_data_dir, asr_model):
with tempfile.TemporaryDirectory() as tmpdir:
old_tokenizer_dir = os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128", 'vocab.txt')
new_tokenizer_dir = os.path.join(tmpdir, 'tokenizer')
os.makedirs(new_tokenizer_dir, exist_ok=True)
shutil.copy2(old_tokenizer_dir, new_tokenizer_dir)
nw1 = asr_model.num_weights
asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')
# No change
assert nw1 == asr_model.num_weights
with open(os.path.join(new_tokenizer_dir, 'vocab.txt'), 'a+') as f:
f.write("!\n")
f.write('$\n')
f.write('@\n')
asr_model.change_vocabulary(new_tokenizer_dir=new_tokenizer_dir, new_tokenizer_type='wpe')
# rnn embedding + joint + bias
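            # Three tokens ('!', '$', '@') were appended to vocab.txt above, so the prediction-net
            # embedding grows by 3 * pred_hidden parameters and the joint output layer grows by
            # 3 * (joint_hidden + 1) parameters (one weight row plus one bias per new token)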
pred_embedding = 3 * (asr_model.decoder.pred_hidden)
joint_joint = 3 * (asr_model.joint.joint_hidden + 1)
assert asr_model.num_weights == (nw1 + (pred_embedding + joint_joint))
@pytest.mark.with_downloads()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_decoding_change(self, asr_model):
assert isinstance(asr_model.decoding.decoding, greedy_decode.GreedyBatchedRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'greedy'
new_strategy.greedy = DictConfig({'max_symbols': 10})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, greedy_decode.GreedyRNNTInfer)
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 1})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'beam'
new_strategy.beam = DictConfig({'beam_size': 2})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "default"
new_strategy = DictConfig({})
new_strategy.strategy = 'tsd'
new_strategy.beam = DictConfig({'beam_size': 2})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "tsd"
new_strategy = DictConfig({})
new_strategy.strategy = 'alsd'
new_strategy.beam = DictConfig({'beam_size': 2})
asr_model.change_decoding_strategy(decoding_cfg=new_strategy)
assert isinstance(asr_model.decoding.decoding, beam_decode.BeamRNNTInfer)
assert asr_model.decoding.decoding.search_type == "alsd"
@pytest.mark.with_downloads()
@pytest.mark.unit
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
def test_save_restore_nested_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
model = NestedRNNTModel(cfg=DictConfig({}), trainer=None)
path = os.path.join(tmp_dir, 'rnnt_bpe.nemo')
model.save_to(path)
new_model = NestedRNNTModel.restore_from(path, map_location='cpu')
assert model.__class__.__name__ == NestedRNNTModel.__name__
assert new_model.__class__.__name__ == NestedRNNTModel.__name__
assert isinstance(new_model, type(model))
assert new_model.inner_model.vocab_path.endswith('_vocab.txt')
assert len(new_model.inner_model.tokenizer.tokenizer.get_vocab()) == 1024
# Unpack the nemo file
NestedRNNTModel._save_restore_connector._unpack_nemo_file(path, tmp_dir)
# Check size of the checkpoint, which contains weights from pretrained model + linear layer
fp_weights = os.path.join(tmp_dir, 'model_weights.ckpt')
assert os.path.getsize(fp_weights) > 50 * (2 ** 20) # Assert the weights are more than 50 MB
# Check if param after restoration is exact match
original_state_dict = model.inner_model.state_dict()
new_state_dict = new_model.inner_model.state_dict()
for (old_name, old_param), (new_name, new_param) in zip(
original_state_dict.items(), new_state_dict.items()
):
assert old_name == new_name
assert (old_param - new_param).float().abs().mean() < 1e-6
| NeMo-main | tests/collections/asr/test_asr_rnnt_encoder_model_bpe.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import OmegaConf
from nemo.collections.asr import modules
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils import config_utils, logging
class TestASRModulesBasicTests:
@pytest.mark.unit
def test_AudioToMelSpectrogramPreprocessor_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.AudioToMelSpectrogramPreprocessor,
modules.audio_preprocessing.AudioToMelSpectrogramPreprocessorConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_AudioToMelSpectrogramPreprocessor_batch(self):
        # Test 1: exercise the pure STFT implementation as much as possible
instance1 = modules.AudioToMelSpectrogramPreprocessor(normalize="per_feature", dither=0, pad_to=0)
        # Ensure that per-example and batched calls behave similarly
for _ in range(10):
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
with torch.no_grad():
# batch size 1
res_instance, length_instance = [], []
for i in range(input_signal.size(0)):
res_ins, length_ins = instance1(input_signal=input_signal[i : i + 1], length=length[i : i + 1])
res_instance.append(res_ins)
length_instance.append(length_ins)
res_instance = torch.cat(res_instance, 0)
length_instance = torch.cat(length_instance, 0)
# batch size 4
res_batch, length_batch = instance1(input_signal=input_signal, length=length)
assert res_instance.shape == res_batch.shape
assert length_instance.shape == length_batch.shape
diff = torch.mean(torch.abs(res_instance - res_batch))
assert diff <= 1e-3
diff = torch.max(torch.abs(res_instance - res_batch))
assert diff <= 1e-3
@pytest.mark.unit
    def test_SpectrogramAugmentation(self):
# Make sure constructor works
instance1 = modules.SpectrogramAugmentation(
freq_masks=10, time_masks=3, rect_masks=3, use_numba_spec_augment=False
)
assert isinstance(instance1, modules.SpectrogramAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
res0 = instance0(input_signal=input_signal, length=length)
res = instance1(input_spec=res0[0], length=length)
assert res.shape == res0[0].shape
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
    def test_SpectrogramAugmentation_numba_kernel(self, caplog):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
logging._logger.propagate = True
original_verbosity = logging.get_verbosity()
logging.set_verbosity(logging.DEBUG)
caplog.set_level(logging.DEBUG)
# Make sure constructor works
instance1 = modules.SpectrogramAugmentation(
freq_masks=10, time_masks=3, rect_masks=3, use_numba_spec_augment=True
)
assert isinstance(instance1, modules.SpectrogramAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(8, 512))
length = torch.randint(low=161, high=500, size=[8])
res0 = instance0(input_signal=input_signal, length=length)
res = instance1(input_spec=res0[0], length=length)
assert res.shape == res0[0].shape
        # check that the numba kernel debug message indicates that it is available for use
assert """Numba SpecAugment kernel is available""" in caplog.text
logging._logger.propagate = False
logging.set_verbosity(original_verbosity)
@pytest.mark.unit
    def test_SpectrogramAugmentation_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.SpectrogramAugmentation, modules.audio_preprocessing.SpectrogramAugmentationConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_CropOrPadSpectrogramAugmentation(self):
# Make sure constructor works
audio_length = 128
instance1 = modules.CropOrPadSpectrogramAugmentation(audio_length=audio_length)
assert isinstance(instance1, modules.CropOrPadSpectrogramAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
res0 = instance0(input_signal=input_signal, length=length)
res, new_length = instance1(input_signal=res0[0], length=length)
assert res.shape == torch.Size([4, 64, audio_length])
assert all(new_length == torch.tensor([128] * 4))
@pytest.mark.unit
def test_CropOrPadSpectrogramAugmentation_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.CropOrPadSpectrogramAugmentation,
modules.audio_preprocessing.CropOrPadSpectrogramAugmentationConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_MaskedPatchAugmentation(self):
# Make sure constructor works
audio_length = 128
instance1 = modules.MaskedPatchAugmentation(patch_size=16, mask_patches=0.5, freq_masks=2, freq_width=10)
assert isinstance(instance1, modules.MaskedPatchAugmentation)
# Make sure forward doesn't throw with expected input
instance0 = modules.AudioToMelSpectrogramPreprocessor(dither=0)
input_signal = torch.randn(size=(4, 512))
length = torch.randint(low=161, high=500, size=[4])
res0 = instance0(input_signal=input_signal, length=length)
res = instance1(input_spec=res0[0], length=length)
assert res.shape == res0[0].shape
@pytest.mark.unit
def test_MaskedPatchAugmentation_config(self):
# Test that dataclass matches signature of module
result = config_utils.assert_dataclass_signature_match(
modules.MaskedPatchAugmentation, modules.audio_preprocessing.MaskedPatchAugmentationConfig,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_RNNTDecoder(self):
vocab = list(range(10))
vocab = [str(x) for x in vocab]
vocab_size = len(vocab)
pred_config = OmegaConf.create(
{
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': 32, 'pred_rnn_layers': 1,},
'vocab_size': vocab_size,
'blank_as_pad': True,
}
)
prednet = modules.RNNTDecoder.from_config_dict(pred_config)
# num params
pred_hidden = pred_config.prednet.pred_hidden
embed = (vocab_size + 1) * pred_hidden # embedding with blank
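        # Single-layer LSTM with input size == hidden size == pred_hidden: torch allocates weight_ih and
        # weight_hh of shape (4 * h, h) plus bias_ih and bias_hh of shape (4 * h,), i.e. 2 * 4 * (h * h + h)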
rnn = (
2 * 4 * (pred_hidden * pred_hidden + pred_hidden)
) # (ih + hh) * (ifco gates) * (indim * hiddendim + bias)
assert prednet.num_weights == (embed + rnn)
# State initialization
x_ = torch.zeros(4, dtype=torch.float32)
states = prednet.initialize_state(x_)
for state_i in states:
assert state_i.dtype == x_.dtype
assert state_i.device == x_.device
assert state_i.shape[1] == len(x_)
# Blank hypotheses test
blank = vocab_size
hyp = Hypothesis(score=0.0, y_sequence=[blank])
cache = {}
pred, states, _ = prednet.score_hypothesis(hyp, cache)
assert pred.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
for state_i in states:
assert state_i.dtype == pred.dtype
assert state_i.device == pred.device
assert state_i.shape[1] == len(pred)
# Blank stateless predict
g, states = prednet.predict(y=None, state=None, add_sos=False, batch_size=1)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
for state_i in states:
assert state_i.dtype == g.dtype
assert state_i.device == g.device
assert state_i.shape[1] == len(g)
# Blank stateful predict
g, states2 = prednet.predict(y=None, state=states, add_sos=False, batch_size=1)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states2) == 2
for state_i, state_j in zip(states, states2):
assert (state_i - state_j).square().sum().sqrt() > 0.0
# Predict with token and state
token = torch.full([1, 1], fill_value=0, dtype=torch.long)
g, states = prednet.predict(y=token, state=states2, add_sos=False, batch_size=None)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
# Predict with blank token and no state
token = torch.full([1, 1], fill_value=blank, dtype=torch.long)
g, states = prednet.predict(y=token, state=None, add_sos=False, batch_size=None)
assert g.shape == torch.Size([1, 1, pred_hidden])
assert len(states) == 2
@pytest.mark.unit
def test_RNNTJoint(self):
vocab = list(range(10))
vocab = [str(x) for x in vocab]
vocab_size = len(vocab)
batchsize = 4
encoder_hidden = 64
pred_hidden = 32
joint_hidden = 16
joint_cfg = OmegaConf.create(
{
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'num_classes': vocab_size,
'vocabulary': vocab,
'jointnet': {
'encoder_hidden': encoder_hidden,
'pred_hidden': pred_hidden,
'joint_hidden': joint_hidden,
'activation': 'relu',
},
}
)
jointnet = modules.RNNTJoint.from_config_dict(joint_cfg)
enc = torch.zeros(batchsize, encoder_hidden, 48) # [B, D1, T]
dec = torch.zeros(batchsize, pred_hidden, 24) # [B, D2, U]
# forward call test
out = jointnet(encoder_outputs=enc, decoder_outputs=dec)
assert out.shape == torch.Size([batchsize, 48, 24, vocab_size + 1]) # [B, T, U, V + 1]
# joint() step test
enc2 = enc.transpose(1, 2) # [B, T, D1]
dec2 = dec.transpose(1, 2) # [B, U, D2]
out2 = jointnet.joint(enc2, dec2) # [B, T, U, V + 1]
assert (out - out2).abs().sum() <= 1e-5
# assert vocab size
assert jointnet.num_classes_with_blank == vocab_size + 1
@pytest.mark.unit
def test_HATJoint(self):
vocab = list(range(10))
vocab = [str(x) for x in vocab]
vocab_size = len(vocab)
batchsize = 4
encoder_hidden = 64
pred_hidden = 32
joint_hidden = 16
joint_cfg = OmegaConf.create(
{
'_target_': 'nemo.collections.asr.modules.HATJoint',
'num_classes': vocab_size,
'vocabulary': vocab,
'jointnet': {
'encoder_hidden': encoder_hidden,
'pred_hidden': pred_hidden,
'joint_hidden': joint_hidden,
'activation': 'relu',
},
}
)
jointnet = modules.HATJoint.from_config_dict(joint_cfg)
enc = torch.zeros(batchsize, encoder_hidden, 48) # [B, D1, T]
dec = torch.zeros(batchsize, pred_hidden, 24) # [B, D2, U]
# forward call test
out = jointnet(encoder_outputs=enc, decoder_outputs=dec)
assert out.shape == torch.Size([batchsize, 48, 24, vocab_size + 1]) # [B, T, U, V + 1]
# joint() step test
enc2 = enc.transpose(1, 2) # [B, T, D1]
dec2 = dec.transpose(1, 2) # [B, U, D2]
out2 = jointnet.joint(enc2, dec2) # [B, T, U, V + 1]
assert (out - out2).abs().sum() <= 1e-5
# joint() step test for internal LM subtraction
jointnet.return_hat_ilm = True
hat_output = jointnet.joint(enc2, dec2) # HATJointOutput dataclass
out3, ilm = hat_output.hat_logprobs, hat_output.ilm_logprobs # [B, T, U, V + 1] and [B, 1, U, V]
assert (out - out3).abs().sum() <= 1e-5
        assert ilm.shape == torch.Size([batchsize, 1, 24, vocab_size])  # [B, 1, U, V] without blank symbol
# assert vocab size
assert jointnet.num_classes_with_blank == vocab_size + 1
| NeMo-main | tests/collections/asr/test_asr_modules.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import librosa
import numpy as np
import pytest
import torch
from nemo.collections.asr.parts.preprocessing.features import FilterbankFeatures
class TestFilterbankFeatures:
@pytest.mark.unit
def test_seq_len(self):
fb_module = FilterbankFeatures(exact_pad=False, pad_to=1)
test_1 = torch.randn(1, 800)
test_1_len = torch.tensor([800])
fb_spec, fb_len = fb_module(test_1, test_1_len)
assert fb_spec.shape[2] == fb_len[0], f"{fb_spec.shape} != {fb_len}"
librosa_spec = librosa.stft(test_1.cpu().detach().numpy().squeeze(), n_fft=512, hop_length=160, win_length=320)
assert librosa_spec.shape[1] == fb_spec.shape[2], f"{librosa_spec.shape} != {fb_spec.shape}"
@pytest.mark.unit
def test_random_stft_sizes(self):
for _ in range(5):
nfft = 2 ** np.random.randint(7, 12)
window_size = np.random.randint(100, nfft)
hop_size = np.random.randint(64, window_size)
fb_module = FilterbankFeatures(
exact_pad=False,
pad_to=1,
n_fft=nfft,
n_window_size=window_size,
n_window_stride=hop_size,
normalize=False,
)
audio_length = np.random.randint(nfft, 2 ** 16)
test_1 = torch.randn(1, audio_length)
test_1_len = torch.tensor([audio_length])
fb_spec, fb_len = fb_module(test_1, test_1_len)
assert (
fb_spec.shape[2] == fb_len[0]
), f"{fb_spec.shape} != {fb_len}: {nfft}, {window_size}, {hop_size}, {audio_length}"
librosa_spec = librosa.stft(
test_1.cpu().detach().numpy().squeeze(), n_fft=nfft, hop_length=hop_size, win_length=window_size
)
assert (
librosa_spec.shape[1] == fb_spec.shape[2]
), f"{librosa_spec.shape} != {fb_spec.shape}: {nfft}, {window_size}, {hop_size}, {audio_length}"
@pytest.mark.unit
def test_random_stft_sizes_exact_pad(self):
for _ in range(5):
nfft = 2 ** np.random.randint(7, 12)
window_size = np.random.randint(100, nfft)
hop_size = np.random.randint(64, window_size)
if hop_size % 2 == 1:
hop_size = hop_size - 1
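            # an even hop_size keeps the (n_fft - hop_size) // 2 reflect padding used below symmetric on both sides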
fb_module = FilterbankFeatures(
exact_pad=True,
pad_to=1,
n_fft=nfft,
n_window_size=window_size,
n_window_stride=hop_size,
normalize=False,
)
audio_length = np.random.randint(nfft, 2 ** 16)
test_1 = torch.randn(1, audio_length)
test_1_len = torch.tensor([audio_length])
fb_spec, fb_len = fb_module(test_1, test_1_len)
assert (
fb_spec.shape[2] == fb_len[0]
), f"{fb_spec.shape} != {fb_len}: {nfft}, {window_size}, {hop_size}, {audio_length}"
test_2 = test_1.cpu().detach().numpy().squeeze()
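            # Mirror the preprocessor's exact_pad framing: reflect-pad by (n_fft - hop_size) // 2 on each side and run librosa with center=False so the frames line up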
test_2 = np.pad(test_2, int((nfft - hop_size) // 2), mode="reflect")
librosa_spec = librosa.stft(test_2, n_fft=nfft, hop_length=hop_size, win_length=window_size, center=False,)
assert (
fb_spec.shape[2] == librosa_spec.shape[1]
), f"{fb_spec.shape} != {librosa_spec.shape}: {nfft}, {window_size}, {hop_size}, {audio_length}"
assert (
fb_spec.shape[2] == audio_length // hop_size
), f"{fb_spec.shape}, {nfft}, {window_size}, {hop_size}, {audio_length}, {audio_length // hop_size}"
| NeMo-main | tests/collections/asr/test_asr_filterbankfeatures_seq_len.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
import pytest
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import EncDecCTCModel, EncDecHybridRNNTCTCModel
from nemo.core.classes.mixins import AccessMixin
def jasper_encoder_config(num_layers=1) -> Dict:
return {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 4,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
]
* num_layers,
}
def conformer_encoder_config() -> Dict:
return {
'_target_': 'nemo.collections.asr.modules.ConformerEncoder',
'feat_in': 64,
'n_layers': 8,
'd_model': 4,
}
def squeezeformer_encoder_config() -> Dict:
return {
'_target_': 'nemo.collections.asr.modules.SqueezeformerEncoder',
'feat_in': 64,
'n_layers': 8,
'd_model': 4,
}
class TestInterCTCLoss:
@pytest.mark.unit
@pytest.mark.parametrize(
"model_class", [EncDecCTCModel, EncDecHybridRNNTCTCModel],
)
@pytest.mark.parametrize(
"encoder_config",
[jasper_encoder_config(num_layers=8), conformer_encoder_config(), squeezeformer_encoder_config()],
)
@pytest.mark.parametrize(
"apply_at_layers,loss_weights",
[
([2, 4], [0.1, 0.3]),
([4], [0.3]),
([], []),
# errors
([2, 4], [0.1]),
([2], [0.1, 0.3]),
([], [0.3]),
],
)
def test_forward(self, model_class, encoder_config, apply_at_layers, loss_weights):
preprocessor_config = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
vocabulary = [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
]
if model_class is EncDecCTCModel:
decoder_config = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': None,
'num_classes': len(vocabulary),
'vocabulary': vocabulary,
}
model_config = DictConfig(
{
'compute_eval_loss': True, # will be ignored by the model
'preprocessor': DictConfig(preprocessor_config),
'encoder': DictConfig(encoder_config),
'decoder': DictConfig(decoder_config),
}
)
else:
decoder_config = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': 4, 'pred_rnn_layers': 1},
}
joint_config = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 4, 'activation': 'relu'},
}
decoding_config = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 30}}
loss_config = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
aux_ctc_config = {
'ctc_loss_weight': 0.3,
'use_cer': False,
'ctc_reduction': 'mean_batch',
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': None,
'num_classes': len(vocabulary),
'vocabulary': vocabulary,
},
'decoding': DictConfig(CTCDecodingConfig),
}
model_config = DictConfig(
{
'compute_eval_loss': True,
'labels': ListConfig(vocabulary),
'preprocessor': DictConfig(preprocessor_config),
'model_defaults': DictConfig({'enc_hidden': 4, 'pred_hidden': 4}),
'encoder': DictConfig(encoder_config),
'decoder': DictConfig(decoder_config),
'joint': DictConfig(joint_config),
'decoding': DictConfig(decoding_config),
'loss': DictConfig(loss_config),
'aux_ctc': DictConfig(aux_ctc_config),
}
)
        # to avoid adding additional tests, we always disable eval loss when the
        # encoder is Squeezeformer - there is nothing specific to this particular
        # encoder here, it is just a randomly picked one used to exercise the
        # disabled-loss use case.
if encoder_config['_target_'] == 'nemo.collections.asr.modules.SqueezeformerEncoder':
model_config['compute_eval_loss'] = False
model_config.update(
{
'interctc': {'loss_weights': loss_weights, 'apply_at_layers': apply_at_layers},
'optim': {'name': 'adamw'},
}
)
class DummyDataset(torch.utils.data.Dataset):
"""Simply returns a single set of values."""
def __init__(self, values):
self.values = values
def __len__(self):
return 1
def __getitem__(self, idx):
return self.values
        # the raw input signal sometimes results in an all-zero output, which breaks the tests,
        # so it is only used for the PTL calls at the bottom; the processed signal is passed in
        # directly at first to remove the chance of hitting this edge case
input_signal = torch.randn(size=(1, 512))
input_length = torch.randint(low=161, high=500, size=[1])
target = torch.randint(size=(1, input_length[0]), low=0, high=28)
target_length = torch.tensor([input_length[0]])
processed_signal = torch.randn(size=([1, 64, 12]))
processed_length = torch.tensor([8])
if len(apply_at_layers) != len(loss_weights):
# has to throw an error here
with pytest.raises(
ValueError, match="Length of interctc.apply_at_layers has to match interctc.loss_weights"
):
asr_model = model_class(cfg=model_config)
asr_model.train()
logprobs, _, _ = asr_model.forward(input_signal=input_signal, input_signal_length=input_length)
else:
asr_model = model_class(cfg=model_config)
asr_model.train()
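            # enable the access registry so intermediate CTC layer outputs are captured during forward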
AccessMixin.set_access_enabled(access_enabled=True)
logprobs, *_ = asr_model.forward(
processed_signal=processed_signal, processed_signal_length=processed_length
)
captured_tensors = asr_model.get_captured_interctc_tensors()
AccessMixin.reset_registry(asr_model)
assert len(captured_tensors) == len(apply_at_layers)
for output in captured_tensors:
# checking that values are not the same, if shape is the same
assert output[0].shape != logprobs.shape or not torch.allclose(output[0], logprobs)
# hybrid model returns output of encoder, so it's not expected to match
if model_class is EncDecCTCModel:
assert output[0].shape == logprobs.shape
            ## Explicitly pass accelerator as cpu, since the default value in PTL >= 2.0 is auto and it picks cuda
## which further causes an error in all reduce at: https://github.com/NVIDIA/NeMo/blob/v1.18.1/nemo/collections/asr/modules/conv_asr.py#L209
## and in https://github.com/NVIDIA/NeMo/blob/v1.18.1/nemo/collections/asr/modules/squeezeformer_encoder.py#L392 where device is CPU
trainer = pl.Trainer(max_epochs=1, accelerator='cpu')
trainer.fit(
asr_model,
train_dataloaders=torch.utils.data.DataLoader(
DummyDataset([input_signal, input_length, target, target_length]), collate_fn=lambda x: x[0],
),
val_dataloaders=torch.utils.data.DataLoader(
DummyDataset([input_signal, input_length, target, target_length]), collate_fn=lambda x: x[0],
),
)
required_metrics = ['final_loss'] if len(loss_weights) > 0 else []
required_metrics += [f'inter_ctc_loss_l{idx}' for idx in apply_at_layers]
prefix = "val_"
required_metrics += [f'{prefix}{metric}' for metric in required_metrics]
required_metrics += [f'{prefix}wer'] + [f'{prefix}inter_wer_l{idx}' for idx in apply_at_layers]
for metric in required_metrics:
if 'loss' in metric and 'val_' in metric:
if model_config['compute_eval_loss']:
assert metric in trainer.logged_metrics
else:
assert metric not in trainer.logged_metrics
else:
assert metric in trainer.logged_metrics
trainer.test(
asr_model,
dataloaders=torch.utils.data.DataLoader(
DummyDataset([input_signal, input_length, target, target_length]), collate_fn=lambda x: x[0],
),
)
required_metrics = [f'inter_ctc_loss_l{idx}' for idx in apply_at_layers]
prefix = 'test_'
# note that "=" is on purpose here, not "+=", since we only log test metrics
required_metrics = [f'{prefix}{metric}' for metric in required_metrics]
required_metrics += [f'{prefix}wer'] + [f'{prefix}inter_wer_l{idx}' for idx in apply_at_layers]
for metric in required_metrics:
if 'loss' in metric:
if model_config['compute_eval_loss']:
assert metric in trainer.logged_metrics
else:
assert metric not in trainer.logged_metrics
else:
assert metric in trainer.logged_metrics
| NeMo-main | tests/collections/asr/test_asr_interctc_models.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.models import EncDecDiarLabelModel
@pytest.fixture()
def msdd_model():
preprocessor = {
'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
'params': {"features": 80, "window_size": 0.025, "window_stride": 0.01, "sample_rate": 16000,},
}
speaker_model_encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 80,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 512,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': False,
}
],
},
}
speaker_model_decoder = {
'cls': 'nemo.collections.asr.modules.SpeakerDecoder',
'params': {'feat_in': 512, 'num_classes': 2, 'pool_mode': 'xvector', 'emb_sizes': [1024]},
}
speaker_model_cfg = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(speaker_model_encoder),
'decoder': DictConfig(speaker_model_decoder),
}
)
msdd_module = {
'cls': 'nemo.collections.asr.modules.MSDD_module',
'params': {
"num_spks": 2,
"hidden_size": 256,
"num_lstm_layers": 3,
"dropout_rate": 0.5,
"cnn_output_ch": 32,
"conv_repeat": 2,
"emb_dim": 192,
"scale_n": 5,
"weighting_scheme": 'conv_scale_weight',
"context_vector_type": 'cos_sim',
},
}
loss = {'cls': 'nemo.collections.asr.losses.bce_loss.BCELoss', 'params': {"weight": None}}
diarizer = {
'out_dir': None,
'oracle_vad': True,
"speaker_embeddings": {
"model_path": None,
"parameters": {
"window_length_in_sec": [1.5, 1.25, 1.0, 0.75, 0.5],
"shift_length_in_sec": [0.75, 0.625, 0.5, 0.375, 0.25],
"multiscale_weights": [1, 1, 1, 1, 1],
"save_embeddings": True,
},
},
}
modelConfig = DictConfig(
{
'msdd_module': DictConfig(msdd_module),
'preprocessor': DictConfig(preprocessor),
'diarizer': DictConfig(diarizer),
'loss': DictConfig(loss),
'max_num_of_spks': 2,
'num_workers': 5,
'emb_batch_size': 0,
'soft_label_thres': 0.5,
'scale_n': 5,
'speaker_model_cfg': speaker_model_cfg,
}
)
model = EncDecDiarLabelModel(cfg=modelConfig)
return model
class TestEncDecDiarLabelModel:
@pytest.mark.unit
def test_constructor(self, msdd_model):
diar_model = msdd_model.train()
assert diar_model.cfg.scale_n == len(
diar_model.cfg.diarizer.speaker_embeddings.parameters.window_length_in_sec
)
assert diar_model.cfg.scale_n == len(diar_model.cfg.diarizer.speaker_embeddings.parameters.shift_length_in_sec)
assert diar_model.cfg.scale_n == len(diar_model.cfg.diarizer.speaker_embeddings.parameters.multiscale_weights)
assert diar_model.cfg.msdd_module.num_spks == diar_model.cfg.max_num_of_spks
# TODO: make proper config and assert correct number of weights
# Check to/from config_dict:
confdict = diar_model.to_config_dict()
instance2 = EncDecDiarLabelModel.from_config_dict(confdict)
assert isinstance(instance2, EncDecDiarLabelModel)
@pytest.mark.unit
def test_forward_infer(self, msdd_model):
diar_model = msdd_model.eval()
# batch_size 4, scale_n 5, length 25, emb_dim 192
input_signal = torch.randn(size=(4, 25, 5, 192))
input_signal_length = 25 * torch.ones(4, dtype=torch.int)
emb_vectors = torch.randn(size=(4, 5, 192, 2))
targets = torch.randint(2, size=(4, 25, 2), dtype=torch.int)
with torch.no_grad():
# batch size 1
preds_list, scale_weights_list = [], []
for i in range(input_signal.size(0)):
preds, scale_weights = diar_model.forward_infer(
input_signal[i : i + 1], input_signal_length[i : i + 1], emb_vectors[i : i + 1], targets[i : i + 1]
)
preds_list.append(preds)
scale_weights_list.append(scale_weights)
preds_instance = torch.cat(preds_list, 0)
scale_weights_instance = torch.cat(scale_weights_list, 0)
# batch size 4
preds_batch, scale_weights_batch = diar_model.forward_infer(
input_signal, input_signal_length, emb_vectors, targets
)
assert preds_instance.shape == preds_batch.shape
assert scale_weights_instance.shape == scale_weights_batch.shape
diff = torch.mean(torch.abs(preds_instance - preds_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(preds_instance - preds_batch))
assert diff <= 1e-6
diff = torch.mean(torch.abs(scale_weights_instance - scale_weights_batch))
assert diff <= 1e-6
diff = torch.max(torch.abs(scale_weights_instance - scale_weights_batch))
assert diff <= 1e-6
| NeMo-main | tests/collections/asr/test_diar_label_models.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import onnx
import pytest
import torch.cuda
from omegaconf import DictConfig, ListConfig, OmegaConf
from nemo.collections.asr.models import (
EncDecClassificationModel,
EncDecCTCModel,
EncDecRNNTModel,
EncDecSpeakerLabelModel,
)
from nemo.collections.asr.parts.utils import asr_module_utils
from nemo.collections.common.parts.adapter_modules import LinearAdapterConfig
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
class TestExportable:
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecCTCModel_export_to_onnx(self):
model_config = DictConfig(
{
'preprocessor': DictConfig(self.preprocessor),
'encoder': DictConfig(self.encoder_dict),
'decoder': DictConfig(self.decoder_dict),
}
)
model = EncDecCTCModel(cfg=model_config).cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'qn.onnx')
model.export(
output=filename, check_trace=True,
)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.output[0].name == 'logprobs'
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecClassificationModel_export_to_onnx(self, speech_classification_model):
model = speech_classification_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'edc.onnx')
model.export(
output=filename, check_trace=True,
)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.output[0].name == 'logits'
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecSpeakerLabelModel_export_to_onnx(self, speaker_label_model):
model = speaker_label_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'sl.onnx')
model.export(output=filename)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.output[0].name == 'logits'
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecCitrinetModel_export_to_onnx(self, citrinet_model):
model = citrinet_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'citri.onnx')
model.export(output=filename)
onnx_model = onnx.load(filename)
onnx.checker.check_model(onnx_model, full_check=True) # throws when failed
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.input[1].name == 'length'
assert onnx_model.graph.output[0].name == 'logprobs'
@pytest.mark.pleasefixme
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_ConformerModel_export_to_onnx(self, conformer_model):
model = conformer_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir, torch.cuda.amp.autocast():
filename = os.path.join(tmpdir, 'conf.onnx')
device = next(model.parameters()).device
input_example = torch.randn(4, model.encoder._feat_in, 777, device=device)
input_example_length = torch.full(size=(input_example.shape[0],), fill_value=777, device=device)
model.export(
output=filename, input_example=tuple([input_example, input_example_length]), check_trace=True,
)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_SqueezeformerModel_export_to_onnx(self, squeezeformer_model):
model = squeezeformer_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir, torch.cuda.amp.autocast():
filename = os.path.join(tmpdir, 'squeeze.ts')
device = next(model.parameters()).device
input_example = torch.randn(4, model.encoder._feat_in, 777, device=device)
input_example_length = torch.full(size=(input_example.shape[0],), fill_value=777, device=device)
model.export(output=filename, input_example=tuple([input_example, input_example_length]), check_trace=True)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecCitrinetModel_limited_SE_export_to_onnx(self, citrinet_model):
model = citrinet_model.cuda()
asr_module_utils.change_conv_asr_se_context_window(model, context_window=24, update_config=False)
with tempfile.TemporaryDirectory() as tmpdir, torch.cuda.amp.autocast():
filename = os.path.join(tmpdir, 'citri_se.onnx')
model.export(
output=filename, check_trace=True,
)
onnx_model = onnx.load(filename)
            onnx.checker.check_model(onnx_model, full_check=True)  # raises if the check fails
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.input[1].name == 'length'
assert onnx_model.graph.output[0].name == 'logprobs'
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecRNNTModel_export_to_onnx(self, citrinet_rnnt_model):
model = citrinet_rnnt_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
fn = 'citri_rnnt.onnx'
filename = os.path.join(tmpdir, fn)
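            # RNNT export is expected to produce two files: an encoder graph and a fused decoder+joint graph.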
files, descr = model.export(output=filename, verbose=False)
encoder_filename = os.path.join(tmpdir, 'encoder-' + fn)
assert files[0] == encoder_filename
assert os.path.exists(encoder_filename)
onnx_model = onnx.load(encoder_filename)
            onnx.checker.check_model(onnx_model, full_check=True)  # raises if the check fails
assert len(onnx_model.graph.input) == 2
assert len(onnx_model.graph.output) == 2
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.input[1].name == 'length'
assert onnx_model.graph.output[0].name == 'outputs'
assert onnx_model.graph.output[1].name == 'encoded_lengths'
decoder_joint_filename = os.path.join(tmpdir, 'decoder_joint-' + fn)
assert files[1] == decoder_joint_filename
assert os.path.exists(decoder_joint_filename)
onnx_model = onnx.load(decoder_joint_filename)
            onnx.checker.check_model(onnx_model, full_check=True)  # raises if the check fails
input_examples = model.decoder.input_example()
assert type(input_examples[-1]) == tuple
num_states = len(input_examples[-1])
state_name = list(model.decoder.output_types.keys())[-1]
# enc_logits + (all decoder inputs - state tuple) + flattened state list
assert len(onnx_model.graph.input) == (1 + (len(input_examples) - 1) + num_states)
assert onnx_model.graph.input[0].name == 'encoder_outputs'
assert onnx_model.graph.input[1].name == 'targets'
assert onnx_model.graph.input[2].name == 'target_length'
if num_states > 0:
for idx, ip in enumerate(onnx_model.graph.input[3:]):
assert ip.name == "input_" + state_name + '_' + str(idx + 1)
assert len(onnx_model.graph.output) == (len(input_examples) - 1) + num_states
assert onnx_model.graph.output[0].name == 'outputs'
assert onnx_model.graph.output[1].name == 'prednet_lengths'
if num_states > 0:
for idx, op in enumerate(onnx_model.graph.output[2:]):
assert op.name == "output_" + state_name + '_' + str(idx + 1)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecRNNTModel_export_to_ts(self, citrinet_rnnt_model):
model = citrinet_rnnt_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
fn = 'citri_rnnt.ts'
filename = os.path.join(tmpdir, fn)
# Perform export + test with the input examples of the RNNT model.
files, descr = model.export(output=filename, verbose=False, check_trace=True)
encoder_filename = os.path.join(tmpdir, 'encoder-' + fn)
assert files[0] == encoder_filename
assert os.path.exists(encoder_filename)
ts_encoder = torch.jit.load(encoder_filename)
assert ts_encoder is not None
arguments = ts_encoder.forward.schema.arguments[1:] # First value is `self`
assert arguments[0].name == 'audio_signal'
assert arguments[1].name == 'length'
decoder_joint_filename = os.path.join(tmpdir, 'decoder_joint-' + fn)
assert files[1] == decoder_joint_filename
assert os.path.exists(decoder_joint_filename)
ts_decoder_joint = torch.jit.load(decoder_joint_filename)
assert ts_decoder_joint is not None
ts_decoder_joint_args = ts_decoder_joint.forward.schema.arguments[1:] # First value is self
input_examples = model.decoder.input_example()
assert type(input_examples[-1]) == tuple
num_states = len(input_examples[-1])
state_name = list(model.decoder.output_types.keys())[-1]
# enc_logits + (all decoder inputs - state tuple) + flattened state list
assert len(ts_decoder_joint_args) == (1 + (len(input_examples) - 1) + num_states)
assert ts_decoder_joint_args[0].name == 'encoder_outputs'
assert ts_decoder_joint_args[1].name == 'targets'
assert ts_decoder_joint_args[2].name == 'target_length'
if num_states > 0:
for idx, ip in enumerate(ts_decoder_joint_args[3:]):
assert ip.name == "input_" + state_name + '_' + str(idx + 1)
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_EncDecCTCModel_adapted_export_to_onnx(self):
model_config = DictConfig(
{
'preprocessor': DictConfig(self.preprocessor),
'encoder': DictConfig(self.encoder_dict),
'decoder': DictConfig(self.decoder_dict),
}
)
# support adapter in encoder
model_config.encoder.cls = model_config.encoder.cls + 'Adapter' # ConvASREncoderAdapter
# load model
model = EncDecCTCModel(cfg=model_config)
# add adapter
adapter_cfg = OmegaConf.structured(
LinearAdapterConfig(in_features=model_config.encoder.params.jasper[0].filters, dim=32)
)
model.add_adapter('temp', cfg=adapter_cfg)
model = model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'qn.onnx')
model.export(
output=filename, check_trace=True,
)
onnx_model = onnx.load(filename)
            onnx.checker.check_model(onnx_model, full_check=True)  # raises if the check fails
assert onnx_model.graph.input[0].name == 'audio_signal'
assert onnx_model.graph.output[0].name == 'logprobs'
def setup_method(self):
self.preprocessor = {
'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
'params': dict({}),
}
self.encoder_dict = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 1024,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
self.decoder_dict = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
'params': {
'feat_in': 1024,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
},
}
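# The module-level fixtures below build small models (classification, speaker label, Citrinet, Citrinet-RNNT,
# Conformer, Squeezeformer) that the export tests above consume.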
@pytest.fixture()
def speech_classification_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 32,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoderClassification',
'params': {'feat_in': 32, 'num_classes': 30,},
}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'labels': ListConfig(["dummy_cls_{}".format(i + 1) for i in range(30)]),
}
)
model = EncDecClassificationModel(cfg=modelConfig)
return model
@pytest.fixture()
def speaker_label_model():
preprocessor = {
'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 512,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': False,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.SpeakerDecoder',
'feat_in': 512,
'num_classes': 2,
'pool_mode': 'attention',
'emb_sizes': [1024],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
speaker_model = EncDecSpeakerLabelModel(cfg=modelConfig)
return speaker_model
@pytest.fixture()
def citrinet_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 80,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 512,
'repeat': 1,
'kernel': [5],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': 512,
'repeat': 5,
'kernel': [11],
'stride': [2],
'dilation': [1],
'dropout': 0.1,
'residual': True,
'separable': True,
'se': True,
'se_context_size': -1,
'stride_last': True,
'residual_mode': 'stride_add',
},
{
'filters': 512,
'repeat': 5,
'kernel': [13],
'stride': [1],
'dilation': [1],
'dropout': 0.1,
'residual': True,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': 640,
'repeat': 1,
'kernel': [41],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': True,
'separable': True,
'se': True,
'se_context_size': -1,
},
],
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
'params': {'feat_in': 640, 'num_classes': 1024, 'vocabulary': list(chr(i % 28) for i in range(0, 1024))},
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
citri_model = EncDecCTCModel(cfg=modelConfig)
return citri_model
@pytest.fixture()
def citrinet_rnnt_model():
labels = list(chr(i % 28) for i in range(0, 1024))
model_defaults = {'enc_hidden': 640, 'pred_hidden': 256, 'joint_hidden': 320}
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoder',
'feat_in': 80,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 512,
'repeat': 1,
'kernel': [5],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': 512,
'repeat': 5,
'kernel': [11],
'stride': [2],
'dilation': [1],
'dropout': 0.1,
'residual': True,
'separable': True,
'se': True,
'se_context_size': -1,
'stride_last': True,
'residual_mode': 'stride_add',
},
{
'filters': 512,
'repeat': 5,
'kernel': [13],
'stride': [1],
'dilation': [1],
'dropout': 0.1,
'residual': True,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': 640,
'repeat': 1,
'kernel': [41],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': True,
'separable': True,
'se': True,
'se_context_size': -1,
},
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': 256, 'pred_rnn_layers': 1, 'dropout': 0.0},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'fuse_loss_wer': False,
'jointnet': {'joint_hidden': 320, 'activation': 'relu', 'dropout': 0.0},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 5}}
modelConfig = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'labels': labels,
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'decoding': DictConfig(decoding),
}
)
citri_model = EncDecRNNTModel(cfg=modelConfig)
return citri_model
@pytest.fixture()
def conformer_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.ConformerEncoder',
'params': {
'feat_in': 80,
'feat_out': -1,
'n_layers': 2,
'd_model': 256,
'subsampling': 'striding',
'subsampling_factor': 4,
'subsampling_conv_channels': 512,
'reduction': None,
'reduction_position': None,
'reduction_factor': 1,
'ff_expansion_factor': 4,
'self_attention_model': 'rel_pos',
'n_heads': 8,
'att_context_size': [-1, -1],
'xscaling': True,
'untie_biases': True,
'pos_emb_max_len': 500,
'conv_kernel_size': 31,
'dropout': 0.1,
'dropout_pre_encoder': 0.1,
'dropout_emb': 0.0,
'dropout_att': 0.1,
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
'params': {'feat_in': 256, 'num_classes': 1024, 'vocabulary': list(chr(i % 28) for i in range(0, 1024))},
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
conformer_model = EncDecCTCModel(cfg=modelConfig)
return conformer_model
@pytest.fixture()
def squeezeformer_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
encoder = {
'cls': 'nemo.collections.asr.modules.SqueezeformerEncoder',
'params': {
'feat_in': 80,
'feat_out': -1,
'n_layers': 2,
'adaptive_scale': True,
'time_reduce_idx': 1,
'time_recovery_idx': None,
'd_model': 256,
'subsampling': 'dw_striding',
'subsampling_factor': 4,
'subsampling_conv_channels': 512,
'ff_expansion_factor': 4,
'self_attention_model': 'rel_pos',
'n_heads': 8,
'att_context_size': [-1, -1],
'xscaling': True,
'untie_biases': True,
'pos_emb_max_len': 500,
'conv_kernel_size': 31,
'dropout': 0.1,
'dropout_emb': 0.0,
'dropout_att': 0.1,
},
}
decoder = {
'cls': 'nemo.collections.asr.modules.ConvASRDecoder',
'params': {'feat_in': 256, 'num_classes': 1024, 'vocabulary': list(chr(i % 28) for i in range(0, 1024))},
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
    squeeze_model = EncDecCTCModel(cfg=modelConfig)
    return squeeze_model
| NeMo-main | tests/collections/asr/test_asr_exportables.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import pytest
import torch
from omegaconf import DictConfig, ListConfig
from nemo.collections.asr.models import SpeechEncDecSelfSupervisedModel
@pytest.fixture()
def ssl_model():
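    """Build a small SpeechEncDecSelfSupervisedModel with a contrastive loss head and an MLM loss head."""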
preprocessor = {
'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor',
'params': dict({'pad_to': 16, 'dither': 0}),
}
model_defaults = {'enc_hidden': 32, 'dec_out': 128}
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
},
],
},
}
spec_augment = {
'_target_': 'nemo.collections.asr.modules.MaskedPatchAugmentation',
'freq_masks': 3,
'freq_width': 20,
'patch_size': 16,
'mask_patches': 0.5,
}
loss_list_contr_mlm = {
'contr': {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoderReconstruction',
'feat_in': model_defaults['enc_hidden'],
'feat_hidden': 128,
'feat_out': model_defaults['dec_out'],
'stride_layers': 0,
'non_stride_layers': 0,
'stride_transpose': False,
},
'loss': {
'_target_': 'nemo.collections.asr.losses.ContrastiveLoss',
'in_dim': 64,
'proj_dim': model_defaults['dec_out'],
'combine_time_steps': 1,
'quantized_targets': True,
'codebook_size': 64,
'sample_from_same_utterance_only': True,
'sample_from_non_masked': False,
'num_negatives': 3,
},
},
'mlm': {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': model_defaults['enc_hidden'],
'num_classes': 4096,
},
'loss': {'_target_': 'nemo.collections.asr.losses.MLMLoss', 'combine_time_steps': 1},
'targets_from_loss': "contr",
},
}
modelConfig_contr_mlm = DictConfig(
{
'preprocessor': DictConfig(preprocessor),
'spec_augment': DictConfig(spec_augment),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'loss_list': DictConfig(loss_list_contr_mlm),
}
)
ssl_model = SpeechEncDecSelfSupervisedModel(cfg=modelConfig_contr_mlm)
return ssl_model
class TestSSLModel:
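    """Construction and decoder-loss tests for the self-supervised model under different loss_list configurations."""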
@pytest.mark.unit
def test_constructor(self, ssl_model):
confdict = ssl_model.to_config_dict()
instance2 = SpeechEncDecSelfSupervisedModel.from_config_dict(confdict)
assert isinstance(instance2, SpeechEncDecSelfSupervisedModel)
@pytest.mark.unit
def test_contr_nonquant(self, ssl_model):
modelConfig_contr_nonquant = ssl_model.to_config_dict()
loss_list_contr_nonquant = dict(modelConfig_contr_nonquant['loss_list'])
del loss_list_contr_nonquant['mlm']
loss_list_contr_nonquant['contr']['loss']['quantized_targets'] = False
modelConfig_contr_nonquant['loss_list'] = DictConfig(loss_list_contr_nonquant)
ssl_model = SpeechEncDecSelfSupervisedModel(cfg=modelConfig_contr_nonquant)
input_signal = torch.randn(size=(4, 64000))
length = torch.randint(low=48000, high=64000, size=[4])
with torch.no_grad():
spectrograms, spec_masks, encoded, encoded_len = ssl_model.forward(
input_signal=input_signal, input_signal_length=length
)
loss_value, loss_val_dict = ssl_model.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len)
assert len(loss_val_dict) == 1
@pytest.mark.unit
def test_contr_mlm(self, ssl_model):
input_signal = torch.randn(size=(4, 64000))
length = torch.randint(low=48000, high=64000, size=[4])
with torch.no_grad():
spectrograms, spec_masks, encoded, encoded_len = ssl_model.forward(
input_signal=input_signal, input_signal_length=length
)
loss_value, loss_val_dict = ssl_model.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len)
assert len(loss_val_dict) == 2
@pytest.mark.unit
def test_contr_mlm_multi(self, ssl_model):
modelConfig_contr_mlm_multi = ssl_model.to_config_dict()
model_defaults = modelConfig_contr_mlm_multi['model_defaults']
loss_list_contr_mlm_multi = dict(modelConfig_contr_mlm_multi['loss_list'])
loss_list_contr_mlm_multi['mlm_2'] = {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': model_defaults['enc_hidden'],
'num_classes': 4096,
},
'loss': {'_target_': 'nemo.collections.asr.losses.MLMLoss', 'combine_time_steps': 1},
'output_from_layer': "encoder.0",
'targets_from_loss': "contr",
}
loss_list_contr_mlm_multi['mlm_3'] = {
'decoder': {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': model_defaults['enc_hidden'],
'num_classes': 4096,
},
'loss': {'_target_': 'nemo.collections.asr.losses.MLMLoss', 'combine_time_steps': 1},
'output_from_layer': "encoder.1",
'targets_from_loss': "contr",
}
modelConfig_contr_mlm_multi['loss_list'] = DictConfig(loss_list_contr_mlm_multi)
ssl_model = SpeechEncDecSelfSupervisedModel(cfg=modelConfig_contr_mlm_multi)
input_signal = torch.randn(size=(4, 64000))
length = torch.randint(low=48000, high=64000, size=[4])
with torch.no_grad():
spectrograms, spec_masks, encoded, encoded_len = ssl_model.forward(
input_signal=input_signal, input_signal_length=length
)
loss_value, loss_val_dict = ssl_model.decoder_loss_step(spectrograms, spec_masks, encoded, encoded_len)
assert len(loss_val_dict) == 4
| NeMo-main | tests/collections/asr/test_ssl_models.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import tempfile
import numpy as np
import pytest
from scipy.stats import uniform
from nemo.collections.asr.parts.utils.confidence_metrics import (
auc_nt,
auc_pr,
auc_roc,
auc_yc,
ece,
nce,
save_confidence_hist,
save_custom_confidence_curve,
save_nt_curve,
save_pr_curve,
save_roc_curve,
)
# set convenient name2metric mapping
name2metric = {
f.__name__: (f, ans)
for f, ans in zip((auc_roc, auc_pr, auc_nt, auc_yc, ece, nce), (0.833, 0.917, 0.833, 0.421, 0.232, 0.403))
}
# ece does not have a default value
name2metric_all_correct = {
f.__name__: (f, ans) for f, ans in zip((auc_roc, auc_pr, auc_nt, auc_yc, nce), (0.5, 1.0, 0.0, 0.0, -math.inf))
}
name2metric_all_incorrect = {
f.__name__: (f, ans) for f, ans in zip((auc_roc, auc_pr, auc_nt, auc_yc, nce), (0.5, 0.0, 1.0, 0.0, -math.inf))
}
# Initialize data
Y_TRUE = [1, 0, 0, 1, 1]
Y_TRUE_ALL_CORRECT = [1, 1, 1, 1, 1]
Y_TRUE_ALL_INCORRECT = [0, 0, 0, 0, 0]
Y_SCORE = [0.6, 0.7, 0.02, 0.95, 0.8]
Y_TRUE_RANDOM = np.random.choice(2, 1000, p=[0.2, 0.8])
# probability distribution with mean ~= 0.65 and std ~= 0.25
Y_SCORE_RANDOM = uniform.rvs(size=1000, loc=0.5, scale=0.5) - 0.5 * np.random.choice(2, 1000, p=[0.8, 0.2])
TOL_DEGREE = 3
TOL = 1 / math.pow(10, TOL_DEGREE)
class TestConfidenceMetrics:
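    """Checks the confidence metrics (AUC-ROC, AUC-PR, AUC-NT, AUC-YC, ECE, NCE) against precomputed reference values."""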
@pytest.mark.unit
@pytest.mark.parametrize('metric_name', name2metric.keys())
def test_metric_main(self, metric_name):
metric, ans = name2metric[metric_name]
assert round(metric(Y_TRUE, Y_SCORE), TOL_DEGREE) == ans
@pytest.mark.unit
@pytest.mark.parametrize('metric_name', name2metric_all_correct.keys())
def test_metric_all_correct(self, metric_name):
metric, ans = name2metric_all_correct[metric_name]
assert round(metric(Y_TRUE_ALL_CORRECT, Y_SCORE), TOL_DEGREE) == ans
@pytest.mark.unit
@pytest.mark.parametrize('metric_name', name2metric_all_incorrect.keys())
def test_metric_all_incorrect(self, metric_name):
metric, ans = name2metric_all_incorrect[metric_name]
assert round(metric(Y_TRUE_ALL_INCORRECT, Y_SCORE), TOL_DEGREE) == ans
@pytest.mark.unit
def test_metric_auc_yc_aux(self):
n_bins = 10
result, result_std, result_max, (thresholds, yc_curve) = auc_yc(
Y_TRUE, Y_SCORE, n_bins=n_bins, return_std_maximum=True, return_curve=True
)
assert round(result_std, TOL_DEGREE) == 0.228
assert round(result_max, TOL_DEGREE) == 0.667
assert np.allclose(np.array(thresholds), np.array([i / n_bins for i in range(0, n_bins + 1)]), atol=TOL)
assert np.allclose(
np.array(yc_curve), np.array([0.0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.167, 0.667, 0.667, 0.333, 0.0]), atol=TOL
)
class TestSaveConfidencePlot:
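    """Smoke tests: the confidence histogram and curve plotting helpers should run to completion in a temporary directory."""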
@pytest.mark.unit
def test_save_confidence_hist(self):
with tempfile.TemporaryDirectory() as tmpdir:
save_confidence_hist(Y_SCORE_RANDOM, tmpdir)
@pytest.mark.unit
@pytest.mark.parametrize('plot_func', (save_roc_curve, save_pr_curve, save_nt_curve))
def test_save_simple_confidence_curve(self, plot_func):
with tempfile.TemporaryDirectory() as tmpdir:
plot_func(Y_TRUE_RANDOM, Y_SCORE_RANDOM, tmpdir)
@pytest.mark.unit
def test_save_custom_confidence_curve(self):
with tempfile.TemporaryDirectory() as tmpdir:
ranges = np.arange(0, 1, 0.01)
save_custom_confidence_curve(ranges, ranges, tmpdir)
| NeMo-main | tests/collections/asr/confidence/test_asr_confidence_metrics.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import pytest
import torch
from nemo.collections.asr.parts.utils.asr_confidence_utils import (
get_confidence_aggregation_bank,
get_confidence_measure_bank,
)
# Initialize probability vectors
VOCAB_SIZES = (100, 1000, 10000)
ONE_VEC_SET, ZERO_VEC_SET, RAND_VEC_SET, OVERFIT_RAND_VEC_SET = {}, {}, {}, {}
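# ONE_VEC_SET: all probability mass on a single token (maximum confidence).
# ZERO_VEC_SET: uniform distribution over the vocabulary (minimum confidence).
# RAND_VEC_SET: random distributions; OVERFIT_RAND_VEC_SET: the same logits with one strongly dominant token.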
for vocab_size in VOCAB_SIZES:
# batch size 2 to test different positions of probability one
ONE_VEC_SET[vocab_size] = torch.nan_to_num(
torch.cat(
[
torch.tensor([[0] + [float('-inf')] * (vocab_size - 1)]),
torch.tensor([[float('-inf')] * (vocab_size - 3) + [0] + [float('-inf')] * 2]),
]
)
)
ZERO_VEC_SET[vocab_size] = torch.nan_to_num(torch.tensor([[math.log(1 / vocab_size)] * vocab_size] * 2))
# batch size 1
rand_logit = torch.rand((1, vocab_size))
rand_logit_overfit = rand_logit.clone()
rand_logit_overfit[0, 0] += vocab_size
RAND_VEC_SET[vocab_size] = torch.nan_to_num(torch.nn.functional.log_softmax(rand_logit, -1))
OVERFIT_RAND_VEC_SET[vocab_size] = torch.nan_to_num(torch.nn.functional.log_softmax(rand_logit_overfit, -1))
AGGREGATION_VEC_SIMPLE = [0.0, 0.5, 1]
TOL_DEGREE = 6
TOL = 1 / math.pow(10, TOL_DEGREE)
def get_measure_parametrize_ranges():
confidence_measure_bank = {}
alpha_range = (0.25, 0.5, 1.0)
bank_exception = None
try:
confidence_measure_bank = get_confidence_measure_bank()
except Exception as e:
alpha_range = ()
bank_exception = e
return confidence_measure_bank, alpha_range, bank_exception
def get_aggregation_parametrize_ranges():
confidence_aggregation_bank = {}
bank_exception = None
try:
confidence_aggregation_bank = get_confidence_aggregation_bank()
except Exception as e:
bank_exception = e
return confidence_aggregation_bank, bank_exception
class TestConfidenceMeasureBank:
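    """Boundary-value and partial-ordering checks for the confidence measure bank."""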
measure_bank, alphas, bank_build_exception = get_measure_parametrize_ranges()
@pytest.mark.unit
def test_measure_bank(self):
if self.bank_build_exception is not None:
raise self.bank_build_exception
assert isinstance(self.measure_bank, dict)
assert len(self.measure_bank) > 0
@pytest.mark.unit
@pytest.mark.parametrize('measure_name', measure_bank.keys())
@pytest.mark.parametrize('alpha', alphas)
@pytest.mark.parametrize('vocab_size', VOCAB_SIZES)
def test_confidence_measures_one(self, measure_name, alpha, vocab_size):
measure = self.measure_bank[measure_name]
assert torch.allclose(measure(ONE_VEC_SET[vocab_size], vocab_size, alpha), torch.tensor([1.0, 1.0]), atol=TOL)
@pytest.mark.unit
@pytest.mark.parametrize('measure_name', measure_bank.keys())
@pytest.mark.parametrize('alpha', alphas)
@pytest.mark.parametrize('vocab_size', VOCAB_SIZES)
def test_confidence_measures_zero(self, measure_name, alpha, vocab_size):
measure = self.measure_bank[measure_name]
assert torch.allclose(measure(ZERO_VEC_SET[vocab_size], vocab_size, alpha), torch.tensor([0.0, 0.0]), atol=TOL)
@pytest.mark.unit
@pytest.mark.parametrize('measure_name', measure_bank.keys())
@pytest.mark.parametrize('alpha', alphas)
@pytest.mark.parametrize('vocab_size', VOCAB_SIZES)
def test_confidence_measures_partial_order(self, measure_name, alpha, vocab_size):
measure = self.measure_bank[measure_name]
value_normal = round(float(measure(RAND_VEC_SET[vocab_size], vocab_size, alpha)[0]), TOL_DEGREE)
value_overfit = round(float(measure(OVERFIT_RAND_VEC_SET[vocab_size], vocab_size, alpha)[0]), TOL_DEGREE)
assert 0 <= value_normal < value_overfit <= 1, (
measure(RAND_VEC_SET[vocab_size], vocab_size, alpha),
measure(OVERFIT_RAND_VEC_SET[vocab_size], vocab_size, alpha),
)
class TestConfidenceAggregationBank:
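    """Simple-vector checks for the confidence aggregation bank (mean, min, max, prod)."""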
aggregation_bank, bank_build_exception = get_aggregation_parametrize_ranges()
@pytest.mark.unit
def test_aggregation_bank(self):
if self.bank_build_exception is not None:
raise self.bank_build_exception
assert isinstance(self.aggregation_bank, dict)
assert len(self.aggregation_bank) > 0
@pytest.mark.unit
@pytest.mark.parametrize('aggregation_name', aggregation_bank.keys())
    def test_confidence_aggregation_simple(self, aggregation_name):
# alaptev: would skipif work with parametrize arguments?
if aggregation_name not in ("mean", "min", "max", "prod"):
pytest.skip(f"{aggregation_name} is not a simple aggregation")
aggregation = self.aggregation_bank[aggregation_name]
if aggregation_name == "mean":
assert aggregation(AGGREGATION_VEC_SIMPLE) == 0.5
elif aggregation_name == "min":
assert aggregation(AGGREGATION_VEC_SIMPLE) == 0.0
if aggregation_name == "max":
assert aggregation(AGGREGATION_VEC_SIMPLE) == 1.0
if aggregation_name == "prod":
assert aggregation(AGGREGATION_VEC_SIMPLE) == 0.0
| NeMo-main | tests/collections/asr/confidence/test_asr_confidence_primitives.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import tempfile
from pathlib import Path
import numpy as np
import pytest
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import ASRModel, EncDecCTCModelBPE, EncDecRNNTBPEModel
from nemo.collections.asr.parts.submodules.ctc_greedy_decoding import GreedyCTCInferConfig
from nemo.collections.asr.parts.submodules.rnnt_greedy_decoding import GreedyRNNTInferConfig
from nemo.collections.asr.parts.utils.asr_confidence_benchmarking_utils import run_confidence_benchmark
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig
# Both models recognize the test data without errors, so every metric except ece returns its default value.
ECE_VALUES = {("token", "ctc"): 0.87, ("token", "rnnt"): 0.82, ("word", "ctc"): 0.91, ("word", "rnnt"): 0.88}
TOL_DEGREE = 2
TOL = 1 / math.pow(10, TOL_DEGREE)
@pytest.fixture(scope="module")
def conformer_ctc_bpe_model():
model = EncDecCTCModelBPE.from_pretrained(model_name="stt_en_conformer_ctc_small")
model.set_trainer(Trainer(devices=1, accelerator="cpu"))
model = model.eval()
return model
@pytest.fixture(scope="module")
def conformer_rnnt_bpe_model():
model = EncDecRNNTBPEModel.from_pretrained(model_name="stt_en_conformer_transducer_small")
model.set_trainer(Trainer(devices=1, accelerator="cpu"))
model = model.eval()
return model
@pytest.mark.with_downloads
@pytest.fixture(scope="module")
def audio_and_texts(test_data_dir):
# get filenames and reference texts from manifest
filepaths = []
reference_texts = []
manifest = Path(test_data_dir) / Path("asr/an4_val.json")
with open(manifest, 'r') as f:
for line in f:
item = json.loads(line)
# alaptev: maybe fix those paths in the manifest?
audio_file = Path(item['audio_filepath'].replace("/data/", "/.data/"))
filepaths.append(str(audio_file.absolute()))
reference_texts.append(item['text'])
return filepaths, reference_texts
class TestASRConfidenceBenchmark:
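    """Integration tests for run_confidence_benchmark and for handling of deprecated confidence config arguments."""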
@pytest.mark.integration
@pytest.mark.with_downloads
@pytest.mark.parametrize('model_name', ("ctc", "rnnt"))
@pytest.mark.parametrize('target_level', ("token", "word"))
def test_run_confidence_benchmark(
self, model_name, target_level, audio_and_texts, conformer_ctc_bpe_model, conformer_rnnt_bpe_model
):
model = conformer_ctc_bpe_model if model_name == "ctc" else conformer_rnnt_bpe_model
assert isinstance(model, ASRModel)
filepaths, reference_texts = audio_and_texts
confidence_cfg = (
ConfidenceConfig(preserve_token_confidence=True)
if target_level == "token"
else ConfidenceConfig(preserve_word_confidence=True)
)
model.change_decoding_strategy(
RNNTDecodingConfig(fused_batch_size=-1, strategy="greedy_batch", confidence_cfg=confidence_cfg)
if model_name == "rnnt"
else CTCDecodingConfig(confidence_cfg=confidence_cfg)
)
with tempfile.TemporaryDirectory() as tmpdir:
assert np.allclose(
np.array(
run_confidence_benchmark(model, target_level, filepaths, reference_texts, plot_dir=tmpdir)[
target_level
]
),
np.array([0.5, 1.0, 0.0, -math.inf, ECE_VALUES[(target_level, model_name)], 0.0, 0.0, 0.0]),
atol=TOL,
)
@pytest.mark.integration
@pytest.mark.with_downloads
@pytest.mark.parametrize('model_name', ("ctc", "rnnt"))
@pytest.mark.parametrize('arg', ("method_cfg", "temperature", "all"))
def test_deprecated_config_args(self, model_name, arg, conformer_ctc_bpe_model, conformer_rnnt_bpe_model):
assert ConfidenceConfig().measure_cfg.alpha == 0.33, "default `alpha` is supposed to be 0.33"
model = conformer_ctc_bpe_model if model_name == "ctc" else conformer_rnnt_bpe_model
assert isinstance(model, ASRModel)
if arg == "all":
conf = OmegaConf.create({"temperature": 0.5})
test_args_main = {"method_cfg": conf}
test_args_greedy = {"confidence_method_cfg": conf}
elif arg == "method_cfg":
conf = OmegaConf.create({"alpha": 0.5})
test_args_main = {"method_cfg": conf}
test_args_greedy = {"confidence_method_cfg": conf}
elif arg == "temperature":
conf = OmegaConf.create({"temperature": 0.5})
test_args_main = {"measure_cfg": conf}
test_args_greedy = {"confidence_measure_cfg": conf}
else:
raise NotImplementedError(arg)
confidence_cfg = ConfidenceConfig(preserve_word_confidence=True, **test_args_main)
model.change_decoding_strategy(
RNNTDecodingConfig(fused_batch_size=-1, strategy="greedy", confidence_cfg=confidence_cfg)
if model_name == "rnnt"
else CTCDecodingConfig(confidence_cfg=confidence_cfg)
)
assert model.cfg.decoding.confidence_cfg.measure_cfg.alpha == 0.5
model.change_decoding_strategy(
RNNTDecodingConfig(
fused_batch_size=-1,
strategy="greedy",
greedy=GreedyRNNTInferConfig(preserve_frame_confidence=True, **test_args_greedy),
)
if model_name == "rnnt"
else CTCDecodingConfig(greedy=GreedyCTCInferConfig(preserve_frame_confidence=True, **test_args_greedy))
)
assert model.cfg.decoding.greedy.confidence_measure_cfg.alpha == 0.5
| NeMo-main | tests/collections/asr/confidence/test_asr_confidence.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import DictConfig, ListConfig, OmegaConf
from nemo.collections.asr.models import ASRModel, EncDecCTCModel, EncDecRNNTModel
from nemo.collections.asr.parts.submodules.adapters import multi_head_attention_adapter_module
from nemo.collections.asr.parts.utils import adapter_utils
from nemo.collections.common.parts import adapter_modules
from nemo.core.classes.mixins.access_mixins import AccessMixin
from nemo.core.classes.mixins.adapter_mixins import AdapterModuleMixin, get_registered_adapter
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
from nemo.utils import config_utils, model_utils
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
@pytest.fixture()
def model():
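    """Small adapter-compatible EncDecCTCModel (ConvASREncoderAdapter encoder) used by the adapter mixin tests."""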
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConvASREncoderAdapter',
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': 50,
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 50,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
model_instance = EncDecCTCModel(cfg=modelConfig)
return model_instance
@pytest.fixture()
def conformer_ctc_adapter():
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.ConformerEncoderAdapter',
'feat_in': 64,
'feat_out': -1,
'n_layers': 2,
'd_model': 128,
'subsampling': 'striding',
'subsampling_factor': 4,
'self_attention_model': 'rel_pos',
'n_heads': 4,
'conv_kernel_size': 31,
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 128,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
model_instance = EncDecCTCModel(cfg=modelConfig)
return model_instance
@pytest.fixture()
def squeezeformer_ctc_adapter():
preprocessor = {'_target_': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor'}
encoder = {
'_target_': 'nemo.collections.asr.modules.SqueezeformerEncoderAdapter',
'feat_in': 64,
'feat_out': -1,
'n_layers': 2,
'd_model': 128,
'time_reduce_idx': 1,
'subsampling': 'dw_striding',
'subsampling_factor': 4,
'self_attention_model': 'rel_pos',
'n_heads': 4,
'conv_kernel_size': 31,
}
decoder = {
'_target_': 'nemo.collections.asr.modules.ConvASRDecoder',
'feat_in': 128,
'num_classes': 28,
'vocabulary': [
' ',
'a',
'b',
'c',
'd',
'e',
'f',
'g',
'h',
'i',
'j',
'k',
'l',
'm',
'n',
'o',
'p',
'q',
'r',
's',
't',
'u',
'v',
'w',
'x',
'y',
'z',
"'",
],
}
modelConfig = DictConfig(
{'preprocessor': DictConfig(preprocessor), 'encoder': DictConfig(encoder), 'decoder': DictConfig(decoder)}
)
model_instance = EncDecCTCModel(cfg=modelConfig)
return model_instance
@pytest.fixture()
def rnnt_model():
preprocessor = {'cls': 'nemo.collections.asr.modules.AudioToMelSpectrogramPreprocessor', 'params': dict({})}
# fmt: off
labels = [' ', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
'p', 'q', 'r', 's', 't', 'u', 'v', 'w',
'x', 'y', 'z', "'",
]
# fmt: on
model_defaults = {'enc_hidden': 96, 'pred_hidden': 64}
# Test case where Encoder (default) is not adapter compatible
encoder = {
'cls': 'nemo.collections.asr.modules.ConvASREncoder',
'params': {
'feat_in': 64,
'activation': 'relu',
'conv_mask': True,
'jasper': [
{
'filters': model_defaults['enc_hidden'],
'repeat': 1,
'kernel': [1],
'stride': [1],
'dilation': [1],
'dropout': 0.0,
'residual': False,
'separable': True,
'se': True,
'se_context_size': -1,
}
],
},
}
decoder = {
'_target_': 'nemo.collections.asr.modules.RNNTDecoder',
'prednet': {'pred_hidden': model_defaults['pred_hidden'], 'pred_rnn_layers': 1},
}
joint = {
'_target_': 'nemo.collections.asr.modules.RNNTJoint',
'jointnet': {'joint_hidden': 32, 'activation': 'relu'},
}
decoding = {'strategy': 'greedy_batch', 'greedy': {'max_symbols': 10}}
loss = {'loss_name': 'default', 'warprnnt_numba_kwargs': {'fastemit_lambda': 0.001}}
modelConfig = DictConfig(
{
'labels': ListConfig(labels),
'preprocessor': DictConfig(preprocessor),
'model_defaults': DictConfig(model_defaults),
'encoder': DictConfig(encoder),
'decoder': DictConfig(decoder),
'joint': DictConfig(joint),
'decoding': DictConfig(decoding),
'loss': DictConfig(loss),
}
)
model_instance = EncDecRNNTModel(cfg=modelConfig)
return model_instance
def get_adapter_cfg(in_features=50, dim=100, norm_pos='pre', atype='linear', **kwargs):
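    """Build an adapter config of the requested type ('linear', 'mha' or 'relmha') and return it as an OmegaConf object."""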
valid_types = ['linear', 'mha', 'relmha']
if atype not in valid_types:
raise ValueError(f"Invalid type. Valid types = {atype}")
if atype == 'linear':
cfg = adapter_modules.LinearAdapterConfig(in_features=in_features, dim=dim, norm_position=norm_pos)
elif atype == 'mha':
cfg = multi_head_attention_adapter_module.MultiHeadAttentionAdapterConfig(
n_head=kwargs.get('n_head', 1), n_feat=in_features
)
elif atype == 'relmha':
cfg = multi_head_attention_adapter_module.RelPositionMultiHeadAttentionAdapterConfig(
n_head=kwargs.get('n_head', 1), n_feat=in_features
)
print(cfg._target_)
cfg = OmegaConf.structured(cfg)
return cfg
class TestASRAdapterMixin:
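    """Tests for adding adapters to CTC, Conformer, Squeezeformer and RNNT models, running forward passes, and freezing/unfreezing them."""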
@pytest.mark.unit
def test_class_paths_are_correct(self):
# Resolve all object names in module
obj_keys = list(dir(adapter_utils))
for key in obj_keys:
if 'CLASSPATH' in key:
classpath = getattr(adapter_utils, key)
# This will raise import error if it fails
_ = model_utils.import_class_by_path(classpath)
                # Try getting the config of the class
config_path = classpath + "Config"
_ = model_utils.import_class_by_path(config_path)
@pytest.mark.unit
def test_asr_model_constructor(self, model):
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_asr_model_constructor_mha_adapter(self, model):
with pytest.raises(ValueError):
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg(atype='mha'))
@pytest.mark.unit
def test_conformer_constructor_mha_adapter(self, conformer_ctc_adapter):
original_num_params = conformer_ctc_adapter.num_weights
conformer_ctc_adapter.add_adapter(name='adapter_0', cfg=get_adapter_cfg(atype='relmha'))
new_num_params = conformer_ctc_adapter.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_squeezeformer_constructor_mha_adapter(self, squeezeformer_ctc_adapter):
original_num_params = squeezeformer_ctc_adapter.num_weights
squeezeformer_ctc_adapter.add_adapter(name='adapter_0', cfg=get_adapter_cfg(atype='relmha'))
new_num_params = squeezeformer_ctc_adapter.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_asr_model_constructor_encoder_module(self, model):
original_num_params = model.num_weights
model.add_adapter(name='encoder:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
def test_asr_model_constructor_decoder_module(self, model):
original_num_params = model.num_weights
model.add_adapter(name='decoder:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
assert model.decoder.is_adapter_available()
assert model.decoder.get_enabled_adapters()[0] == 'adapter_0'
@pytest.mark.unit
def test_asr_model_constructor_joint_module_ctc_skip(self, model):
original_num_params = model.num_weights
# this step should exit without adding adapters and without errors
model.add_adapter(name='joint:adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params == original_num_params
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.unit
def test_asr_model_constructor_joint_module_rnnt(self, rnnt_model):
original_num_params = rnnt_model.num_weights
rnnt_model.add_adapter(name='joint:adapter_0', cfg=get_adapter_cfg())
new_num_params = rnnt_model.num_weights
assert new_num_params > original_num_params
assert rnnt_model.joint.is_adapter_available()
assert rnnt_model.joint.get_enabled_adapters()[0] == 'adapter_0'
@pytest.mark.unit
def test_asr_multiple_adapter(self, model):
original_num_params = model.num_weights
model.add_adapter(name='adapter_0', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
original_num_params = new_num_params
model.add_adapter(name='adapter_1', cfg=get_adapter_cfg())
new_num_params = model.num_weights
assert new_num_params > original_num_params
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
def test_asr_forward_linear_pre(self, model, name):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name, cfg=get_adapter_cfg())
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
def test_asr_forward_linear_post(self, model, name):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name, cfg=get_adapter_cfg(norm_pos='post'))
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0'])
def test_conformer_forward_mha(self, conformer_ctc_adapter, name):
conformer_ctc_adapter.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = conformer_ctc_adapter(input_signal=input_signal, input_signal_length=input_signal_length)[0]
conformer_ctc_adapter.add_adapter(name=name, cfg=get_adapter_cfg(in_features=128, atype='mha'))
new_output = conformer_ctc_adapter(input_signal=input_signal, input_signal_length=input_signal_length)[0]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0'])
def test_squeezeformer_forward_mha(self, squeezeformer_ctc_adapter, name):
squeezeformer_ctc_adapter.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = squeezeformer_ctc_adapter(input_signal=input_signal, input_signal_length=input_signal_length)[
0
]
squeezeformer_ctc_adapter.add_adapter(name=name, cfg=get_adapter_cfg(in_features=128, atype='mha'))
new_output = squeezeformer_ctc_adapter(input_signal=input_signal, input_signal_length=input_signal_length)[0]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name1', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
@pytest.mark.parametrize('name2', ['adapter_1', 'encoder:adapter_1', 'decoder:adapter_1'])
def test_asr_multi_adapter_forward(self, model, name1, name2):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name1, cfg=get_adapter_cfg())
model.add_adapter(name=name2, cfg=get_adapter_cfg())
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
resolved_name1 = model.resolve_adapter_module_name_(name1)[-1]
resolved_name2 = model.resolve_adapter_module_name_(name2)[-1]
assert model.get_enabled_adapters() == [resolved_name1, resolved_name2]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.parametrize('name1', ['decoder:adapter_0', 'joint:adapter_0'])
@pytest.mark.parametrize('name2', ['decoder:adapter_1', 'joint:adapter_1'])
@pytest.mark.unit
    def test_rnnt_multi_adapter_forward(self, rnnt_model, name1, name2):
rnnt_model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = rnnt_model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
rnnt_model.add_adapter(name=name1, cfg=get_adapter_cfg())
rnnt_model.add_adapter(name=name2, cfg=get_adapter_cfg())
new_output = rnnt_model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
resolved_name1 = rnnt_model.resolve_adapter_module_name_(name1)[-1]
resolved_name2 = rnnt_model.resolve_adapter_module_name_(name2)[-1]
assert rnnt_model.get_enabled_adapters() == [resolved_name1, resolved_name2]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name1', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
@pytest.mark.parametrize('name2', ['adapter_1', 'encoder:adapter_1', 'decoder:adapter_1'])
def test_asr_multi_adapter_partial_forward(self, model, name1, name2):
model.eval()
torch.random.manual_seed(0)
input_signal = torch.randn(2, 512)
input_signal_length = torch.tensor([512, 512], dtype=torch.int32)
        original_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
model.add_adapter(name=name1, cfg=get_adapter_cfg())
model.add_adapter(name=name2, cfg=get_adapter_cfg())
model.set_enabled_adapters(name=name1, enabled=False)
new_output = model(input_signal=input_signal, input_signal_length=input_signal_length)[0]
resolved_name2 = model.resolve_adapter_module_name_(name2)[-1]
assert model.get_enabled_adapters() == [resolved_name2]
        assert torch.mean(torch.abs(original_output - new_output)) < 1e-5
@pytest.mark.unit
@pytest.mark.parametrize('name', ['adapter_0', 'encoder:adapter_0', 'decoder:adapter_0'])
def test_asr_forward_unfrozen_adapters(self, model, name):
model.eval()
original_num_params = model.num_weights
dim = 10
model.add_adapter(name=name, cfg=get_adapter_cfg(dim=dim))
model.freeze()
model.unfreeze_enabled_adapters()
assert original_num_params == 5443
original_params = 0
adapter_params = 0
for name, param in model.named_parameters():
if 'adapter' not in name:
assert param.requires_grad is False
original_params += param.numel()
else:
assert param.requires_grad is True
adapter_params += param.numel()
for mname, module in model.named_modules():
if isinstance(module, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
assert module.track_running_stats is False
assert original_params > adapter_params
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor_pretrained(self):
# Check to/from config_dict:
cfg = ASRModel.from_pretrained('stt_en_citrinet_256', map_location='cpu', return_config=True)
adapter_metadata = get_registered_adapter(cfg.encoder._target_)
if adapter_metadata is not None:
cfg.encoder._target_ = adapter_metadata.adapter_class_path
model = ASRModel.from_pretrained('stt_en_citrinet_256', override_config_path=cfg)
assert isinstance(model, AdapterModuleMixin)
assert hasattr(model, 'encoder')
assert isinstance(model.encoder, AdapterModuleMixin)
model.add_adapter('adapter_0', cfg=get_adapter_cfg(in_features=cfg.encoder.jasper[0].filters, dim=5))
assert model.is_adapter_available()
model.freeze()
model.unfreeze_enabled_adapters()
assert model.num_weights < 1e5
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.with_downloads()
@pytest.mark.unit
def test_constructor_pretrained_rnnt(self):
# Check to/from config_dict:
cfg = ASRModel.from_pretrained('stt_en_contextnet_256', map_location='cpu', return_config=True)
adapter_metadata = get_registered_adapter(cfg.encoder._target_)
if adapter_metadata is not None:
cfg.encoder._target_ = adapter_metadata.adapter_class_path
model = ASRModel.from_pretrained('stt_en_contextnet_256', override_config_path=cfg)
assert isinstance(model, AdapterModuleMixin)
assert hasattr(model, 'encoder')
assert isinstance(model.encoder, AdapterModuleMixin)
assert hasattr(model, 'decoder')
assert isinstance(model.decoder, AdapterModuleMixin)
assert hasattr(model, 'joint')
assert isinstance(model.joint, AdapterModuleMixin)
model.add_adapter('adapter_0', cfg=get_adapter_cfg(in_features=cfg.encoder.jasper[0].filters, dim=5))
model.add_adapter('decoder:adapter_1', cfg=get_adapter_cfg(in_features=cfg.decoder.prednet.pred_hidden, dim=5))
model.add_adapter('joint:adapter_2', cfg=get_adapter_cfg(in_features=cfg.joint.jointnet.joint_hidden, dim=5))
assert model.is_adapter_available()
model.freeze()
model.unfreeze_enabled_adapters()
assert model.num_weights < 1e5
@pytest.mark.unit
def test_asr_model_adapter_loss(self, model):
original_num_params = model.num_weights
x = torch.randn(2, 512)
x_len = torch.tensor([256, 512], dtype=torch.int32)
adapter_cfg = get_adapter_cfg() # type: adapter_modules.LinearAdapterConfig
adapter_cfg.adapter_strategy.l2_lambda = 0.01
model.add_adapter(name='adapter_0', cfg=adapter_cfg)
new_num_params = model.num_weights
assert new_num_params > original_num_params
model.train() # set training mode to true
with torch.no_grad():
AccessMixin.reset_registry(model)
AccessMixin.update_access_cfg({'save_encoder_tensors': False})
_ = model(input_signal=x, input_signal_length=x_len)
# extract losses
auxiliary_losses = AccessMixin.get_module_registry(model)
loss = list(auxiliary_losses.values())[0]
assert 'adapter_loss' in loss
            assert loss['adapter_loss'][0] == torch.tensor(0.0)  # the adapter is zero-initialized, so its auxiliary loss starts at 0.
AccessMixin.reset_registry(model)
| NeMo-main | tests/collections/asr/mixins/adapters/test_asr_adapter_mixin.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from nemo.collections.asr.parts.submodules import adapters as adapter_modules
from nemo.core.classes.mixins import adapter_mixin_strategies
from nemo.utils import config_utils
def _create_masks(att_mask, max_audio_length, padding_length):
# pad_mask is the masking to be used to ignore paddings
pad_mask = torch.arange(0, max_audio_length).expand(padding_length.size(0), -1) < padding_length.unsqueeze(-1)
# pad_mask_for_att_mask is the mask which helps to ignore paddings
pad_mask_for_att_mask = pad_mask.unsqueeze(1).repeat([1, max_audio_length, 1])
pad_mask_for_att_mask = torch.logical_and(pad_mask_for_att_mask, pad_mask_for_att_mask.transpose(1, 2))
# att_mask is the masking to be used by the MHA layers to ignore the tokens not supposed to be visible
att_mask = att_mask[:, :max_audio_length, :max_audio_length]
# paddings should also get ignored, so pad_mask_for_att_mask is used to ignore their corresponding scores
att_mask = torch.logical_and(pad_mask_for_att_mask, att_mask.to(pad_mask_for_att_mask.device))
pad_mask = ~pad_mask
att_mask = ~att_mask
return pad_mask, att_mask
def get_mask(lengths: torch.Tensor):
max_seq_len = lengths.max()
att_mask = torch.ones(1, max_seq_len, max_seq_len, dtype=torch.bool)
pad_mask, att_mask = _create_masks(att_mask, max_seq_len, lengths)
return pad_mask, att_mask
class TestASRAdapterModules:
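    """Config-signature and zero-initialization checks for the MHA and positional-encoding adapter modules."""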
@pytest.mark.unit
def test_mha_adapter_config(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_modules.MultiHeadAttentionAdapter,
adapter_modules.MultiHeadAttentionAdapterConfig,
ignore_args=IGNORED_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_relpos_mha_adapter_config(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_modules.RelPositionMultiHeadAttentionAdapter,
adapter_modules.RelPositionMultiHeadAttentionAdapterConfig,
ignore_args=IGNORED_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_abs_pos_encoding_adapter_config(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_modules.PositionalEncodingAdapter,
adapter_modules.PositionalEncodingAdapterConfig,
ignore_args=IGNORED_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
def test_rel_pos_encoding_adapter_config(self):
IGNORED_ARGS = ['_target_']
result = config_utils.assert_dataclass_signature_match(
adapter_modules.RelPositionalEncodingAdapter,
adapter_modules.RelPositionalEncodingAdapterConfig,
ignore_args=IGNORED_ARGS,
)
signatures_match, cls_subset, dataclass_subset = result
assert signatures_match
assert cls_subset is None
assert dataclass_subset is None
@pytest.mark.unit
@pytest.mark.parametrize('n_head', [1, 2, 10])
@pytest.mark.parametrize('proj_dim', [None, -1])
def test_mha_adapter_init(self, n_head, proj_dim):
torch.random.manual_seed(0)
x = torch.randn(2, 32, 50)
lengths = torch.randint(1, x.size(1), size=(x.size(0),))
lengths[torch.randint(0, x.size(0), size=(1,))[0]] = x.size(1)
adapter = adapter_modules.MultiHeadAttentionAdapter(
n_head=n_head, n_feat=50, dropout_rate=0.0, proj_dim=proj_dim
)
pad_mask, att_mask = get_mask(lengths)
with torch.no_grad():
assert adapter.linear_out.weight.sum() == 0
if hasattr(adapter.linear_out, 'bias') and adapter.linear_out.bias is not None:
assert adapter.linear_out.bias.sum() == 0
out = adapter(x, x, x, att_mask)
assert out.sum().abs() <= 1e-8
assert out.shape == x.shape
@pytest.mark.unit
@pytest.mark.parametrize('n_head', [1, 2, 10])
@pytest.mark.parametrize('proj_dim', [None, -1])
def test_relmha_adapter_init(self, n_head, proj_dim):
torch.random.manual_seed(0)
x = torch.randn(2, 32, 50)
lengths = torch.randint(1, x.size(1), size=(x.size(0),))
lengths[torch.randint(0, x.size(0), size=(1,))[0]] = x.size(1)
adapter = adapter_modules.RelPositionMultiHeadAttentionAdapter(
n_head=n_head, n_feat=50, dropout_rate=0.0, proj_dim=proj_dim
)
relpos_enc = adapter_modules.RelPositionalEncodingAdapter(d_model=50)
pad_mask, att_mask = get_mask(lengths)
relpos_enc.extend_pe(lengths.max(), device='cpu')
with torch.no_grad():
assert adapter.linear_out.weight.sum() == 0
if hasattr(adapter.linear_out, 'bias') and adapter.linear_out.bias is not None:
assert adapter.linear_out.bias.sum() == 0
_, pos_emb = relpos_enc(x)
out = adapter(x, x, x, att_mask, pos_emb)
assert out.sum().abs() <= 1e-8
assert out.shape == x.shape
@pytest.mark.unit
def test_abspos_encoding_init(self):
torch.random.manual_seed(0)
x = torch.randn(2, 32, 50)
lengths = torch.randint(1, x.size(1), size=(x.size(0),))
lengths[torch.randint(0, x.size(0), size=(1,))[0]] = x.size(1)
relpos_enc = adapter_modules.PositionalEncodingAdapter(d_model=50)
relpos_enc.extend_pe(lengths.max(), device='cpu')
with torch.no_grad():
out, pos_emb = relpos_enc(x)
assert (out - pos_emb - x).sum().abs() <= 1e-5
assert out.shape == x.shape
@pytest.mark.unit
def test_relpos_encoding_init(self):
torch.random.manual_seed(0)
x = torch.randn(2, 32, 50)
lengths = torch.randint(1, x.size(1), size=(x.size(0),))
lengths[torch.randint(0, x.size(0), size=(1,))[0]] = x.size(1)
relpos_enc = adapter_modules.RelPositionalEncodingAdapter(d_model=50)
relpos_enc.extend_pe(lengths.max(), device='cpu')
with torch.no_grad():
out, pos_emb = relpos_enc(x)
assert (out - x).sum().abs() <= 1e-8
assert out.shape == x.shape
@pytest.mark.unit
def test_mha_adapter_strategy(self):
adapter = adapter_modules.MultiHeadAttentionAdapter(n_head=1, n_feat=50, dropout_rate=0.0)
assert hasattr(adapter, 'adapter_strategy')
assert adapter.adapter_strategy is not None
# assert default strategy is set
assert isinstance(adapter.adapter_strategy, adapter_modules.MHAResidualAddAdapterStrategy)
@pytest.mark.unit
def test_relpos_mha_adapter_strategy(self):
adapter = adapter_modules.RelPositionMultiHeadAttentionAdapter(n_head=1, n_feat=50, dropout_rate=0.0)
assert hasattr(adapter, 'adapter_strategy')
assert adapter.adapter_strategy is not None
# assert default strategy is set
assert isinstance(adapter.adapter_strategy, adapter_modules.MHAResidualAddAdapterStrategy)
@pytest.mark.unit
def test_abspos_encoding_adapter_strategy(self):
adapter = adapter_modules.PositionalEncodingAdapter(d_model=50)
assert hasattr(adapter, 'adapter_strategy')
assert adapter.adapter_strategy is not None
# assert default strategy is set
assert isinstance(adapter.adapter_strategy, adapter_mixin_strategies.ReturnResultAdapterStrategy)
@pytest.mark.unit
def test_relpos_encoding_adapter_strategy(self):
adapter = adapter_modules.RelPositionalEncodingAdapter(d_model=50)
assert hasattr(adapter, 'adapter_strategy')
assert adapter.adapter_strategy is not None
# assert default strategy is set
assert isinstance(adapter.adapter_strategy, adapter_mixin_strategies.ReturnResultAdapterStrategy)
| NeMo-main | tests/collections/asr/mixins/adapters/test_asr_adapter_modules.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from torch.nn import CTCLoss as CTCLoss_Pytorch
DEVICES = ['cpu']
if torch.cuda.is_available():
DEVICES.append('cuda')
def wrap_and_call(fn, acts, labels, device):
if not torch.is_tensor(acts):
acts = torch.FloatTensor(acts)
if 'cuda' in device:
acts = acts.cuda()
if not acts.requires_grad:
acts.requires_grad = True
lengths = [acts.shape[1]] * acts.shape[0]
label_lengths = [len(l) for l in labels]
labels = torch.LongTensor(labels)
lengths = torch.LongTensor(lengths)
label_lengths = torch.LongTensor(label_lengths)
log_probs = torch.nn.functional.log_softmax(acts.transpose(0, 1), -1)
if 'cuda' in device:
labels = labels.cuda()
lengths = lengths.cuda()
label_lengths = label_lengths.cuda()
costs = fn(log_probs, labels, lengths, label_lengths)
cost = torch.sum(costs)
cost.backward()
if 'cuda' in device:
torch.cuda.synchronize()
if acts.grad is not None:
grad = acts.grad.data.cpu().numpy()
else:
grad = None
return costs.data.cpu().numpy(), grad
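# Shape conventions assumed by wrap_and_call above (inferred from the tests below): `acts` is
# (batch, time, num_classes) raw activations; the helper builds full-length input lengths,
# applies log_softmax over a time-major (time, batch, num_classes) view, and returns the
# per-call costs together with the gradient of the summed cost w.r.t. `acts`.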
def init_k2_ctc(**kwargs):
from nemo.collections.asr.parts.k2.ml_loss import CtcLoss
ctc = CtcLoss(**kwargs)
return lambda log_probs, labels, lengths, label_lengths: ctc(
log_probs.transpose(0, 1), labels, lengths, label_lengths
)[0]
def skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled):
if device == 'cpu':
supported, msg = k2_is_appropriate
elif device == 'cuda':
supported, msg = k2_cuda_is_enabled
else:
raise ValueError(f"Unknown device: {device}")
if not supported:
pytest.skip(f"k2 test is skipped. Reason : {msg}")
class TestCTCLossK2:
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
acts = np.array(
[
[
[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.6, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.8, 0.1],
[0.1, 0.6, 0.1, 0.1, 0.1],
[0.1, 0.1, 0.2, 0.1, 0.1],
[0.7, 0.1, 0.2, 0.1, 0.1],
]
]
)
labels = [[1, 2, 3]]
fn_k2 = init_k2_ctc(num_classes=acts.shape[-1], blank=0, reduction='sum')
k2_cost, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
expected_cost = 5.0279555
expected_grads = np.array(
[
[
[0.00157518, -0.53266853, 0.17703111, 0.17703111, 0.17703111],
[-0.02431531, -0.17048728, -0.15925968, 0.17703113, 0.17703113],
[-0.06871005, 0.03236287, -0.2943067, 0.16722652, 0.16342735],
[-0.09178554, 0.25313747, -0.17673965, -0.16164337, 0.17703108],
[-0.10229809, 0.19587973, 0.05823242, -0.34769377, 0.19587973],
[-0.22203964, 0.1687112, 0.18645471, -0.30183747, 0.1687112],
]
]
)
assert np.allclose(k2_cost, expected_cost, rtol=1e-6), "small_test costs mismatch."
assert np.allclose(k2_grads, expected_grads, atol=1e-6), "small_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small_blank_last(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
acts = np.array(
[
[
[0.0, 1.0, 3.0],
[0.0, 2.0, 3.0],
[1.0, 1.0, 3.0],
[2.0, 3.0, 2.0],
[0.0, 0.0, 1.0],
[0.0, 1.0, 1.0],
[1.0, 0.0, 1.0],
[2.0, 2.0, 0.0],
[0.0, 2.0, 5.0],
[0.0, 3.0, 5.0],
[1.0, 2.0, 5.0],
[2.0, 4.0, 4.0],
[0.0, 3.0, 4.0],
[0.0, 4.0, 4.0],
[1.0, 3.0, 4.0],
[2.0, 5.0, 3.0],
[2.0, 2.0, 1.0],
[2.0, 3.0, 1.0],
[3.0, 2.0, 1.0],
[4.0, 4.0, 0.0],
]
]
)
labels = [[0, 1, 0, 0, 1, 0]]
fn_k2 = init_k2_ctc(num_classes=acts.shape[-1], blank=acts.shape[-1] - 1, reduction='sum')
k2_cost, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
expected_cost = 6.823422
expected_grads = np.array(
[
[
[-0.09792291, 0.11419516, -0.01627225],
[-0.08915664, 0.22963384, -0.14047718],
[-0.19687234, 0.06477807, 0.13209426],
[-0.22838503, 0.1980845, 0.03030053],
[-0.07985485, -0.0589368, 0.13879165],
[-0.04722299, 0.01424287, 0.03298012],
[0.01492161, 0.02710512, -0.04202673],
[-0.43219852, 0.4305843, 0.00161422],
[-0.00332598, 0.0440818, -0.04075582],
[-0.01329869, 0.11521607, -0.10191737],
[-0.03721291, 0.04389342, -0.00668051],
[-0.2723349, 0.43273386, -0.16039898],
[-0.03499417, 0.1896997, -0.15470551],
[-0.02911933, 0.29706067, -0.26794133],
[-0.04593367, -0.04479058, 0.09072424],
[-0.07227867, 0.16096972, -0.08869105],
[0.13993078, -0.20230117, 0.06237038],
[-0.05889719, 0.04007925, 0.01881794],
[-0.09667239, 0.07077749, 0.0258949],
[-0.49002117, 0.4954626, -0.00544143],
]
]
)
assert np.allclose(k2_cost, expected_cost, rtol=1e-6), "small_test_blank_last costs mismatch."
assert np.allclose(k2_grads, expected_grads, atol=1e-6), "small_test_blank_last gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small_random(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
rng = np.random.RandomState(0)
acts = rng.randn(1, 4, 3)
labels = [[1, 2]]
fn_k2 = init_k2_ctc(num_classes=acts.shape[-1], blank=0, reduction='sum')
k2_cost, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
fn_pt = CTCLoss_Pytorch(reduction='sum', zero_infinity=True)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
assert np.allclose(k2_cost, pt_cost, rtol=1e-6), "small_random_test costs mismatch."
assert np.allclose(k2_grads, pt_grads, atol=1e-6), "small_random_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_big_tensor(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
# minibatch x T x alphabet_size
acts = [
[
[0.06535690384862791, 0.7875301411923206, 0.08159176605666074],
[0.5297155426466327, 0.7506749639230854, 0.7541348379087998],
[0.6097641124736383, 0.8681404965673826, 0.6225318186056529],
[0.6685222872103057, 0.8580392805336061, 0.16453892311765583],
[0.989779515236694, 0.944298460961015, 0.6031678586829663],
[0.9467833543605416, 0.666202507295747, 0.28688179752461884],
[0.09418426230195986, 0.3666735970751962, 0.736168049462793],
[0.1666804425271342, 0.7141542198635192, 0.3993997272216727],
[0.5359823524146038, 0.29182076440286386, 0.6126422611507932],
[0.3242405528768486, 0.8007644367291621, 0.5241057606558068],
[0.779194617063042, 0.18331417220174862, 0.113745182072432],
[0.24022162381327106, 0.3394695622533106, 0.1341595066017014],
],
[
[0.5055615569388828, 0.051597282072282646, 0.6402903936686337],
[0.43073311517251, 0.8294731834714112, 0.1774668847323424],
[0.3207001991262245, 0.04288308912457006, 0.30280282975568984],
[0.6751777088333762, 0.569537369330242, 0.5584738347504452],
[0.08313242153985256, 0.06016544344162322, 0.10795752845152584],
[0.7486153608562472, 0.943918041459349, 0.4863558118797222],
[0.4181986264486809, 0.6524078485043804, 0.024242983423721887],
[0.13458171554507403, 0.3663418070512402, 0.2958297395361563],
[0.9236695822497084, 0.6899291482654177, 0.7418981733448822],
[0.25000547599982104, 0.6034295486281007, 0.9872887878887768],
[0.5926057265215715, 0.8846724004467684, 0.5434495396894328],
[0.6607698886038497, 0.3771277082495921, 0.3580209022231813],
],
]
expected_costs = [6.388067, 5.2999153]
expected_grads = [
[
[0.06130501, -0.3107036, 0.24939862],
[0.08428053, -0.07131141, -0.01296911],
[-0.04510102, 0.21943177, -0.17433074],
[-0.1970142, 0.37144178, -0.17442757],
[-0.08807078, 0.35828218, -0.2702114],
[-0.24209887, 0.33242193, -0.09032306],
[-0.07871056, 0.3116736, -0.23296304],
[-0.27552277, 0.43320477, -0.157682],
[-0.16173504, 0.27361175, -0.1118767],
[-0.13012655, 0.42030025, -0.2901737],
[-0.2378576, 0.26685005, -0.02899244],
[0.08487711, 0.36765888, -0.45253596],
],
[
[-0.14147596, -0.2702151, 0.41169107],
[-0.05323913, -0.18442528, 0.23766442],
[-0.24160458, -0.11692462, 0.3585292],
[-0.1004294, -0.17919227, 0.27962166],
[-0.01819841, -0.12625945, 0.14445786],
[-0.00131121, 0.06060241, -0.0592912],
[-0.09093696, 0.2536721, -0.16273515],
[-0.08962183, 0.34198248, -0.25236064],
[-0.19668606, 0.25176668, -0.05508063],
[0.0232805, 0.1351273, -0.1584078],
[0.09494846, -0.17026341, 0.07531495],
[0.00775955, -0.30424336, 0.29648378],
],
]
acts = np.array(acts)
expected_costs = np.array(expected_costs)
labels = [[1, 2, 2, 2, 2], [1, 1, 2, 2, 1]]
fn_k2 = init_k2_ctc(num_classes=acts.shape[-1], blank=0, reduction='none')
k2_costs, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
assert np.allclose(k2_costs, expected_costs), "big_test average costs mismatch."
assert np.allclose(k2_grads, expected_grads, rtol=1e-3), "big_test grads for average cost mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_large_random(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
rng = np.random.RandomState(0)
acts = rng.randn(4, 80, 5)
labels = [
[1, 2, 4, 3, 2, 2, 1, 1, 1, 1, 1, 1, 2, 1, 2, 3, 3, 1, 1, 1],
[3, 2, 2, 3, 4, 1, 1, 1, 1, 1, 4, 4, 1, 2, 1, 3, 4, 3, 1, 2],
[4, 4, 1, 2, 1, 3, 4, 3, 1, 2, 3, 2, 2, 3, 4, 1, 1, 1, 1, 1],
[1, 1, 2, 1, 2, 3, 3, 1, 1, 1, 1, 2, 4, 3, 2, 2, 1, 1, 1, 1],
]
fn_k2 = init_k2_ctc(num_classes=acts.shape[-1], blank=0, reduction='sum')
k2_costs, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
fn_pt = CTCLoss_Pytorch(reduction='sum', zero_infinity=True)
pt_costs, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
assert np.allclose(k2_costs, pt_costs, atol=1e-5, rtol=1e-3), "large_random_test costs mismatch."
assert np.allclose(k2_grads, pt_grads, atol=1e-5, rtol=1e-3), "large_random_test gradient mismatch."
if __name__ == "__main__":
pytest.main([__file__])
| NeMo-main | tests/collections/asr/k2/test_ctc.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
import pytest
import torch
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_numpy import RNNTLoss as RNNTLoss_Numpy
try:
from nemo.collections.asr.parts.k2.graph_transducer import GraphRnntLoss
from nemo.core.utils.k2_guard import k2
except (ImportError, ModuleNotFoundError):
pytest.skip("k2 is not installed, skipping Graph-RNNT tests.", allow_module_level=True)
EPS_SM_INPUT = 1e-6
EPS_L_INPUT = 1e-4
DEVICES = ['cpu']
if torch.cuda.is_available() and k2.with_cuda:
DEVICES.append('cuda')
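# Conventions used for the etalon graphs below (a sketch of the textual FSA format as it is
# used in these tests): each arc is written as
#   "src_state dest_state label [aux_label [unit_position]] score",
# the final line contains only the super-final state id, and the arc lines are sorted before
# being passed to k2.Fsa.from_str.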
class TestGraphRnnt:
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
@pytest.mark.parametrize("num_frames", [1, 3, 6])
@pytest.mark.parametrize("vocab_size", [3])
def test_temporal_schema(self, device, blank_first, num_frames, vocab_size):
blank_id = 0 if blank_first else vocab_size - 1
loss = GraphRnntLoss(blank=blank_id)
temporal_schema = loss.get_temporal_schema(
num_frames=num_frames, vocab_size=vocab_size, device=torch.device(device)
)
etalon_schema_fst: List[List[int]] = []
for time_i in range(num_frames):
for label_i in range(vocab_size):
if label_i == blank_id:
# transition to the next state
etalon_schema_fst.append([time_i, time_i + 1, label_i, time_i, 0])
else:
# self-loop
etalon_schema_fst.append([time_i, time_i, label_i, time_i, 0])
etalon_schema_fst.append([num_frames, num_frames + 1, -1, -1, 0]) # transition to final state
etalon_schema_fst.append([num_frames + 1]) # final state
etalon_schema_fst = sorted(etalon_schema_fst) # required for k2.Fsa.from_str
etalon_schema_fst_str = "\n".join([" ".join(map(str, line)) for line in etalon_schema_fst])
etalon_temporal_schema = k2.Fsa.from_str(etalon_schema_fst_str, num_aux_labels=1)
assert temporal_schema.num_arcs == etalon_temporal_schema.num_arcs
assert temporal_schema.shape == etalon_temporal_schema.shape # (num_states, None)
assert k2.is_rand_equivalent(
temporal_schema, etalon_temporal_schema, log_semiring=True, treat_epsilons_specially=False
), "Temporal schema mismatch"
assert k2.is_rand_equivalent(
temporal_schema.invert(),
etalon_temporal_schema.invert(),
log_semiring=True,
treat_epsilons_specially=False,
), "Temporal schema output labels mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
def test_unit_schema(self, device, blank_first):
vocab_size = 3
blank_id = 0 if blank_first else vocab_size - 1
if blank_first:
labels = [1, 1, 2, 1]
else:
labels = [1, 1, 0, 1]
loss = GraphRnntLoss(blank=blank_id)
unit_schema = loss.get_unit_schema(
units_tensor=torch.tensor(labels, device=torch.device(device)), vocab_size=vocab_size
)
etalon_schema_fst: List[List[int]] = []
for label_i, label in enumerate(labels):
etalon_schema_fst.append([label_i, label_i + 1, label, label, label_i, 0]) # forward: label
etalon_schema_fst.append([label_i, label_i, blank_id, blank_id, label_i, 0]) # self-loop: blank
etalon_schema_fst.append([len(labels), len(labels), blank_id, blank_id, len(labels), 0])
etalon_schema_fst.append([len(labels), len(labels) + 1, -1, -1, -1, 0]) # transition to final state
etalon_schema_fst.append([len(labels) + 1]) # final state
etalon_schema_fst = sorted(etalon_schema_fst) # required for k2.Fsa.from_str
etalon_schema_fst_str = "\n".join([" ".join(map(str, line)) for line in etalon_schema_fst])
etalon_unit_schema = k2.Fsa.from_str(etalon_schema_fst_str, aux_label_names=["aux_labels", "unit_positions"])
assert unit_schema.num_arcs == etalon_unit_schema.num_arcs
assert unit_schema.shape == etalon_unit_schema.shape # (num_states, None)
assert k2.is_rand_equivalent(
unit_schema, etalon_unit_schema, log_semiring=True, treat_epsilons_specially=False
), "Unit schema input labels mismatch"
assert k2.is_rand_equivalent(
unit_schema.invert(), etalon_unit_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Unit schema output labels mismatch"
# swap aux_labels and unit positions to test unit_positions
unit_schema.aux_labels, unit_schema.unit_positions = unit_schema.unit_positions, unit_schema.aux_labels
etalon_unit_schema.aux_labels, etalon_unit_schema.unit_positions = (
etalon_unit_schema.unit_positions,
etalon_unit_schema.aux_labels,
)
assert k2.is_rand_equivalent(
unit_schema.invert(), etalon_unit_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Unit schema unit positions mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
def test_grid_schema(self, device, blank_first):
vocab_size = 3
blank_id = 0 if blank_first else vocab_size - 1
if blank_first:
labels = [1, 1, 2, 1]
else:
labels = [1, 1, 0, 1]
text_length = len(labels)
num_frames = 5
loss = GraphRnntLoss(blank=blank_id)
grid_schema = loss.get_grid(
units_tensor=torch.tensor(labels, device=torch.device(device)),
num_frames=num_frames,
vocab_size=vocab_size,
)
etalon_schema_fst: List[List[int]] = []
for frame_i in range(num_frames):
for label_i in range(text_length + 1):
state = frame_i * (text_length + 1) + label_i
if label_i < text_length:
next_state_label = state + 1
# next unit
etalon_schema_fst.append([state, next_state_label, labels[label_i], frame_i, label_i, 0])
if frame_i < num_frames - 1:
next_state_frame = (frame_i + 1) * (text_length + 1) + label_i
# next time frame (blank)
etalon_schema_fst.append([state, next_state_frame, blank_id, frame_i, label_i, 0])
last_grid_state = num_frames * (text_length + 1) - 1
etalon_schema_fst.append([last_grid_state, last_grid_state + 1, blank_id, num_frames - 1, text_length, 0])
etalon_schema_fst.append(
[last_grid_state + 1, last_grid_state + 2, -1, -1, -1, 0]
) # transition to final state
etalon_schema_fst.append([last_grid_state + 2]) # final state
etalon_schema_fst = sorted(etalon_schema_fst) # required for k2.Fsa.from_str
etalon_schema_fst_str = "\n".join([" ".join(map(str, line)) for line in etalon_schema_fst])
etalon_grid_schema = k2.Fsa.from_str(etalon_schema_fst_str, aux_label_names=["aux_labels", "unit_positions"])
assert grid_schema.num_arcs == etalon_grid_schema.num_arcs
assert grid_schema.shape == etalon_grid_schema.shape # (num_states, None)
assert k2.is_rand_equivalent(
grid_schema, etalon_grid_schema, log_semiring=True, treat_epsilons_specially=False
), "Grid schema input labels mismatch"
assert k2.is_rand_equivalent(
grid_schema.invert(), etalon_grid_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Grid schema output labels mismatch"
# swap aux_labels and unit positions to test unit_positions
grid_schema.aux_labels, grid_schema.unit_positions = grid_schema.unit_positions, grid_schema.aux_labels
etalon_grid_schema.aux_labels, etalon_grid_schema.unit_positions = (
etalon_grid_schema.unit_positions,
etalon_grid_schema.aux_labels,
)
assert k2.is_rand_equivalent(
grid_schema.invert(), etalon_grid_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Grid schema unit positions mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("connect_composed", [True, False])
@pytest.mark.parametrize("blank_first", [True, False])
def test_small_compose_transducer(
self, device, connect_composed, blank_first, rnnt_test_helper, rnn_loss_sample_data
):
if blank_first:
sample_data = rnn_loss_sample_data.get_sample_small()
else:
sample_data = rnn_loss_sample_data.get_sample_small_blank_last()
graph_rnnt = GraphRnntLoss(
blank=sample_data.blank_id, connect_composed=connect_composed, use_grid_implementation=False
)
graph_cost, graph_grads = rnnt_test_helper.wrap_and_call(
graph_rnnt, sample_data.logits, sample_data.targets, device
)
assert np.allclose(graph_cost, sample_data.expected_cost.numpy(), rtol=EPS_SM_INPUT), "costs mismatch."
assert np.allclose(graph_grads, sample_data.expected_grads.numpy(), atol=1e-6), "gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
def test_small_grid_transducer(self, device, rnnt_test_helper, rnn_loss_sample_data):
sample_data = rnn_loss_sample_data.get_sample_small()
graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True)
graph_cost, graph_grads = rnnt_test_helper.wrap_and_call(
graph_rnnt, sample_data.logits, sample_data.targets, device
)
assert np.allclose(graph_cost, sample_data.expected_cost.numpy(), rtol=EPS_SM_INPUT), "costs mismatch."
assert np.allclose(graph_grads, sample_data.expected_grads.numpy(), atol=1e-6), "gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
def test_medium_grid_transducer(self, device, rnnt_test_helper, rnn_loss_sample_data):
sample_data = rnn_loss_sample_data.get_sample_medium()
graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True)
graph_cost, graph_grads = rnnt_test_helper.wrap_and_call(
graph_rnnt, sample_data.logits, sample_data.targets, device
)
assert np.allclose(graph_cost, sample_data.expected_cost.numpy(), rtol=EPS_SM_INPUT), "costs mismatch."
assert np.allclose(graph_grads, sample_data.expected_grads.numpy(), atol=1e-6), "gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
def test_medium_random_var_size(self, device, rnnt_test_helper, rnn_loss_sample_data):
sample_data = rnn_loss_sample_data.get_sample_medium_random_var_size(blank_first=True)
graph_rnnt = GraphRnntLoss(blank=0, use_grid_implementation=True)
graph_cost, graph_grads = rnnt_test_helper.wrap_and_call(
graph_rnnt,
sample_data.logits.detach(),
sample_data.targets,
device,
input_lengths=sample_data.input_lengths,
target_lengths=sample_data.target_lengths,
)
etalon_rnnt = RNNTLoss_Numpy(blank=0)
etalon_cost, etalon_grads = rnnt_test_helper.wrap_and_call(
etalon_rnnt,
sample_data.logits.detach(),
sample_data.targets,
device,
input_lengths=sample_data.input_lengths,
target_lengths=sample_data.target_lengths,
)
assert np.allclose(graph_cost.sum(), etalon_cost, rtol=EPS_SM_INPUT), "costs mismatch."
assert np.allclose(graph_grads, etalon_grads, atol=1e-4), "gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
def test_small_random_grid_compose_equivalent(self, device: torch.device, blank_first: bool, rnn_loss_sample_data):
sample_data = rnn_loss_sample_data.get_sample_small_random(blank_first, device=device)
criterion = GraphRnntLoss(blank=sample_data.blank_id, connect_composed=True, use_grid_implementation=False)
text_tensor = sample_data.targets[0]
num_frames = sample_data.logits.shape[1]
graph_grid = criterion.get_grid(text_tensor, num_frames, sample_data.vocab_size)
graph_composed = criterion.get_composed_lattice(text_tensor, num_frames, sample_data.vocab_size)
assert k2.is_rand_equivalent(
graph_grid, graph_composed, log_semiring=True, treat_epsilons_specially=False
), "Grid and composed graphs are not equivalent."
| NeMo-main | tests/collections/asr/k2/test_graph_transducer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_numpy import RNNTLoss as RNNTLoss_Numpy
DEVICES = ['cpu']
if torch.cuda.is_available():
DEVICES.append('cuda')
def wrap_and_call(fn, acts, labels, device):
if not torch.is_tensor(acts):
acts = torch.FloatTensor(acts)
if 'cuda' in device:
acts = acts.cuda()
if not acts.requires_grad:
acts.requires_grad = True
lengths = [acts.shape[1]] * acts.shape[0]
label_lengths = [len(l) for l in labels]
labels = torch.LongTensor(labels)
lengths = torch.LongTensor(lengths)
label_lengths = torch.LongTensor(label_lengths)
if 'cuda' in device:
labels = labels.cuda()
lengths = lengths.cuda()
label_lengths = label_lengths.cuda()
costs = fn(acts, labels, lengths, label_lengths)
cost = torch.sum(costs)
cost.backward()
if 'cuda' in device:
torch.cuda.synchronize()
if acts.grad is not None:
grad = acts.grad.data.cpu().numpy()
else:
grad = None
return costs.data.cpu().numpy(), grad
def init_k2_rnnt(**kwargs):
from nemo.collections.asr.parts.k2.ml_loss import RnntLoss
rnnt = RnntLoss(**kwargs)
return lambda acts, labels, lengths, label_lengths: rnnt(
torch.nn.functional.log_softmax(acts, -1),
labels.to(dtype=torch.long),
lengths.to(dtype=torch.long),
label_lengths.to(dtype=torch.long),
)[0]
def skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled):
if device == 'cpu':
supported, msg = k2_is_appropriate
elif device == 'cuda':
supported, msg = k2_cuda_is_enabled
else:
raise ValueError(f"Unknown device: {device}")
if not supported:
pytest.skip(f"k2 test is skipped. Reason : {msg}")
class TestRNNTLossK2:
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
acts = np.array(
[
[
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.2, 0.1, 0.1], [0.7, 0.1, 0.2, 0.1, 0.1]],
]
]
)
labels = [[1, 2]]
fn_k2 = init_k2_rnnt(num_classes=acts.shape[-1], blank=0, reduction='sum')
k2_cost, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
expected_cost = 4.495666
expected_grads = np.array(
[
[
[
[-0.13116688, -0.3999269, 0.17703125, 0.17703125, 0.17703125],
[-0.18572757, 0.12247056, -0.18168412, 0.12247056, 0.12247056],
[-0.32091254, 0.06269141, 0.06928472, 0.12624499, 0.06269141],
],
[
[0.05456069, -0.21824276, 0.05456069, 0.05456069, 0.05456069],
[0.12073959, 0.12073959, -0.48295835, 0.12073959, 0.12073959],
[-0.6925882, 0.16871116, 0.18645467, 0.16871116, 0.16871116],
],
]
]
)
assert np.allclose(k2_cost, expected_cost, rtol=1e-6), "small_test costs mismatch."
assert np.allclose(k2_grads, expected_grads, atol=1e-6), "small_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small_blank_last(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
acts = np.array(
[
[
[[0.0, 1.0, 3.0], [0.0, 2.0, 3.0], [1.0, 1.0, 3.0], [2.0, 3.0, 2.0]],
[[0.0, 0.0, 1.0], [0.0, 1.0, 1.0], [1.0, 0.0, 1.0], [2.0, 2.0, 0.0]],
[[0.0, 2.0, 5.0], [0.0, 3.0, 5.0], [1.0, 2.0, 5.0], [2.0, 4.0, 4.0]],
[[0.0, 3.0, 4.0], [0.0, 4.0, 4.0], [1.0, 3.0, 4.0], [2.0, 5.0, 3.0]],
[[2.0, 2.0, 1.0], [2.0, 3.0, 1.0], [3.0, 2.0, 1.0], [4.0, 4.0, 0.0]],
]
]
)
labels = [[0, 1, 0]]
fn_k2 = init_k2_rnnt(num_classes=acts.shape[-1], blank=acts.shape[-1] - 1, reduction='sum')
k2_cost, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
expected_cost = 6.789285182952881
expected_grads = np.array(
[
[
[
[-0.03551076725125313, 0.11419519782066345, -0.07868456840515137],
[0.0027224558871239424, 0.00704305712133646, -0.009765520691871643],
[0.0013856772566214204, 0.0013924005907028913, -0.0027780719101428986],
[1.4249643527364242e-06, 3.873454716085689e-06, -5.298420546751004e-06],
],
[
[-0.1934257447719574, 0.19551163911819458, -0.0020859241485595703],
[0.07043898105621338, 0.05738453567028046, -0.12782356142997742],
[0.061031512916088104, 0.02286236733198166, -0.08389391005039215],
[0.0005252412520349026, 0.0005252412520349026, -0.0010504829697310925],
],
[
[-0.007841046899557114, 0.025142310187220573, -0.017301201820373535],
[0.0019501042552292347, 0.0005148053169250488, -0.0024650096893310547],
[0.0027856370434165, 0.008609085343778133, -0.01139475405216217],
[9.526080975774676e-05, 0.0007038871408440173, -0.000799147819634527],
],
[
[-0.01533521432429552, 0.1386115401983261, -0.12327653169631958],
[0.002850571647286415, -0.006693005561828613, 0.003842458128929138],
[0.009236274287104607, 0.08995233476161957, -0.0991886705160141],
[0.0001865450612967834, 0.0037468576338142157, -0.003933403175324202],
],
[
[-0.2888762652873993, 0.211185485124588, 0.07769080251455307],
[0.15952755510807037, -0.2182144820690155, 0.05868690833449364],
[-0.3332723379135132, 0.2436419129371643, 0.0896308496594429],
[0.4954628646373749, 0.4954628646373749, -0.9909257292747498],
],
]
]
)
assert np.allclose(k2_cost, expected_cost, rtol=1e-6), "small_test_blank_last costs mismatch."
assert np.allclose(k2_grads, expected_grads, atol=1e-6), "small_test_blank_last gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small_random(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
rng = np.random.RandomState(0)
acts = rng.randn(1, 4, 3, 3)
labels = [[1, 2]]
fn_k2 = init_k2_rnnt(num_classes=acts.shape[-1], blank=0, reduction='sum')
k2_cost, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
fn_np = RNNTLoss_Numpy()
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
assert np.allclose(k2_cost, np_cost, rtol=1e-6), "small_random_test costs mismatch."
assert np.allclose(k2_grads, np_grads, atol=1e-6), "small_random_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_big_tensor(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
# minibatch x T x (U + 1) x alphabet_size
acts = [
[
[
[0.06535690384862791, 0.7875301411923206, 0.08159176605666074],
[0.5297155426466327, 0.7506749639230854, 0.7541348379087998],
[0.6097641124736383, 0.8681404965673826, 0.6225318186056529],
],
[
[0.6685222872103057, 0.8580392805336061, 0.16453892311765583],
[0.989779515236694, 0.944298460961015, 0.6031678586829663],
[0.9467833543605416, 0.666202507295747, 0.28688179752461884],
],
[
[0.09418426230195986, 0.3666735970751962, 0.736168049462793],
[0.1666804425271342, 0.7141542198635192, 0.3993997272216727],
[0.5359823524146038, 0.29182076440286386, 0.6126422611507932],
],
[
[0.3242405528768486, 0.8007644367291621, 0.5241057606558068],
[0.779194617063042, 0.18331417220174862, 0.113745182072432],
[0.24022162381327106, 0.3394695622533106, 0.1341595066017014],
],
],
[
[
[0.5055615569388828, 0.051597282072282646, 0.6402903936686337],
[0.43073311517251, 0.8294731834714112, 0.1774668847323424],
[0.3207001991262245, 0.04288308912457006, 0.30280282975568984],
],
[
[0.6751777088333762, 0.569537369330242, 0.5584738347504452],
[0.08313242153985256, 0.06016544344162322, 0.10795752845152584],
[0.7486153608562472, 0.943918041459349, 0.4863558118797222],
],
[
[0.4181986264486809, 0.6524078485043804, 0.024242983423721887],
[0.13458171554507403, 0.3663418070512402, 0.2958297395361563],
[0.9236695822497084, 0.6899291482654177, 0.7418981733448822],
],
[
[0.25000547599982104, 0.6034295486281007, 0.9872887878887768],
[0.5926057265215715, 0.8846724004467684, 0.5434495396894328],
[0.6607698886038497, 0.3771277082495921, 0.3580209022231813],
],
],
]
expected_costs = [4.2806528590890736, 3.9384369822503591]
expected_grads = [
[
[
[-1.86843902e-01, -6.25548810e-02, 2.49398798e-01],
[-2.03376666e-01, 2.02399328e-01, 9.77333169e-04],
[-1.41016081e-01, 7.91234672e-02, 6.18926100e-02],
],
[
[-1.15517676e-02, -8.12802389e-02, 9.28319991e-02],
[-1.54257029e-01, 2.29432687e-01, -7.51756504e-02],
[-2.46593088e-01, 1.46404594e-01, 1.00188486e-01],
],
[
[-1.29182907e-02, -6.15932420e-02, 7.45115355e-02],
[-5.59857301e-02, 2.19830811e-01, -1.63845062e-01],
[-4.97626871e-01, 2.09239945e-01, 2.88386941e-01],
],
[
[1.36048580e-02, -3.02196294e-02, 1.66147724e-02],
[1.13924511e-01, 6.27811998e-02, -1.76705718e-01],
[-6.67078257e-01, 3.67658824e-01, 2.99419403e-01],
],
],
[
[
[-3.56343776e-01, -5.53474613e-02, 4.11691219e-01],
[-9.69219357e-02, 2.94591039e-02, 6.74628317e-02],
[-6.35175705e-02, 2.76544970e-02, 3.58630717e-02],
],
[
[-1.54499024e-01, -7.39420280e-02, 2.28441030e-01],
[-1.66789949e-01, -8.78955179e-05, 1.66877866e-01],
[-1.72369644e-01, 1.05565332e-01, 6.68043196e-02],
],
[
[2.38748826e-02, -1.18255816e-01, 9.43809375e-02],
[-1.04707085e-01, -1.08934477e-01, 2.13641584e-01],
[-3.69844258e-01, 1.80118099e-01, 1.89726159e-01],
],
[
[2.57137045e-02, -7.94617534e-02, 5.37480488e-02],
[1.22328237e-01, -2.38788679e-01, 1.16460443e-01],
[-5.98686993e-01, 3.02203178e-01, 2.96483815e-01],
],
],
]
acts = np.array(acts)
expected_costs = np.array(expected_costs)
labels = [[1, 2], [1, 1]]
fn_k2 = init_k2_rnnt(num_classes=acts.shape[-1], blank=0, reduction='none')
k2_costs, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
assert np.allclose(k2_costs, expected_costs), "big_test average costs mismatch."
assert np.allclose(k2_grads, expected_grads, rtol=1e-3), "big_test grads for average cost mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_large_random(self, device, k2_is_appropriate, k2_cuda_is_enabled):
skip_test_if_unsupported(device, k2_is_appropriate, k2_cuda_is_enabled)
rng = np.random.RandomState(0)
acts = rng.randn(4, 8, 11, 5)
labels = [
[1, 2, 4, 3, 2, 2, 1, 1, 1, 1],
[3, 2, 2, 3, 4, 1, 1, 1, 1, 1],
[4, 4, 1, 2, 1, 3, 4, 3, 1, 2],
[1, 1, 2, 1, 2, 3, 3, 1, 1, 1],
]
fn_k2 = init_k2_rnnt(num_classes=acts.shape[-1], blank=0, reduction='sum')
k2_costs, k2_grads = wrap_and_call(fn_k2, acts, labels, device)
fn_np = RNNTLoss_Numpy()
np_costs, np_grads = wrap_and_call(fn_np, acts, labels, device)
assert np.allclose(k2_costs, np_costs, atol=1e-5, rtol=1e-3), "large_random_test costs mismatch."
assert np.allclose(k2_grads, np_grads, atol=1e-5, rtol=1e-3), "large_random_test gradient mismatch."
if __name__ == "__main__":
pytest.main([__file__])
| NeMo-main | tests/collections/asr/k2/test_rnnt.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
import pytest
import torch
try:
from nemo.collections.asr.parts.k2.w_transducer import GraphWTransducerLoss
from nemo.core.utils.k2_guard import k2
except (ImportError, ModuleNotFoundError):
pytest.skip("k2 is not installed, skipping Graph-W-Transducer tests.", allow_module_level=True)
DEVICES = ['cpu']
if torch.cuda.is_available() and k2.with_cuda:
DEVICES.append('cuda')
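# Convention used in the etalon graphs below: besides the regular vocabulary labels, the
# W-Transducer schemas introduce two synthetic arc labels, vocab_size for the epsilon arcs
# leaving the first state (skipping leading frames) and vocab_size + 1 for the epsilon arcs
# leading towards the last state (skipping trailing frames).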
class TestGraphWTransducerLoss:
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
@pytest.mark.parametrize("num_frames", [1, 3, 6])
@pytest.mark.parametrize("vocab_size", [3])
@pytest.mark.parametrize("last_blank_mode", ["force_final", "allow_ignore"])
def test_temporal_schema(self, device, blank_first, num_frames, vocab_size, last_blank_mode):
blank_id = 0 if blank_first else vocab_size - 1
loss = GraphWTransducerLoss(blank=blank_id, last_blank_mode=last_blank_mode)
temporal_schema = loss.get_temporal_schema(
num_frames=num_frames, vocab_size=vocab_size, device=torch.device(device)
)
etalon_schema_fst: List[List[int]] = []
for time_i in range(num_frames):
for label_i in range(vocab_size):
if label_i == blank_id:
# transition to the next state
etalon_schema_fst.append([time_i, time_i + 1, label_i, time_i, 0])
else:
# self-loop
etalon_schema_fst.append([time_i, time_i, label_i, time_i, 0])
# eps transitions from the first state
eps_from_first_state = vocab_size
for time_i in range(1, num_frames):
etalon_schema_fst.append([0, time_i, eps_from_first_state, 0, 0])
# eps transitions to the last state
eps_to_last_state = vocab_size + 1
last_state_eps = num_frames - 1 if last_blank_mode == "force_final" else num_frames
for time_i in range(0, num_frames - 1):
etalon_schema_fst.append([time_i, last_state_eps, eps_to_last_state, time_i, 0])
# transition to the final state
etalon_schema_fst.append([num_frames, num_frames + 1, -1, -1, 0])
# final state
etalon_schema_fst.append([num_frames + 1])
etalon_schema_fst = sorted(etalon_schema_fst) # required for k2.Fsa.from_str
etalon_schema_fst_str = "\n".join([" ".join(map(str, line)) for line in etalon_schema_fst])
etalon_temporal_schema = k2.Fsa.from_str(etalon_schema_fst_str, num_aux_labels=1)
assert temporal_schema.num_arcs == etalon_temporal_schema.num_arcs
assert temporal_schema.shape == etalon_temporal_schema.shape # (num_states, None)
assert k2.is_rand_equivalent(
temporal_schema, etalon_temporal_schema, log_semiring=True, treat_epsilons_specially=False
), "Temporal schema mismatch"
assert k2.is_rand_equivalent(
temporal_schema.invert(),
etalon_temporal_schema.invert(),
log_semiring=False,
treat_epsilons_specially=False,
), "Temporal schema output labels mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
def test_unit_schema(self, device, blank_first):
vocab_size = 3
blank_id = 0 if blank_first else vocab_size - 1
if blank_first:
labels = [1, 1, 2, 1]
else:
labels = [1, 1, 0, 1]
loss = GraphWTransducerLoss(blank=blank_id)
unit_schema = loss.get_unit_schema(
units_tensor=torch.tensor(labels, device=torch.device(device)), vocab_size=vocab_size
)
etalon_schema_fst: List[List[int]] = []
for label_i, label in enumerate(labels):
etalon_schema_fst.append([label_i, label_i + 1, label, label, label_i, 0]) # forward: label
etalon_schema_fst.append([label_i, label_i, blank_id, blank_id, label_i, 0]) # self-loop: blank
etalon_schema_fst.append([len(labels), len(labels), blank_id, blank_id, len(labels), 0])
# eps-transitions
etalon_schema_fst.append([0, 0, vocab_size, vocab_size, 0, 0])
etalon_schema_fst.append([len(labels), len(labels), vocab_size + 1, vocab_size + 1, len(labels), 0])
etalon_schema_fst.append([len(labels), len(labels) + 1, -1, -1, -1, 0]) # transition to final state
etalon_schema_fst.append([len(labels) + 1]) # final state
etalon_schema_fst = sorted(etalon_schema_fst) # required for k2.Fsa.from_str
etalon_schema_fst_str = "\n".join([" ".join(map(str, line)) for line in etalon_schema_fst])
etalon_unit_schema = k2.Fsa.from_str(etalon_schema_fst_str, aux_label_names=["aux_labels", "unit_positions"])
assert unit_schema.num_arcs == etalon_unit_schema.num_arcs
assert unit_schema.shape == etalon_unit_schema.shape # (num_states, None)
assert k2.is_rand_equivalent(
unit_schema, etalon_unit_schema, log_semiring=True, treat_epsilons_specially=False
), "Unit schema input labels mismatch"
assert k2.is_rand_equivalent(
unit_schema.invert(), etalon_unit_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Unit schema output labels mismatch"
# swap aux_labels and unit positions to test unit_positions
unit_schema.aux_labels, unit_schema.unit_positions = unit_schema.unit_positions, unit_schema.aux_labels
etalon_unit_schema.aux_labels, etalon_unit_schema.unit_positions = (
etalon_unit_schema.unit_positions,
etalon_unit_schema.aux_labels,
)
assert k2.is_rand_equivalent(
unit_schema.invert(), etalon_unit_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Unit schema unit positions mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
@pytest.mark.parametrize("last_blank_mode", ["force_final", "allow_ignore"])
def test_grid_schema(self, device, blank_first, last_blank_mode):
vocab_size = 3
blank_id = 0 if blank_first else vocab_size - 1
if blank_first:
labels = [1, 1, 2, 1]
else:
labels = [1, 1, 0, 1]
text_length = len(labels)
num_frames = 5
loss = GraphWTransducerLoss(blank=blank_id, last_blank_mode=last_blank_mode)
grid_schema = loss.get_grid(
units_tensor=torch.tensor(labels, device=torch.device(device)),
num_frames=num_frames,
vocab_size=vocab_size,
)
etalon_schema_fst: List[List[int]] = []
for frame_i in range(num_frames):
for label_i in range(text_length + 1):
state = frame_i * (text_length + 1) + label_i
if label_i < text_length:
next_state_label = state + 1
# next unit
etalon_schema_fst.append([state, next_state_label, labels[label_i], frame_i, label_i, 0])
if frame_i < num_frames - 1:
next_state_frame = (frame_i + 1) * (text_length + 1) + label_i
# next time frame (blank)
etalon_schema_fst.append([state, next_state_frame, blank_id, frame_i, label_i, 0])
# start eps-transitions (one per non-initial frame)
for frame_i in range(1, num_frames):
etalon_schema_fst.append([0, frame_i * (text_length + 1), vocab_size, 0, 0, 0])
last_grid_state = num_frames * (text_length + 1) - 1
# end eps-transitions
if last_blank_mode == "force_final":
last_eps_state = last_grid_state
else:
assert last_blank_mode == "allow_ignore"
last_eps_state = last_grid_state + 1
for frame_i in range(num_frames - 1):
etalon_schema_fst.append(
[(frame_i + 1) * (text_length + 1) - 1, last_eps_state, vocab_size + 1, frame_i, text_length, 0]
)
etalon_schema_fst.append([last_grid_state, last_grid_state + 1, blank_id, num_frames - 1, text_length, 0])
etalon_schema_fst.append(
[last_grid_state + 1, last_grid_state + 2, -1, -1, -1, 0]
) # transition to final state
etalon_schema_fst.append([last_grid_state + 2]) # final state
etalon_schema_fst = sorted(etalon_schema_fst) # required for k2.Fsa.from_str
etalon_schema_fst_str = "\n".join([" ".join(map(str, line)) for line in etalon_schema_fst])
etalon_grid_schema = k2.Fsa.from_str(etalon_schema_fst_str, aux_label_names=["aux_labels", "unit_positions"])
assert grid_schema.num_arcs == etalon_grid_schema.num_arcs
assert grid_schema.shape == etalon_grid_schema.shape # (num_states, None)
assert k2.is_rand_equivalent(
grid_schema, etalon_grid_schema, log_semiring=True, treat_epsilons_specially=False
), "Grid schema input labels mismatch"
assert k2.is_rand_equivalent(
grid_schema.invert(), etalon_grid_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Grid schema output labels mismatch"
# swap aux_labels and unit positions to test unit_positions
grid_schema.aux_labels, grid_schema.unit_positions = grid_schema.unit_positions, grid_schema.aux_labels
etalon_grid_schema.aux_labels, etalon_grid_schema.unit_positions = (
etalon_grid_schema.unit_positions,
etalon_grid_schema.aux_labels,
)
assert k2.is_rand_equivalent(
grid_schema.invert(), etalon_grid_schema.invert(), log_semiring=True, treat_epsilons_specially=False
), "Grid schema unit positions mismatch"
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("blank_first", [True, False])
@pytest.mark.parametrize("last_blank_mode", ["allow_ignore", "force_final"])
def test_small_random_grid_compose_equivalent(
self, device: torch.device, blank_first: bool, last_blank_mode, rnn_loss_sample_data
):
sample_data = rnn_loss_sample_data.get_sample_small_random(blank_first, device=device)
criterion = GraphWTransducerLoss(
blank=sample_data.blank_id,
last_blank_mode=last_blank_mode,
connect_composed=True,
use_grid_implementation=False,
)
text_tensor = sample_data.targets[0]
num_frames = sample_data.logits.shape[1]
graph_grid = criterion.get_grid(text_tensor, num_frames, sample_data.vocab_size)
graph_composed = criterion.get_composed_lattice(text_tensor, num_frames, sample_data.vocab_size)
assert k2.is_rand_equivalent(
graph_grid, graph_composed, log_semiring=True, treat_epsilons_specially=False
), "Grid and composed graphs are not equivalent."
@pytest.mark.unit
@pytest.mark.parametrize("device", DEVICES)
@pytest.mark.parametrize("last_blank_mode", ["allow_ignore", "force_final"])
@pytest.mark.parametrize("use_grid_implementation", [True, False])
def test_small_grid_transducer_inf_penalty(
self, device, last_blank_mode, use_grid_implementation, rnnt_test_helper, rnn_loss_sample_data
):
"""
With a large negative epsilon-arc penalty (approximating -inf), the W-Transducer loss should be equivalent to the RNN-T loss.
"""
sample_data = rnn_loss_sample_data.get_sample_small()
graph_rnnt = GraphWTransducerLoss(
blank=0,
eps_weight=-100.0,
last_blank_mode=last_blank_mode,
use_grid_implementation=use_grid_implementation,
)
graph_cost, graph_grads = rnnt_test_helper.wrap_and_call(
graph_rnnt, sample_data.logits, sample_data.targets, device
)
assert np.allclose(graph_cost, sample_data.expected_cost.numpy(), rtol=1e-6), "costs mismatch."
assert np.allclose(graph_grads, sample_data.expected_grads.numpy(), atol=1e-6), "gradient mismatch."
| NeMo-main | tests/collections/asr/k2/test_w_transducer.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from pyannote.core import Annotation, Segment
from nemo.collections.asr.parts.utils.vad_utils import (
align_labels_to_frames,
convert_labels_to_speech_segments,
frame_vad_construct_pyannote_object_per_file,
get_frame_labels,
get_nonspeech_segments,
load_speech_overlap_segments_from_rttm,
load_speech_segments_from_rttm,
read_rttm_as_pyannote_object,
)
def get_simple_rttm_without_overlap(rttm_file="test1.rttm"):
line = "SPEAKER <NA> 1 0 2 <NA> <NA> speech <NA> <NA>\n"
speech_segments = [[0.0, 2.0]]
with open(rttm_file, "w") as f:
f.write(line)
return rttm_file, speech_segments
def get_simple_rttm_with_overlap(rttm_file="test2.rttm"):
speech_segments = [[0.0, 3.0]]
overlap_segments = [[1.0, 2.0]]
with open(rttm_file, "w") as f:
f.write("SPEAKER <NA> 1 0 2 <NA> <NA> speech <NA> <NA>\n")
f.write("SPEAKER <NA> 1 1 2 <NA> <NA> speech <NA> <NA>\n")
return rttm_file, speech_segments, overlap_segments
def get_simple_rttm_with_silence(rttm_file="test3.rttm"):
line = "SPEAKER <NA> 1 1 2 <NA> <NA> speech <NA> <NA>\n"
speech_segments = [[1.0, 2.0]]
silence_segments = [[0.0, 1.0]]
with open(rttm_file, "w") as f:
f.write(line)
return rttm_file, speech_segments, silence_segments
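# The helpers above write minimal RTTM lines; following the standard RTTM layout
# "SPEAKER <file-id> <channel> <onset> <duration> <ortho> <stype> <name> <conf> <slat>",
# only the onset (4th field), duration (5th field) and the "speech" label matter for these
# tests, e.g. "SPEAKER <NA> 1 0 2 ..." denotes speech from 0.0 s to 2.0 s.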
class TestVADUtils:
@pytest.mark.parametrize(["logits_len", "labels_len"], [(20, 10), (20, 11), (20, 9), (10, 21), (10, 19)])
@pytest.mark.unit
def test_align_label_logits(self, logits_len, labels_len):
logits = np.arange(logits_len).tolist()
labels = np.arange(labels_len).tolist()
labels_new = align_labels_to_frames(probs=logits, labels=labels)
assert len(labels_new) == len(logits)
@pytest.mark.unit
def test_load_speech_segments_from_rttm(self, test_data_dir):
rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test1.rttm")
speech_segments_new = load_speech_segments_from_rttm(rttm_file)
assert speech_segments_new == speech_segments
@pytest.mark.unit
def test_load_speech_overlap_segments_from_rttm(self, test_data_dir):
rttm_file, speech_segments, overlap_segments = get_simple_rttm_with_overlap(test_data_dir + "/test2.rttm")
speech_segments_new, overlap_segments_new = load_speech_overlap_segments_from_rttm(rttm_file)
assert speech_segments_new == speech_segments
assert overlap_segments_new == overlap_segments
@pytest.mark.unit
def test_get_nonspeech_segments(self, test_data_dir):
rttm_file, speech_segments, silence_segments = get_simple_rttm_with_silence(test_data_dir + "/test3.rttm")
speech_segments_new = load_speech_segments_from_rttm(rttm_file)
silence_segments_new = get_nonspeech_segments(speech_segments_new)
assert silence_segments_new == silence_segments
@pytest.mark.unit
def test_get_frame_labels(self, test_data_dir):
rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test4.rttm")
speech_segments_new = load_speech_segments_from_rttm(rttm_file)
frame_labels = get_frame_labels(speech_segments_new, 0.02, 0.0, 3.0, as_str=False)
assert frame_labels[0] == 1
assert len(frame_labels) == 150
@pytest.mark.unit
def test_convert_labels_to_speech_segments(self, test_data_dir):
rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test5.rttm")
speech_segments_new = load_speech_segments_from_rttm(rttm_file)
frame_labels = get_frame_labels(speech_segments_new, 0.02, 0.0, 3.0, as_str=False)
speech_segments_new = convert_labels_to_speech_segments(frame_labels, 0.02)
assert speech_segments_new == speech_segments
@pytest.mark.unit
def test_read_rttm_as_pyannote_object(self, test_data_dir):
rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test6.rttm")
pyannote_object = read_rttm_as_pyannote_object(rttm_file)
pyannote_object_gt = Annotation()
pyannote_object_gt[Segment(0.0, 2.0)] = 'speech'
assert pyannote_object == pyannote_object_gt
@pytest.mark.unit
def test_frame_vad_construct_pyannote_object_per_file(self, test_data_dir):
rttm_file, speech_segments = get_simple_rttm_without_overlap(test_data_dir + "/test7.rttm")
# test for rttm input
ref, hyp = frame_vad_construct_pyannote_object_per_file(rttm_file, rttm_file)
pyannote_object_gt = Annotation()
pyannote_object_gt[Segment(0.0, 2.0)] = 'speech'
assert ref == hyp == pyannote_object_gt
# test for list input
speech_segments = load_speech_segments_from_rttm(rttm_file)
frame_labels = get_frame_labels(speech_segments, 0.02, 0.0, 3.0, as_str=False)
speech_segments_new = convert_labels_to_speech_segments(frame_labels, 0.02)
assert speech_segments_new == speech_segments
ref, hyp = frame_vad_construct_pyannote_object_per_file(frame_labels, frame_labels, 0.02)
assert ref == hyp == pyannote_object_gt
| NeMo-main | tests/collections/asr/utils/test_vad_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import namedtuple
from typing import List, Type, Union
import librosa
import matplotlib.pyplot as plt
import numpy as np
import pytest
import scipy
import soundfile as sf
import torch
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.asr.parts.utils.audio_utils import SOUND_VELOCITY as sound_velocity
from nemo.collections.asr.parts.utils.audio_utils import (
calculate_sdr_numpy,
convmtx_mc_numpy,
db2mag,
estimated_coherence,
generate_approximate_noise_field,
get_segment_start,
mag2db,
pow2db,
rms,
select_channels,
theoretical_coherence,
toeplitz,
)
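# A small illustrative sketch (not part of the original tests, never invoked by pytest) of the
# channel_selector convention exercised by this class: None keeps all channels, 'average'
# averages across channels, and an int or list of ints picks specific channels.
def _demo_reference_channel_selection(samples: np.ndarray, channel_selector) -> np.ndarray:
    if channel_selector is None:
        return samples
    if channel_selector == 'average':
        return np.mean(samples, axis=1)
    # an int or a list of ints selects the corresponding channel(s)
    return samples[:, channel_selector]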
class TestAudioSegment:
@pytest.mark.unit
@pytest.mark.parametrize(
"num_channels, channel_selectors", [(1, [None, 'average', 0]), (3, [None, 'average', 0, 1, [0, 1]]),]
)
@pytest.mark.parametrize("sample_rate", [8000, 16000, 22500])
def test_audio_segment_from_file(self, tmpdir, num_channels, channel_selectors, sample_rate):
"""Test loading and audio signal from a file.
"""
signal_len_sec = 4
num_samples = signal_len_sec * sample_rate
num_examples = 10
rtol, atol = 1e-5, 1e-6
for n in range(num_examples):
# Create a test vector
audio_file = os.path.join(tmpdir, f'test_audio_{n:02}.wav')
samples = np.random.randn(num_samples, num_channels)
sf.write(audio_file, samples, sample_rate, 'float')
for channel_selector in channel_selectors:
if channel_selector is None:
ref_samples = samples
elif isinstance(channel_selector, int) or isinstance(channel_selector, list):
ref_samples = samples[:, channel_selector]
elif channel_selector == 'average':
ref_samples = np.mean(samples, axis=1)
else:
raise ValueError(f'Unexpected value of channel_selector {channel_selector}')
# 1) Load complete audio
# Reference
ref_samples = ref_samples.squeeze()
ref_channels = 1 if ref_samples.ndim == 1 else ref_samples.shape[1]
# UUT
audio_segment = AudioSegment.from_file(audio_file, channel_selector=channel_selector)
# Test
assert (
audio_segment.sample_rate == sample_rate
), f'channel_selector {channel_selector}, sample rate not matching: {audio_segment.sample_rate} != {sample_rate}'
assert (
audio_segment.num_channels == ref_channels
), f'channel_selector {channel_selector}, num channels not matching: {audio_segment.num_channels} != {ref_channels}'
assert audio_segment.num_samples == len(
ref_samples
), f'channel_selector {channel_selector}, num samples not matching: {audio_segment.num_samples} != {len(ref_samples)}'
assert np.allclose(
audio_segment.samples, ref_samples, rtol=rtol, atol=atol
), f'channel_selector {channel_selector}, samples not matching'
# 2) Load a random segment
offset = 0.45 * np.random.rand() * signal_len_sec
duration = 0.45 * np.random.rand() * signal_len_sec
# Reference
start = int(offset * sample_rate)
end = start + int(duration * sample_rate)
ref_samples = ref_samples[start:end, ...]
# UUT
audio_segment = AudioSegment.from_file(
audio_file, offset=offset, duration=duration, channel_selector=channel_selector
)
# Test
assert (
audio_segment.sample_rate == sample_rate
), f'channel_selector {channel_selector}, offset {offset}, duration {duration}, sample rate not matching: {audio_segment.sample_rate} != {sample_rate}'
assert (
audio_segment.num_channels == ref_channels
), f'channel_selector {channel_selector}, offset {offset}, duration {duration}, num channels not matching: {audio_segment.num_channels} != {ref_channels}'
assert audio_segment.num_samples == len(
ref_samples
), f'channel_selector {channel_selector}, offset {offset}, duration {duration}, num samples not matching: {audio_segment.num_samples} != {len(ref_samples)}'
assert np.allclose(
audio_segment.samples, ref_samples, rtol=rtol, atol=atol
), f'channel_selector {channel_selector}, offset {offset}, duration {duration}, samples not matching'
@pytest.mark.unit
@pytest.mark.parametrize(
"num_channels, channel_selectors", [(1, [None, 'average', 0]), (3, [None, 'average', 0, 1, [0, 1]]),]
)
@pytest.mark.parametrize("offset", [0, 1.5])
@pytest.mark.parametrize("duration", [1, 2])
def test_audio_segment_multichannel_with_list(self, tmpdir, num_channels, channel_selectors, offset, duration):
"""Test loading an audio signal from a list of single-channel files.
"""
sample_rate = 16000
signal_len_sec = 5
num_samples = signal_len_sec * sample_rate
rtol, atol = 1e-5, 1e-6
# Random samples
samples = np.random.rand(num_samples, num_channels)
# Save audio
audio_files = []
for m in range(num_channels):
a_file = os.path.join(tmpdir, f'ch_{m}.wav')
sf.write(a_file, samples[:, m], sample_rate)
audio_files.append(a_file)
mc_file = os.path.join(tmpdir, f'mc.wav')
sf.write(mc_file, samples, sample_rate)
for channel_selector in channel_selectors:
# UUT: loading audio from a list of files
uut_segment = AudioSegment.from_file(
audio_file=audio_files, offset=offset, duration=duration, channel_selector=channel_selector
)
# Reference: load from the original file
ref_segment = AudioSegment.from_file(
audio_file=mc_file, offset=offset, duration=duration, channel_selector=channel_selector
)
# Check
assert (
uut_segment.sample_rate == ref_segment.sample_rate
), f'channel_selector {channel_selector}: expecting {ref_segment.sample_rate}, but UUT segment has {uut_segment.sample_rate}'
assert (
uut_segment.num_samples == ref_segment.num_samples
), f'channel_selector {channel_selector}: expecting {ref_segment.num_samples}, but UUT segment has {uut_segment.num_samples}'
assert np.allclose(
uut_segment.samples, ref_segment.samples, rtol=rtol, atol=atol
), f'channel_selector {channel_selector}: samples not matching'
# Try to get a channel that is out of range.
with pytest.raises(RuntimeError, match="Channel cannot be selected"):
AudioSegment.from_file(audio_file=audio_files, channel_selector=num_channels)
if num_channels > 1:
# Try to load a list of multichannel files
# This is expected to fail since we only support loading a single-channel signal
# from each file when audio_file is a list
with pytest.raises(RuntimeError, match="Expecting a single-channel audio signal"):
AudioSegment.from_file(audio_file=[mc_file, mc_file])
with pytest.raises(RuntimeError, match="Expecting a single-channel audio signal"):
AudioSegment.from_file(audio_file=[mc_file, mc_file], channel_selector=0)
@pytest.mark.unit
@pytest.mark.parametrize("target_sr", [8000, 16000])
def test_audio_segment_trim_match(self, tmpdir, target_sr):
"""Test loading and audio signal from a file matches when using a path and a list
for different target_sr, int_values and trim setups.
"""
sample_rate = 24000
signal_len_sec = 2
num_samples = signal_len_sec * sample_rate
num_examples = 10
rtol, atol = 1e-5, 1e-6
TrimSetup = namedtuple("TrimSetup", "ref top_db frame_length hop_length")
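        # These fields presumably map to librosa.effects.trim arguments: `ref` is the reference
        # amplitude, `top_db` is the threshold (in dB) below the reference that counts as silence,
        # and `frame_length`/`hop_length` control the analysis frames used for trimming.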
trim_setups = []
trim_setups.append(TrimSetup(np.max, 10, 2048, 1024))
trim_setups.append(TrimSetup(1.0, 35, 2048, 1024))
trim_setups.append(TrimSetup(0.8, 45, 2048, 1024))
for n in range(num_examples):
# Create a test vector
audio_file = os.path.join(tmpdir, f'test_audio_{n:02}.wav')
samples = np.random.randn(num_samples)
# normalize
samples = samples / np.max(samples)
# apply random scaling and window to have some samples cut by trim
samples = np.random.rand() * np.hanning(num_samples) * samples
sf.write(audio_file, samples, sample_rate, 'float')
for trim_setup in trim_setups:
# UUT 1: load from a path
audio_segment_1 = AudioSegment.from_file(
audio_file,
target_sr=target_sr,
trim=True,
trim_ref=trim_setup.ref,
trim_top_db=trim_setup.top_db,
trim_frame_length=trim_setup.frame_length,
trim_hop_length=trim_setup.hop_length,
)
# UUT 2: load from a list
audio_segment_2 = AudioSegment.from_file(
[audio_file],
target_sr=target_sr,
trim=True,
trim_ref=trim_setup.ref,
trim_top_db=trim_setup.top_db,
trim_frame_length=trim_setup.frame_length,
trim_hop_length=trim_setup.hop_length,
)
# Test
assert audio_segment_1 == audio_segment_2, f'trim setup {trim_setup}, loaded segments not matching'
class TestSelectChannels:
num_samples = 1000
max_diff_tol = 1e-9
@pytest.mark.unit
@pytest.mark.parametrize("channel_selector", [None, 'average', 0, 1, [0, 1]])
def test_single_channel_input(self, channel_selector: Type[Union[str, int, List[int]]]):
"""Cover the case with single-channel input signal.
Channel selector should not do anything in this case.
"""
golden_out = signal_in = np.random.rand(self.num_samples)
if channel_selector not in [None, 0, 'average']:
# Expect a failure if looking for a different channel when input is 1D
with pytest.raises(ValueError):
# UUT
signal_out = select_channels(signal_in, channel_selector)
else:
# UUT
signal_out = select_channels(signal_in, channel_selector)
# Check difference
max_diff = np.max(np.abs(signal_out - golden_out))
assert max_diff < self.max_diff_tol
@pytest.mark.unit
@pytest.mark.parametrize("num_channels", [2, 4])
@pytest.mark.parametrize("channel_selector", [None, 'average', 0, [1], [0, 1]])
def test_multi_channel_input(self, num_channels: int, channel_selector: Type[Union[str, int, List[int]]]):
"""Cover the case with multi-channel input signal and single-
or multi-channel output.
"""
num_samples = 1000
signal_in = np.random.rand(self.num_samples, num_channels)
# calculate golden output
if channel_selector is None:
golden_out = signal_in
elif channel_selector == 'average':
golden_out = np.mean(signal_in, axis=1)
else:
golden_out = signal_in[:, channel_selector].squeeze()
# UUT
signal_out = select_channels(signal_in, channel_selector)
# Check difference
max_diff = np.max(np.abs(signal_out - golden_out))
assert max_diff < self.max_diff_tol
@pytest.mark.unit
@pytest.mark.parametrize("num_channels", [1, 2])
@pytest.mark.parametrize("channel_selector", [2, [1, 2]])
def test_select_more_channels_than_available(
self, num_channels: int, channel_selector: Type[Union[str, int, List[int]]]
):
"""This test is expecting the UUT to fail because we ask for more channels
than available in the input signal.
"""
num_samples = 1000
signal_in = np.random.rand(self.num_samples, num_channels)
# expect failure since we ask for more channels than available
with pytest.raises(ValueError):
# UUT
signal_out = select_channels(signal_in, channel_selector)
class TestGenerateApproximateNoiseField:
@pytest.mark.unit
@pytest.mark.parametrize('num_mics', [5])
@pytest.mark.parametrize('mic_spacing', [0.05])
@pytest.mark.parametrize('fft_length', [512, 2048])
@pytest.mark.parametrize('sample_rate', [8000, 16000])
@pytest.mark.parametrize('field', ['spherical'])
def test_theoretical_coherence_matrix(
self, num_mics: int, mic_spacing: float, fft_length: int, sample_rate: float, field: str
):
"""Test calculation of a theoretical coherence matrix.
"""
# test setup
max_diff_tol = 1e-9
# golden reference: spherical coherence
num_subbands = fft_length // 2 + 1
angular_freq = 2 * np.pi * sample_rate * np.arange(0, num_subbands) / fft_length
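        # For an ideal spherically isotropic (diffuse) noise field, the coherence between
        # microphones p and q is sinc(omega * d_pq / c), where d_pq is the inter-microphone
        # distance and c is the speed of sound. np.sinc(x) computes sin(pi*x)/(pi*x), hence
        # the division by pi in the loop below.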
golden_coherence = np.zeros((num_subbands, num_mics, num_mics))
for p in range(num_mics):
for q in range(num_mics):
if p == q:
golden_coherence[:, p, q] = 1.0
else:
if field == 'spherical':
dist_pq = abs(p - q) * mic_spacing
sinc_arg = angular_freq * dist_pq / sound_velocity
golden_coherence[:, p, q] = np.sinc(sinc_arg / np.pi)
else:
raise NotImplementedError(f'Field {field} not supported.')
        # assume a linear array
mic_positions = np.zeros((num_mics, 3))
mic_positions[:, 0] = mic_spacing * np.arange(num_mics)
# UUT
uut_coherence = theoretical_coherence(
mic_positions, sample_rate=sample_rate, fft_length=fft_length, field='spherical'
)
# Check difference
max_diff = np.max(np.abs(uut_coherence - golden_coherence))
assert max_diff < max_diff_tol
@pytest.mark.unit
@pytest.mark.parametrize('num_mics', [5])
@pytest.mark.parametrize('mic_spacing', [0.10])
@pytest.mark.parametrize('fft_length', [256, 512])
@pytest.mark.parametrize('sample_rate', [8000, 16000])
@pytest.mark.parametrize('field', ['spherical'])
def test_generate_approximate_noise_field(
self,
num_mics: int,
mic_spacing: float,
fft_length: int,
sample_rate: float,
field: str,
save_figures: bool = False,
):
"""Test approximate noise field with white noise as the input noise.
"""
duration_in_sec = 20
relative_mse_tol_dB = -30
relative_mse_tol = 10 ** (relative_mse_tol_dB / 10)
num_samples = sample_rate * duration_in_sec
noise_signal = np.random.rand(num_samples, num_mics)
# random channel-wise power scaling
noise_signal *= np.random.randn(num_mics)
        # assume a linear array
mic_positions = np.zeros((num_mics, 3))
mic_positions[:, 0] = mic_spacing * np.arange(num_mics)
# UUT
noise_field = generate_approximate_noise_field(
mic_positions, noise_signal, sample_rate=sample_rate, field=field, fft_length=fft_length
)
# Compare the estimated coherence with the theoretical coherence
# reference
golden_coherence = theoretical_coherence(
mic_positions, sample_rate=sample_rate, field=field, fft_length=fft_length
)
# estimated
N = librosa.stft(noise_field.transpose(), n_fft=fft_length)
# (channel, subband, frame) -> (subband, frame, channel)
N = N.transpose(1, 2, 0)
uut_coherence = estimated_coherence(N)
# Check difference
relative_mse_real = np.mean((uut_coherence.real - golden_coherence) ** 2)
assert relative_mse_real < relative_mse_tol
relative_mse_imag = np.mean((uut_coherence.imag) ** 2)
assert relative_mse_imag < relative_mse_tol
if save_figures:
            # Template for debugging and visualization
figure_dir = os.path.expanduser('~/_coherence')
if not os.path.exists(figure_dir):
os.mkdir(figure_dir)
freq = librosa.fft_frequencies(sr=sample_rate, n_fft=fft_length)
freq = freq / 1e3 # kHz
plt.figure(figsize=(7, 10))
for n in range(1, num_mics):
plt.subplot(num_mics - 1, 2, 2 * n - 1)
plt.plot(freq, golden_coherence[:, 0, n].real, label='golden')
plt.plot(freq, uut_coherence[:, 0, n].real, label='estimated')
plt.title(f'Real(coherence), p=0, q={n}')
plt.xlabel('f / kHz')
plt.grid()
plt.legend(loc='upper right')
plt.subplot(num_mics - 1, 2, 2 * n)
plt.plot(golden_coherence[:, 0, n].imag, label='golden')
plt.plot(uut_coherence[:, 0, n].imag, label='estimated')
plt.title(f'Imag(coherence), p=0, q={n}')
plt.xlabel('f / kHz')
plt.grid()
plt.legend(loc='upper right')
plt.tight_layout()
plt.savefig(
os.path.join(
figure_dir, f'num_mics_{num_mics}_sample_rate_{sample_rate}_fft_length_{fft_length}_{field}.png'
)
)
plt.close()
class TestAudioUtilsElements:
@pytest.mark.unit
def test_rms(self):
"""Test RMS calculation
"""
# setup
A = np.random.rand()
omega = 100
n_points = 1000
rms_threshold = 1e-4
# prep data
t = np.linspace(0, 2 * np.pi, n_points)
x = A * np.cos(2 * np.pi * omega * t)
# test
x_rms = rms(x)
golden_rms = A / np.sqrt(2)
assert (
np.abs(x_rms - golden_rms) < rms_threshold
), f'RMS not matching for A={A}, omega={omega}, n_point={n_points}'
@pytest.mark.unit
def test_db_conversion(self):
"""Test conversions to and from dB.
"""
num_examples = 10
abs_threshold = 1e-6
mag = np.random.rand(num_examples)
mag_db = mag2db(mag)
assert all(np.abs(mag - 10 ** (mag_db / 20)) < abs_threshold)
assert all(np.abs(db2mag(mag_db) - 10 ** (mag_db / 20)) < abs_threshold)
assert all(np.abs(pow2db(mag ** 2) - mag_db) < abs_threshold)
@pytest.mark.unit
def test_get_segment_start(self):
random_seed = 42
num_examples = 50
num_samples = 2000
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_examples):
# Generate signal
signal = _rng.normal(size=num_samples)
# Random start in the first half
start = _rng.integers(low=0, high=num_samples // 2)
# Random length
end = _rng.integers(low=start, high=num_samples)
# Selected segment
segment = signal[start:end]
# UUT
estimated_start = get_segment_start(signal=signal, segment=segment)
assert (
estimated_start == start
), f'Example {n}: estimated start ({estimated_start}) not matching the actual start ({start})'
@pytest.mark.unit
def test_calculate_sdr_numpy(self):
atol = 1e-6
random_seed = 42
num_examples = 50
num_samples = 2000
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_examples):
# Generate signal
target = _rng.normal(size=num_samples)
# Adjust the estimate
golden_sdr = _rng.integers(low=-10, high=10)
estimate = target * (1 + 10 ** (-golden_sdr / 20))
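            # With estimate = target + target * 10 ** (-golden_sdr / 20), the error signal is
            # target * 10 ** (-golden_sdr / 20), so SDR = 20 * log10(||target|| / ||error||)
            # equals golden_sdr exactly.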
# UUT
estimated_sdr = calculate_sdr_numpy(estimate=estimate, target=target, remove_mean=False)
assert np.isclose(
estimated_sdr, golden_sdr, atol=atol
), f'Example {n}: estimated ({estimated_sdr}) not matching the actual value ({golden_sdr})'
# Add random mean and use remove_mean=True
# SDR should not change
target += _rng.uniform(low=-10, high=10)
estimate += _rng.uniform(low=-10, high=10)
# UUT
estimated_sdr = calculate_sdr_numpy(estimate=estimate, target=target, remove_mean=True)
assert np.isclose(
estimated_sdr, golden_sdr, atol=atol
), f'Example {n}: estimated ({estimated_sdr}) not matching the actual value ({golden_sdr})'
@pytest.mark.unit
def test_calculate_sdr_numpy_scale_invariant(self):
atol = 1e-6
random_seed = 42
num_examples = 50
num_samples = 2000
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_examples):
# Generate signal
target = _rng.normal(size=num_samples)
# Adjust the estimate
estimate = target + _rng.uniform(low=0.01, high=1) * _rng.normal(size=target.size)
# scaled target
target_scaled = target / (np.linalg.norm(target) + 1e-16)
target_scaled = np.sum(estimate * target_scaled) * target_scaled
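            # target_scaled is the orthogonal projection of the estimate onto span{target},
            # which is the reference signal used by the scale-invariant SDR definition.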
golden_sdr = calculate_sdr_numpy(
estimate=estimate, target=target_scaled, scale_invariant=False, remove_mean=False
)
# UUT
estimated_sdr = calculate_sdr_numpy(
estimate=estimate, target=target, scale_invariant=True, remove_mean=False
)
print(golden_sdr, estimated_sdr)
assert np.isclose(
estimated_sdr, golden_sdr, atol=atol
), f'Example {n}: estimated ({estimated_sdr}) not matching the actual value ({golden_sdr})'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 3])
@pytest.mark.parametrize('filter_length', [10])
@pytest.mark.parametrize('delay', [0, 5])
def test_convmtx_mc(self, num_channels: int, filter_length: int, delay: int):
"""Test convmtx against convolve and sum.
Multiplication of convmtx_mc of input with a vectorized multi-channel filter
should match the sum of convolution of each input channel with the corresponding
filter.
"""
atol = 1e-6
random_seed = 42
num_examples = 10
num_samples = 2000
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_examples):
x = _rng.normal(size=(num_samples, num_channels))
f = _rng.normal(size=(filter_length, num_channels))
CM = convmtx_mc_numpy(x=x, filter_length=filter_length, delay=delay)
# Multiply convmtx_mc with the vectorized filter
uut = CM @ f.transpose().reshape(-1, 1)
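            # f.transpose().reshape(-1, 1) stacks the per-channel filters into one column
            # vector [f_ch0; f_ch1; ...], which presumably matches the channel-block column
            # layout produced by convmtx_mc_numpy.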
uut = uut.squeeze(1)
# Calculate reference as sum of convolutions
golden_ref = 0
for m in range(num_channels):
x_m_delayed = np.hstack([np.zeros(delay), x[:, m]])
golden_ref += np.convolve(x_m_delayed, f[:, m], mode='full')[: len(x)]
assert np.allclose(uut, golden_ref, atol=atol), f'Example {n}: UUT not matching the reference.'
@pytest.mark.unit
@pytest.mark.parametrize('num_channels', [1, 3])
@pytest.mark.parametrize('filter_length', [10])
@pytest.mark.parametrize('num_samples', [10, 100])
def test_toeplitz(self, num_channels: int, filter_length: int, num_samples: int):
"""Test construction of a Toeplitz matrix for a given signal.
"""
atol = 1e-6
random_seed = 42
num_batches = 10
batch_size = 8
_rng = np.random.default_rng(seed=random_seed)
for n in range(num_batches):
x = _rng.normal(size=(batch_size, num_channels, num_samples))
# Construct Toeplitz matrix
Tx = toeplitz(x=torch.tensor(x))
# Compare against the reference
for b in range(batch_size):
for m in range(num_channels):
T_ref = scipy.linalg.toeplitz(x[b, m, ...])
assert np.allclose(
Tx[b, m, ...].cpu().numpy(), T_ref, atol=atol
                ), f'Example {n}: not matching the reference for (b={b}, m={m}).'
| NeMo-main | tests/collections/asr/utils/test_audio_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.parts.utils.data_simulation_utils import (
DataAnnotator,
SpeechSampler,
add_silence_to_alignments,
binary_search_alignments,
get_cleaned_base_path,
get_split_points_in_alignments,
normalize_audio,
read_noise_manifest,
)
@pytest.fixture()
def annotator():
cfg = get_data_simulation_configs()
return DataAnnotator(cfg)
@pytest.fixture()
def sampler():
cfg = get_data_simulation_configs()
sampler = SpeechSampler(cfg)
# Must get session-wise randomized silence/overlap mean
sampler.get_session_overlap_mean()
sampler.get_session_silence_mean()
return sampler
def get_data_simulation_configs():
config_dict = {
'data_simulator': {
'manifest_filepath': '???',
'sr': 16000,
'random_seed': 42,
'multiprocessing_chunksize': 10000,
'session_config': {'num_speakers': 4, 'num_sessions': 60, 'session_length': 600},
'session_params': {
'max_audio_read_sec': 20,
'sentence_length_params': [0.4, 0.05],
'dominance_var': 0.11,
'min_dominance': 0.05,
'turn_prob': 0.875,
'min_turn_prob': 0.5,
'mean_silence': 0.15,
'mean_silence_var': 0.01,
'per_silence_var': 900,
'per_silence_min': 0.0,
'per_silence_max': -1,
'mean_overlap': 0.1,
'mean_overlap_var': 0.01,
'per_overlap_var': 900,
'per_overlap_min': 0.0,
'per_overlap_max': -1,
'start_window': True,
'window_type': 'hamming',
'window_size': 0.05,
'start_buffer': 0.1,
'split_buffer': 0.1,
'release_buffer': 0.1,
'normalize': True,
'normalization_type': 'equal',
'normalization_var': 0.1,
'min_volume': 0.75,
'max_volume': 1.25,
'end_buffer': 0.5,
},
'outputs': {
'output_dir': '???',
'output_filename': 'multispeaker_session',
'overwrite_output': True,
'output_precision': 3,
},
'background_noise': {
'add_bg': False,
'background_manifest': None,
'num_noise_files': 10,
'snr': 60,
'snr_min': None,
},
'segment_augmentor': {
'add_seg_aug': False,
'augmentor': {'gain': {'prob': 0.5, 'min_gain_dbfs': -10.0, 'max_gain_dbfs': 10.0},},
},
'session_augmentor': {
'add_sess_aug': False,
'augmentor': {'white_noise': {'prob': 1.0, 'min_level': -90, 'max_level': -46},},
},
'speaker_enforcement': {'enforce_num_speakers': True, 'enforce_time': [0.25, 0.75]},
'segment_manifest': {'window': 0.5, 'shift': 0.25, 'step_count': 50, 'deci': 3},
}
}
return DictConfig(config_dict)
def generate_words_and_alignments(sample_index):
if sample_index == 0:
words = ['', 'hello', 'world']
alignments = [0.5, 1.0, 1.5]
elif sample_index == 1:
words = ["", "stephanos", "dedalos", ""]
alignments = [0.51, 1.31, 2.04, 2.215]
elif sample_index == 2:
words = ['', 'hello', 'world', '', 'welcome', 'to', 'nemo', '']
alignments = [0.5, 1.0, 1.5, 1.7, 1.8, 2.2, 2.7, 2.8]
else:
raise ValueError(f"sample_index {sample_index} not supported")
speaker_id = 'speaker_0'
return words, alignments, speaker_id
class TestDataSimulatorUtils:
# TODO: add tests for all util functions
@pytest.mark.parametrize("max_audio_read_sec", [2.5, 3.5, 4.5])
@pytest.mark.parametrize("min_alignment_count", [2, 3, 4])
def test_binary_search_alignments(self, max_audio_read_sec, min_alignment_count):
inds = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
alignments = [0.5, 11.0, 11.5, 12.0, 13.0, 14.0, 14.5, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 30, 40.0]
offset_max = binary_search_alignments(inds, max_audio_read_sec, min_alignment_count, alignments)
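        # The returned offset should leave at least `max_audio_read_sec` of audio between the
        # selected alignment and the `min_alignment_count`-th alignment from the end.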
assert max_audio_read_sec <= alignments[-1 * min_alignment_count] - alignments[inds[offset_max]]
@pytest.mark.parametrize("sample_len", [100, 16000])
@pytest.mark.parametrize("gain", [0.1, 0.5, 1.0, 2.0, 5.0])
def test_normalize_audio(self, sample_len, gain):
array_raw = np.random.randn(sample_len)
array_input = torch.from_numpy(gain * array_raw / np.max(np.abs(array_raw)))
norm_array = normalize_audio(array_input)
assert torch.max(torch.abs(norm_array)) == 1.0
assert torch.min(torch.abs(norm_array)) < 1.0
@pytest.mark.parametrize("output_dir", [os.path.join(os.getcwd(), "test_dir")])
def test_get_cleaned_base_path(self, output_dir):
result_path = get_cleaned_base_path(output_dir, overwrite_output=True)
assert os.path.exists(result_path) and not os.path.isfile(result_path)
result_path = get_cleaned_base_path(output_dir, overwrite_output=False)
assert os.path.exists(result_path) and not os.path.isfile(result_path)
os.rmdir(result_path)
assert not os.path.exists(result_path)
@pytest.mark.parametrize(
"words, alignments, answers",
[
(['', 'hello', 'world'], [0.5, 1.0, 1.5], [[0, 16000.0]]),
(
['', 'hello', 'world', '', 'welcome', 'to', 'nemo', ''],
[0.27, 1.0, 1.7, 2.7, 2.8, 3.2, 3.7, 3.9],
[[0, (1.7 + 0.5) * 16000], [(2.7 - 0.5) * 16000, (3.9 - 0.27) * 16000]],
),
],
)
@pytest.mark.parametrize("sr", [16000])
@pytest.mark.parametrize("split_buffer", [0.5])
@pytest.mark.parametrize("new_start", [0.0])
def test_get_split_points_in_alignments(self, words, alignments, sr, new_start, split_buffer, answers):
sentence_audio_len = sr * (alignments[-1] - alignments[0])
splits = get_split_points_in_alignments(words, alignments, split_buffer, sr, sentence_audio_len, new_start)
assert len(splits) == len(answers)
for k, interval in enumerate(splits):
assert abs(answers[k][0] - interval[0]) < 1e-4
assert abs(answers[k][1] - interval[1]) < 1e-4
@pytest.mark.parametrize(
"alignments, words", [(['hello', 'world'], [1.0, 1.5]), (['', 'hello', 'world'], [0.0, 1.0, 1.5])]
)
def test_add_silence_to_alignments(self, alignments, words):
"""
Test add_silence_to_alignments function.
"""
audio_manifest = {
'audio_filepath': 'test.wav',
'alignments': alignments,
'words': words,
}
audio_manifest = add_silence_to_alignments(audio_manifest)
if words[0] == '':
assert audio_manifest['alignments'] == [0.0] + alignments
assert audio_manifest['words'] == [''] + words
else:
assert audio_manifest['alignments'] == alignments
assert audio_manifest['words'] == words
class TestDataAnnotator:
def test_init(self, annotator):
assert isinstance(annotator, DataAnnotator)
def test_create_new_rttm_entry(self, annotator):
words, alignments, speaker_id = generate_words_and_alignments(sample_index=0)
start, end = alignments[0], alignments[-1]
rttm_list = annotator.create_new_rttm_entry(
words=words, alignments=alignments, start=start, end=end, speaker_id=speaker_id
)
assert rttm_list[0] == f"{start} {end} {speaker_id}"
def test_create_new_json_entry(self, annotator):
words, alignments, speaker_id = generate_words_and_alignments(sample_index=0)
start, end = alignments[0], alignments[-1]
test_wav_filename = '/path/to/test_wav_filename.wav'
test_rttm_filename = '/path/to/test_rttm_filename.rttm'
test_ctm_filename = '/path/to/test_ctm_filename.ctm'
text = " ".join(words)
one_line_json_dict = annotator.create_new_json_entry(
text=text,
wav_filename=test_wav_filename,
start=start,
length=end - start,
speaker_id=speaker_id,
rttm_filepath=test_rttm_filename,
ctm_filepath=test_ctm_filename,
)
start = round(float(start), annotator._params.data_simulator.outputs.output_precision)
length = round(float(end - start), annotator._params.data_simulator.outputs.output_precision)
meta = {
"audio_filepath": test_wav_filename,
"offset": start,
"duration": length,
"label": speaker_id,
"text": text,
"num_speakers": annotator._params.data_simulator.session_config.num_speakers,
"rttm_filepath": test_rttm_filename,
"ctm_filepath": test_ctm_filename,
"uem_filepath": None,
}
assert one_line_json_dict == meta
def test_create_new_ctm_entry(self, annotator):
words, alignments, speaker_id = generate_words_and_alignments(sample_index=0)
start = alignments[0]
session_name = 'test_session'
ctm_list = annotator.create_new_ctm_entry(
words=words, alignments=alignments, session_name=session_name, speaker_id=speaker_id, start=start
)
assert ctm_list[0] == (
alignments[1],
f"{session_name} {speaker_id} {alignments[1]} {alignments[1]-alignments[0]} {words[1]} 0\n",
)
assert ctm_list[1] == (
alignments[2],
f"{session_name} {speaker_id} {alignments[2]} {alignments[2]-alignments[1]} {words[2]} 0\n",
)
class TestSpeechSampler:
def test_init(self, sampler):
assert isinstance(sampler, SpeechSampler)
def test_init_overlap_params(self, sampler):
sampler._init_overlap_params()
assert sampler.per_silence_min_len is not None
assert sampler.per_silence_max_len is not None
assert type(sampler.per_silence_min_len) == int
assert type(sampler.per_silence_max_len) == int
def test_init_silence_params(self, sampler):
sampler._init_overlap_params()
assert sampler.per_overlap_min_len is not None
assert sampler.per_overlap_max_len is not None
assert type(sampler.per_overlap_min_len) == int
assert type(sampler.per_overlap_max_len) == int
@pytest.mark.parametrize("mean", [0.1, 0.2, 0.3])
@pytest.mark.parametrize("var", [0.05, 0.07])
def test_get_session_silence_mean_pass(self, sampler, mean, var):
sampler.mean_silence = mean
sampler.mean_silence_var = var
sampled_silence_mean = sampler.get_session_silence_mean()
assert 0 <= sampled_silence_mean <= 1
@pytest.mark.parametrize("mean", [0.5])
@pytest.mark.parametrize("var", [0.5, 0.6])
def test_get_session_silence_mean_fail(self, sampler, mean, var):
"""
This test should raise `ValueError` because `mean_silence_var`
should be less than `mean_silence * (1 - mean_silence)`.
"""
sampler.mean_silence = mean
sampler.mean_silence_var = var
with pytest.raises(ValueError) as execinfo:
sampler.get_session_silence_mean()
assert "ValueError" in str(execinfo) and "mean_silence_var" in str(execinfo)
@pytest.mark.parametrize("mean", [0.1, 0.2, 0.3])
@pytest.mark.parametrize("var", [0.05, 0.07])
def test_get_session_overlap_mean_pass(self, sampler, mean, var):
sampler.mean_overlap = mean
sampler.mean_overlap_var = var
sampled_overlap_mean = sampler.get_session_overlap_mean()
assert 0 <= sampled_overlap_mean <= 1
@pytest.mark.parametrize("mean", [0.4, 0.5])
@pytest.mark.parametrize("var", [0.3, 0.8])
def test_get_session_overlap_mean_fail(self, sampler, mean, var):
"""
This test should raise `ValueError` because `mean_overlap_var`
should be less than `mean_overlap * (1 - mean_overlap)`.
"""
sampler.mean_overlap = mean
sampler.mean_overlap_var = var
sampler._params = DictConfig(sampler._params)
with pytest.raises(ValueError) as execinfo:
sampler.get_session_overlap_mean()
assert "ValueError" in str(execinfo) and "mean_overlap_var" in str(execinfo)
@pytest.mark.parametrize("non_silence_len_samples", [16000, 32000])
@pytest.mark.parametrize("running_overlap_len_samples", [8000, 12000])
def test_sample_from_overlap_model(self, sampler, non_silence_len_samples, running_overlap_len_samples):
sampler.get_session_overlap_mean()
sampler.running_overlap_len_samples = running_overlap_len_samples
overlap_amount = sampler.sample_from_overlap_model(non_silence_len_samples=non_silence_len_samples)
assert type(overlap_amount) == int
assert 0 <= overlap_amount
@pytest.mark.parametrize("running_len_samples", [8000, 16000])
@pytest.mark.parametrize("running_overlap_len_samples", [8000, 12000])
def test_sample_from_silence_model(self, sampler, running_len_samples, running_overlap_len_samples):
sampler.get_session_silence_mean()
        sampler.running_overlap_len_samples = running_overlap_len_samples
silence_amount = sampler.sample_from_silence_model(running_len_samples=running_len_samples)
assert type(silence_amount) == int
assert 0 <= silence_amount
@pytest.mark.with_downloads()
@pytest.mark.parametrize("num_noise_files", [1, 2, 4])
def test_sample_noise_manifest(self, sampler, num_noise_files, test_data_dir):
sampler.num_noise_files = num_noise_files
manifest_path = os.path.abspath(os.path.join(test_data_dir, 'asr/an4_val.json'))
noise_manifest = read_noise_manifest(add_bg=True, background_manifest=manifest_path)
sampled_noise_manifests = sampler.sample_noise_manifest(noise_manifest=noise_manifest)
assert len(sampled_noise_manifests) == num_noise_files
@pytest.mark.parametrize("running_speech_len_samples", [32000, 64000])
@pytest.mark.parametrize("running_overlap_len_samples", [16000, 32000])
@pytest.mark.parametrize("running_len_samples", [64000, 96000])
@pytest.mark.parametrize("non_silence_len_samples", [16000, 32000])
def test_silence_vs_overlap_selector(
self,
sampler,
running_overlap_len_samples,
running_speech_len_samples,
running_len_samples,
non_silence_len_samples,
):
sampler.running_overlap_len_samples = running_overlap_len_samples
sampler.running_speech_len_samples = running_speech_len_samples
add_overlap = sampler.silence_vs_overlap_selector(
running_len_samples=running_len_samples, non_silence_len_samples=non_silence_len_samples
)
assert type(add_overlap) == bool
| NeMo-main | tests/collections/asr/utils/test_data_simul_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import lru_cache
import pytest
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.asr.metrics.wer import CTCDecoding, CTCDecodingConfig
from nemo.collections.asr.metrics.wer_bpe import CTCBPEDecoding, CTCBPEDecodingConfig
from nemo.collections.asr.parts.mixins import mixins
from nemo.collections.asr.parts.utils.rnnt_utils import Hypothesis
def char_vocabulary():
return [' ', 'a', 'b', 'c', 'd', 'e', 'f']
@pytest.fixture()
@lru_cache(maxsize=8)
def tmp_tokenizer(test_data_dir):
cfg = DictConfig({'dir': os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128"), 'type': 'wpe'})
class _TmpASRBPE(mixins.ASRBPEMixin):
def register_artifact(self, _, vocab_path):
return vocab_path
asrbpe = _TmpASRBPE()
asrbpe._setup_tokenizer(cfg)
return asrbpe.tokenizer
def check_char_timestamps(hyp: Hypothesis, decoding: CTCDecoding):
assert hyp.timestep is not None
assert isinstance(hyp.timestep, dict)
assert 'timestep' in hyp.timestep
assert 'char' in hyp.timestep
assert 'word' in hyp.timestep
words = hyp.text.split(decoding.word_seperator)
words = list(filter(lambda x: x != '', words))
assert len(hyp.timestep['word']) == len(words)
def check_subword_timestamps(hyp: Hypothesis, decoding: CTCBPEDecoding):
assert hyp.timestep is not None
assert isinstance(hyp.timestep, dict)
assert 'timestep' in hyp.timestep
assert 'char' in hyp.timestep
assert 'word' in hyp.timestep
chars = list(hyp.text)
chars = list(filter(lambda x: x not in ['', ' ', '#'], chars))
all_chars = [list(decoding.tokenizer.tokens_to_text(data['char'])) for data in hyp.timestep['char']]
all_chars = [char for subword in all_chars for char in subword]
all_chars = list(filter(lambda x: x not in ['', ' ', '#'], all_chars))
assert len(chars) == len(all_chars)
class TestCTCDecoding:
@pytest.mark.unit
def test_constructor(self):
cfg = CTCDecodingConfig()
vocab = char_vocabulary()
decoding = CTCDecoding(decoding_cfg=cfg, vocabulary=vocab)
assert decoding is not None
@pytest.mark.unit
def test_constructor_subword(self, tmp_tokenizer):
cfg = CTCBPEDecodingConfig()
decoding = CTCBPEDecoding(decoding_cfg=cfg, tokenizer=tmp_tokenizer)
assert decoding is not None
@pytest.mark.unit
def test_char_decoding_greedy_forward(self,):
cfg = CTCDecodingConfig(strategy='greedy')
vocab = char_vocabulary()
decoding = CTCDecoding(decoding_cfg=cfg, vocabulary=vocab)
B, T = 4, 20
V = len(char_vocabulary()) + 1
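        # +1 accounts for the CTC blank symbol appended to the character vocabulary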
input_signal = torch.randn(size=(B, T, V))
length = torch.randint(low=1, high=T, size=[B])
with torch.no_grad():
texts, _ = decoding.ctc_decoder_predictions_tensor(
input_signal, length, fold_consecutive=True, return_hypotheses=False
)
for text in texts:
assert isinstance(text, str)
@pytest.mark.unit
@pytest.mark.parametrize('alignments', [False, True])
@pytest.mark.parametrize('timestamps', [False, True])
def test_char_decoding_greedy_forward_hypotheses(self, alignments, timestamps):
cfg = CTCDecodingConfig(strategy='greedy', preserve_alignments=alignments, compute_timestamps=timestamps)
vocab = char_vocabulary()
decoding = CTCDecoding(decoding_cfg=cfg, vocabulary=vocab)
B, T = 4, 20
V = len(char_vocabulary()) + 1
input_signal = torch.randn(size=(B, T, V))
length = torch.randint(low=1, high=T, size=[B])
with torch.no_grad():
hyps, _ = decoding.ctc_decoder_predictions_tensor(
input_signal, length, fold_consecutive=True, return_hypotheses=True
)
for idx, hyp in enumerate(hyps):
assert isinstance(hyp, Hypothesis)
assert torch.is_tensor(hyp.y_sequence)
assert isinstance(hyp.text, str)
# alignments check
if alignments:
assert hyp.alignments is not None
assert isinstance(hyp.alignments, tuple)
assert len(hyp.alignments[0]) == length[idx]
assert len(hyp.alignments[1]) == length[idx]
# timestamps check
if timestamps:
check_char_timestamps(hyp, decoding)
@pytest.mark.unit
def test_subword_decoding_greedy_forward(self, tmp_tokenizer):
cfg = CTCBPEDecodingConfig(strategy='greedy')
decoding = CTCBPEDecoding(decoding_cfg=cfg, tokenizer=tmp_tokenizer)
B, T = 4, 20
V = decoding.tokenizer.tokenizer.vocab_size + 1
input_signal = torch.randn(size=(B, T, V))
length = torch.randint(low=1, high=T, size=[B])
with torch.no_grad():
texts, _ = decoding.ctc_decoder_predictions_tensor(
input_signal, length, fold_consecutive=True, return_hypotheses=False
)
for text in texts:
assert isinstance(text, str)
@pytest.mark.unit
@pytest.mark.parametrize('alignments', [False, True])
@pytest.mark.parametrize('timestamps', [False, True])
def test_subword_decoding_greedy_forward_hypotheses(self, tmp_tokenizer, alignments, timestamps):
cfg = CTCBPEDecodingConfig(strategy='greedy', preserve_alignments=alignments, compute_timestamps=timestamps)
decoding = CTCBPEDecoding(decoding_cfg=cfg, tokenizer=tmp_tokenizer)
B, T = 4, 20
V = decoding.tokenizer.tokenizer.vocab_size + 1
input_signal = torch.randn(size=(B, T, V))
length = torch.randint(low=1, high=T, size=[B])
with torch.no_grad():
hyps, _ = decoding.ctc_decoder_predictions_tensor(
input_signal, length, fold_consecutive=True, return_hypotheses=True
)
for idx, hyp in enumerate(hyps):
assert isinstance(hyp, Hypothesis)
assert torch.is_tensor(hyp.y_sequence)
assert isinstance(hyp.text, str)
# alignments check
if alignments:
assert hyp.alignments is not None
assert isinstance(hyp.alignments, tuple)
assert len(hyp.alignments[0]) == length[idx]
assert len(hyp.alignments[1]) == length[idx]
# timestamps check
if timestamps:
check_subword_timestamps(hyp, decoding)
| NeMo-main | tests/collections/asr/decoding/test_ctc_decoding.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from functools import lru_cache
import pytest
import torch
from omegaconf import DictConfig
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecoding, RNNTDecodingConfig
from nemo.collections.asr.metrics.rnnt_wer_bpe import RNNTBPEDecoding, RNNTBPEDecodingConfig
from nemo.collections.asr.models import ASRModel
from nemo.collections.asr.modules import RNNTDecoder, RNNTJoint
from nemo.collections.asr.parts.mixins import mixins
from nemo.collections.asr.parts.submodules import rnnt_beam_decoding as beam_decode
from nemo.collections.asr.parts.submodules import rnnt_greedy_decoding as greedy_decode
from nemo.collections.asr.parts.utils import rnnt_utils
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
NUMBA_RNNT_LOSS_AVAILABLE = numba_utils.numba_cpu_is_supported(
__NUMBA_MINIMUM_VERSION__
) or numba_utils.numba_cuda_is_supported(__NUMBA_MINIMUM_VERSION__)
def char_vocabulary():
return [' ', 'a', 'b', 'c', 'd', 'e', 'f']
@pytest.fixture()
@lru_cache(maxsize=8)
def tmp_tokenizer(test_data_dir):
cfg = DictConfig({'dir': os.path.join(test_data_dir, "asr", "tokenizers", "an4_wpe_128"), 'type': 'wpe'})
class _TmpASRBPE(mixins.ASRBPEMixin):
def register_artifact(self, _, vocab_path):
return vocab_path
asrbpe = _TmpASRBPE()
asrbpe._setup_tokenizer(cfg)
return asrbpe.tokenizer
@lru_cache(maxsize=2)
def get_rnnt_decoder(vocab_size, decoder_output_size=4):
prednet_cfg = {'pred_hidden': decoder_output_size, 'pred_rnn_layers': 1}
torch.manual_seed(0)
decoder = RNNTDecoder(prednet=prednet_cfg, vocab_size=vocab_size)
decoder.freeze()
return decoder
@lru_cache(maxsize=2)
def get_rnnt_joint(vocab_size, vocabulary=None, encoder_output_size=4, decoder_output_size=4, joint_output_shape=4):
jointnet_cfg = {
'encoder_hidden': encoder_output_size,
'pred_hidden': decoder_output_size,
'joint_hidden': joint_output_shape,
'activation': 'relu',
}
torch.manual_seed(0)
joint = RNNTJoint(jointnet_cfg, vocab_size, vocabulary=vocabulary)
joint.freeze()
return joint
@lru_cache(maxsize=1)
def get_model_encoder_output(data_dir, model_name):
# Import inside function to avoid issues with dependencies
import librosa
audio_filepath = os.path.join(data_dir, 'asr', 'test', 'an4', 'wav', 'cen3-fjlp-b.wav')
with torch.no_grad():
model = ASRModel.from_pretrained(model_name, map_location='cpu') # type: ASRModel
model.preprocessor.featurizer.dither = 0.0
model.preprocessor.featurizer.pad_to = 0
audio, sr = librosa.load(path=audio_filepath, sr=16000, mono=True)
input_signal = torch.tensor(audio, dtype=torch.float32).unsqueeze(0)
input_signal_length = torch.tensor([len(audio)], dtype=torch.int32)
encoded, encoded_len = model(input_signal=input_signal, input_signal_length=input_signal_length)
return model, encoded, encoded_len
def decode_text_from_greedy_hypotheses(hyps, decoding):
decoded_hyps = decoding.decode_hypothesis(hyps) # type: List[str]
return decoded_hyps
def decode_text_from_nbest_hypotheses(hyps, decoding):
hypotheses = []
all_hypotheses = []
for nbest_hyp in hyps: # type: rnnt_utils.NBestHypotheses
n_hyps = nbest_hyp.n_best_hypotheses # Extract all hypotheses for this sample
decoded_hyps = decoding.decode_hypothesis(n_hyps) # type: List[str]
hypotheses.append(decoded_hyps[0]) # best hypothesis
all_hypotheses.append(decoded_hyps)
return hypotheses, all_hypotheses
class TestRNNTDecoding:
@pytest.mark.unit
def test_constructor(self):
cfg = RNNTDecodingConfig()
vocab = char_vocabulary()
decoder = get_rnnt_decoder(vocab_size=len(vocab))
joint = get_rnnt_joint(vocab_size=len(vocab))
decoding = RNNTDecoding(decoding_cfg=cfg, decoder=decoder, joint=joint, vocabulary=vocab)
assert decoding is not None
@pytest.mark.unit
def test_constructor_subword(self, tmp_tokenizer):
cfg = RNNTDecodingConfig()
vocab = tmp_tokenizer.vocab
decoder = get_rnnt_decoder(vocab_size=len(vocab))
joint = get_rnnt_joint(vocab_size=len(vocab))
decoding = RNNTBPEDecoding(decoding_cfg=cfg, decoder=decoder, joint=joint, tokenizer=tmp_tokenizer)
assert decoding is not None
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.with_downloads
@pytest.mark.unit
def test_greedy_decoding_preserve_alignments(self, test_data_dir):
model, encoded, encoded_len = get_model_encoder_output(test_data_dir, 'stt_en_conformer_transducer_small')
beam = greedy_decode.GreedyRNNTInfer(
model.decoder,
model.joint,
blank_index=model.joint.num_classes_with_blank - 1,
max_symbols_per_step=5,
preserve_alignments=True,
)
enc_out = encoded
enc_len = encoded_len
with torch.no_grad():
hyps = beam(encoder_output=enc_out, encoded_lengths=enc_len)[0] # type: rnnt_utils.Hypothesis
hyp = decode_text_from_greedy_hypotheses(hyps, model.decoding)
hyp = hyp[0]
assert hyp.alignments is not None
        # The print statements below can be used to inspect the alignments
        # of other algorithms compared to the default
print("Text", hyp.text)
for t in range(len(hyp.alignments)):
t_u = []
for u in range(len(hyp.alignments[t])):
logp, label = hyp.alignments[t][u]
assert torch.is_tensor(logp)
assert torch.is_tensor(label)
t_u.append(int(label))
print(f"Tokens at timestep {t} = {t_u}")
print()
@pytest.mark.skipif(
not NUMBA_RNNT_LOSS_AVAILABLE, reason='RNNTLoss has not been compiled with appropriate numba version.',
)
@pytest.mark.with_downloads
@pytest.mark.unit
@pytest.mark.parametrize(
"beam_config",
[
{"search_type": "greedy"},
{"search_type": "default", "beam_size": 2,},
{"search_type": "alsd", "alsd_max_target_len": 0.5, "beam_size": 2,},
{"search_type": "tsd", "tsd_max_sym_exp_per_step": 3, "beam_size": 2,},
{"search_type": "maes", "maes_num_steps": 2, "maes_expansion_beta": 2, "beam_size": 2},
{"search_type": "maes", "maes_num_steps": 3, "maes_expansion_beta": 1, "beam_size": 2},
],
)
def test_beam_decoding_preserve_alignments(self, test_data_dir, beam_config):
beam_size = beam_config.pop("beam_size", 1)
model, encoded, encoded_len = get_model_encoder_output(test_data_dir, 'stt_en_conformer_transducer_small')
beam = beam_decode.BeamRNNTInfer(
model.decoder,
model.joint,
beam_size=beam_size,
return_best_hypothesis=False,
preserve_alignments=True,
**beam_config,
)
enc_out = encoded
enc_len = encoded_len
blank_id = torch.tensor(model.joint.num_classes_with_blank - 1, dtype=torch.int32)
with torch.no_grad():
hyps = beam(encoder_output=enc_out, encoded_lengths=enc_len)[0] # type: rnnt_utils.Hypothesis
hyp, all_hyps = decode_text_from_nbest_hypotheses(hyps, model.decoding)
hyp = hyp[0] # best hypothesis
all_hyps = all_hyps[0]
assert hyp.alignments is not None
if beam_config['search_type'] == 'alsd':
assert len(all_hyps) <= int(beam_config['alsd_max_target_len'] * float(enc_len[0]))
print("Beam search algorithm :", beam_config['search_type'])
        # The print statements below can be used to inspect the alignments
        # of other algorithms compared to the default
for idx, hyp_ in enumerate(all_hyps): # type: (int, rnnt_utils.Hypothesis)
print("Hyp index", idx + 1, "text :", hyp_.text)
# Alignment length (T) must match audio length (T)
assert abs(len(hyp_.alignments) - enc_len[0]) <= 1
for t in range(len(hyp_.alignments)):
t_u = []
for u in range(len(hyp_.alignments[t])):
logp, label = hyp_.alignments[t][u]
assert torch.is_tensor(logp)
assert torch.is_tensor(label)
t_u.append(int(label))
                # Blank token must be the last token in the current timestep
if len(t_u) > 1:
assert t_u[-1] == blank_id
# No blank token should be present in the current timestep other than at the end
for token in t_u[:-1]:
assert token != blank_id
print(f"Tokens at timestep {t} = {t_u}")
print()
assert len(hyp_.timestep) > 0
print("Timesteps", hyp_.timestep)
print()
| NeMo-main | tests/collections/asr/decoding/test_rnnt_decoding.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: the file name does not contain "test" on purpose to avoid executing
# these tests outside of the CI machine environment, where the test data is
# stored
import pytest
from examples.asr.transcribe_speech import TranscriptionConfig
from omegaconf import OmegaConf
from nemo.collections.asr.parts.utils.transcribe_utils import prepare_audio_data, setup_model
TEST_DATA_PATH = "/home/TestData/an4_dataset/an4_val.json"
PRETRAINED_MODEL_NAME = "stt_en_conformer_transducer_small"
def get_rnnt_alignments(strategy: str):
cfg = OmegaConf.structured(TranscriptionConfig(pretrained_name=PRETRAINED_MODEL_NAME))
cfg.rnnt_decoding.confidence_cfg.preserve_frame_confidence = True
cfg.rnnt_decoding.preserve_alignments = True
cfg.rnnt_decoding.strategy = strategy
cfg.dataset_manifest = TEST_DATA_PATH
filepaths = prepare_audio_data(cfg)[0][:10] # selecting 10 files only
model = setup_model(cfg, map_location="cuda")[0]
model.change_decoding_strategy(cfg.rnnt_decoding)
transcriptions = model.transcribe(
paths2audio_files=filepaths,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
return_hypotheses=True,
channel_selector=cfg.channel_selector,
)[0]
for transcription in transcriptions:
for align_elem, frame_confidence in zip(transcription.alignments, transcription.frame_confidence):
assert len(align_elem) == len(frame_confidence) # frame confidences have to match alignments
assert len(align_elem) > 0 # no empty alignments
for idx, pred in enumerate(align_elem):
if idx < len(align_elem) - 1:
assert pred[1].item() != model.decoder.blank_idx # all except last have to be non-blank
else:
assert pred[1].item() == model.decoder.blank_idx # last one has to be blank
return transcriptions
@pytest.fixture(autouse=True)
def cleanup_local_folder():
"""Overriding global fixture to make sure it's not applied for this test.
    Otherwise, there will be errors in the GitHub CI.
"""
return
# TODO: add the same tests for multi-blank RNNT decoding
def test_rnnt_alignments():
# using greedy as baseline and comparing all other configurations to it
ref_transcriptions = get_rnnt_alignments("greedy")
transcriptions = get_rnnt_alignments("greedy_batch")
# comparing that label sequence in alignments is exactly the same
# we can't compare logits as well, because they are expected to be
# slightly different in batched and single-sample mode
assert len(ref_transcriptions) == len(transcriptions)
for ref_transcription, transcription in zip(ref_transcriptions, transcriptions):
for ref_align_elem, align_elem in zip(ref_transcription.alignments, transcription.alignments):
assert len(ref_align_elem) == len(align_elem)
for ref_pred, pred in zip(ref_align_elem, align_elem):
assert ref_pred[1].item() == pred[1].item()
| NeMo-main | tests/collections/asr/decoding/rnnt_alignments_check.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from omegaconf import OmegaConf
from nemo.collections.asr.parts.numba.spec_augment import spec_aug_numba
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
def get_cfg(seed=0, dtype='float32', **kwargs):
# fmt: off
default = dict(b=2, f=80, t=20, device='cuda',
freq_masks=2, time_masks=2, freq_width=27, time_width=0.05, mask_value=0.0,
seed=seed, dtype=dtype)
default.update(**kwargs)
cfg = OmegaConf.create(default)
# fmt: on
return cfg
# fmt: off
def prepare_data(b, f, t, device='cuda', freq_masks=0, time_masks=0, freq_width=10, time_width=0.1,
seed=0, dtype='float32',
**kwargs):
torch.manual_seed(seed)
if dtype == 'float16':
dtype = torch.float16
else:
dtype = torch.float
x = torch.randn([b, f, t], dtype=dtype, device=device)
x_len = torch.randint(t, size=[b], device=x.device)
sh = x.shape
bs = sh[0]
if isinstance(time_width, int):
adaptive_temporal_width = False
else:
if time_width > 1.0 or time_width < 0.0:
            raise ValueError('If `time_width` is a float value, it must be in the range [0, 1]')
adaptive_temporal_width = True
orginal_time_width = time_width
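        # A float time_width is interpreted as a fraction of each example's valid length;
        # the per-example mask width in frames is derived from x_len further below.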
# Construct the freq and time masks as well as start positions
if freq_masks > 0:
freq_starts = torch.randint(0, sh[1] - freq_width + 1, size=[bs, freq_masks], device=x.device)
freq_lengths = torch.randint(0, freq_width + 1, size=[bs, freq_masks], device=x.device)
else:
freq_starts = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
freq_lengths = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
if time_masks > 0:
if adaptive_temporal_width:
time_width = (x_len * orginal_time_width).int().clamp(min=1)
else:
time_width = (
torch.tensor(orginal_time_width, dtype=torch.int32, device=x.device)
.unsqueeze(0)
.repeat(sh[0])
)
time_starts = []
time_lengths = []
for idx in range(sh[0]):
time_starts.append(
torch.randint(
0, max(1, x_len[idx] - time_width[idx]), size=[1, time_masks], device=x.device
)
)
time_lengths.append(
torch.randint(0, time_width[idx] + 1, size=[1, time_masks], device=x.device)
)
        time_starts = torch.cat(time_starts, 0)
time_lengths = torch.cat(time_lengths, 0)
else:
time_starts = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
time_lengths = torch.zeros([bs, 1], dtype=torch.int64, device=x.device)
output = dict(
x=x,
x_len=x_len,
freq_starts=freq_starts,
freq_lengths=freq_lengths,
time_starts=time_starts,
time_lengths=time_lengths,
sh=sh,
)
return output
# fmt: on
def launch_kernel(data, cfg):
# Launch CUDA kernel
# fmt: off
data['x'] = spec_aug_numba.launch_spec_augment_kernel(
x=data['x'], x_len=data['x_len'],
freq_starts=data['freq_starts'], freq_lengths=data['freq_lengths'],
time_starts=data['time_starts'], time_lengths=data['time_lengths'],
freq_masks=cfg.freq_masks, time_masks=cfg.time_masks, mask_value=cfg.mask_value
)
# fmt: on
def freq_mask_check(x, x_len, f_start, f_len, mask_value, bidx):
check_result = True
for fidx in range(f_start, f_start + f_len):
if not (x[bidx, fidx, :] == mask_value).all():
check_result = False
break
assert check_result
def time_mask_check(x, x_len, t_start, t_len, mask_value, bidx):
check_result = True
for tidx in range(t_start, t_start + t_len):
if tidx >= x_len[bidx]:
# this sample has smaller length than the time index of mask, ignore
continue
if not (x[bidx, :, tidx] == mask_value).all():
check_result = False
break
assert check_result
class TestSpecAugmentNumba:
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
@pytest.mark.parametrize('dtype', ['float16', 'float32'])
def test_spec_aug_kernel(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0, dtype=dtype)
cfg.freq_masks = 2
cfg.time_masks = 10
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
@pytest.mark.parametrize('dtype', ['float16', 'float32'])
def test_spec_aug_kernel_large_batch(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# Change max threads per block temporarily
original_buffer = spec_aug_numba.MAX_THREAD_BUFFER
spec_aug_numba.MAX_THREAD_BUFFER = 4
cfg = get_cfg(seed=0, dtype=dtype)
cfg.freq_masks = 2
cfg.time_masks = 10
cfg.b = spec_aug_numba.MAX_THREAD_BUFFER + 1
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
# Assert time masks are correct
for bidx in range(sh[0]):
for t_start, t_len in zip(data['time_starts'][bidx], data['time_lengths'][bidx]):
time_mask_check(x, x_len, t_start, t_len, mask_value=cfg.mask_value, bidx=bidx)
spec_aug_numba.MAX_THREAD_BUFFER = original_buffer
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_mask_value(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 2
cfg.time_masks = 10
cfg.mask_value = -1.0
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
# Assert time masks are correct
for bidx in range(sh[0]):
for t_start, t_len in zip(data['time_starts'][bidx], data['time_lengths'][bidx]):
time_mask_check(x, x_len, t_start, t_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_grad(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 2
cfg.time_masks = 10
data = prepare_data(**cfg)
launch_kernel(data, cfg)
result = data['x'] # inplace modification via kernel
y = torch.ones_like(result, requires_grad=True)
z = y + result
z.mean().backward()
assert y.grad is not None
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_no_freq_mask(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 0
cfg.time_masks = 10
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert time masks are correct
for bidx in range(sh[0]):
for t_start, t_len in zip(data['time_starts'][bidx], data['time_lengths'][bidx]):
time_mask_check(x, x_len, t_start, t_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_no_time_mask(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 2
cfg.time_masks = 0
data = prepare_data(**cfg)
launch_kernel(data, cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
# Assert freq masks are correct
for bidx in range(sh[0]):
for f_start, f_len in zip(data['freq_starts'][bidx], data['freq_lengths'][bidx]):
freq_mask_check(x, x_len, f_start, f_len, mask_value=cfg.mask_value, bidx=bidx)
@pytest.mark.unit
@pytest.mark.run_only_on('GPU')
def test_spec_aug_kernel_no_freq_time_mask(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cfg = get_cfg(seed=0)
cfg.freq_masks = 0
cfg.time_masks = 0
data = prepare_data(**cfg)
x, x_len, sh = data['x'], data['x_len'], data['sh']
x_copy = x.clone()
launch_kernel(data, cfg)
        # Assert no data edits occurred
assert (data['x'] - x_copy).abs().mean() <= 1e-9
| NeMo-main | tests/collections/asr/numba/spec_augment/test_spec_aug_numba.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import pytest
import torch
from nemo.collections.asr.losses.rnnt import MultiblankRNNTLossPytorch, RNNTLossPytorch, TDTLossPytorch
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_numpy import RNNTLoss as RNNTLoss_Numpy
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_pytorch import (
MultiblankRNNTLossNumba,
RNNTLossNumba,
TDTLossNumba,
)
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
DEVICES = ['cpu']
if torch.cuda.is_available():
DEVICES.append('cuda')
DTYPES = [np.float32]
if numba_utils.is_numba_cuda_fp16_supported():
DTYPES.append(np.float16)
def wrap_and_call(fn, acts, labels, device):
if not torch.is_tensor(acts):
acts = torch.tensor(acts)
if 'cuda' in device:
acts = acts.cuda()
if not acts.requires_grad:
acts.requires_grad = True
lengths = [acts.shape[1]] * acts.shape[0]
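    # all samples are treated as unpadded: every batch element uses the full T frames of the activations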
label_lengths = [len(l) for l in labels]
labels = torch.LongTensor(labels)
lengths = torch.LongTensor(lengths)
label_lengths = torch.LongTensor(label_lengths)
if 'cuda' in device:
labels = labels.cuda()
lengths = lengths.cuda()
label_lengths = label_lengths.cuda()
costs = fn(acts, labels, lengths, label_lengths)
cost = torch.sum(costs)
cost.backward()
if 'cuda' in device:
torch.cuda.synchronize()
if acts.grad is not None:
grad = acts.grad.data.cpu().numpy()
else:
grad = None
return costs.data.cpu().numpy(), grad
class TestRNNTLossPytorch:
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
def test_case_small(self, device, dtype):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
acts = np.array(
[
[
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.2, 0.1, 0.1], [0.7, 0.1, 0.2, 0.1, 0.1]],
]
]
).astype(dtype)
labels = [[1, 2]]
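        # acts follow the transducer joint convention [B, T, U+1, V]; here [1, 2, 3, 5] with U = 2 target symbols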
cost_threshold = 1e-8 if dtype == np.float32 else 5e-4
grad_threshold = 1e-8 if dtype == np.float32 else 1e-4
rtol = 1e-5 if dtype == np.float32 else 1e-3
fn_pt = RNNTLossNumba(blank=0, reduction='sum')
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_np = RNNTLoss_Numpy()
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
fn_ag = RNNTLossPytorch(blank=0, reduction='sum') # ag for automatic gradient computation
ag_cost, ag_grads = wrap_and_call(fn_ag, acts, labels, device)
expected_cost = 4.495666
expected_grads = np.array(
[
[
[
[-0.13116688, -0.3999269, 0.17703125, 0.17703125, 0.17703125],
[-0.18572757, 0.12247056, -0.18168412, 0.12247056, 0.12247056],
[-0.32091254, 0.06269141, 0.06928472, 0.12624499, 0.06269141],
],
[
[0.05456069, -0.21824276, 0.05456069, 0.05456069, 0.05456069],
[0.12073959, 0.12073959, -0.48295835, 0.12073959, 0.12073959],
[-0.6925882, 0.16871116, 0.18645467, 0.16871116, 0.16871116],
],
]
]
)
assert np.allclose(pt_cost, expected_cost, atol=cost_threshold, rtol=1e-6), "small_test costs mismatch."
assert np.allclose(pt_grads, expected_grads, atol=grad_threshold, rtol=rtol), "small_test gradient mismatch."
assert np.allclose(pt_cost, np_cost, atol=cost_threshold, rtol=rtol), "small_test costs mismatch."
assert np.allclose(pt_grads, np_grads, atol=grad_threshold, rtol=rtol), "small_test gradient mismatch."
assert np.allclose(ag_cost, np_cost, atol=cost_threshold, rtol=rtol), "small_test costs mismatch."
assert np.allclose(ag_grads, np_grads, atol=cost_threshold, rtol=rtol), "small_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
def test_case_small_random(self, device, dtype):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
cost_threshold = 1e-8 if dtype == np.float32 else 5e-4
grad_threshold = 1e-8 if dtype == np.float32 else 1e-4
rtol = 1e-5 if dtype == np.float32 else 1e-3
rng = np.random.RandomState(0)
acts = rng.randn(1, 4, 3, 3).astype(dtype)
labels = [[1, 2]]
fn_pt = RNNTLossNumba(blank=0, reduction='sum')
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_np = RNNTLoss_Numpy()
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
fn_ag = RNNTLossPytorch(blank=0, reduction='sum') # ag for automatic gradient computation
ag_cost, ag_grads = wrap_and_call(fn_ag, acts, labels, device)
assert np.allclose(pt_cost, np_cost, atol=cost_threshold, rtol=rtol), "small_random_test costs mismatch."
assert np.allclose(pt_grads, np_grads, atol=grad_threshold, rtol=rtol), "small_random_test gradient mismatch."
assert np.allclose(pt_cost, ag_cost, atol=cost_threshold, rtol=rtol), "small_random_test costs mismatch."
assert np.allclose(pt_grads, ag_grads, atol=grad_threshold, rtol=rtol), "small_random_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('fastemit_lambda', [1.0, 0.01, 0.00001])
def test_case_small_random_fastemit_reg(self, device, dtype, fastemit_lambda):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
rng = np.random.RandomState(0)
acts = rng.randn(1, 4, 3, 3)
labels = [[1, 2]]
fn_pt = RNNTLossNumba(blank=0, reduction='sum', fastemit_lambda=fastemit_lambda)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_np = RNNTLoss_Numpy(fastemit_lambda=fastemit_lambda)
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
assert np.allclose(pt_cost, np_cost, rtol=1e-6), "small_random_test costs mismatch."
assert np.allclose(pt_grads, np_grads, rtol=1e-5), "small_random_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
def test_case_big_tensor(self, device, dtype):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# minibatch x T x U x alphabet_size
activations = [
[
[
[0.06535690384862791, 0.7875301411923206, 0.08159176605666074],
[0.5297155426466327, 0.7506749639230854, 0.7541348379087998],
[0.6097641124736383, 0.8681404965673826, 0.6225318186056529],
],
[
[0.6685222872103057, 0.8580392805336061, 0.16453892311765583],
[0.989779515236694, 0.944298460961015, 0.6031678586829663],
[0.9467833543605416, 0.666202507295747, 0.28688179752461884],
],
[
[0.09418426230195986, 0.3666735970751962, 0.736168049462793],
[0.1666804425271342, 0.7141542198635192, 0.3993997272216727],
[0.5359823524146038, 0.29182076440286386, 0.6126422611507932],
],
[
[0.3242405528768486, 0.8007644367291621, 0.5241057606558068],
[0.779194617063042, 0.18331417220174862, 0.113745182072432],
[0.24022162381327106, 0.3394695622533106, 0.1341595066017014],
],
],
[
[
[0.5055615569388828, 0.051597282072282646, 0.6402903936686337],
[0.43073311517251, 0.8294731834714112, 0.1774668847323424],
[0.3207001991262245, 0.04288308912457006, 0.30280282975568984],
],
[
[0.6751777088333762, 0.569537369330242, 0.5584738347504452],
[0.08313242153985256, 0.06016544344162322, 0.10795752845152584],
[0.7486153608562472, 0.943918041459349, 0.4863558118797222],
],
[
[0.4181986264486809, 0.6524078485043804, 0.024242983423721887],
[0.13458171554507403, 0.3663418070512402, 0.2958297395361563],
[0.9236695822497084, 0.6899291482654177, 0.7418981733448822],
],
[
[0.25000547599982104, 0.6034295486281007, 0.9872887878887768],
[0.5926057265215715, 0.8846724004467684, 0.5434495396894328],
[0.6607698886038497, 0.3771277082495921, 0.3580209022231813],
],
],
]
expected_costs = [4.2806528590890736, 3.9384369822503591]
expected_grads = [
[
[
[-1.86843902e-01, -6.25548810e-02, 2.49398798e-01],
[-2.03376666e-01, 2.02399328e-01, 9.77333169e-04],
[-1.41016081e-01, 7.91234672e-02, 6.18926100e-02],
],
[
[-1.15517676e-02, -8.12802389e-02, 9.28319991e-02],
[-1.54257029e-01, 2.29432687e-01, -7.51756504e-02],
[-2.46593088e-01, 1.46404594e-01, 1.00188486e-01],
],
[
[-1.29182907e-02, -6.15932420e-02, 7.45115355e-02],
[-5.59857301e-02, 2.19830811e-01, -1.63845062e-01],
[-4.97626871e-01, 2.09239945e-01, 2.88386941e-01],
],
[
[1.36048580e-02, -3.02196294e-02, 1.66147724e-02],
[1.13924511e-01, 6.27811998e-02, -1.76705718e-01],
[-6.67078257e-01, 3.67658824e-01, 2.99419403e-01],
],
],
[
[
[-3.56343776e-01, -5.53474613e-02, 4.11691219e-01],
[-9.69219357e-02, 2.94591039e-02, 6.74628317e-02],
[-6.35175705e-02, 2.76544970e-02, 3.58630717e-02],
],
[
[-1.54499024e-01, -7.39420280e-02, 2.28441030e-01],
[-1.66789949e-01, -8.78955179e-05, 1.66877866e-01],
[-1.72369644e-01, 1.05565332e-01, 6.68043196e-02],
],
[
[2.38748826e-02, -1.18255816e-01, 9.43809375e-02],
[-1.04707085e-01, -1.08934477e-01, 2.13641584e-01],
[-3.69844258e-01, 1.80118099e-01, 1.89726159e-01],
],
[
[2.57137045e-02, -7.94617534e-02, 5.37480488e-02],
[1.22328237e-01, -2.38788679e-01, 1.16460443e-01],
[-5.98686993e-01, 3.02203178e-01, 2.96483815e-01],
],
],
]
activations = np.array(activations).astype(dtype)
labels = [[1, 2], [1, 1]]
cost_threshold = 1e-8 if dtype == np.float32 else 5e-4
grad_threshold = 1e-8 if dtype == np.float32 else 1e-4
rtol = 1e-3 if dtype == np.float32 else 0.1
fn_pt = RNNTLossNumba(blank=0, reduction='sum')
pt_costs, pt_grads = wrap_and_call(fn_pt, activations, labels, device)
fn_np = RNNTLoss_Numpy()
np_costs, np_grads = wrap_and_call(fn_np, activations, labels, device)
fn_ag = RNNTLossPytorch(blank=0, reduction='sum')
ag_costs, ag_grads = wrap_and_call(fn_ag, activations, labels, device)
assert np.allclose(pt_costs, sum(expected_costs), atol=cost_threshold), "big_test average costs mismatch."
assert np.allclose(
pt_grads, expected_grads, atol=grad_threshold, rtol=1e-3
), "big_test grads for average cost mismatch."
assert np.allclose(pt_costs, np_costs, atol=cost_threshold, rtol=rtol), "big_test average costs mismatch."
assert np.allclose(
pt_grads, np_grads, atol=grad_threshold, rtol=rtol
), "big_test grads for average cost mismatch."
assert np.allclose(pt_costs, ag_costs, atol=cost_threshold, rtol=rtol), "big_test average costs mismatch."
assert np.allclose(
pt_grads, ag_grads, atol=grad_threshold, rtol=rtol
), "big_test grads for average cost mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
def test_case_large_random(self, device, dtype):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
rng = np.random.RandomState(0)
acts = rng.randn(4, 8, 11, 5).astype(dtype)
labels = [
[1, 2, 4, 3, 2, 2, 1, 1, 1, 1],
[3, 2, 2, 3, 4, 1, 1, 1, 1, 1],
[4, 4, 1, 2, 1, 3, 4, 3, 1, 2],
[1, 1, 2, 1, 2, 3, 3, 1, 1, 1],
]
cost_threshold = 1e-8 if dtype == np.float32 else 5e-4
grad_threshold = 1e-8 if dtype == np.float32 else 1e-4
rtol = 1e-3 if dtype == np.float32 else 5e-2
fn_pt = RNNTLossNumba(blank=0, reduction='sum')
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_np = RNNTLoss_Numpy()
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
fn_ag = RNNTLossPytorch(blank=0, reduction='sum')
ag_cost, ag_grads = wrap_and_call(fn_ag, acts, labels, device)
assert np.allclose(pt_cost, np_cost, atol=cost_threshold, rtol=rtol), "large_random_test costs mismatch."
assert np.allclose(ag_cost, np_cost, atol=cost_threshold, rtol=rtol), "large_random_test costs mismatch."
assert np.allclose(pt_grads, np_grads, atol=grad_threshold, rtol=rtol), "large_random_test gradient mismatch."
assert np.allclose(ag_grads, np_grads, atol=grad_threshold, rtol=rtol), "large_random_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
def test_case_small_clamp(self, device, dtype):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
GRAD_CLAMP = 0.1
acts = np.array(
[
[
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.2, 0.1, 0.1], [0.7, 0.1, 0.2, 0.1, 0.1]],
]
]
).astype(dtype)
labels = [[1, 2]]
cost_threshold = 1e-8 if dtype == np.float32 else 5e-4
grad_threshold = 1e-8 if dtype == np.float32 else 5e-5
rtol = 1e-5 if dtype == np.float32 else 1e-3
fn_pt = RNNTLossNumba(blank=0, reduction='sum', clamp=GRAD_CLAMP)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_np = RNNTLoss_Numpy(blank=0, clamp=GRAD_CLAMP)
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
expected_cost = 4.495666
expected_grads = np.array(
[
[
[
[-0.1, -0.1, 0.1, 0.1, 0.1],
[-0.1, 0.1, -0.1, 0.1, 0.1],
[-0.1, 0.06269141, 0.06928472, 0.1, 0.06269141],
],
[
[0.05456069, -0.1, 0.05456069, 0.05456069, 0.05456069],
[0.1, 0.1, -0.1, 0.1, 0.1],
[-0.1, 0.1, 0.1, 0.1, 0.1],
],
]
]
)
assert np.allclose(pt_cost, expected_cost, atol=cost_threshold, rtol=rtol), "small_test costs mismatch."
assert np.allclose(pt_grads, expected_grads, atol=grad_threshold, rtol=rtol), "small_test gradient mismatch."
assert np.allclose(pt_cost, np_cost, atol=cost_threshold, rtol=rtol), "small_test costs mismatch."
assert np.allclose(pt_grads, np_grads, atol=grad_threshold, rtol=rtol), "small_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.parametrize('fastemit_lambda', [1.0, 0.01, 0.00001])
def test_case_small_fastemit_clamp(self, device, dtype, fastemit_lambda):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
GRAD_CLAMP = 0.1
acts = np.array(
[
[
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.6, 0.1, 0.1], [0.1, 0.1, 0.2, 0.8, 0.1]],
[[0.1, 0.6, 0.1, 0.1, 0.1], [0.1, 0.1, 0.2, 0.1, 0.1], [0.7, 0.1, 0.2, 0.1, 0.1]],
]
]
).astype(dtype)
labels = [[1, 2]]
cost_threshold = 1e-8 if dtype == np.float32 else 1e-3
grad_threshold = 1e-8 if dtype == np.float32 else 5e-4
rtol = 1e-5 if dtype == np.float32 else 1e-3
fn_pt = RNNTLossNumba(blank=0, reduction='sum', fastemit_lambda=fastemit_lambda, clamp=GRAD_CLAMP)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_np = RNNTLoss_Numpy(blank=0, fastemit_lambda=fastemit_lambda, clamp=GRAD_CLAMP)
np_cost, np_grads = wrap_and_call(fn_np, acts, labels, device)
expected_cost = 4.495666
expected_cost += expected_cost * fastemit_lambda
assert np.allclose(pt_cost, expected_cost, atol=cost_threshold, rtol=rtol), "small_test costs mismatch."
assert np.allclose(pt_cost, np_cost, atol=cost_threshold, rtol=rtol), "small_test costs mismatch."
assert np.allclose(pt_grads, np_grads, atol=grad_threshold, rtol=rtol), "small_test gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_small_random_accumulated(self, device):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
torch.manual_seed(0)
base_layer = torch.randn(3, 5, requires_grad=True)
mid1 = torch.randn(1, 4, 3, 3, requires_grad=True)
labels1 = [[1, 3]]
mid2 = torch.randn(1, 6, 5, 3, requires_grad=True)
labels2 = [[1, 2, 3, 4]]
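        # both activation tensors are built on top of the same base_layer, so base_layer.grad
        # accumulates contributions from every backward call until zero_grad() clears it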
def zero_grad():
if base_layer.grad is not None:
base_layer.grad = None
if mid1.grad is not None:
mid1.grad = None
if mid2.grad is not None:
mid2.grad = None
fn_pt = RNNTLossNumba(blank=0, reduction='sum')
fn_np = RNNTLoss_Numpy()
# run 1
acts1 = torch.matmul(mid1, base_layer) # [1, 4, 3, 5]
pt_cost1, _ = wrap_and_call(fn_pt, acts1, labels1, device)
pt_grads1 = base_layer.grad.detach().cpu().numpy()
zero_grad()
acts1 = torch.matmul(mid1, base_layer) # [1, 4, 3, 5]
np_cost1, _ = wrap_and_call(fn_np, acts1, labels1, device)
np_grads1 = base_layer.grad.detach().cpu().numpy()
zero_grad()
assert np.allclose(pt_grads1, np_grads1, atol=1e-6)
# run 2
        acts2 = torch.matmul(mid2, base_layer)  # [1, 6, 5, 5]
pt_cost2, _ = wrap_and_call(fn_pt, acts2, labels2, device)
pt_grads2 = base_layer.grad.clone().cpu().numpy()
zero_grad()
        acts2 = torch.matmul(mid2, base_layer)  # [1, 6, 5, 5]
np_cost2, _ = wrap_and_call(fn_np, acts2, labels2, device)
np_grads2 = base_layer.grad.clone().cpu().numpy()
zero_grad()
assert np.allclose(pt_grads2, np_grads2, atol=1e-6)
# run 1 + 2
acts1 = torch.matmul(mid1, base_layer) # [1, 4, 3, 5]
pt_cost1, _ = wrap_and_call(fn_pt, acts1, labels1, device)
acts2 = torch.matmul(mid2, base_layer) # [1, 6, 5, 5]
pt_cost2, _ = wrap_and_call(fn_pt, acts2, labels2, device)
pt_grads1_p_2 = base_layer.grad.clone().cpu().numpy()
assert np.allclose(pt_grads1_p_2, np_grads1 + np_grads2, atol=1e-5)
class TestMultiblankRNNTLoss:
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_randomized_act_label(self, device):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
        B, T, U, V = 4, 8, 4, 8  # here V is the number of non-blank labels
big_blank_durations = [2, 4, 8]
sigma = 0.1
acts = torch.rand([B, T, U, V + 1 + len(big_blank_durations)])
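        # the last dim packs V real labels, the standard blank and one big blank per configured duration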
labels = [[random.randrange(0, V) for i in range(U - 1)] for j in range(B)]
fn_pt = MultiblankRNNTLossNumba(
blank=V + len(big_blank_durations),
reduction='sum',
big_blank_durations=big_blank_durations,
sigma=sigma,
)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_ag = MultiblankRNNTLossPytorch(
blank=V + len(big_blank_durations),
reduction='sum',
big_blank_durations=big_blank_durations,
sigma=sigma,
) # ag for automatic gradient computation
ag_cost, ag_grads = wrap_and_call(fn_ag, acts, labels, device)
assert np.allclose(pt_cost, ag_cost, rtol=1e-6), "multi-blank costs mismatch."
assert np.allclose(pt_grads, ag_grads, rtol=1e-2), "multi-blank gradient mismatch."
class TestTDTLoss:
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_randomized_act_label(self, device):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
        B, T, U, V = 4, 8, 4, 8  # here V is the number of non-blank labels
durations = [0, 1, 2, 3, 4, 5]
sigma = 0.05
acts = torch.rand([B, T, U, V + 1 + len(durations)])
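        # the last dim packs V real labels, 1 blank and len(durations) duration logits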
labels = [[random.randrange(0, V) for i in range(U - 1)] for j in range(B)]
fn_pt = TDTLossNumba(blank=V, reduction='sum', durations=durations, sigma=sigma)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
fn_ag = TDTLossPytorch(
blank=V, reduction='sum', durations=durations, sigma=sigma
) # ag for automatic gradient computation
ag_cost, ag_grads = wrap_and_call(fn_ag, acts, labels, device)
assert np.allclose(pt_cost, ag_cost, rtol=1e-6), "tdt costs mismatch."
        assert np.allclose(pt_grads, ag_grads, rtol=1e-2), "tdt gradient mismatch."
@pytest.mark.unit
@pytest.mark.parametrize('device', DEVICES)
def test_case_fixed_case_act_label(self, device):
if device == 'cuda':
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
        B, T, U, V = 1, 3, 2, 3  # here V is the number of non-blank labels
durations = [0, 1, 2]
sigma = 0.05
acts = torch.zeros([B, T, U, V + 1 + len(durations)])
labels = [[(i + j) % (V - 1) for i in range(U - 1)] for j in range(B)]
fn_pt = TDTLossNumba(blank=V, reduction='sum', durations=durations, sigma=sigma)
pt_cost, pt_grads = wrap_and_call(fn_pt, acts, labels, device)
expected_cost = 4.155739
expected_grads = [
[
[
[-0.64962804, 0.25, 0.25, 0.14962798, 0.2672583, -0.16792619, -0.09933221],
[0.01651875, 0.01651875, 0.01651875, -0.04955626, 0.022025, -0.01227201, -0.009753],
],
[
[-0.04892651, 0.01714851, 0.01714851, 0.01462949, -0.01143234, -0.01143234, 0.02286467],
[0.12531489, 0.12531489, 0.12531489, -0.37594467, 0.16708651, 0.13027048, -0.29735702],
],
[
[-0.02572276, 0.00857425, 0.00857425, 0.00857425, -0.02286468, 0.01143234, 0.01143234],
[0.13388914, 0.13388914, 0.13388914, -0.40166742, 0.17851885, -0.35703772, 0.17851885],
],
]
]
assert np.allclose(pt_cost, expected_cost, rtol=1e-6), "tdt costs mismatch."
        assert np.allclose(pt_grads, expected_grads, rtol=1e-2), "tdt gradient mismatch."
if __name__ == "__main__":
pytest.main([__file__])
| NeMo-main | tests/collections/asr/numba/rnnt_loss/test_rnnt_pytorch.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import torch
from numba import cuda
from nemo.collections.asr.losses.rnnt_pytorch import MultiblankRNNTLossPytorch, TDTLossPytorch
from nemo.collections.asr.parts.numba.rnnt_loss import rnnt_numpy
from nemo.collections.asr.parts.numba.rnnt_loss.rnnt_pytorch import certify_inputs
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import gpu_rnnt_kernel, reduce
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
DTYPES = [torch.float32]
if numba_utils.is_numba_cuda_fp16_supported():
DTYPES.append(torch.float16)
def log_softmax(x, axis=-1):
x = torch.from_numpy(x) # zero-copy
x = x.float()
x = torch.log_softmax(x, dim=axis)
x = x.numpy()
return x
def log_softmax_grad(x, axis=-1):
x = torch.tensor(x, requires_grad=True) # alloc memory
y = torch.log_softmax(x, dim=axis)
y.sum().backward()
return x.grad.numpy()
class TestRNNTCUDAKernels:
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_compute_alphas_kernel(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 5, 11, 3]
B, T, U, V = original_shape
threshold = 1e-5 if dtype == torch.float32 else 3e-4
# Numpy kernel
x = random.randn(*original_shape)
labels = np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]) # [1, 10]
label_len = len(labels[0]) + 1
blank_idx = 0
x_np = log_softmax(x, axis=-1)
ground_alphas, ground_log_likelihood = rnnt_numpy.forward_pass(
x_np[0, :, :label_len, :], labels[0, : label_len - 1], blank_idx
)
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x_c = torch.tensor(x, device=device, dtype=dtype)
labels_c = torch.tensor(labels, device=device, dtype=torch.int64)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
alphas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
llForward = torch.zeros(B, device=device, dtype=x_c.dtype)
input_lengths = torch.tensor([T], dtype=torch.int64, device=device)
label_lengths = torch.tensor([len(labels[0])], dtype=torch.int64, device=device)
# certify input data
certify_inputs(x_c, labels_c, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x_c = x_c.view([-1])
# call kernel
# log softmax reduction
reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
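        # after these two reductions, denom caches a per-position (B*T*U) normalization term of the
        # log-softmax, which the subsequent kernels reuse instead of recomputing it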
# alpha kernel
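        # numba CUDA launch syntax: kernel[blocks, threads, stream, shared_mem] -- one block per
        # batch element, one thread per target position (U)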
gpu_rnnt_kernel.compute_alphas_kernel[B, U, stream, 0](
x_c, denom, alphas, llForward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# sync kernel
stream.synchronize()
# reshape alphas
alphas = alphas.view([B, T, U])
diff = ground_alphas - alphas[0].cpu().numpy()
assert np.abs(diff).mean() <= threshold
assert np.square(diff).mean() <= (threshold ** 2)
ll_diff = ground_log_likelihood - llForward[0].cpu().numpy()
assert np.abs(ll_diff).mean() <= threshold
assert np.square(ll_diff).mean() <= (threshold ** 2)
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_compute_betas_kernel(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 5, 11, 3]
B, T, U, V = original_shape
threshold = 1e-5 if dtype == torch.float32 else 3e-4
# Numpy kernel
x = random.randn(*original_shape)
labels = np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]]) # [1, 10]
label_len = len(labels[0]) + 1
blank_idx = 0
x_np = log_softmax(x, axis=-1)
        ground_betas, ground_log_likelihood = rnnt_numpy.backward_pass(
x_np[0, :, :label_len, :], labels[0, : label_len - 1], blank_idx
)
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x_c = torch.tensor(x, device=device, dtype=dtype)
labels_c = torch.tensor(labels, device=device, dtype=torch.int64)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
betas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
llBackward = torch.zeros(B, device=device, dtype=x_c.dtype)
input_lengths = torch.tensor([T], dtype=torch.int64, device=device)
label_lengths = torch.tensor([len(labels[0])], dtype=torch.int64, device=device)
# certify input data
certify_inputs(x_c, labels_c, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x_c = x_c.view([-1])
# call kernel
# log softmax reduction
reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
# beta kernel
gpu_rnnt_kernel.compute_betas_kernel[B, U, stream, 0](
x_c, denom, betas, llBackward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# sync kernel
stream.synchronize()
        # reshape betas
betas = betas.view([B, T, U])
        diff = ground_betas - betas[0].cpu().numpy()
assert np.abs(diff).mean() <= threshold
assert np.square(diff).mean() <= (threshold ** 2)
ll_diff = ground_log_likelihood - llBackward[0].cpu().numpy()
assert np.abs(ll_diff).mean() <= threshold
assert np.square(ll_diff).mean() <= (threshold ** 2)
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_compute_grads_kernel(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
fastemit_lambda = 0.0
clamp = 0.0
random = np.random.RandomState(0)
original_shape = [1, 5, 11, 3]
B, T, U, V = original_shape
threshold = 1e-5 if dtype == torch.float32 else 3e-5
# Numpy kernel
x = random.randn(*original_shape)
labels = torch.from_numpy(np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int64)) # [1, 10]
audio_len = torch.from_numpy(np.array([T], dtype=np.int64))
label_len = torch.from_numpy(np.array([U - 1], dtype=np.int64))
blank_idx = 0
x_np = torch.from_numpy(x)
x_np.requires_grad_(True)
"""
Here we will directly utilize the numpy variant of the loss without explicitly calling
the numpy functions for alpha, beta and grads.
This is because the grads returned by the rnnt_numpy.transduce_batch() are :
d/dx (alpha + beta alignment)(log_softmax(x)).
But according to the chain rule, we'd still need to compute the gradient of log_softmax(x)
and update the alignments by hand. Instead, we will rely on pytorch to compute the gradient
of the log_softmax(x) step and propagate it backwards.
"""
loss_func = rnnt_numpy.RNNTLoss(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)
loss_val = loss_func(x_np, labels, audio_len, label_len)
loss_val.sum().backward()
true_grads = x_np.grad
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x_c = torch.tensor(x, device=device, dtype=dtype)
labels_c = labels.clone().to(device=device, dtype=torch.int64)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
alphas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
betas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
llForward = torch.zeros(B, device=device, dtype=x_c.dtype)
llBackward = torch.zeros(B, device=device, dtype=x_c.dtype)
input_lengths = torch.tensor([T], dtype=torch.int64, device=device)
label_lengths = torch.tensor([len(labels[0])], dtype=torch.int64, device=device)
# certify input data
certify_inputs(x_c, labels_c, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x_c = x_c.view([-1])
grads = torch.zeros_like(x_c, requires_grad=False)
# call kernel
# log softmax reduction
reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
# alpha kernel
gpu_rnnt_kernel.compute_alphas_kernel[B, U, stream, 0](
x_c, denom, alphas, llForward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# beta kernel
gpu_rnnt_kernel.compute_betas_kernel[B, U, stream, 0](
x_c, denom, betas, llBackward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# gamma kernel
grad_blocks_per_grid = B * T * U
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
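        # one CUDA block per (b, t, u) cell of the lattice; GPU_RNNT_THREAD_SIZE threads per block
        # cooperate over the vocabulary dimension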
gpu_rnnt_kernel.compute_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, stream, 0](
grads,
x_c,
denom,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels_c,
B,
T,
U,
V,
blank_idx,
fastemit_lambda,
clamp,
)
# sync kernel
stream.synchronize()
# reshape grads
grads = grads.view([B, T, U, V])
diff = true_grads - grads[0].cpu().numpy()
assert np.abs(diff).mean() <= threshold
assert np.square(diff).mean() <= (threshold ** 2) * 5.0
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_compute_grads_kernel_fastemit(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
fastemit_lambda = 0.001
clamp = 0.0
random = np.random.RandomState(0)
original_shape = [1, 5, 11, 3]
B, T, U, V = original_shape
threshold = 1e-5 if dtype == torch.float32 else 3e-5
# Numpy kernel
x = random.randn(*original_shape)
labels = torch.from_numpy(np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int64)) # [1, 10]
audio_len = torch.from_numpy(np.array([T], dtype=np.int64))
label_len = torch.from_numpy(np.array([U - 1], dtype=np.int64))
blank_idx = 0
x_np = torch.from_numpy(x)
x_np.requires_grad_(True)
"""
Here we will directly utilize the numpy variant of the loss without explicitly calling
the numpy functions for alpha, beta and grads.
This is because the grads returned by the rnnt_numpy.transduce_batch() are :
d/dx (alpha + beta alignment)(log_softmax(x)).
But according to the chain rule, we'd still need to compute the gradient of log_softmax(x)
and update the alignments by hand. Instead, we will rely on pytorch to compute the gradient
of the log_softmax(x) step and propagate it backwards.
"""
loss_func = rnnt_numpy.RNNTLoss(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)
loss_val = loss_func(x_np, labels, audio_len, label_len)
loss_val.sum().backward()
true_grads = x_np.grad
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x_c = torch.tensor(x, device=device, dtype=dtype)
labels_c = labels.clone().to(device=device, dtype=torch.int64)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
alphas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
betas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
llForward = torch.zeros(B, device=device, dtype=x_c.dtype)
llBackward = torch.zeros(B, device=device, dtype=x_c.dtype)
input_lengths = torch.tensor([T], dtype=torch.int64, device=device)
label_lengths = torch.tensor([len(labels[0])], dtype=torch.int64, device=device)
# certify input data
certify_inputs(x_c, labels_c, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x_c = x_c.view([-1])
grads = torch.zeros_like(x_c, requires_grad=False)
# call kernel
# log softmax reduction
reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
# alpha kernel
gpu_rnnt_kernel.compute_alphas_kernel[B, U, stream, 0](
x_c, denom, alphas, llForward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# beta kernel
gpu_rnnt_kernel.compute_betas_kernel[B, U, stream, 0](
x_c, denom, betas, llBackward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# gamma kernel
grad_blocks_per_grid = B * T * U
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
gpu_rnnt_kernel.compute_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, stream, 0](
grads,
x_c,
denom,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels_c,
B,
T,
U,
V,
blank_idx,
fastemit_lambda,
clamp,
)
# sync kernel
stream.synchronize()
# reshape grads
grads = grads.view([B, T, U, V])
diff = true_grads - grads[0].cpu().numpy()
assert np.abs(diff).mean() <= threshold
assert np.square(diff).mean() <= (threshold ** 2) * 5
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_compute_grads_kernel_clamp(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
fastemit_lambda = 0.0
clamp = 0.1
random = np.random.RandomState(0)
original_shape = [1, 5, 11, 3]
B, T, U, V = original_shape
threshold = 1e-5 if dtype == torch.float32 else 3e-5
# Numpy kernel
x = random.randn(*original_shape)
labels = torch.from_numpy(np.array([[1, 1, 1, 2, 2, 2, 1, 2, 2, 1]], dtype=np.int64)) # [1, 10]
audio_len = torch.from_numpy(np.array([T], dtype=np.int64))
label_len = torch.from_numpy(np.array([U - 1], dtype=np.int64))
blank_idx = 0
x_np = torch.from_numpy(x)
x_np.requires_grad_(True)
"""
Here we will directly utilize the numpy variant of the loss without explicitly calling
the numpy functions for alpha, beta and grads.
This is because the grads returned by the rnnt_numpy.transduce_batch() are :
d/dx (alpha + beta alignment)(log_softmax(x)).
But according to the chain rule, we'd still need to compute the gradient of log_softmax(x)
and update the alignments by hand. Instead, we will rely on pytorch to compute the gradient
of the log_softmax(x) step and propagate it backwards.
"""
loss_func = rnnt_numpy.RNNTLoss(blank_idx, fastemit_lambda=fastemit_lambda, clamp=clamp)
loss_val = loss_func(x_np, labels, audio_len, label_len)
loss_val.sum().backward()
true_grads = x_np.grad
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x_c = torch.tensor(x, device=device, dtype=dtype)
labels_c = labels.clone().to(device=device, dtype=torch.int64)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
alphas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
betas = torch.zeros(B * T * U, device=device, dtype=x_c.dtype)
llForward = torch.zeros(B, device=device, dtype=x_c.dtype)
llBackward = torch.zeros(B, device=device, dtype=x_c.dtype)
input_lengths = torch.tensor([T], dtype=torch.int64, device=device)
label_lengths = torch.tensor([len(labels[0])], dtype=torch.int64, device=device)
# certify input data
certify_inputs(x_c, labels_c, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x_c = x_c.view([-1])
grads = torch.zeros_like(x_c, requires_grad=False)
# call kernel
# log softmax reduction
reduce.reduce_max(x_c, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x_c, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
# alpha kernel
gpu_rnnt_kernel.compute_alphas_kernel[B, U, stream, 0](
x_c, denom, alphas, llForward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# beta kernel
gpu_rnnt_kernel.compute_betas_kernel[B, U, stream, 0](
x_c, denom, betas, llBackward, input_lengths, label_lengths, labels_c, B, T, U, V, blank_idx,
)
# gamma kernel
grad_blocks_per_grid = B * T * U
grad_threads_per_block = gpu_rnnt_kernel.GPU_RNNT_THREAD_SIZE
gpu_rnnt_kernel.compute_grad_kernel[grad_blocks_per_grid, grad_threads_per_block, stream, 0](
grads,
x_c,
denom,
alphas,
betas,
llForward,
input_lengths,
label_lengths,
labels_c,
B,
T,
U,
V,
blank_idx,
fastemit_lambda,
clamp,
)
# sync kernel
stream.synchronize()
# reshape grads
grads = grads.view([B, T, U, V])
diff = true_grads - grads[0].cpu().numpy()
assert np.abs(diff).mean() <= threshold
assert np.square(diff).mean() <= (threshold ** 2) * 5
class TestTDTCUDAKernels:
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
def test_compute_alphas_kernel(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 15, 11, 3]
durations = [0, 1, 2]
B, T, U, V = original_shape
Vd = len(durations)
duration_act_shape = [B, T, U, Vd]
sigma = 0.05
        # for passing into the kernel function -- it expects unnormalized logits
x = random.randn(*original_shape)
        # for passing into the pytorch function -- it expects normalized logits
        normalized_x = log_softmax(x, axis=-1) - sigma
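        # the reference path uses log-probs shifted down by sigma -- the same logit
        # under-normalization the kernel applies via its `sigma` argument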
xd = random.randn(*duration_act_shape)
# duration logits are normalized before passing into the loss computation.
xd = log_softmax(xd, axis=-1)
labels = np.array([[1, 1, 1, 1, 0, 0, 1, 0, 0, 1]]) # [1, 10]
blank_idx = V - 1
pytorch_tdt_loss = TDTLossPytorch(blank_idx, durations, sigma=sigma)
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x = torch.tensor(x, device=device, dtype=torch.float32)
normalized_x = torch.tensor(normalized_x, device=device, dtype=torch.float32)
xd = torch.tensor(xd, device=device, dtype=torch.float32)
labels = torch.tensor(labels, device=device, dtype=torch.long)
durations = torch.tensor(durations, device=device, dtype=torch.long)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x.dtype)
alphas = torch.zeros(B * T * U, device=device, dtype=x.dtype)
llForward = torch.zeros(B, device=device, dtype=x.dtype)
input_lengths = torch.tensor([T], dtype=torch.long, device=device)
label_lengths = torch.tensor([U - 1], dtype=torch.long, device=device)
ground_log_likelihood, ground_alphas = pytorch_tdt_loss.compute_forward_prob(
normalized_x, xd, labels, input_lengths, label_lengths
)
# certify input data
certify_inputs(x, labels, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x = x.view([-1])
xd = xd.view([-1])
# call kernel
# log softmax reduction
reduce.reduce_max(x, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
# alpha kernel
gpu_rnnt_kernel.compute_tdt_alphas_kernel[B, U, stream, 0](
x,
xd,
denom,
sigma,
alphas,
llForward,
input_lengths,
label_lengths,
labels,
B,
T,
U,
V,
blank_idx,
durations,
Vd,
)
# sync kernel
stream.synchronize()
# reshape alphas
alphas = alphas.view([B, T, U])
diff = torch.norm(ground_alphas - alphas)
ll_diff = torch.norm(ground_log_likelihood - llForward)
assert diff <= 1e-3
assert ll_diff <= 1e-3
class TestMultiblankRNNTCUDAKernels:
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
def test_compute_alphas_kernel(self):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 15, 11, 6]
big_blank_durations = [2, 3, 4]
B, T, U, V = original_shape
num_big_blanks = len(big_blank_durations)
sigma = 0.05
        # for passing into the kernel function -- it expects unnormalized logits
x = random.randn(*original_shape)
        # for passing into the pytorch function -- it expects normalized logits
normalized_x = log_softmax(x, axis=-1) - sigma
labels = np.array([[1, 1, 1, 1, 0, 0, 1, 0, 0, 1]]) # [1, 10]
blank_idx = V - 1
pytorch_multiblank_loss = MultiblankRNNTLossPytorch(blank_idx, big_blank_durations, sigma=sigma)
# Pytorch kernel
device = torch.device('cuda')
if hasattr(cuda, 'external_stream'):
stream = cuda.external_stream(torch.cuda.current_stream(device).cuda_stream)
else:
stream = cuda.default_stream()
x = torch.tensor(x, device=device, dtype=torch.float32)
normalized_x = torch.tensor(normalized_x, device=device, dtype=torch.float32)
labels = torch.tensor(labels, device=device, dtype=torch.long)
big_blank_durations = torch.tensor(big_blank_durations, device=device, dtype=torch.long)
# Allocate workspace memory
denom = torch.zeros(B * T * U, device=device, dtype=x.dtype)
alphas = torch.zeros(B * T * U, device=device, dtype=x.dtype)
llForward = torch.zeros(B, device=device, dtype=x.dtype)
input_lengths = torch.tensor([T], dtype=torch.long, device=device)
label_lengths = torch.tensor([U - 1], dtype=torch.long, device=device)
ground_log_likelihood, ground_alphas = pytorch_multiblank_loss.compute_forward_prob(
normalized_x, labels, input_lengths, label_lengths
)
# certify input data
certify_inputs(x, labels, input_lengths, label_lengths)
# flatten activation tensor (for pointer based indexing)
x = x.view([-1])
# call kernel
# log softmax reduction
reduce.reduce_max(x, denom, rows=V, cols=B * T * U, minus=False, stream=stream)
reduce.reduce_exp(x, denom, rows=V, cols=B * T * U, minus=True, stream=stream)
# alpha kernel
gpu_rnnt_kernel.compute_multiblank_alphas_kernel[B, U, stream, 0](
x,
denom,
sigma,
alphas,
llForward,
input_lengths,
label_lengths,
labels,
B,
T,
U,
V,
blank_idx,
big_blank_durations,
num_big_blanks,
)
# sync kernel
stream.synchronize()
# reshape alphas
alphas = alphas.view([B, T, U])
diff = torch.norm(ground_alphas - alphas)
ll_diff = torch.norm(ground_log_likelihood - llForward)
assert diff <= 1e-3
assert ll_diff <= 1e-3
| NeMo-main | tests/collections/asr/numba/rnnt_loss/utils/test_gpu_rnnt_kernel.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils import global_constants, rnnt_helper
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
DTYPES = [np.float32]
if numba_utils.is_numba_cuda_fp16_supported():
DTYPES.append(np.float16)
class TestRNNTHelper:
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_log_sum_exp(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x, y):
x_pos = cuda.grid(1)
if x_pos < x.shape[0] and x_pos < y.shape[0]:
x[x_pos] = rnnt_helper.log_sum_exp(x[x_pos], y[x_pos])
x = np.zeros([8]).astype(dtype) # np.random.rand(8192)
y = np.ones([8]).astype(dtype) # np.random.rand(8192)
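        # log_sum_exp(0, 1) = log(exp(0) + exp(1)) = log(1 + e) ~= 1.3133 per element,
        # so the sum over 8 elements is ~= 10.5061 (checked below)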
threshold = 1e-5 if dtype == np.float32 else 2e-3
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
assert (x_new.sum() - 10.506093500145782) <= threshold
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_log_sum_exp_neg_inf(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x, y):
x_pos = cuda.grid(1)
if x_pos < x.shape[0] and x_pos < y.shape[0]:
x[x_pos] = rnnt_helper.log_sum_exp(x[x_pos], y[x_pos])
x = np.asarray([global_constants.FP32_NEG_INF] * 8).astype(dtype)
y = np.ones([len(x)]).astype(dtype)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
assert np.allclose(x_new, np.ones_like(x_new), atol=1e-5)
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_div_up(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x, y):
x_pos = cuda.grid(1)
if x_pos < x.shape[0] and x_pos < y.shape[0]:
x[x_pos] = rnnt_helper.div_up(x[x_pos], y[x_pos])
x = np.full([8], fill_value=10).astype(dtype) # np.random.rand(8192)
y = np.full([8], fill_value=2).astype(dtype) # np.random.rand(8192)
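        # div_up should act as ceiling division: div_up(10, 2) == (10 + 2 - 1) // 2 == 5 for every element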
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
for i in range(len(x_new)):
assert x_new[i] == ((10 + 2 - 1) // 2)
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_add(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x, y):
x_pos = cuda.grid(1)
if x_pos < x.shape[0] and x_pos < y.shape[0]:
x[x_pos] = rnnt_helper.add(x[x_pos], y[x_pos])
x = np.full([8], fill_value=10).astype(dtype) # np.random.rand(8192)
y = np.full([8], fill_value=2).astype(dtype) # np.random.rand(8192)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
for i in range(len(x_new)):
assert x_new[i] == 12
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_maximum(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x, y):
x_pos = cuda.grid(1)
if x_pos < x.shape[0] and x_pos < y.shape[0]:
x[x_pos] = rnnt_helper.maximum(x[x_pos], y[x_pos])
x = np.full([8], fill_value=10).astype(dtype) # np.random.rand(8192)
y = np.full([8], fill_value=2).astype(dtype) # np.random.rand(8192)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
for i in range(len(x_new)):
assert x_new[i] == 10
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_identity(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x):
x_pos = cuda.grid(1)
if x_pos < x.shape[0]:
x[x_pos] = rnnt_helper.identity(x[x_pos])
x = np.full([8], fill_value=10).astype(dtype) # np.random.rand(8192)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c
for i in range(len(x_new)):
assert x_new[i] == x[i]
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', [np.float32, np.float16])
def test_negate(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x):
x_pos = cuda.grid(1)
if x_pos < x.shape[0]:
x[x_pos] = rnnt_helper.negate(x[x_pos])
x = np.full([8], fill_value=10).astype(dtype) # np.random.rand(8192)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c
for i in range(len(x_new)):
assert x_new[i] == -x[i]
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_exponential(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x):
x_pos = cuda.grid(1)
if x_pos < x.shape[0]:
x[x_pos] = rnnt_helper.exponential(x[x_pos])
x = np.random.rand(8).astype(dtype)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c
y = np.exp(x)
for i in range(len(x_new)):
assert (x_new[i] - y[i]) < 1e-4
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_log_plus(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
# wrapper kernel for device function that is tested
@cuda.jit
def _kernel(x, y):
x_pos = cuda.grid(1)
if x_pos < x.shape[0] and x_pos < y.shape[0]:
x[x_pos] = rnnt_helper.log_plus(x[x_pos], y[x_pos])
x = np.full([8], fill_value=10.0).astype(dtype) # np.random.rand(8192)
y = np.full([8], fill_value=2.0).astype(dtype) # np.random.rand(8192)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = global_constants.threads_per_block()
blocks_per_grid = (x.shape[0] + threads_per_block - 1) // threads_per_block
_kernel[blocks_per_grid, threads_per_block, stream](x_c, y_c)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
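        # reference: numerically stable log-add-exp, log(exp(x) + exp(y)) = max(x, y) + log1p(exp(-|x - y|))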
z = np.log1p(np.exp(-np.fabs(x - y))) + np.maximum(x, y)
for i in range(len(x_new)):
assert x_new[i] == z[i]
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Helpers can only be run when CUDA is available")
@pytest.mark.parametrize('batch_size', [8, 128, 256])
@pytest.mark.parametrize('fastemit_lambda', [0.0, 0.001])
@pytest.mark.parametrize('dtype', DTYPES)
@pytest.mark.unit
def test_compute_costs_data(self, batch_size, fastemit_lambda, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
np.random.seed(0)
x = np.full([batch_size], fill_value=0.0) # np.random.rand(8192)
y = np.random.randn(batch_size).astype(dtype) # np.random.rand(8192)
threshold = 1e-5 if dtype == np.float32 else 1e-5
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
y_c = cuda.to_device(y, stream=stream)
# call kernel
threads_per_block = min(x.shape[0], 32)
blocks_per_grid = (x.shape[0] + (threads_per_block - 1)) // threads_per_block
# Kernel call (source, dest, extra_args_...)
rnnt_helper.compute_costs_data[blocks_per_grid, threads_per_block, stream](y_c, x_c, fastemit_lambda)
# sync kernel
stream.synchronize()
x_new = x_c.copy_to_host(stream=stream)
del x_c, y_c
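        # reference result computed on the host: negate the inputs and scale by (1 + fastemit_lambda)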
res = -(y.astype(np.float32).copy())
res *= 1.0 + fastemit_lambda
for i in range(len(x_new)):
assert abs(x_new[i] - res[i]) < threshold, f"index failed {i}"
if __name__ == '__main__':
pytest.main([__file__])
| NeMo-main | tests/collections/asr/numba/rnnt_loss/utils/test_rnnt_helper.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
from numba import cuda
from nemo.collections.asr.parts.numba.rnnt_loss.utils.cuda_utils import reduce
from nemo.core.utils import numba_utils
from nemo.core.utils.numba_utils import __NUMBA_MINIMUM_VERSION__
DTYPES = [np.float32]
if numba_utils.is_numba_cuda_fp16_supported():
DTYPES.append(np.float16)
class TestRNNTCUDAReductions:
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_reduce_max(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 5, 4, 3]
x = random.randn(*original_shape).reshape([-1]).astype(dtype)
dx = random.randn(*x.shape).astype(dtype)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
dx_c = cuda.to_device(dx, stream=stream)
# call kernel
cols = np.prod(original_shape[:3])
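        # rows = vocabulary size (last dim), cols = flattened B*T*U positions; the reduction writes
        # one value per column into the first `cols` entries of dx and leaves the tail untouched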
reduce.reduce_max(x_c, dx_c, rows=original_shape[-1], cols=cols, minus=False, stream=stream)
# sync kernel
stream.synchronize()
dx_result = dx_c.copy_to_host(stream=stream)
del x_c, dx_c
# collect results in first [B * T * U] values; for all V
assert np.abs(dx_result[cols:] - dx[cols:]).sum() <= 1e-7
# make sure dx_result updates the [B * T * U] values
assert np.abs(dx_result[:cols] - dx[:cols]).sum() > 0
@pytest.mark.skipif(not cuda.is_available(), reason="CUDA Reductions can only be run when CUDA is available")
@pytest.mark.unit
@pytest.mark.parametrize('dtype', DTYPES)
def test_reduce_exp(self, dtype):
numba_utils.skip_numba_cuda_test_if_unsupported(__NUMBA_MINIMUM_VERSION__)
random = np.random.RandomState(0)
original_shape = [1, 5, 4, 2]
x = random.randn(*original_shape).reshape([-1]).astype(dtype)
dx = np.zeros_like(x).astype(dtype)
stream = cuda.stream()
x_c = cuda.to_device(x, stream=stream)
dx_c = cuda.to_device(dx, stream=stream)
# call kernel
cols = np.prod(original_shape[:3])
reduce.reduce_exp(x_c, dx_c, rows=original_shape[-1], cols=cols, minus=False, stream=stream)
# sync kernel
stream.synchronize()
dx_result = dx_c.copy_to_host(stream=stream)
del x_c, dx_c
# collect results in first [B * T * U] values; for all V
assert np.abs(dx_result[cols:] - dx[cols:]).sum() <= 1e-7
# make sure dx_result updates the [B * T * U] values
assert np.abs(dx_result[:cols] - dx[:cols]).sum() > 0
if __name__ == '__main__':
pytest.main([__file__])
| NeMo-main | tests/collections/asr/numba/rnnt_loss/utils/test_reduce.py |
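In the reduction tests above, `x` is a flattened [B, T, U, V] tensor with `cols = B * T * U` and `rows = V`; the assertions only verify that the first `cols` entries of `dx` are overwritten and the remainder is untouched. The sketch below spells out the assumed per-column semantics (max for `reduce_max`, log-sum-exp for `reduce_exp`) in NumPy; it reflects what the test implies, not the actual CUDA kernels.

import numpy as np

def reduce_max_reference(x: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # Assumed semantics: one max over the vocabulary axis per (b, t, u) column,
    # written into the first `cols` slots of the output.
    out = np.array(x, copy=True)
    out[:cols] = x[: cols * rows].reshape(cols, rows).max(axis=1)
    return out

def reduce_logsumexp_reference(x: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # Assumed analogue of reduce_exp: numerically stable per-column log-sum-exp.
    out = np.array(x, copy=True)
    block = x[: cols * rows].reshape(cols, rows).astype(np.float32)
    m = block.max(axis=1, keepdims=True)
    out[:cols] = (m + np.log(np.exp(block - m).sum(axis=1, keepdims=True))).squeeze(1)
    return out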
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from einops import rearrange
from omegaconf import DictConfig
from nemo.collections.tts.models import SpectrogramEnhancerModel
from nemo.collections.tts.parts.utils.helpers import mask_sequence_tensor
@pytest.fixture
def enhancer_config():
n_bands = 80
latent_dim = 192
style_depth = 4
network_capacity = 16
fmap_max = 192
config = {
"model": {
"n_bands": n_bands,
"latent_dim": latent_dim,
"style_depth": style_depth,
"network_capacity": network_capacity,
"mixed_prob": 0.9,
"fmap_max": fmap_max,
"generator": {
"_target_": "nemo.collections.tts.modules.spectrogram_enhancer.Generator",
"n_bands": n_bands,
"latent_dim": latent_dim,
"network_capacity": network_capacity,
"style_depth": style_depth,
"fmap_max": fmap_max,
},
"discriminator": {
"_target_": "nemo.collections.tts.modules.spectrogram_enhancer.Discriminator",
"n_bands": n_bands,
"network_capacity": network_capacity,
"fmap_max": fmap_max,
},
"spectrogram_min_value": -13.18,
"spectrogram_max_value": 4.78,
"consistency_loss_weight": 10.0,
"gradient_penalty_loss_weight": 10.0,
"gradient_penalty_loss_every_n_steps": 4,
"spectrogram_predictor_path": None,
},
"generator_opt": {"_target_": "torch.optim.Adam", "lr": 2e-4, "betas": [0.5, 0.9]},
"discriminator_opt": {"_target_": "torch.optim.Adam", "lr": 2e-4, "betas": [0.5, 0.9]},
}
return DictConfig(config)
@pytest.fixture
def enhancer(enhancer_config):
return SpectrogramEnhancerModel(cfg=enhancer_config.model)
@pytest.fixture
def enhancer_with_fastpitch(enhancer_config_with_fastpitch):
# NOTE: depends on an `enhancer_config_with_fastpitch` fixture that is not defined in this file.
return SpectrogramEnhancerModel(cfg=enhancer_config_with_fastpitch.model)
@pytest.fixture
def sample_input(batch_size=15, max_length=1000):
generator = torch.Generator()
generator.manual_seed(0)
lengths = torch.randint(max_length // 4, max_length - 7, (batch_size,), generator=generator)
input_spectrograms = torch.randn((batch_size, 80, max_length), generator=generator)
input_spectrograms = mask_sequence_tensor(input_spectrograms, lengths)
return input_spectrograms, lengths
@pytest.mark.unit
def test_pad_spectrograms(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
output = enhancer.pad_spectrograms(input_spectrograms)
assert output.size(-1) >= input_spectrograms.size(-1)
@pytest.mark.unit
def test_spectrogram_norm_unnorm(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
same_input_spectrograms = enhancer.unnormalize_spectrograms(
enhancer.normalize_spectrograms(input_spectrograms, lengths), lengths
)
assert torch.allclose(input_spectrograms, same_input_spectrograms, atol=1e-5)
@pytest.mark.unit
def test_spectrogram_unnorm_norm(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
same_input_spectrograms = enhancer.normalize_spectrograms(
enhancer.unnormalize_spectrograms(input_spectrograms, lengths), lengths
)
assert torch.allclose(input_spectrograms, same_input_spectrograms, atol=1e-5)
@pytest.mark.unit
def test_spectrogram_norm_unnorm_dont_look_at_padding(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
same_input_spectrograms = enhancer.unnormalize_spectrograms(
enhancer.normalize_spectrograms(input_spectrograms, lengths), lengths
)
for i, length in enumerate(lengths.tolist()):
assert torch.allclose(input_spectrograms[i, :, :length], same_input_spectrograms[i, :, :length], atol=1e-5)
@pytest.mark.unit
def test_spectrogram_unnorm_norm_dont_look_at_padding(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
same_input_spectrograms = enhancer.normalize_spectrograms(
enhancer.unnormalize_spectrograms(input_spectrograms, lengths), lengths
)
for i, length in enumerate(lengths.tolist()):
assert torch.allclose(input_spectrograms[i, :, :length], same_input_spectrograms[i, :, :length], atol=1e-5)
@pytest.mark.unit
def test_generator_pass_keeps_size(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
output = enhancer.forward(input_spectrograms=input_spectrograms, lengths=lengths)
assert output.shape == input_spectrograms.shape
@pytest.mark.unit
def test_discriminator_pass(enhancer: SpectrogramEnhancerModel, sample_input):
input_spectrograms, lengths = sample_input
input_spectrograms = rearrange(input_spectrograms, "b c l -> b 1 c l")
logits = enhancer.discriminator(x=input_spectrograms, condition=input_spectrograms, lengths=lengths)
assert logits.shape == lengths.shape
@pytest.mark.unit
def test_nemo_save_load(enhancer: SpectrogramEnhancerModel, tmp_path):
path = tmp_path / "test-enhancer-save-load.nemo"
enhancer.save_to(path)
SpectrogramEnhancerModel.restore_from(path)
| NeMo-main | tests/collections/tts/test_spectrogram_enhancer.py |
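The norm/unnorm round-trip tests above only hold if the model's normalization is an invertible affine map between the configured spectrogram range and a fixed interval. A minimal sketch of such a min-max pair, using the `spectrogram_min_value`/`spectrogram_max_value` from the fixture config as illustrative bounds; the real model additionally masks padded frames, which is omitted here.

import torch

SPEC_MIN, SPEC_MAX = -13.18, 4.78  # bounds taken from the fixture config above

def normalize(spec: torch.Tensor) -> torch.Tensor:
    # Map [SPEC_MIN, SPEC_MAX] -> [0, 1].
    return (spec - SPEC_MIN) / (SPEC_MAX - SPEC_MIN)

def unnormalize(spec: torch.Tensor) -> torch.Tensor:
    # Inverse map [0, 1] -> [SPEC_MIN, SPEC_MAX].
    return spec * (SPEC_MAX - SPEC_MIN) + SPEC_MIN

x = torch.randn(2, 80, 100)
assert torch.allclose(unnormalize(normalize(x)), x, atol=1e-5)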
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import pytest
import torch
from omegaconf import OmegaConf
from nemo.collections.tts.models import FastPitchModel, HifiGanModel, RadTTSModel
from nemo.utils.app_state import AppState
@pytest.fixture()
def fastpitch_model():
model = FastPitchModel.from_pretrained(model_name="tts_en_fastpitch")
model.export_config['enable_volume'] = True
model.export_config['enable_ragged_batches'] = True
return model
@pytest.fixture()
def hifigan_model():
model = HifiGanModel.from_pretrained(model_name="tts_en_hifigan")
return model
@pytest.fixture()
def radtts_model():
this_test_dir = os.path.dirname(os.path.abspath(__file__))
cfg = OmegaConf.load(os.path.join(this_test_dir, '../../../examples/tts/conf/rad-tts_feature_pred.yaml'))
cfg.model.init_from_ptl_ckpt = None
cfg.model.train_ds.dataset.manifest_filepath = "dummy.json"
cfg.model.train_ds.dataset.sup_data_path = "dummy.json"
cfg.model.validation_ds.dataset.manifest_filepath = "dummy.json"
cfg.model.validation_ds.dataset.sup_data_path = "dummy.json"
cfg.pitch_mean = 212.35
cfg.pitch_std = 68.52
app_state = AppState()
app_state.is_model_being_restored = True
model = RadTTSModel(cfg=cfg.model)
app_state.is_model_being_restored = False
model.eval()
model.set_export_config({'enable_ragged_batches': 'True', 'enable_volume': 'True'})
return model
class TestExportable:
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_FastPitchModel_export_to_onnx(self, fastpitch_model):
model = fastpitch_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'fp.onnx')
model.export(output=filename, verbose=True, onnx_opset_version=14, check_trace=True)
@pytest.mark.with_downloads()
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_HifiGanModel_export_to_onnx(self, hifigan_model):
model = hifigan_model.cuda()
assert hifigan_model.generator is not None
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'hfg.onnx')
model.export(output=filename, verbose=True, check_trace=True)
@pytest.mark.pleasefixme
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_RadTTSModel_export_to_torchscript(self, radtts_model):
model = radtts_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'rad.ts')
with torch.cuda.amp.autocast(enabled=True, cache_enabled=False, dtype=torch.float16):
input_example1 = model.input_module.input_example(max_batch=13, max_dim=777)
input_example2 = model.input_module.input_example(max_batch=19, max_dim=999)
model.export(output=filename, verbose=True, input_example=input_example1, check_trace=[input_example2])
@pytest.mark.pleasefixme
@pytest.mark.run_only_on('GPU')
@pytest.mark.unit
def test_RadTTSModel_export_to_onnx(self, radtts_model):
model = radtts_model.cuda()
with tempfile.TemporaryDirectory() as tmpdir:
filename = os.path.join(tmpdir, 'rad.onnx')
with torch.cuda.amp.autocast(enabled=True, cache_enabled=False, dtype=torch.float16):
input_example1 = model.input_module.input_example(max_batch=13, max_dim=777)
input_example2 = model.input_module.input_example(max_batch=19, max_dim=999)
model.export(
output=filename,
input_example=input_example1,
verbose=True,
onnx_opset_version=14,
check_trace=[input_example2],
)
| NeMo-main | tests/collections/tts/test_tts_exportables.py |
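Once a model has been exported as in the tests above, the resulting ONNX file can be inspected independently of NeMo with onnxruntime. The sketch below is a hedged example: the file name matches the temporary path used in the FastPitch test, and the input/output names, shapes, and dtypes depend on the model and on export_config options such as enable_volume and enable_ragged_batches.

import onnxruntime as ort

# Path is illustrative; in the tests above the file lives in a temporary directory.
session = ort.InferenceSession("fp.onnx", providers=["CPUExecutionProvider"])

# Inspect the exported graph signature before building a feed dict;
# feeding real inputs requires arrays matching these names, shapes, and dtypes.
for inp in session.get_inputs():
    print("input :", inp.name, inp.shape, inp.type)
for out in session.get_outputs():
    print("output:", out.name, out.shape, out.type)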