# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
insert_space,
)
from nemo_text_processing.text_normalization.en.taggers.date import get_four_digit_year_graph
from nemo_text_processing.text_normalization.en.utils import get_abs_path
from pynini.examples import plurals
from pynini.lib import pynutil
class CardinalFst(GraphFst):
"""
Finite state transducer for classifying cardinals, e.g.
-23 -> cardinal { negative: "true" integer: "twenty three" }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True, lm: bool = False):
super().__init__(name="cardinal", kind="classify", deterministic=deterministic)
self.lm = lm
self.deterministic = deterministic
# TODO: replace so that "oh" is the default reading of "0"
graph = pynini.Far(get_abs_path("data/number/cardinal_number_name.far")).get_fst()
graph_au = pynini.Far(get_abs_path("data/number/cardinal_number_name_au.far")).get_fst()
self.graph_hundred_component_at_least_one_none_zero_digit = (
pynini.closure(NEMO_DIGIT, 2, 3) | pynini.difference(NEMO_DIGIT, pynini.accep("0"))
) @ graph
graph_digit = pynini.string_file(get_abs_path("data/number/digit.tsv"))
graph_zero = pynini.string_file(get_abs_path("data/number/zero.tsv"))
single_digits_graph = pynini.invert(graph_digit | graph_zero)
self.single_digits_graph = single_digits_graph + pynini.closure(insert_space + single_digits_graph)
if not deterministic:
# for a single token allow only the same normalization
# "007" -> {"oh oh seven", "zero zero seven"} not {"oh zero seven"}
single_digits_graph_zero = pynini.invert(graph_digit | graph_zero)
single_digits_graph_oh = pynini.invert(graph_digit) | pynini.cross("0", "oh")
self.single_digits_graph = single_digits_graph_zero + pynini.closure(
insert_space + single_digits_graph_zero
)
self.single_digits_graph |= single_digits_graph_oh + pynini.closure(insert_space + single_digits_graph_oh)
single_digits_graph_with_commas = pynini.closure(
self.single_digits_graph + insert_space, 1, 3
) + pynini.closure(
pynutil.delete(",")
+ single_digits_graph
+ insert_space
+ single_digits_graph
+ insert_space
+ single_digits_graph,
1,
)
optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
graph = (
pynini.closure(NEMO_DIGIT, 1, 3)
+ (pynini.closure(pynutil.delete(",") + NEMO_DIGIT ** 3) | pynini.closure(NEMO_DIGIT ** 3))
) @ graph
self.graph = graph
self.graph_with_and = self.add_optional_and(graph)
if deterministic:
long_numbers = pynini.compose(NEMO_DIGIT ** (5, ...), self.single_digits_graph).optimize()
self.long_numbers = plurals._priority_union(long_numbers, self.graph_with_and, NEMO_SIGMA).optimize()
cardinal_with_leading_zeros = pynini.compose(
pynini.accep("0") + pynini.closure(NEMO_DIGIT), self.single_digits_graph
)
final_graph = self.long_numbers | cardinal_with_leading_zeros
final_graph |= self.add_optional_and(graph_au)
else:
leading_zeros = pynini.compose(pynini.closure(pynini.accep("0"), 1), self.single_digits_graph)
cardinal_with_leading_zeros = (
leading_zeros + pynutil.insert(" ") + pynini.compose(pynini.closure(NEMO_DIGIT), self.graph_with_and)
)
self.long_numbers = self.graph_with_and | pynutil.add_weight(self.single_digits_graph, 0.0001)
# add small weight to non-default graphs to make sure the deterministic option is listed first
final_graph = (
self.long_numbers
| get_four_digit_year_graph() # allows e.g. 4567 to be pronounced as forty five sixty seven
| pynutil.add_weight(single_digits_graph_with_commas, 0.0001)
| cardinal_with_leading_zeros
).optimize()
one_to_a_replacement_graph = (
pynini.cross("one hundred", "a hundred")
| pynini.cross("one thousand", "thousand")
| pynini.cross("one million", "a million")
)
final_graph |= pynini.compose(final_graph, one_to_a_replacement_graph.optimize() + NEMO_SIGMA).optimize()
# remove commas from 4-digit numbers
four_digit_comma_graph = (NEMO_DIGIT - "0") + pynutil.delete(",") + NEMO_DIGIT ** 3
final_graph |= pynini.compose(four_digit_comma_graph.optimize(), final_graph).optimize()
self.final_graph = final_graph
final_graph = optional_minus_graph + pynutil.insert("integer: \"") + final_graph + pynutil.insert("\"")
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
def add_optional_and(self, graph):
graph_with_and = graph
if not self.lm:
graph_with_and = pynutil.add_weight(graph, 0.00001)
not_quote = pynini.closure(NEMO_NOT_QUOTE)
no_thousand_million = pynini.difference(
not_quote, not_quote + pynini.union("thousand", "million") + not_quote
).optimize()
integer = (
not_quote + pynutil.add_weight(pynini.cross("hundred ", "hundred and ") + no_thousand_million, -0.0001)
).optimize()
no_hundred = pynini.difference(NEMO_SIGMA, not_quote + pynini.accep("hundred") + not_quote).optimize()
integer |= (
not_quote + pynutil.add_weight(pynini.cross("thousand ", "thousand and ") + no_hundred, -0.0001)
).optimize()
optional_hundred = pynini.compose((NEMO_DIGIT - "0") ** 3, graph).optimize()
optional_hundred = pynini.compose(optional_hundred, NEMO_SIGMA + pynini.cross(" hundred", "") + NEMO_SIGMA)
graph_with_and |= pynini.compose(graph, integer).optimize()
graph_with_and |= optional_hundred
return graph_with_and
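if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). Assumes the
    # package data files (.tsv/.far) referenced above are available.
    from pynini.lib import rewrite

    cardinal = CardinalFst(deterministic=True)
    # Mirrors the class docstring:
    # "-23" -> 'cardinal { negative: "true" integer: "twenty three" }'
    print(rewrite.top_rewrite("-23", cardinal.fst))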
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/cardinal.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.abbreviation import AbbreviationFst
from nemo_text_processing.text_normalization.en.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.en.taggers.date import DateFst
from nemo_text_processing.text_normalization.en.taggers.decimal import DecimalFst
from nemo_text_processing.text_normalization.en.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.en.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.en.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.en.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.en.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.en.taggers.range import RangeFst as RangeFst
from nemo_text_processing.text_normalization.en.taggers.roman import RomanFst
from nemo_text_processing.text_normalization.en.taggers.serial import SerialFst
from nemo_text_processing.text_normalization.en.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.en.taggers.time import TimeFst
from nemo_text_processing.text_normalization.en.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.verbalizers.abbreviation import AbbreviationFst as vAbbreviation
from nemo_text_processing.text_normalization.en.verbalizers.cardinal import CardinalFst as vCardinal
from nemo_text_processing.text_normalization.en.verbalizers.date import DateFst as vDate
from nemo_text_processing.text_normalization.en.verbalizers.decimal import DecimalFst as vDecimal
from nemo_text_processing.text_normalization.en.verbalizers.electronic import ElectronicFst as vElectronic
from nemo_text_processing.text_normalization.en.verbalizers.fraction import FractionFst as vFraction
from nemo_text_processing.text_normalization.en.verbalizers.measure import MeasureFst as vMeasure
from nemo_text_processing.text_normalization.en.verbalizers.money import MoneyFst as vMoney
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst as vOrdinal
from nemo_text_processing.text_normalization.en.verbalizers.roman import RomanFst as vRoman
from nemo_text_processing.text_normalization.en.verbalizers.telephone import TelephoneFst as vTelephone
from nemo_text_processing.text_normalization.en.verbalizers.time import TimeFst as vTime
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst as vWord
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
"""
Final class that composes all other classification grammars. This class can process an entire sentence, including punctuation.
For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
More details on deployment at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options are generated (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str,
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = True,
whitelist: str = None,
):
super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != 'None':
os.makedirs(cache_dir, exist_ok=True)
whitelist_file = os.path.basename(whitelist) if whitelist else ""
far_file = os.path.join(
cache_dir, f"_{input_case}_en_tn_{deterministic}_deterministic{whitelist_file}.far"
)
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode='r')['tokenize_and_classify']
logging.info(f'ClassifyFst.fst was restored from {far_file}.')
else:
logging.info(f'Creating ClassifyFst grammars. This might take some time...')
# TAGGERS
cardinal = CardinalFst(deterministic=deterministic)
cardinal_graph = cardinal.fst
ordinal = OrdinalFst(cardinal=cardinal, deterministic=deterministic)
deterministic_ordinal = OrdinalFst(cardinal=cardinal, deterministic=True)
ordinal_graph = ordinal.fst
decimal = DecimalFst(cardinal=cardinal, deterministic=deterministic)
decimal_graph = decimal.fst
fraction = FractionFst(deterministic=deterministic, cardinal=cardinal)
fraction_graph = fraction.fst
measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)
measure_graph = measure.fst
date_graph = DateFst(cardinal=cardinal, deterministic=deterministic).fst
punctuation = PunctuationFst(deterministic=True)
punct_graph = punctuation.graph
word_graph = WordFst(punctuation=punctuation, deterministic=deterministic).graph
time_graph = TimeFst(cardinal=cardinal, deterministic=deterministic).fst
telephone_graph = TelephoneFst(deterministic=deterministic).fst
electronic_graph = ElectronicFst(cardinal=cardinal, deterministic=deterministic).fst
money_graph = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=deterministic).fst
whitelist = WhiteListFst(input_case=input_case, deterministic=deterministic, input_file=whitelist)
whitelist_graph = whitelist.graph
serial_graph = SerialFst(cardinal=cardinal, ordinal=deterministic_ordinal, deterministic=deterministic).fst
# VERBALIZERS
cardinal = vCardinal(deterministic=deterministic)
v_cardinal_graph = cardinal.fst
decimal = vDecimal(cardinal=cardinal, deterministic=deterministic)
v_decimal_graph = decimal.fst
ordinal = vOrdinal(deterministic=deterministic)
v_ordinal_graph = ordinal.fst
fraction = vFraction(deterministic=deterministic)
v_fraction_graph = fraction.fst
v_telephone_graph = vTelephone(deterministic=deterministic).fst
v_electronic_graph = vElectronic(deterministic=deterministic).fst
measure = vMeasure(decimal=decimal, cardinal=cardinal, fraction=fraction, deterministic=deterministic)
v_measure_graph = measure.fst
v_time_graph = vTime(deterministic=deterministic).fst
v_date_graph = vDate(ordinal=ordinal, deterministic=deterministic).fst
v_money_graph = vMoney(decimal=decimal, deterministic=deterministic).fst
v_roman_graph = vRoman(deterministic=deterministic).fst
v_abbreviation = vAbbreviation(deterministic=deterministic).fst
det_v_time_graph = vTime(deterministic=True).fst
det_v_date_graph = vDate(ordinal=vOrdinal(deterministic=True), deterministic=True).fst
time_final = pynini.compose(time_graph, det_v_time_graph)
date_final = pynini.compose(date_graph, det_v_date_graph)
range_graph = RangeFst(
time=time_final, date=date_final, cardinal=CardinalFst(deterministic=True), deterministic=deterministic
).fst
v_word_graph = vWord(deterministic=deterministic).fst
sem_w = 1
word_w = 100
punct_w = 2
classify_and_verbalize = (
pynutil.add_weight(whitelist_graph, sem_w)
| pynutil.add_weight(pynini.compose(time_graph, v_time_graph), sem_w)
| pynutil.add_weight(pynini.compose(decimal_graph, v_decimal_graph), sem_w)
| pynutil.add_weight(pynini.compose(measure_graph, v_measure_graph), sem_w)
| pynutil.add_weight(pynini.compose(cardinal_graph, v_cardinal_graph), sem_w)
| pynutil.add_weight(pynini.compose(ordinal_graph, v_ordinal_graph), sem_w)
| pynutil.add_weight(pynini.compose(telephone_graph, v_telephone_graph), sem_w)
| pynutil.add_weight(pynini.compose(electronic_graph, v_electronic_graph), sem_w)
| pynutil.add_weight(pynini.compose(fraction_graph, v_fraction_graph), sem_w)
| pynutil.add_weight(pynini.compose(money_graph, v_money_graph), sem_w)
| pynutil.add_weight(word_graph, word_w)
| pynutil.add_weight(pynini.compose(date_graph, v_date_graph), sem_w - 0.01)
| pynutil.add_weight(pynini.compose(range_graph, v_word_graph), sem_w)
| pynutil.add_weight(
pynini.compose(serial_graph, v_word_graph), 1.1001
) # should be higher than the rest of the classes
).optimize()
if not deterministic:
roman_graph = RomanFst(deterministic=deterministic).fst
# the weight matches the word_graph weight for "I" cases in long sentences with multiple semiotic tokens
classify_and_verbalize |= pynutil.add_weight(pynini.compose(roman_graph, v_roman_graph), word_w)
abbreviation_graph = AbbreviationFst(whitelist=whitelist, deterministic=deterministic).fst
classify_and_verbalize |= pynutil.add_weight(
pynini.compose(abbreviation_graph, v_abbreviation), word_w
)
punct_only = pynutil.add_weight(punct_graph, weight=punct_w)
punct = pynini.closure(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct_only),
1,
)
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" "))
+ classify_and_verbalize
+ pynini.closure(pynutil.insert(" ") + punct)
)
graph = token_plus_punct + pynini.closure(
(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct + pynutil.insert(" "))
)
+ token_plus_punct
)
graph |= punct_only + pynini.closure(punct)
graph = delete_space + graph + delete_space
remove_extra_spaces = pynini.closure(NEMO_NOT_SPACE, 1) + pynini.closure(
delete_extra_space + pynini.closure(NEMO_NOT_SPACE, 1)
)
remove_extra_spaces |= (
pynini.closure(pynutil.delete(" "), 1)
+ pynini.closure(NEMO_NOT_SPACE, 1)
+ pynini.closure(delete_extra_space + pynini.closure(NEMO_NOT_SPACE, 1))
)
graph = pynini.compose(graph.optimize(), remove_extra_spaces).optimize()
self.fst = graph
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f'ClassifyFst grammars are saved to {far_file}.')
# to remove normalization options that still contain digits and some special symbols
# e.g., "P&E" -> {P and E, P&E}, "P & E" will be removed from the list of normalization options
no_digits = pynini.closure(pynini.difference(NEMO_CHAR, pynini.union(NEMO_DIGIT, "&")))
self.fst_no_digits = pynini.compose(self.fst, no_digits).optimize()
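if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): build the
    # grammar without caching and list the normalization options for a short
    # input. Compiling all component grammars can take several minutes.
    from pynini.lib import rewrite

    classify = ClassifyFst(input_case="cased", deterministic=False, cache_dir=None)
    # rewrite.rewrites returns all output strings of the composed FST
    print(rewrite.rewrites("123", classify.fst))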
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/tokenize_and_classify_with_audio.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
GraphFst,
convert_space,
)
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
from pynini.examples import plurals
from pynini.lib import pynutil
class SerialFst(GraphFst):
"""
Finite state transducer for classifying serial numbers.
Args:
cardinal: cardinal tagger (CardinalFst)
ordinal: ordinal tagger (OrdinalFst)
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
lm: whether the grammar is used for hybrid LM normalization
"""
def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True, lm: bool = False):
super().__init__(name="integer", kind="classify", deterministic=deterministic)
"""
Finite state transducer for classifying serial (handles only cases without delimiters,
values with delimiters are handled by default).
The serial is a combination of digits, letters and dashes, e.g.:
c325b -> tokens { cardinal { integer: "c three two five b" } }
"""
if deterministic:
num_graph = pynini.compose(NEMO_DIGIT ** (6, ...), cardinal.single_digits_graph).optimize()
num_graph |= pynini.compose(NEMO_DIGIT ** (1, 5), cardinal.graph).optimize()
# to handle numbers starting with zero
num_graph |= pynini.compose(
pynini.accep("0") + pynini.closure(NEMO_DIGIT), cardinal.single_digits_graph
).optimize()
else:
num_graph = cardinal.final_graph
# TODO: "#" doesn't work from the file
symbols_graph = pynini.string_file(get_abs_path("data/whitelist/symbol.tsv")).optimize() | pynini.cross(
"#", "hash"
)
num_graph |= symbols_graph
if not self.deterministic and not lm:
num_graph |= cardinal.single_digits_graph
num_graph |= pynini.compose(num_graph, NEMO_SIGMA + pynutil.delete("hundred ") + NEMO_SIGMA)
# also allow double digits to be pronounced as an integer in serial numbers
num_graph |= pynutil.add_weight(
NEMO_DIGIT ** 2 @ cardinal.graph_hundred_component_at_least_one_none_zero_digit, weight=0.0001
)
# add space between letter and digit/symbol
symbols = [x[0] for x in load_labels(get_abs_path("data/whitelist/symbol.tsv"))]
symbols = pynini.union(*symbols)
digit_symbol = NEMO_DIGIT | symbols
graph_with_space = pynini.compose(
pynini.cdrewrite(pynutil.insert(" "), NEMO_ALPHA | symbols, digit_symbol, NEMO_SIGMA),
pynini.cdrewrite(pynutil.insert(" "), digit_symbol, NEMO_ALPHA | symbols, NEMO_SIGMA),
)
# serial graph with delimiter
delimiter = pynini.accep("-") | pynini.accep("/") | pynini.accep(" ")
if not deterministic:
delimiter |= pynini.cross("-", " dash ") | pynini.cross("/", " slash ")
alphas = pynini.closure(NEMO_ALPHA, 1)
letter_num = alphas + delimiter + num_graph
num_letter = pynini.closure(num_graph + delimiter, 1) + alphas
next_alpha_or_num = pynini.closure(delimiter + (alphas | num_graph))
next_alpha_or_num |= pynini.closure(
delimiter
+ num_graph
+ plurals._priority_union(pynini.accep(" "), pynutil.insert(" "), NEMO_SIGMA).optimize()
+ alphas
)
serial_graph = letter_num + next_alpha_or_num
serial_graph |= num_letter + next_alpha_or_num
# numbers only with 2+ delimiters
serial_graph |= (
num_graph + delimiter + num_graph + delimiter + num_graph + pynini.closure(delimiter + num_graph)
)
# 2+ symbols
serial_graph |= pynini.compose(NEMO_SIGMA + symbols + NEMO_SIGMA, num_graph + delimiter + num_graph)
# exclude ordinal numbers from serial options
serial_graph = pynini.compose(
pynini.difference(NEMO_SIGMA, pynini.project(ordinal.graph, "input")), serial_graph
).optimize()
serial_graph = pynutil.add_weight(serial_graph, 0.0001)
serial_graph |= (
pynini.closure(NEMO_NOT_SPACE, 1)
+ (pynini.cross("^2", " squared") | pynini.cross("^3", " cubed")).optimize()
)
# at least one serial graph with alpha numeric value and optional additional serial/num/alpha values
serial_graph = (
pynini.closure((serial_graph | num_graph | alphas) + delimiter)
+ serial_graph
+ pynini.closure(delimiter + (serial_graph | num_graph | alphas))
)
serial_graph |= pynini.compose(graph_with_space, serial_graph.optimize()).optimize()
serial_graph = pynini.compose(pynini.closure(NEMO_NOT_SPACE, 2), serial_graph).optimize()
# this is not to verbalize "/" as "slash" in cases like "import/export"
serial_graph = pynini.compose(
pynini.difference(
NEMO_SIGMA, pynini.closure(NEMO_ALPHA, 1) + pynini.accep("/") + pynini.closure(NEMO_ALPHA, 1)
),
serial_graph,
)
self.graph = serial_graph.optimize()
graph = pynutil.insert("name: \"") + convert_space(self.graph).optimize() + pynutil.insert("\"")
self.fst = graph.optimize()
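if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module). SerialFst needs
    # CardinalFst and OrdinalFst tagger instances, mirroring how
    # tokenize_and_classify wires them together.
    from pynini.lib import rewrite
    from nemo_text_processing.text_normalization.en.taggers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.en.taggers.ordinal import OrdinalFst

    cardinal = CardinalFst(deterministic=True)
    ordinal = OrdinalFst(cardinal=cardinal, deterministic=True)
    serial = SerialFst(cardinal=cardinal, ordinal=ordinal, deterministic=True)
    # The class docstring gives "c325b" -> "c three two five b"; the fst
    # emits it as 'name: "..."' (see the last lines of __init__ above).
    print(rewrite.top_rewrite("c325b", serial.fst))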
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/serial.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
MIN_NEG_WEIGHT,
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
NEMO_UPPER,
TO_UPPER,
GraphFst,
get_abs_path,
insert_space,
)
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for classifying electronic tokens: URLs, email addresses, etc.
e.g. cdf1@abc.edu -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
Args:
cardinal: CardinalFst
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, deterministic: bool = True):
super().__init__(name="electronic", kind="classify", deterministic=deterministic)
if deterministic:
numbers = NEMO_DIGIT
else:
numbers = pynutil.insert(" ") + cardinal.long_numbers + pynutil.insert(" ")
accepted_symbols = pynini.project(pynini.string_file(get_abs_path("data/electronic/symbol.tsv")), "input")
accepted_common_domains = pynini.project(
pynini.string_file(get_abs_path("data/electronic/domain.tsv")), "input"
)
dict_words = pynutil.add_weight(pynini.string_file(get_abs_path("data/electronic/words.tsv")), MIN_NEG_WEIGHT)
dict_words_without_delimiter = dict_words + pynini.closure(
pynutil.add_weight(pynutil.insert(" ") + dict_words, MIN_NEG_WEIGHT), 1
)
dict_words_graph = dict_words_without_delimiter | dict_words
all_accepted_symbols_start = (
dict_words_graph | pynini.closure(TO_UPPER) | pynini.closure(NEMO_UPPER) | accepted_symbols
).optimize()
all_accepted_symbols_end = (
dict_words_graph | numbers | pynini.closure(TO_UPPER) | pynini.closure(NEMO_UPPER) | accepted_symbols
).optimize()
graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbol.tsv")).optimize()
username = (NEMO_ALPHA | dict_words_graph) + pynini.closure(
NEMO_ALPHA | numbers | accepted_symbols | dict_words_graph
)
username = pynutil.insert("username: \"") + username + pynutil.insert("\"") + pynini.cross('@', ' ')
domain_graph = all_accepted_symbols_start + pynini.closure(
all_accepted_symbols_end | pynutil.add_weight(accepted_common_domains, MIN_NEG_WEIGHT)
)
protocol_symbols = pynini.closure((graph_symbols | pynini.cross(":", "colon")) + pynutil.insert(" "))
protocol_start = (pynini.cross("https", "HTTPS ") | pynini.cross("http", "HTTP ")) + (
pynini.accep("://") @ protocol_symbols
)
protocol_file_start = pynini.accep("file") + insert_space + (pynini.accep(":///") @ protocol_symbols)
protocol_end = pynutil.add_weight(pynini.cross("www", "WWW ") + pynini.accep(".") @ protocol_symbols, -1000)
protocol = protocol_file_start | protocol_start | protocol_end | (protocol_start + protocol_end)
domain_graph_with_class_tags = (
pynutil.insert("domain: \"")
+ pynini.compose(
NEMO_ALPHA + pynini.closure(NEMO_NOT_SPACE) + (NEMO_ALPHA | NEMO_DIGIT | pynini.accep("/")),
domain_graph,
).optimize()
+ pynutil.insert("\"")
)
protocol = pynutil.insert("protocol: \"") + pynutil.add_weight(protocol, MIN_NEG_WEIGHT) + pynutil.insert("\"")
# email
graph = pynini.compose(
NEMO_SIGMA + pynini.accep("@") + NEMO_SIGMA + pynini.accep(".") + NEMO_SIGMA,
username + domain_graph_with_class_tags,
)
# abc.com, abc.com/123-sm
# when only a domain is given, make sure it starts and ends with NEMO_ALPHA
graph |= (
pynutil.insert("domain: \"")
+ pynini.compose(
NEMO_ALPHA + pynini.closure(NEMO_NOT_SPACE) + accepted_common_domains + pynini.closure(NEMO_NOT_SPACE),
domain_graph,
).optimize()
+ pynutil.insert("\"")
)
# www.abc.com/sdafsdf, or https://www.abc.com/asdfad or www.abc.abc/asdfad
graph |= protocol + pynutil.insert(" ") + domain_graph_with_class_tags
final_graph = self.add_tokens(graph)
self.fst = final_graph.optimize()
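if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): tag an email
    # address. A CardinalFst instance is required for the non-deterministic
    # number readings.
    from pynini.lib import rewrite
    from nemo_text_processing.text_normalization.en.taggers.cardinal import CardinalFst

    electronic = ElectronicFst(cardinal=CardinalFst(deterministic=True), deterministic=True)
    # e.g. "cdf1@abc.edu" -> 'electronic { username: "cdf1" domain: "abc.edu" }'
    print(rewrite.top_rewrite("cdf1@abc.edu", electronic.fst))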
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/electronic.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
NEMO_LOWER,
NEMO_SIGMA,
TO_LOWER,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.en.utils import (
augment_labels_with_punct_at_end,
get_abs_path,
load_labels,
)
from pynini.examples import plurals
from pynini.lib import pynutil
graph_teen = pynini.invert(pynini.string_file(get_abs_path("data/number/teen.tsv"))).optimize()
graph_digit = pynini.invert(pynini.string_file(get_abs_path("data/number/digit.tsv"))).optimize()
ties_graph = pynini.invert(pynini.string_file(get_abs_path("data/number/ty.tsv"))).optimize()
year_suffix = load_labels(get_abs_path("data/date/year_suffix.tsv"))
year_suffix.extend(augment_labels_with_punct_at_end(year_suffix))
year_suffix = pynini.string_map(year_suffix).optimize()
def get_ties_graph(deterministic: bool = True):
"""
Returns a two-digit transducer, e.g.
03 -> oh three
12 -> twelve
20 -> twenty
"""
graph = graph_teen | ties_graph + pynutil.delete("0") | ties_graph + insert_space + graph_digit
if deterministic:
graph = graph | pynini.cross("0", "oh") + insert_space + graph_digit
else:
graph = graph | (pynini.cross("0", "oh") | pynini.cross("0", "zero")) + insert_space + graph_digit
return graph.optimize()
def get_four_digit_year_graph(deterministic: bool = True):
"""
Returns a four-digit transducer which is a combination of ties/teens or digits
(using hundred instead of thousand format), e.g.
1219 -> twelve nineteen
3900 -> thirty nine hundred
"""
graph_ties = get_ties_graph(deterministic)
graph_with_s = (
(graph_ties + insert_space + graph_ties)
| (graph_teen + insert_space + (ties_graph | pynini.cross("1", "ten")))
) + pynutil.delete("0s")
graph_with_s |= (graph_teen | graph_ties) + insert_space + pynini.cross("00", "hundred") + pynutil.delete("s")
graph_with_s = graph_with_s @ pynini.cdrewrite(
pynini.cross("y", "ies") | pynutil.insert("s"), "", "[EOS]", NEMO_SIGMA
)
graph = graph_ties + insert_space + graph_ties
graph |= (graph_teen | graph_ties) + insert_space + pynini.cross("00", "hundred")
thousand_graph = (
graph_digit
+ insert_space
+ pynini.cross("00", "thousand")
+ (pynutil.delete("0") | insert_space + graph_digit)
)
thousand_graph |= (
graph_digit
+ insert_space
+ pynini.cross("000", "thousand")
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynini.accep("s")
)
graph |= graph_with_s
if deterministic:
graph = plurals._priority_union(thousand_graph, graph, NEMO_SIGMA)
else:
graph |= thousand_graph
return graph.optimize()
def _get_two_digit_year_with_s_graph():
# to handle '70s -> seventies
graph = (
pynini.closure(pynutil.delete("'"), 0, 1)
+ pynini.compose(
ties_graph + pynutil.delete("0s"), pynini.cdrewrite(pynini.cross("y", "ies"), "", "[EOS]", NEMO_SIGMA)
)
).optimize()
return graph
def _get_year_graph(cardinal_graph, deterministic: bool = True):
"""
Transducer for years from 1000 to 2999, e.g.
1290 -> twelve ninety
2000 - 2009 are verbalized as "two thousand ...".
Transducer for 3-digit years, e.g. 123 -> one twenty three
Transducer for years with a suffix, e.g.
123 A.D., 4200 B.C.
"""
graph = get_four_digit_year_graph(deterministic)
graph = (pynini.union("1", "2") + (NEMO_DIGIT ** 3) + pynini.closure(pynini.cross(" s", "s") | "s", 0, 1)) @ graph
graph |= _get_two_digit_year_with_s_graph()
three_digit_year = (NEMO_DIGIT @ cardinal_graph) + insert_space + (NEMO_DIGIT ** 2) @ cardinal_graph
year_with_suffix = (
(get_four_digit_year_graph(deterministic=True) | three_digit_year) + delete_space + insert_space + year_suffix
)
graph |= year_with_suffix
return graph.optimize()
def _get_two_digit_year(cardinal_graph, single_digits_graph):
two_digit_year = NEMO_DIGIT ** (2) @ plurals._priority_union(cardinal_graph, single_digits_graph, NEMO_SIGMA)
return two_digit_year
def _get_financial_period_graph():
# 1H23 -> first half of twenty three
# 3Q22 -> third quarter of twenty two
h_ordinals = pynini.cross('1', 'first') | pynini.cross('2', 'second')
q_ordinals = h_ordinals | pynini.cross('3', 'third') | pynini.cross('4', 'fourth')
h_graph = h_ordinals + pynini.cross('H', ' half')
q_graph = q_ordinals + pynini.cross('Q', ' quarter')
period_graph = h_graph | q_graph
return period_graph
class DateFst(GraphFst):
"""
Finite state transducer for classifying date, e.g.
jan. 5, 2012 -> date { month: "january" day: "five" year: "twenty twelve" preserve_order: true }
jan. 5 -> date { month: "january" day: "five" preserve_order: true }
5 january 2012 -> date { day: "five" month: "january" year: "twenty twelve" preserve_order: true }
2012-01-05 -> date { year: "twenty twelve" month: "january" day: "five" }
2012.01.05 -> date { year: "twenty twelve" month: "january" day: "five" }
2012/01/05 -> date { year: "twenty twelve" month: "january" day: "five" }
2012 -> date { year: "twenty twelve" }
Args:
cardinal: CardinalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, deterministic: bool, lm: bool = False):
super().__init__(name="date", kind="classify", deterministic=deterministic)
# january
month_graph = pynini.string_file(get_abs_path("data/date/month_name.tsv")).optimize()
# January, JANUARY
month_graph |= pynini.compose(TO_LOWER + pynini.closure(NEMO_CHAR), month_graph) | pynini.compose(
TO_LOWER ** (2, ...), month_graph
)
# jan
month_abbr_graph = pynini.string_file(get_abs_path("data/date/month_abbr.tsv")).optimize()
# jan, Jan, JAN
month_abbr_graph = (
month_abbr_graph
| pynini.compose(TO_LOWER + pynini.closure(NEMO_LOWER, 1), month_abbr_graph).optimize()
| pynini.compose(TO_LOWER ** (2, ...), month_abbr_graph).optimize()
) + pynini.closure(pynutil.delete("."), 0, 1)
month_graph |= month_abbr_graph.optimize()
month_numbers_labels = pynini.string_file(get_abs_path("data/date/month_number.tsv")).optimize()
cardinal_graph = cardinal.graph_hundred_component_at_least_one_none_zero_digit
year_graph = _get_year_graph(cardinal_graph=cardinal_graph, deterministic=deterministic)
# three_digit_year = (NEMO_DIGIT @ cardinal_graph) + insert_space + (NEMO_DIGIT ** 2) @ cardinal_graph
# year_graph |= three_digit_year
month_graph = pynutil.insert("month: \"") + month_graph + pynutil.insert("\"")
month_numbers_graph = pynutil.insert("month: \"") + month_numbers_labels + pynutil.insert("\"")
endings = ["rd", "th", "st", "nd"]
endings += [x.upper() for x in endings]
endings = pynini.union(*endings)
day_graph = (
pynutil.insert("day: \"")
+ pynini.closure(pynutil.delete("the "), 0, 1)
+ (
((pynini.union("1", "2") + NEMO_DIGIT) | NEMO_DIGIT | (pynini.accep("3") + pynini.union("0", "1")))
+ pynini.closure(pynutil.delete(endings), 0, 1)
)
@ cardinal_graph
+ pynutil.insert("\"")
)
two_digit_year = _get_two_digit_year(
cardinal_graph=cardinal_graph, single_digits_graph=cardinal.single_digits_graph
)
two_digit_year = pynutil.insert("year: \"") + two_digit_year + pynutil.insert("\"")
# if lm:
# two_digit_year = pynini.compose(pynini.difference(NEMO_DIGIT, "0") + NEMO_DIGIT ** (3), two_digit_year)
# year_graph = pynini.compose(pynini.difference(NEMO_DIGIT, "0") + NEMO_DIGIT ** (2), year_graph)
# year_graph |= pynini.compose(pynini.difference(NEMO_DIGIT, "0") + NEMO_DIGIT ** (4, ...), year_graph)
graph_year = pynutil.insert(" year: \"") + pynutil.delete(" ") + year_graph + pynutil.insert("\"")
graph_year |= (
pynutil.insert(" year: \"")
+ pynini.accep(",")
+ pynini.closure(pynini.accep(" "), 0, 1)
+ year_graph
+ pynutil.insert("\"")
)
optional_graph_year = pynini.closure(graph_year, 0, 1)
year_graph = pynutil.insert("year: \"") + year_graph + pynutil.insert("\"")
graph_mdy = month_graph + (
(delete_extra_space + day_graph)
| (pynini.accep(" ") + day_graph)
| graph_year
| (delete_extra_space + day_graph + graph_year)
)
graph_mdy |= (
month_graph
+ pynini.cross("-", " ")
+ day_graph
+ pynini.closure(((pynini.cross("-", " ") + NEMO_SIGMA) @ graph_year), 0, 1)
)
for x in ["-", "/", "."]:
delete_sep = pynutil.delete(x)
graph_mdy |= (
month_numbers_graph
+ delete_sep
+ insert_space
+ pynini.closure(pynutil.delete("0"), 0, 1)
+ day_graph
+ delete_sep
+ insert_space
+ (year_graph | two_digit_year)
)
graph_dmy = day_graph + delete_extra_space + month_graph + optional_graph_year
day_ex_month = (NEMO_DIGIT ** 2 - pynini.project(month_numbers_graph, "input")) @ day_graph
for x in ["-", "/", "."]:
delete_sep = pynutil.delete(x)
graph_dmy |= (
day_ex_month
+ delete_sep
+ insert_space
+ month_numbers_graph
+ delete_sep
+ insert_space
+ (year_graph | two_digit_year)
)
graph_ymd = pynini.accep("")
for x in ["-", "/", "."]:
delete_sep = pynutil.delete(x)
graph_ymd |= (
(year_graph | two_digit_year)
+ delete_sep
+ insert_space
+ month_numbers_graph
+ delete_sep
+ insert_space
+ pynini.closure(pynutil.delete("0"), 0, 1)
+ day_graph
)
final_graph = graph_mdy | graph_dmy
if not deterministic or lm:
final_graph += pynini.closure(pynutil.insert(" preserve_order: true"), 0, 1)
m_sep_d = (
month_numbers_graph
+ pynutil.delete(pynini.union("-", "/"))
+ insert_space
+ pynini.closure(pynutil.delete("0"), 0, 1)
+ day_graph
)
final_graph |= m_sep_d
else:
final_graph += pynutil.insert(" preserve_order: true")
period_fy = pynutil.insert("text: \"") + _get_financial_period_graph() + pynutil.insert("\"")
graph_fy = period_fy + insert_space + two_digit_year
final_graph |= graph_ymd | year_graph | graph_fy
if not deterministic or lm:
ymd_to_mdy_graph = None
ymd_to_dmy_graph = None
mdy_to_dmy_graph = None
md_to_dm_graph = None
for month in [x[0] for x in load_labels(get_abs_path("data/date/month_name.tsv"))]:
for day in [x[0] for x in load_labels(get_abs_path("data/date/day.tsv"))]:
ymd_to_mdy_curr = (
pynutil.insert("month: \"" + month + "\" day: \"" + day + "\" ")
+ pynini.accep('year:')
+ NEMO_SIGMA
+ pynutil.delete(" month: \"" + month + "\" day: \"" + day + "\"")
)
# YY-MM-DD -> MM-DD-YY
ymd_to_mdy_curr = pynini.compose(graph_ymd, ymd_to_mdy_curr)
ymd_to_mdy_graph = (
ymd_to_mdy_curr
if ymd_to_mdy_graph is None
else pynini.union(ymd_to_mdy_curr, ymd_to_mdy_graph)
)
ymd_to_dmy_curr = (
pynutil.insert("day: \"" + day + "\" month: \"" + month + "\" ")
+ pynini.accep('year:')
+ NEMO_SIGMA
+ pynutil.delete(" month: \"" + month + "\" day: \"" + day + "\"")
)
# YY-MM-DD -> DD-MM-YY
ymd_to_dmy_curr = pynini.compose(graph_ymd, ymd_to_dmy_curr).optimize()
ymd_to_dmy_graph = (
ymd_to_dmy_curr
if ymd_to_dmy_graph is None
else pynini.union(ymd_to_dmy_curr, ymd_to_dmy_graph)
)
mdy_to_dmy_curr = (
pynutil.insert("day: \"" + day + "\" month: \"" + month + "\" ")
+ pynutil.delete("month: \"" + month + "\" day: \"" + day + "\" ")
+ pynini.accep('year:')
+ NEMO_SIGMA
).optimize()
# MM-DD-YY -> verbalize as MM-DD-YY (February fourth 1991) or DD-MM-YY (the fourth of February 1991)
mdy_to_dmy_curr = pynini.compose(graph_mdy, mdy_to_dmy_curr).optimize()
mdy_to_dmy_graph = (
mdy_to_dmy_curr
if mdy_to_dmy_graph is None
else pynini.union(mdy_to_dmy_curr, mdy_to_dmy_graph).optimize()
).optimize()
md_to_dm_curr = pynutil.insert("day: \"" + day + "\" month: \"" + month + "\"") + pynutil.delete(
"month: \"" + month + "\" day: \"" + day + "\""
)
md_to_dm_curr = pynini.compose(m_sep_d, md_to_dm_curr).optimize()
md_to_dm_graph = (
md_to_dm_curr
if md_to_dm_graph is None
else pynini.union(md_to_dm_curr, md_to_dm_graph).optimize()
).optimize()
final_graph |= mdy_to_dmy_graph | md_to_dm_graph | ymd_to_mdy_graph | ymd_to_dmy_graph
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
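if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): tag a date
    # string; a CardinalFst tagger is required for the day/year readings.
    from pynini.lib import rewrite
    from nemo_text_processing.text_normalization.en.taggers.cardinal import CardinalFst

    date = DateFst(cardinal=CardinalFst(deterministic=True), deterministic=True)
    # Mirrors the class docstring: "jan. 5, 2012" ->
    # 'date { month: "january" day: "five" year: "twenty twelve" preserve_order: true }'
    print(rewrite.top_rewrite("jan. 5, 2012", date.fst))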
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/date.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
MIN_NEG_WEIGHT,
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
GraphFst,
convert_space,
get_abs_path,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from pynini.examples import plurals
from pynini.lib import pynutil
class WordFst(GraphFst):
"""
Finite state transducer for classifying words. Considers sentence boundary exceptions.
e.g. sleep -> tokens { name: "sleep" }
Args:
punctuation: PunctuationFst
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, punctuation: GraphFst, deterministic: bool = True):
super().__init__(name="word", kind="classify", deterministic=deterministic)
punct = PunctuationFst().graph
default_graph = pynini.closure(pynini.difference(NEMO_NOT_SPACE, punct.project("input")), 1)
symbols_to_exclude = (pynini.union("$", "€", "₩", "£", "¥", "#", "%") | NEMO_DIGIT).optimize()
graph = pynini.closure(pynini.difference(NEMO_NOT_SPACE, symbols_to_exclude), 1)
graph = pynutil.add_weight(graph, MIN_NEG_WEIGHT) | default_graph
# leave phones of format [HH AH0 L OW1] untouched
phoneme_unit = pynini.closure(NEMO_ALPHA, 1) + pynini.closure(NEMO_DIGIT)
phoneme = (
pynini.accep(pynini.escape("["))
+ pynini.closure(phoneme_unit + pynini.accep(" "))
+ phoneme_unit
+ pynini.accep(pynini.escape("]"))
)
# leave IPA phones of format [ˈdoʊv] untouched, single words and sentences with punctuation marks allowed
punct_marks = pynini.union(*punctuation.punct_marks).optimize()
stress = pynini.union("ˈ", "'", "ˌ")
ipa_phoneme_unit = pynini.string_file(get_abs_path("data/whitelist/ipa_symbols.tsv"))
# word in ipa form
ipa_phonemes = (
pynini.closure(stress, 0, 1)
+ pynini.closure(ipa_phoneme_unit, 1)
+ pynini.closure(stress | ipa_phoneme_unit)
)
# allow sentences of words in IPA format separated with spaces or punct marks
delim = (punct_marks | pynini.accep(" ")) ** (1, ...)
ipa_phonemes = ipa_phonemes + pynini.closure(delim + ipa_phonemes) + pynini.closure(delim, 0, 1)
ipa_phonemes = (pynini.accep(pynini.escape("[")) + ipa_phonemes + pynini.accep(pynini.escape("]"))).optimize()
if not deterministic:
phoneme = (
pynini.accep(pynini.escape("["))
+ pynini.closure(pynini.accep(" "), 0, 1)
+ pynini.closure(phoneme_unit + pynini.accep(" "))
+ phoneme_unit
+ pynini.closure(pynini.accep(" "), 0, 1)
+ pynini.accep(pynini.escape("]"))
).optimize()
ipa_phonemes = (
pynini.accep(pynini.escape("[")) + ipa_phonemes + pynini.accep(pynini.escape("]"))
).optimize()
phoneme |= ipa_phonemes
self.graph = plurals._priority_union(convert_space(phoneme.optimize()), graph, NEMO_SIGMA)
self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
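if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): WordFst is the
    # fallback tagger for plain words; it needs a PunctuationFst instance.
    from pynini.lib import rewrite

    word = WordFst(punctuation=PunctuationFst(), deterministic=True)
    # e.g. "sleep" -> 'name: "sleep"' (the tokens { } wrapper is added downstream)
    print(rewrite.top_rewrite("sleep", word.fst))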
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/word.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.en.taggers.date import DateFst
from nemo_text_processing.text_normalization.en.taggers.decimal import DecimalFst
from nemo_text_processing.text_normalization.en.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.en.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.en.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.en.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.en.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.en.taggers.range import RangeFst as RangeFst
from nemo_text_processing.text_normalization.en.taggers.roman import RomanFst
from nemo_text_processing.text_normalization.en.taggers.serial import SerialFst
from nemo_text_processing.text_normalization.en.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.en.taggers.time import TimeFst
from nemo_text_processing.text_normalization.en.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.verbalizers.cardinal import CardinalFst as vCardinal
from nemo_text_processing.text_normalization.en.verbalizers.date import DateFst as vDate
from nemo_text_processing.text_normalization.en.verbalizers.decimal import DecimalFst as vDecimal
from nemo_text_processing.text_normalization.en.verbalizers.electronic import ElectronicFst as vElectronic
from nemo_text_processing.text_normalization.en.verbalizers.fraction import FractionFst as vFraction
from nemo_text_processing.text_normalization.en.verbalizers.measure import MeasureFst as vMeasure
from nemo_text_processing.text_normalization.en.verbalizers.money import MoneyFst as vMoney
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst as vOrdinal
from nemo_text_processing.text_normalization.en.verbalizers.roman import RomanFst as vRoman
from nemo_text_processing.text_normalization.en.verbalizers.telephone import TelephoneFst as vTelephone
from nemo_text_processing.text_normalization.en.verbalizers.time import TimeFst as vTime
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst as vWord
from pynini.examples import plurals
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
"""
Final class that composes all other classification grammars. This class can process an entire sentence, including punctuation.
For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
More details on deployment at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options are generated (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str,
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = True,
whitelist: str = None,
):
super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != 'None':
os.makedirs(cache_dir, exist_ok=True)
whitelist_file = os.path.basename(whitelist) if whitelist else ""
far_file = os.path.join(
cache_dir, f"_{input_case}_en_tn_{deterministic}_deterministic{whitelist_file}_lm.far"
)
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode='r')['tokenize_and_classify']
no_digits = pynini.closure(pynini.difference(NEMO_CHAR, NEMO_DIGIT))
self.fst_no_digits = pynini.compose(self.fst, no_digits).optimize()
logging.info(f'ClassifyFst.fst was restored from {far_file}.')
else:
logging.info(f'Creating ClassifyFst grammars. This might take some time...')
# TAGGERS
cardinal = CardinalFst(deterministic=True, lm=True)
cardinal_tagger = cardinal
cardinal_graph = cardinal.fst
ordinal = OrdinalFst(cardinal=cardinal, deterministic=True)
ordinal_graph = ordinal.fst
decimal = DecimalFst(cardinal=cardinal, deterministic=True)
decimal_graph = decimal.fst
fraction = FractionFst(deterministic=True, cardinal=cardinal)
fraction_graph = fraction.fst
measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=True)
measure_graph = measure.fst
date = DateFst(cardinal=cardinal, deterministic=True, lm=True)
date_graph = date.fst
punctuation = PunctuationFst(deterministic=True)
punct_graph = punctuation.graph
word_graph = WordFst(punctuation=punctuation, deterministic=deterministic).graph
time_graph = TimeFst(cardinal=cardinal, deterministic=True).fst
telephone_graph = TelephoneFst(deterministic=True).fst
electronic_graph = ElectronicFst(cardinal=cardinal, deterministic=True).fst
money_graph = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=False).fst
whitelist = WhiteListFst(input_case=input_case, deterministic=False, input_file=whitelist)
whitelist_graph = whitelist.graph
serial_graph = SerialFst(cardinal=cardinal, ordinal=ordinal, deterministic=deterministic, lm=True).fst
# VERBALIZERS
cardinal = vCardinal(deterministic=True)
v_cardinal_graph = cardinal.fst
decimal = vDecimal(cardinal=cardinal, deterministic=True)
v_decimal_graph = decimal.fst
ordinal = vOrdinal(deterministic=True)
v_ordinal_graph = ordinal.fst
fraction = vFraction(deterministic=True, lm=True)
v_fraction_graph = fraction.fst
v_telephone_graph = vTelephone(deterministic=True).fst
v_electronic_graph = vElectronic(deterministic=True).fst
measure = vMeasure(decimal=decimal, cardinal=cardinal, fraction=fraction, deterministic=False)
v_measure_graph = measure.fst
v_time_graph = vTime(deterministic=True).fst
v_date_graph = vDate(ordinal=ordinal, deterministic=deterministic, lm=True).fst
v_money_graph = vMoney(decimal=decimal, deterministic=deterministic).fst
v_roman_graph = vRoman(deterministic=deterministic).fst
v_word_graph = vWord(deterministic=deterministic).fst
cardinal_or_date_final = plurals._priority_union(date_graph, cardinal_graph, NEMO_SIGMA)
cardinal_or_date_final = pynini.compose(cardinal_or_date_final, (v_cardinal_graph | v_date_graph))
time_final = pynini.compose(time_graph, v_time_graph)
ordinal_final = pynini.compose(ordinal_graph, v_ordinal_graph)
sem_w = 1
word_w = 100
punct_w = 2
classify_and_verbalize = (
pynutil.add_weight(time_final, sem_w)
| pynutil.add_weight(pynini.compose(decimal_graph, v_decimal_graph), sem_w)
| pynutil.add_weight(pynini.compose(measure_graph, v_measure_graph), sem_w)
| pynutil.add_weight(ordinal_final, sem_w)
| pynutil.add_weight(pynini.compose(telephone_graph, v_telephone_graph), sem_w)
| pynutil.add_weight(pynini.compose(electronic_graph, v_electronic_graph), sem_w)
| pynutil.add_weight(pynini.compose(fraction_graph, v_fraction_graph), sem_w)
| pynutil.add_weight(pynini.compose(money_graph, v_money_graph), sem_w)
| pynutil.add_weight(cardinal_or_date_final, sem_w)
| pynutil.add_weight(whitelist_graph, sem_w)
| pynutil.add_weight(
pynini.compose(serial_graph, v_word_graph), 1.1001
) # should be higher than the rest of the classes
).optimize()
roman_graph = RomanFst(deterministic=deterministic, lm=True).fst
# the weight matches the word_graph weight for "I" cases in long sentences with multiple semiotic tokens
classify_and_verbalize |= pynutil.add_weight(pynini.compose(roman_graph, v_roman_graph), sem_w)
date_final = pynini.compose(date_graph, v_date_graph)
range_graph = RangeFst(
time=time_final, cardinal=cardinal_tagger, date=date_final, deterministic=deterministic
).fst
classify_and_verbalize |= pynutil.add_weight(pynini.compose(range_graph, v_word_graph), sem_w)
classify_and_verbalize = pynutil.insert("< ") + classify_and_verbalize + pynutil.insert(" >")
classify_and_verbalize |= pynutil.add_weight(word_graph, word_w)
punct_only = pynutil.add_weight(punct_graph, weight=punct_w)
punct = pynini.closure(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct_only),
1,
)
def get_token_sem_graph(classify_and_verbalize):
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" "))
+ classify_and_verbalize
+ pynini.closure(pynutil.insert(" ") + punct)
)
graph = token_plus_punct + pynini.closure(
(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct + pynutil.insert(" "))
)
+ token_plus_punct
)
graph |= punct_only + pynini.closure(punct)
graph = delete_space + graph + delete_space
remove_extra_spaces = pynini.closure(NEMO_NOT_SPACE, 1) + pynini.closure(
delete_extra_space + pynini.closure(NEMO_NOT_SPACE, 1)
)
remove_extra_spaces |= (
pynini.closure(pynutil.delete(" "), 1)
+ pynini.closure(NEMO_NOT_SPACE, 1)
+ pynini.closure(delete_extra_space + pynini.closure(NEMO_NOT_SPACE, 1))
)
graph = pynini.compose(graph.optimize(), remove_extra_spaces).optimize()
return graph
self.fst = get_token_sem_graph(classify_and_verbalize)
no_digits = pynini.closure(pynini.difference(NEMO_CHAR, NEMO_DIGIT))
self.fst_no_digits = pynini.compose(self.fst, no_digits).optimize()
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f'ClassifyFst grammars are saved to {far_file}.')
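if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): the LM variant
    # marks semiotic spans with "< ... >" and exposes fst_no_digits, which
    # filters out normalization options that still contain digits.
    from pynini.lib import rewrite

    classify = ClassifyFst(input_case="cased", deterministic=False, cache_dir=None)
    print(rewrite.rewrites("123 apples", classify.fst_no_digits))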
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/taggers/tokenize_and_classify_lm.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for verbalizing time, e.g.
time { hours: "twelve" minutes: "thirty" suffix: "a m" zone: "e s t" } -> twelve thirty a m e s t
time { hours: "twelve" } -> twelve o'clock
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="time", kind="verbalize", deterministic=deterministic)
hour = (
pynutil.delete("hours:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
minute = (
pynutil.delete("minutes:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
suffix = (
pynutil.delete("suffix:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_suffix = pynini.closure(delete_space + insert_space + suffix, 0, 1)
zone = (
pynutil.delete("zone:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_zone = pynini.closure(delete_space + insert_space + zone, 0, 1)
second = (
pynutil.delete("seconds:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
graph_hms = (
hour
+ pynutil.insert(" hours ")
+ delete_space
+ minute
+ pynutil.insert(" minutes and ")
+ delete_space
+ second
+ pynutil.insert(" seconds")
+ optional_suffix
+ optional_zone
)
graph_hms @= pynini.cdrewrite(
pynutil.delete("o ")
| pynini.cross("one minutes", "one minute")
| pynini.cross("one seconds", "one second")
| pynini.cross("one hours", "one hour"),
pynini.union(" ", "[BOS]"),
"",
NEMO_SIGMA,
)
graph = hour + delete_space + insert_space + minute + optional_suffix + optional_zone
graph |= hour + insert_space + pynutil.insert("o'clock") + optional_zone
graph |= hour + delete_space + insert_space + suffix + optional_zone
graph |= graph_hms
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
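if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): verbalize a
    # tagged time token of the form produced by the time tagger.
    from pynini.lib import rewrite

    time = TimeFst(deterministic=True)
    # Mirrors the class docstring:
    # 'time { hours: "twelve" minutes: "thirty" }' -> "twelve thirty"
    print(rewrite.top_rewrite('time { hours: "twelve" minutes: "thirty" }', time.fst))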
# repo: NeMo-text-processing-main | path: nemo_text_processing/text_normalization/en/verbalizers/time.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst
from pynini.lib import pynutil
class RomanFst(GraphFst):
"""
Finite state transducer for verbalizing roman numerals
e.g. tokens { roman { integer: "one" } } -> one
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="roman", kind="verbalize", deterministic=deterministic)
suffix = OrdinalFst().suffix
cardinal = pynini.closure(NEMO_NOT_QUOTE)
ordinal = pynini.compose(cardinal, suffix)
graph = (
pynutil.delete("key_cardinal: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
+ pynini.accep(" ")
+ pynutil.delete("integer: \"")
+ cardinal
+ pynutil.delete("\"")
).optimize()
graph |= (
pynutil.delete("default_cardinal: \"default\" integer: \"") + cardinal + pynutil.delete("\"")
).optimize()
graph |= (
pynutil.delete("default_ordinal: \"default\" integer: \"") + ordinal + pynutil.delete("\"")
).optimize()
graph |= (
pynutil.delete("key_the_ordinal: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
+ pynini.accep(" ")
+ pynutil.delete("integer: \"")
+ pynini.closure(pynutil.insert("the "), 0, 1)
+ ordinal
+ pynutil.delete("\"")
).optimize()
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
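# Minimal usage sketch (added for illustration, not part of the original module).
# The "default_cardinal" field layout is an assumption based on the matching
# roman tagger; the expected reading follows the docstring example.
if __name__ == "__main__":
    fst = RomanFst(deterministic=True).fst
    tokens = 'roman { default_cardinal: "default" integer: "one" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "one"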
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/roman.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
"""
Finite state transducer for verbalizing measure, e.g.
measure { negative: "true" cardinal { integer: "twelve" } units: "kilograms" } -> minus twelve kilograms
measure { decimal { integer_part: "twelve" fractional_part: "five" } units: "kilograms" } -> twelve point five kilograms
tokens { measure { units: "covid" decimal { integer_part: "nineteen" fractional_part: "five" } } } -> covid nineteen point five
Args:
decimal: DecimalFst
cardinal: CardinalFst
fraction: FractionFst
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst, deterministic: bool = True):
super().__init__(name="measure", kind="verbalize", deterministic=deterministic)
optional_sign = cardinal.optional_sign
unit = (
pynutil.delete("units: \"")
+ pynini.difference(pynini.closure(NEMO_NOT_QUOTE, 1), pynini.union("address", "math"))
+ pynutil.delete("\"")
+ delete_space
)
if not deterministic:
unit |= pynini.compose(unit, pynini.cross(pynini.union("inch", "inches"), "\""))
graph_decimal = (
pynutil.delete("decimal {")
+ delete_space
+ optional_sign
+ delete_space
+ decimal.numbers
+ delete_space
+ pynutil.delete("}")
)
if not deterministic:
graph_decimal |= pynini.compose(
graph_decimal,
NEMO_SIGMA
+ (
pynini.cross(" point five", " and a half")
| pynini.cross("zero point five", "half")
| pynini.cross(" point two five", " and a quarter")
| pynini.cross("zero point two five", "quarter")
),
).optimize()
graph_cardinal = (
pynutil.delete("cardinal {")
+ delete_space
+ optional_sign
+ delete_space
+ cardinal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph_fraction = (
pynutil.delete("fraction {") + delete_space + fraction.graph + delete_space + pynutil.delete("}")
)
graph = (graph_cardinal | graph_decimal | graph_fraction) + delete_space + insert_space + unit
        # Sparrowhawk (SH) adds "preserve_order: true" by default
preserve_order = pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
graph |= unit + insert_space + (graph_cardinal | graph_decimal) + delete_space + pynini.closure(preserve_order)
        # for the unit-only case, where the integer part is the placeholder "-"
graph |= (
pynutil.delete("cardinal { integer: \"-\"")
+ delete_space
+ pynutil.delete("}")
+ delete_space
+ unit
+ pynini.closure(preserve_order)
)
address = (
pynutil.delete("units: \"address\" ")
+ delete_space
+ graph_cardinal
+ delete_space
+ pynini.closure(preserve_order)
)
math = (
pynutil.delete("units: \"math\" ")
+ delete_space
+ graph_cardinal
+ delete_space
+ pynini.closure(preserve_order)
)
graph |= address | math
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
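# Minimal usage sketch (added for illustration, not part of the original module;
# assumes the NeMo data files are available). The component verbalizers are wired
# together the same way verbalize.py does it; the token string is an assumption
# based on the measure tagger output.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.en.verbalizers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.en.verbalizers.decimal import DecimalFst
    from nemo_text_processing.text_normalization.en.verbalizers.fraction import FractionFst
    cardinal = CardinalFst()
    fst = MeasureFst(decimal=DecimalFst(cardinal=cardinal), cardinal=cardinal, fraction=FractionFst()).fst
    tokens = 'measure { cardinal { integer: "twelve" } units: "kilograms" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "twelve kilograms"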
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, GraphFst, insert_space
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst
from pynini.examples import plurals
from pynini.lib import pynutil
class FractionFst(GraphFst):
"""
Finite state transducer for verbalizing fraction
    e.g. tokens { fraction { integer_part: "twenty three" numerator: "four" denominator: "five" } } ->
        twenty three and four fifths
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True, lm: bool = False):
super().__init__(name="fraction", kind="verbalize", deterministic=deterministic)
suffix = OrdinalFst().suffix
integer = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\" ")
denominator_one = pynini.cross("denominator: \"one\"", "over one")
denominator_half = pynini.cross("denominator: \"two\"", "half")
denominator_quarter = pynini.cross("denominator: \"four\"", "quarter")
denominator_rest = (
pynutil.delete("denominator: \"") + pynini.closure(NEMO_NOT_QUOTE) @ suffix + pynutil.delete("\"")
)
denominators = plurals._priority_union(
denominator_one,
plurals._priority_union(
denominator_half,
plurals._priority_union(denominator_quarter, denominator_rest, NEMO_SIGMA),
NEMO_SIGMA,
),
NEMO_SIGMA,
).optimize()
if not deterministic:
denominators |= pynutil.delete("denominator: \"") + (pynini.accep("four") @ suffix) + pynutil.delete("\"")
numerator_one = pynutil.delete("numerator: \"") + pynini.accep("one") + pynutil.delete("\" ")
numerator_one = numerator_one + insert_space + denominators
numerator_rest = (
pynutil.delete("numerator: \"")
+ (pynini.closure(NEMO_NOT_QUOTE) - pynini.accep("one"))
+ pynutil.delete("\" ")
)
numerator_rest = numerator_rest + insert_space + denominators
numerator_rest @= pynini.cdrewrite(
plurals._priority_union(pynini.cross("half", "halves"), pynutil.insert("s"), NEMO_SIGMA),
"",
"[EOS]",
NEMO_SIGMA,
)
graph = numerator_one | numerator_rest
conjunction = pynutil.insert("and ")
if not deterministic and not lm:
conjunction = pynini.closure(conjunction, 0, 1)
integer = pynini.closure(integer + insert_space + conjunction, 0, 1)
graph = integer + graph
graph @= pynini.cdrewrite(
pynini.cross("and one half", "and a half") | pynini.cross("over ones", "over one"), "", "[EOS]", NEMO_SIGMA
)
self.graph = graph
delete_tokens = self.delete_tokens(self.graph)
self.fst = delete_tokens.optimize()
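# Minimal usage sketch (added for illustration, not part of the original module;
# assumes the NeMo data files are available). The token string follows the
# docstring example, without the integer part; the denominator is pluralized
# because the numerator is not "one".
if __name__ == "__main__":
    fst = FractionFst(deterministic=True).fst
    tokens = 'fraction { numerator: "four" denominator: "five" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "four fifths"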
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space, insert_space
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
"""
Finite state transducer for verbalizing telephone numbers, e.g.
telephone { country_code: "one" number_part: "one two three, one two three, five six seven eight" extension: "one" }
-> one, one two three, one two three, five six seven eight, one
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="telephone", kind="verbalize", deterministic=deterministic)
optional_country_code = pynini.closure(
pynutil.delete("country_code: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
+ delete_space
+ insert_space,
0,
1,
)
number_part = (
pynutil.delete("number_part: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynini.closure(pynutil.add_weight(pynutil.delete(" "), -0.0001), 0, 1)
+ pynutil.delete("\"")
)
optional_extension = pynini.closure(
delete_space
+ insert_space
+ pynutil.delete("extension: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\""),
0,
1,
)
graph = optional_country_code + number_part + optional_extension
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
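# Minimal usage sketch (added for illustration, not part of the original module).
# The token string is a shortened version of the docstring example, with no
# country code or extension, so only number_part is verbalized.
if __name__ == "__main__":
    fst = TelephoneFst(deterministic=True).fst
    tokens = 'telephone { number_part: "one two three, five six seven eight" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "one two three, five six seven eight"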
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, GraphFst, delete_space
from nemo_text_processing.text_normalization.en.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
"""
Finite state transducer for verbalizing ordinal, e.g.
        ordinal { integer: "thirteen" } -> thirteenth
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)
graph_digit = pynini.string_file(get_abs_path("data/ordinal/digit.tsv")).invert()
graph_teens = pynini.string_file(get_abs_path("data/ordinal/teen.tsv")).invert()
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
convert_rest = pynutil.insert("th")
suffix = pynini.cdrewrite(
graph_digit | graph_teens | pynini.cross("ty", "tieth") | convert_rest, "", "[EOS]", NEMO_SIGMA,
).optimize()
self.graph = pynini.compose(graph, suffix)
self.suffix = suffix
delete_tokens = self.delete_tokens(self.graph)
self.fst = delete_tokens.optimize()
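# Minimal usage sketch (added for illustration, not part of the original module;
# assumes the NeMo data files are available). The token string follows the
# docstring example; the suffix rewrite turns "thirteen" into "thirteenth".
if __name__ == "__main__":
    fst = OrdinalFst(deterministic=True).fst
    tokens = 'ordinal { integer: "thirteen" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "thirteenth"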
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class AbbreviationFst(GraphFst):
"""
Finite state transducer for verbalizing abbreviations
    e.g. tokens { abbreviation { value: "A B C" } } -> A B C
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="abbreviation", kind="verbalize", deterministic=deterministic)
graph = pynutil.delete("value: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
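# Minimal usage sketch (added for illustration, not part of the original module).
# The token string follows the docstring example; the spelled-out letters pass
# through unchanged, only the field wrapper is stripped.
if __name__ == "__main__":
    fst = AbbreviationFst(deterministic=True).fst
    tokens = 'abbreviation { value: "A B C" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "A B C"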
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/abbreviation.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.abbreviation import AbbreviationFst
from nemo_text_processing.text_normalization.en.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.en.verbalizers.date import DateFst
from nemo_text_processing.text_normalization.en.verbalizers.decimal import DecimalFst
from nemo_text_processing.text_normalization.en.verbalizers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.en.verbalizers.fraction import FractionFst
from nemo_text_processing.text_normalization.en.verbalizers.measure import MeasureFst
from nemo_text_processing.text_normalization.en.verbalizers.money import MoneyFst
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.en.verbalizers.roman import RomanFst
from nemo_text_processing.text_normalization.en.verbalizers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.en.verbalizers.time import TimeFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details to deployment at NeMo/tools/text_processing_deployment.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
cardinal = CardinalFst(deterministic=deterministic)
cardinal_graph = cardinal.fst
decimal = DecimalFst(cardinal=cardinal, deterministic=deterministic)
decimal_graph = decimal.fst
ordinal = OrdinalFst(deterministic=deterministic)
ordinal_graph = ordinal.fst
fraction = FractionFst(deterministic=deterministic)
fraction_graph = fraction.fst
telephone_graph = TelephoneFst(deterministic=deterministic).fst
electronic_graph = ElectronicFst(deterministic=deterministic).fst
measure = MeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction, deterministic=deterministic)
measure_graph = measure.fst
time_graph = TimeFst(deterministic=deterministic).fst
date_graph = DateFst(ordinal=ordinal, deterministic=deterministic).fst
money_graph = MoneyFst(decimal=decimal, deterministic=deterministic).fst
whitelist_graph = WhiteListFst(deterministic=deterministic).fst
graph = (
time_graph
| date_graph
| money_graph
| measure_graph
| ordinal_graph
| decimal_graph
| cardinal_graph
| telephone_graph
| electronic_graph
| fraction_graph
| whitelist_graph
)
roman_graph = RomanFst(deterministic=deterministic).fst
graph |= roman_graph
if not deterministic:
abbreviation_graph = AbbreviationFst(deterministic=deterministic).fst
graph |= abbreviation_graph
self.fst = graph
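# Minimal usage sketch (added for illustration, not part of the original module;
# assumes the NeMo data files are available). Building the full grammar compiles
# every component verbalizer, so this can take a while.
if __name__ == "__main__":
    import pynini
    fst = VerbalizeFst(deterministic=True).fst
    tokens = 'cardinal { integer: "twenty three" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "twenty three"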
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
"""
Finite state transducer for verbalizing whitelist
    e.g. tokens { name: "misses" } -> misses
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="whitelist", kind="verbalize", deterministic=deterministic)
graph = (
pynutil.delete("name:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
self.fst = graph.optimize()
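# Minimal usage sketch (added for illustration, not part of the original module).
# Note that this verbalizer consumes a bare "name" field; the surrounding
# tokens { } wrapper is removed later by VerbalizeFinalFst.
if __name__ == "__main__":
    fst = WhiteListFst(deterministic=True).fst
    print(pynini.shortestpath('name: "misses"' @ fst).string())  # -> "misses"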
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
"""
Finite state transducer that verbalizes an entire sentence, e.g.
tokens { name: "its" } tokens { time { hours: "twelve" minutes: "thirty" } } tokens { name: "now" } tokens { name: "." } -> its twelve thirty now .
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
"""
def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False):
super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
far_file = os.path.join(cache_dir, f"en_tn_{deterministic}_deterministic_verbalizer.far")
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["verbalize"]
logging.info(f'VerbalizeFinalFst graph was restored from {far_file}.')
else:
verbalize = VerbalizeFst(deterministic=deterministic).fst
word = WordFst(deterministic=deterministic).fst
types = verbalize | word
if deterministic:
graph = (
pynutil.delete("tokens")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ types
+ delete_space
+ pynutil.delete("}")
)
else:
graph = delete_space + types + delete_space
graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space
self.fst = graph.optimize()
if far_file:
generator_main(far_file, {"verbalize": self.fst})
logging.info(f"VerbalizeFinalFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class DecimalFst(GraphFst):
"""
Finite state transducer for verbalizing decimal, e.g.
decimal { negative: "true" integer_part: "twelve" fractional_part: "five o o six" quantity: "billion" } -> minus twelve point five o o six billion
    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, cardinal, deterministic: bool = True):
super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)
self.optional_sign = pynini.cross("negative: \"true\"", "minus ")
if not deterministic:
self.optional_sign |= pynini.cross("negative: \"true\"", "negative ")
self.optional_sign = pynini.closure(self.optional_sign + delete_space, 0, 1)
self.integer = pynutil.delete("integer_part:") + cardinal.integer
self.optional_integer = pynini.closure(self.integer + delete_space + insert_space, 0, 1)
self.fractional_default = (
pynutil.delete("fractional_part:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
self.fractional = pynutil.insert("point ") + self.fractional_default
self.quantity = (
delete_space
+ insert_space
+ pynutil.delete("quantity:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
self.optional_quantity = pynini.closure(self.quantity, 0, 1)
graph = self.optional_sign + (
self.integer
| (self.integer + self.quantity)
| (self.optional_integer + self.fractional + self.optional_quantity)
)
self.numbers = graph
delete_tokens = self.delete_tokens(graph)
if not deterministic:
delete_tokens |= pynini.compose(
delete_tokens,
NEMO_SIGMA
+ (
pynini.cross(" point five", " and a half")
| pynini.cross("zero point five", "half")
| pynini.cross(" point two five", " and a quarter")
| pynini.cross("zero point two five", "quarter")
),
).optimize()
self.fst = delete_tokens.optimize()
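# Minimal usage sketch (added for illustration, not part of the original module).
# The cardinal verbalizer is imported and wired in here the same way
# verbalize.py does it.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.en.verbalizers.cardinal import CardinalFst
    fst = DecimalFst(cardinal=CardinalFst()).fst
    tokens = 'decimal { integer_part: "twelve" fractional_part: "five" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "twelve point five"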
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_preserve_order,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { integer_part: "twelve" fractional_part: "o five" currency: "dollars" } -> twelve o five dollars
Args:
decimal: DecimalFst
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, deterministic: bool = True):
super().__init__(name="money", kind="verbalize", deterministic=deterministic)
keep_space = pynini.accep(" ")
maj = pynutil.delete("currency_maj: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
min = pynutil.delete("currency_min: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
fractional_part = (
pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
)
integer_part = decimal.integer
# *** currency_maj
graph_integer = integer_part + keep_space + maj
        # *** currency_maj + (***) | ((and) *** currency_min)
fractional = fractional_part + delete_extra_space + min
if not deterministic:
fractional |= pynutil.insert("and ") + fractional
graph_integer_with_minor = integer_part + keep_space + maj + keep_space + fractional + delete_preserve_order
# *** point *** currency_maj
graph_decimal = decimal.numbers + keep_space + maj
        # *** currency_min
graph_minor = fractional_part + delete_extra_space + min + delete_preserve_order
graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor
if not deterministic:
graph |= graph_integer + delete_preserve_order
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
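# Minimal usage sketch (added for illustration, not part of the original module).
# The field names follow the money tagger: currency_maj holds the spoken major
# currency unit; the token string here is an assumption based on that layout.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.en.verbalizers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.en.verbalizers.decimal import DecimalFst
    fst = MoneyFst(decimal=DecimalFst(cardinal=CardinalFst())).fst
    tokens = 'money { integer_part: "twelve" currency_maj: "dollars" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "twelve dollars"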
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
"""
Finite state transducer for verbalizing cardinal, e.g.
cardinal { negative: "true" integer: "23" } -> minus twenty three
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
self.optional_sign = pynini.cross("negative: \"true\"", "minus ")
if not deterministic:
self.optional_sign |= pynini.cross("negative: \"true\"", "negative ")
self.optional_sign |= pynini.cross("negative: \"true\"", "dash ")
self.optional_sign = pynini.closure(self.optional_sign + delete_space, 0, 1)
integer = pynini.closure(NEMO_NOT_QUOTE)
self.integer = delete_space + pynutil.delete("\"") + integer + pynutil.delete("\"")
integer = pynutil.delete("integer:") + self.integer
self.numbers = self.optional_sign + integer
delete_tokens = self.delete_tokens(self.numbers)
self.fst = delete_tokens.optimize()
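# Minimal usage sketch (added for illustration, not part of the original module).
# The token string follows the docstring example; the negative flag is
# verbalized as "minus".
if __name__ == "__main__":
    fst = CardinalFst(deterministic=True).fst
    tokens = 'cardinal { negative: "true" integer: "twenty three" }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "minus twenty three"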
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
MIN_NEG_WEIGHT,
NEMO_ALPHA,
NEMO_CHAR,
NEMO_LOWER,
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
TO_LOWER,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.en.utils import get_abs_path
from pynini.examples import plurals
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for verbalizing electronic
e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> c d f one at a b c dot e d u
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="electronic", kind="verbalize", deterministic=deterministic)
graph_digit_no_zero = pynini.invert(pynini.string_file(get_abs_path("data/number/digit.tsv"))).optimize()
graph_zero = pynini.cross("0", "zero")
long_numbers = pynutil.add_weight(graph_digit_no_zero + pynini.cross("000", " thousand"), MIN_NEG_WEIGHT)
if not deterministic:
graph_zero |= pynini.cross("0", "o") | pynini.cross("0", "oh")
graph_digit = graph_digit_no_zero | graph_zero
graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbol.tsv")).optimize()
NEMO_NOT_BRACKET = pynini.difference(NEMO_CHAR, pynini.union("{", "}")).optimize()
dict_words = pynini.project(pynini.string_file(get_abs_path("data/electronic/words.tsv")), "output")
default_chars_symbols = pynini.cdrewrite(
pynutil.insert(" ") + (graph_symbols | graph_digit | long_numbers) + pynutil.insert(" "),
"",
"",
NEMO_SIGMA,
)
default_chars_symbols = pynini.compose(
pynini.closure(NEMO_NOT_BRACKET), default_chars_symbols.optimize()
).optimize()
        # this covers cases when the user name was split into dictionary words, e.g. "[email protected]" -> "service part"
space_separated_dict_words = pynutil.add_weight(
NEMO_ALPHA
+ pynini.closure(NEMO_ALPHA | NEMO_SPACE)
+ NEMO_SPACE
+ pynini.closure(NEMO_ALPHA | NEMO_SPACE),
MIN_NEG_WEIGHT,
)
user_name = (
pynutil.delete("username:")
+ delete_space
+ pynutil.delete("\"")
+ (default_chars_symbols | space_separated_dict_words).optimize()
+ pynutil.delete("\"")
)
domain_common = pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
# this will be used for a safe fallback
domain_all = pynini.compose(
default_chars_symbols,
pynini.closure(TO_LOWER | NEMO_LOWER | NEMO_SPACE | pynutil.add_weight(dict_words, MIN_NEG_WEIGHT)),
)
domain = (
domain_all
+ insert_space
+ plurals._priority_union(
domain_common, pynutil.add_weight(pynini.cross(".", "dot"), weight=0.0001), NEMO_SIGMA
)
+ pynini.closure(insert_space + default_chars_symbols, 0, 1)
)
domain = (
pynutil.delete("domain:")
+ delete_space
+ pynutil.delete("\"")
+ (domain | pynutil.add_weight(domain_all, weight=100)).optimize()
+ delete_space
+ pynutil.delete("\"")
).optimize()
protocol = pynutil.delete("protocol: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
graph = (
pynini.closure(protocol + delete_space, 0, 1)
+ pynini.closure(user_name + delete_space + pynutil.insert(" at ") + delete_space, 0, 1)
+ domain
+ delete_space
).optimize() @ pynini.cdrewrite(delete_extra_space, "", "", NEMO_SIGMA)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
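# Minimal usage sketch (added for illustration, not part of the original module;
# assumes the NeMo data files are available). The token string and the expected
# reading both follow the docstring example above.
if __name__ == "__main__":
    fst = ElectronicFst(deterministic=True).fst
    tokens = 'electronic { username: "cdf1" domain: "abc.edu" }'
    print(pynini.shortestpath(tokens @ fst).string())  # expected, per the docstring: "c d f one at a b c dot e d u"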
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/electronic.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
MIN_NEG_WEIGHT,
NEMO_ALPHA,
NEMO_CHAR,
NEMO_SIGMA,
NEMO_SPACE,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from pynini.lib import pynutil
class PostProcessingFst:
"""
    Finite state transducer that post-processes an entire sentence after verbalization is complete, e.g.
removes extra spaces around punctuation marks " ( one hundred and twenty three ) " -> "(one hundred and twenty three)"
Args:
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
"""
def __init__(self, cache_dir: str = None, overwrite_cache: bool = False):
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
far_file = os.path.join(cache_dir, "en_tn_post_processing.far")
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["post_process_graph"]
logging.info(f'Post processing graph was restored from {far_file}.')
else:
self.set_punct_dict()
self.fst = self.get_punct_postprocess_graph()
if far_file:
generator_main(far_file, {"post_process_graph": self.fst})
def set_punct_dict(self):
self.punct_marks = {
"'": [
"'",
'´',
'ʹ',
'ʻ',
'ʼ',
'ʽ',
'ʾ',
'ˈ',
'ˊ',
'ˋ',
'˴',
'ʹ',
'΄',
'՚',
'՝',
'י',
'׳',
'ߴ',
'ߵ',
'ᑊ',
'ᛌ',
'᾽',
'᾿',
'`',
'´',
'῾',
'‘',
'’',
'‛',
'′',
'‵',
'ꞌ',
''',
'`',
'𖽑',
'𖽒',
],
}
def get_punct_postprocess_graph(self):
"""
Returns graph to post process punctuation marks.
{``} quotes are converted to {"}. Note, if there are spaces around single quote {'}, they will be kept.
By default, a space is added after a punctuation mark, and spaces are removed before punctuation marks.
"""
punct_marks_all = PunctuationFst().punct_marks
        # marks in no_space_before_punct must not be preceded by a space
quotes = ["'", "\"", "``", "«"]
dashes = ["-", "—"]
brackets = ["<", "{", "("]
open_close_single_quotes = [
("`", "`"),
]
open_close_double_quotes = [('"', '"'), ("``", "``"), ("“", "”")]
open_close_symbols = open_close_single_quotes + open_close_double_quotes
allow_space_before_punct = ["&"] + quotes + dashes + brackets + [k[0] for k in open_close_symbols]
no_space_before_punct = [m for m in punct_marks_all if m not in allow_space_before_punct]
no_space_before_punct = pynini.union(*no_space_before_punct)
no_space_after_punct = pynini.union(*brackets)
delete_space = pynutil.delete(" ")
delete_space_optional = pynini.closure(delete_space, 0, 1)
# non_punct allows space
# delete space before no_space_before_punct marks, if present
non_punct = pynini.difference(NEMO_CHAR, no_space_before_punct).optimize()
graph = (
pynini.closure(non_punct)
+ pynini.closure(
no_space_before_punct | pynutil.add_weight(delete_space + no_space_before_punct, MIN_NEG_WEIGHT)
)
+ pynini.closure(non_punct)
)
graph = pynini.closure(graph).optimize()
graph = pynini.compose(
graph, pynini.cdrewrite(pynini.cross("``", '"'), "", "", NEMO_SIGMA).optimize()
).optimize()
# remove space after no_space_after_punct (even if there are no matching closing brackets)
no_space_after_punct = pynini.cdrewrite(delete_space, no_space_after_punct, NEMO_SIGMA, NEMO_SIGMA).optimize()
graph = pynini.compose(graph, no_space_after_punct).optimize()
# remove space around text in quotes
single_quote = pynutil.add_weight(pynini.accep("`"), MIN_NEG_WEIGHT)
double_quotes = pynutil.add_weight(pynini.accep('"'), MIN_NEG_WEIGHT)
quotes_graph = (
single_quote + delete_space_optional + NEMO_ALPHA + NEMO_SIGMA + delete_space_optional + single_quote
).optimize()
        # this is to make sure multiple quotes are tagged from right to left without skipping any quotes on the left
not_alpha = pynini.difference(NEMO_CHAR, NEMO_ALPHA).optimize() | pynutil.add_weight(
NEMO_SPACE, MIN_NEG_WEIGHT
)
end = pynini.closure(pynutil.add_weight(not_alpha, MIN_NEG_WEIGHT))
quotes_graph |= (
double_quotes
+ delete_space_optional
+ NEMO_ALPHA
+ NEMO_SIGMA
+ delete_space_optional
+ double_quotes
+ end
)
quotes_graph = pynutil.add_weight(quotes_graph, MIN_NEG_WEIGHT)
quotes_graph = NEMO_SIGMA + pynini.closure(NEMO_SIGMA + quotes_graph + NEMO_SIGMA)
graph = pynini.compose(graph, quotes_graph).optimize()
# remove space between a word and a single quote followed by s
remove_space_around_single_quote = pynini.cdrewrite(
delete_space_optional + pynini.union(*self.punct_marks["'"]) + delete_space,
NEMO_ALPHA,
pynini.union("s ", "s[EOS]"),
NEMO_SIGMA,
)
graph = pynini.compose(graph, remove_space_around_single_quote).optimize()
return graph
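# Minimal usage sketch (added for illustration, not part of the original module).
# The graph deletes the space before punctuation marks such as commas;
# cache_dir=None skips the .far cache.
if __name__ == "__main__":
    fst = PostProcessingFst(cache_dir=None).fst
    print(pynini.shortestpath("hello , world" @ fst).string())  # -> "hello, world"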
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/post_processing.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.examples import plurals
from pynini.lib import pynutil
class DateFst(GraphFst):
"""
Finite state transducer for verbalizing date, e.g.
date { month: "february" day: "five" year: "twenty twelve" preserve_order: true } -> february fifth twenty twelve
date { day: "five" month: "february" year: "twenty twelve" preserve_order: true } -> the fifth of february twenty twelve
Args:
ordinal: OrdinalFst
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, ordinal: GraphFst, deterministic: bool = True, lm: bool = False):
super().__init__(name="date", kind="verbalize", deterministic=deterministic)
phrase = pynini.closure(NEMO_NOT_QUOTE, 1)
day_cardinal = (
pynutil.delete("day:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
day = day_cardinal @ ordinal.suffix
period = pynutil.delete("text:") + delete_space + pynutil.delete("\"") + phrase + pynutil.delete("\"")
month = pynutil.delete("month:") + delete_space + pynutil.delete("\"") + phrase + pynutil.delete("\"")
year = (
pynutil.delete("year:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ delete_space
+ pynutil.delete("\"")
)
# financial period
graph_fy = (
pynutil.insert("the ") + period + pynutil.insert(" of ") + pynini.closure(delete_extra_space + year, 0, 1)
)
# month (day) year
graph_mdy = (
month + pynini.closure(delete_extra_space + day, 0, 1) + pynini.closure(delete_extra_space + year, 0, 1)
)
# may 5 -> may five
if not deterministic and not lm:
graph_mdy |= (
month
+ pynini.closure(delete_extra_space + day_cardinal, 0, 1)
+ pynini.closure(delete_extra_space + year, 0, 1)
)
# day month year
graph_dmy = (
pynutil.insert("the ")
+ day
+ delete_extra_space
+ pynutil.insert("of ")
+ month
+ pynini.closure(delete_extra_space + year, 0, 1)
)
optional_preserve_order = pynini.closure(
pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
| pynutil.delete("field_order:")
+ delete_space
+ pynutil.delete("\"")
+ NEMO_NOT_QUOTE
+ pynutil.delete("\"")
+ delete_space
)
final_graph = (
(plurals._priority_union(graph_mdy, pynutil.add_weight(graph_dmy, 0.0001), NEMO_SIGMA) | year | graph_fy)
+ delete_space
+ optional_preserve_order
)
delete_tokens = self.delete_tokens(final_graph)
self.fst = delete_tokens.optimize()
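# Minimal usage sketch (added for illustration, not part of the original module;
# assumes the NeMo data files are available). The ordinal verbalizer is wired in
# the same way as in verbalize.py; the token string is a shortened version of
# the first docstring example, without the year.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst
    fst = DateFst(ordinal=OrdinalFst()).fst
    tokens = 'date { month: "february" day: "five" preserve_order: true }'
    print(pynini.shortestpath(tokens @ fst).string())  # -> "february fifth"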
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WordFst(GraphFst):
"""
Finite state transducer for verbalizing word
e.g. tokens { name: "sleep" } -> sleep
Args:
deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="word", kind="verbalize", deterministic=deterministic)
chars = pynini.closure(NEMO_CHAR - " ", 1)
char = pynutil.delete("name:") + delete_space + pynutil.delete("\"") + chars + pynutil.delete("\"")
graph = char @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
self.fst = graph.optimize()
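# Minimal usage sketch (added for illustration, not part of the original module).
# Like WhiteListFst, this verbalizer consumes a bare "name" field; the
# tokens { } wrapper is removed by VerbalizeFinalFst.
if __name__ == "__main__":
    fst = WordFst(deterministic=True).fst
    print(pynini.shortestpath('name: "sleep"' @ fst).string())  # -> "sleep"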
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/verbalizers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/whitelist/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/ordinal/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/date/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/electronic/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/telephone/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/time/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/number/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/money/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/address/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/measure/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/data/roman/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import string
from argparse import ArgumentParser
from typing import List
import pynini
from pynini import Far
"""
This files takes 1. Far file containing a fst graph created by TN or ITN 2. entire string.
Optionally: 3. start position of substring 4. end (exclusive) position of substring
and returns input output mapping of all words in the string bounded by whitespace.
If start and end position specified returns
1. mapped output string 2. start and end indices of mapped substring
Usage:
python alignment.py --fst=<fst file> --text="2615 Forest Av, 1 Aug 2016" --rule=TOKENIZE_AND_CLASSIFY --start=22 --end=26 --grammar=TN
Output:
inp string: |2615 Forest Av, 1 Aug 2016|
out string: |twenty six fifteen Forest Avenue , the first of august twenty sixteen|
inp indices: [22:26]
out indices: [55:69]
in: |2016| out: |twenty sixteen|
python alignment.py --fst=<fst file> --text="2615 Forest Av, 1 Aug 2016" --rule=TOKENIZE_AND_CLASSIFY
Output:
inp string: |2615 Forest Av, 1 Aug 2016|
out string: |twenty six fifteen Forest Avenue , the first of august twenty sixteen|
inp indices: [0:4] out indices: [0:18]
in: |2615| out: |twenty six fifteen|
inp indices: [5:11] out indices: [19:25]
in: |Forest| out: |Forest|
inp indices: [12:15] out indices: [26:34]
in: |Av,| out: |Avenue ,|
inp indices: [16:17] out indices: [39:44]
in: |1| out: |first|
inp indices: [18:21] out indices: [48:54]
in: |Aug| out: |august|
inp indices: [22:26] out indices: [55:69]
in: |2016| out: |twenty sixteen|
Disclaimer: The heuristic algorithm relies on monotonic alignment and can fail in certain situations,
e.g. when word pieces are reordered by the fst:
python alignment.py --fst=<fst file> --text=\"$1\" --rule=\"tokenize_and_classify\" --start=0 --end=1
inp string: |$1|
out string: |one dollar|
inp indices: [0:1] out indices: [0:3]
in: |$| out: |one|
"""
def parse_args():
args = ArgumentParser("map substring to output with FST")
args.add_argument("--fst", help="FAR file containing FST", type=str, required=True)
args.add_argument(
"--grammar", help="tn or itn", type=str, required=False, choices=[ITN_MODE, TN_MODE], default=TN_MODE
)
args.add_argument(
"--rule",
help="rule name in FAR file containing FST",
type=str,
default='tokenize_and_classify',
required=False,
)
args.add_argument(
"--text",
help="input string",
type=str,
default="2615 Forest Av, 90601 CA, Santa Clara. 10kg, 12/16/2018, $123.25. 1 Aug 2016.",
)
args.add_argument("--start", help="start index of substring to be mapped", type=int, required=False)
args.add_argument("--end", help="end index of substring to be mapped", type=int, required=False)
return args.parse_args()
EPS = "<eps>"
WHITE_SPACE = "\u23B5"
ITN_MODE = "itn"
TN_MODE = "tn"
tn_itn_symbols = list(string.ascii_letters + string.digits) + list(r"$\:+-=")
def get_word_segments(text: str) -> List[List[int]]:
"""
    Returns word segments from the given text, based on whitespace, as a list of index spans.
"""
spans = []
cur_span = [0]
for idx, ch in enumerate(text):
if len(cur_span) == 0 and ch != " ":
cur_span.append(idx)
elif ch == " ":
cur_span.append(idx)
assert len(cur_span) == 2
spans.append(cur_span)
cur_span = []
elif idx == len(text) - 1:
idx += 1
cur_span.append(idx)
assert len(cur_span) == 2
spans.append(cur_span)
return spans
def create_symbol_table() -> pynini.SymbolTable:
"""
    Creates and returns a Pynini SymbolTable used to label the alignment with ASCII characters instead of integers
"""
table = pynini.SymbolTable()
    for num in range(34, 200): # printable ASCII plus some extended Latin-1 characters
table.add_symbol(chr(num), num)
table.add_symbol(EPS, 0)
table.add_symbol(WHITE_SPACE, 32)
return table
def get_string_alignment(fst: pynini.Fst, input_text: str, symbol_table: pynini.SymbolTable):
"""
Creates an alignment of the input text based on the shortest path through the FST. Symbols used for the alignment are from symbol_table
Returns:
output: list of tuples, each mapping input character to output
"""
lattice = pynini.shortestpath(input_text @ fst)
paths = lattice.paths(input_token_type=symbol_table, output_token_type=symbol_table)
ilabels = paths.ilabels()
olabels = paths.olabels()
logging.debug("input: " + paths.istring())
logging.debug("output: " + paths.ostring())
output = list(zip([symbol_table.find(x) for x in ilabels], [symbol_table.find(x) for x in olabels]))
logging.debug(f"alignment: {output}")
paths.next()
assert paths.done()
output_str = "".join(map(remove, [x[1] for x in output]))
return output, output_str
def _get_aligned_index(alignment: List[tuple], index: int):
"""
Given an index into the contracted input string, computes the corresponding index in the alignment (which contains EPS)
"""
aligned_index = 0
idx = 0
while idx < index:
if alignment[aligned_index][0] != EPS:
idx += 1
aligned_index += 1
while alignment[aligned_index][0] == EPS:
aligned_index += 1
return aligned_index
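# Illustrative example (hypothetical alignment): with
# alignment = [("a", "x"), ("<eps>", "y"), ("b", "z")],
# _get_aligned_index(alignment, 1) returns 2 -- the "b" at contracted input
# index 1 sits at aligned position 2, after the epsilon-input arc is skipped.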
def _get_original_index(alignment, aligned_index):
"""
Given an index into the aligned output, returns the corresponding index in the contracted output string
"""
og_index = 0
idx = 0
while idx < aligned_index:
if alignment[idx][1] != EPS:
og_index += 1
idx += 1
return og_index
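# Illustrative example (hypothetical alignment): with
# alignment = [("a", "x"), ("b", "<eps>"), ("c", "y")],
# _get_original_index(alignment, aligned_index=2) returns 1, since only "x"
# is a real (non-eps) output symbol before aligned position 2.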
remove = lambda x: "" if x == EPS else " " if x == WHITE_SPACE else x
def indexed_map_to_output(alignment: List[tuple], start: int, end: int, mode: str):
"""
Given the start and end index of a contracted input substring, returns the corresponding output start and end index
Args:
alignment: alignment generated by the FST shortest path; longer than the original string since it includes eps transitions
start: inclusive start position in input string
end: exclusive end position in input string
mode: grammar type for either tn or itn
Returns:
output_og_start_index: inclusive start position in output string
output_og_end_index: exclusive end position in output string
"""
# get aligned start and end of input substring
aligned_start = _get_aligned_index(alignment, start)
aligned_end = _get_aligned_index(alignment, end - 1) # inclusive
logging.debug(f"0: |{list(map(remove, [x[0] for x in alignment[aligned_start:aligned_end+1]]))}|")
logging.debug(f"1: |{aligned_start}:{aligned_end+1}|")
# extend aligned_start to left
while (
aligned_start - 1 > 0
and alignment[aligned_start - 1][0] == EPS
and (alignment[aligned_start - 1][1] in tn_itn_symbols or alignment[aligned_start - 1][1] == EPS)
):
aligned_start -= 1
while (
aligned_end + 1 < len(alignment)
and alignment[aligned_end + 1][0] == EPS
and (alignment[aligned_end + 1][1] in tn_itn_symbols or alignment[aligned_end + 1][1] == EPS)
):
aligned_end += 1
if mode == TN_MODE:
while (aligned_end + 1) < len(alignment) and (
alignment[aligned_end + 1][1] in tn_itn_symbols or alignment[aligned_end + 1][1] == EPS
):
aligned_end += 1
output_og_start_index = _get_original_index(alignment=alignment, aligned_index=aligned_start)
output_og_end_index = _get_original_index(alignment=alignment, aligned_index=aligned_end + 1)
return output_og_start_index, output_og_end_index
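# Illustrative example (values taken from the module docstring): for the input
# "2615 Forest Av, 1 Aug 2016", the input span [22:26] ("2016") is mapped to
# the output span [55:69] ("twenty sixteen") in TN mode.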
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
args = parse_args()
fst = Far(args.fst, mode='r')
try:
fst = fst[args.rule]
except KeyError:
raise ValueError(f"{args.rule} not found. Please specify valid --rule argument.")
input_text = args.text
table = create_symbol_table()
alignment, output_text = get_string_alignment(fst=fst, input_text=input_text, symbol_table=table)
logging.info(f"inp string: |{args.text}|")
logging.info(f"out string: |{output_text}|")
if args.start is None:
indices = get_word_segments(input_text)
else:
indices = [(args.start, args.end)]
for x in indices:
start, end = indexed_map_to_output(start=x[0], end=x[1], alignment=alignment, mode=args.grammar)
logging.info(f"inp indices: [{x[0]}:{x[1]}] out indices: [{start}:{end}]")
logging.info(f"in: |{input_text[x[0]:x[1]]}| out: |{output_text[start:end]}|")
| NeMo-text-processing-main | nemo_text_processing/fst_alignment/alignment.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/fst_alignment/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import math
import re
from typing import List, Union
from nemo_text_processing.hybrid.mlm_scorer import MLMScorer
from tqdm import tqdm
try:
import torch
except ImportError as e:
raise ImportError("torch is not installed")
def init_models(model_name_list: str):
"""
returns a dictionary of Masked Language Model scorers keyed by HuggingFace model name;
model_name_list is a comma-separated string of model names.
"""
model_names = model_name_list.split(",")
models = {}
for model_name in model_names:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
models[model_name] = MLMScorer(model_name=model_name, device=device)
return models
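# Illustrative usage (model names are examples; any HuggingFace MLM checkpoint should work):
# models = init_models("bert-base-uncased,roberta-base")
# scorer = models["bert-base-uncased"]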
def get_score(texts: Union[List[str], str], model: MLMScorer):
"""Computes MLM score for list of text using model"""
try:
if isinstance(texts, str):
texts = [texts]
score = -1 * sum(model.score_sentences(texts)) / len(texts)
except Exception as e:
print(e)
print(f"Scoring error: {texts}")
score = math.inf
return score
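# Illustrative usage: the score is the negated average sentence pseudo-log-likelihood,
# so lower is better and math.inf marks a failed scoring attempt, e.g.
# score = get_score("the first of august", models["bert-base-uncased"])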
def get_masked_score(text, model, do_lower=True):
"""text is normalized prediction which contains <> around semiotic tokens.
If multiple tokens are present, multiple variants of the text are created where all but one ambiguous semiotic tokens are masked
to avoid unwanted reinforcement of neighboring semiotic tokens."""
text = text.lower() if do_lower else text
spans = re.findall(r"<\s.+?\s>", text)
if len(spans) > 0:
text_with_mask = []
for match in re.finditer(r"<\s.+?\s>", text):
new_text = (
text[: match.span()[0]] + match.group().replace("< ", "").replace(" >", "") + text[match.span()[1] :]
)
new_text = re.sub(r"<\s.+?\s>", model.MASK_LABEL, new_text)
text_with_mask.append(new_text)
text = text_with_mask
return get_score(text, model)
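# Illustrative example (hypothetical text): for "< twenty > dollars on < may first >"
# two variants are scored, one per unmasked span (MASK stands for model.MASK_LABEL):
#   "twenty dollars on MASK"
#   "MASK dollars on may first"
# and get_score then returns the negated average over both.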
def _get_ambiguous_positions(sentences: List[str]):
"""returns None or index list of ambigous semiotic tokens for list of sentences.
E.g. if sentences = ["< street > < three > A", "< saint > < three > A"], it returns [1, 0] since only
the first semiotic span <street>/<saint> is ambiguous."""
l_sets = [set([x]) for x in re.findall("<\s.+?\s>", sentences[0])]
for sentence in sentences[1:]:
spans = re.findall("<\s.+?\s>", sentence)
if len(spans) != len(l_sets):
return None
for i in range(len(spans)):
l_sets[i].add(spans[i])
ambiguous = []
for span in l_sets:
ambiguous.append(len(span) > 1)
return ambiguous
def score_options(sentences: List[str], context_len, model, do_lower=True):
"""return list of scores for each sentence in list where model is used for MLM Scoring."""
scores = []
if context_len is not None:
diffs = [find_diff(s, context_len) for s in sentences]
if len(set([len(d) for d in diffs])) == 1:
sentences = diffs
ambiguous_positions = None
if sentences and isinstance(sentences[0], str):
ambiguous_positions = _get_ambiguous_positions(sentences)
for sent in tqdm(sentences):
if isinstance(sent, list): # in case of set context len
option_scores = [get_masked_score(s, model, do_lower) for s in sent]
logging.debug(sent)
logging.debug(option_scores)
logging.debug("=" * 50)
if any(math.isnan(x) for x in option_scores):
av_score = math.inf
else:
av_score = round(sum(option_scores) / len(option_scores), 4)
scores.append(av_score)
elif isinstance(sent, str): # in case of full context
if ambiguous_positions:
matches = list(re.finditer(r"<\s.+?\s>", sent))
for match, pos in zip(matches[::-1], ambiguous_positions[::-1]):
if not pos:
sent = (
sent[: match.span()[0]]
+ match.group().replace("< ", "").replace(" >", "")
+ sent[match.span()[1] :]
)
scores.append(round(get_masked_score(sent, model, do_lower=do_lower), 4))
else:
raise ValueError()
return scores
def find_diff(text, context_len=3):
"""Finds parts of text normalized by WFST and returns them in list with a context of context_len"""
diffs = []
pattern_start = "< "
pattern_end = " >"
def __clean(s):
return s.replace(pattern_start, "").replace(pattern_end, "").replace("  ", " ")
index_start = 0
while pattern_start in text[index_start:]:
index_start = index_start + text[index_start:].index(pattern_start)
offset = index_start
if pattern_end in text[offset:]:
index_end = offset + text[offset:].index(pattern_end) + len(pattern_end)
center = __clean(text[index_start:index_end])
left_context = " ".join(__clean(text[:index_start]).split()[-context_len:])
if len(left_context) > 0 and text[:index_start][-1].isspace():
left_context = left_context + " "
right_context = " ".join(__clean(text[index_end:]).split()[:context_len])
if len(right_context) > 0 and text[index_end][0].isspace():
right_context = " " + right_context
diffs.append(left_context + center + right_context)
index_end += 1
index_start = index_end + 1
else:
break
if len(diffs) == 0:
diffs = [text]
return diffs
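# Illustrative example (hypothetical text):
# >>> find_diff("i paid < twenty > dollars today", context_len=1)
# ["paid twenty dollars"]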
| NeMo-text-processing-main | nemo_text_processing/hybrid/model_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import pickle
import re
import shutil
from typing import Dict, List
import model_utils
import pandas as pd
import utils
from joblib import Parallel, delayed
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from tqdm import tqdm
parser = argparse.ArgumentParser(description="Re-scoring")
parser.add_argument("--lang", default="en", type=str, choices=["en"])
parser.add_argument("--n_tagged", default=100, type=int, help="Number WFST options")
parser.add_argument("--context_len", default=-1, type=int, help="Context length, -1 to use full context")
parser.add_argument("--threshold", default=0.2, type=float, help="delta threshold value")
parser.add_argument("--overwrite_cache", action="store_true", help="overwrite cache")
parser.add_argument("--model_name", type=str, default="bert-base-uncased")
parser.add_argument("--cache_dir", default='cache', type=str, help="use cache dir")
parser.add_argument(
"--data",
default="text_normalization_dataset_files/EngConf.txt",
help="For En only. Path to a file for evaluation.",
)
parser.add_argument("--n_jobs", default=-2, type=int, help="The maximum number of concurrently running jobs")
parser.add_argument(
"--models", default="mlm_bert-base-uncased", type=str, help="Comma separated string of model names"
)
parser.add_argument(
"--regenerate_pkl",
action="store_true",
help="Set to True to re-create pickle file with WFST normalization options",
)
parser.add_argument("--batch_size", default=200, type=int, help="Batch size for parallel processing")
def rank(sentences: List[str], labels: List[int], models: Dict[str, 'MLMScorer'], context_len=None, do_lower=True):
"""
computes scores for each sentence using all provided models and returns a summary data frame
"""
df = pd.DataFrame({"sent": sentences, "labels": labels})
for model_name, model in models.items():
scores = model_utils.score_options(
sentences=sentences, context_len=context_len, model=model, do_lower=do_lower
)
df[model_name] = scores
return df
def threshold_weights(norm_texts_weights, delta: float = 0.2):
"""
norm_texts_weights: list of [List[normalized options of input], List[weights]]
delta: delta added to the minimum weight among the options to form the upper limit of the threshold
returns:
filtered list in the same format as the input
"""
# the threshold is the lowest/first weight among the normalization options of an input, plus delta
res = []
for i, options_weights in enumerate(norm_texts_weights):
thresh = options_weights[1][0] + delta # minimum weight plus delta
item = [x for x in zip(*options_weights)]
# for every input, filters out all options whose weight is not below the threshold
res.append(list(filter(lambda x: x[1] < thresh, item)))
return [list(map(list, zip(*item))) for item in res]
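# Illustrative example (hypothetical weights): with
# norm_texts_weights = [[["opt a", "opt b", "opt c"], [0.1, 0.25, 0.4]]] and delta=0.2,
# the threshold is 0.1 + 0.2 = 0.3, so only ("opt a", 0.1) and ("opt b", 0.25) survive:
# [[["opt a", "opt b"], [0.1, 0.25]]]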
def _get_unchanged_count(text):
"""
returns number of unchanged words in text
"""
exclude = '#$%&<>'
# remove normalized whitelist
text = re.sub(r"\|norm_start\|[^|]+\|norm_end\|", "", text)
# remove raw text boundaries
text = text.replace("|raw_start|", "").replace("|raw_end|", "")
start_pattern = "<"
end_pattern = ">"
text = utils.remove_punctuation(text, remove_spaces=False, do_lower=False, exclude=exclude)
text_clean = ""
for ch in text:
if ch.isalpha() or ch.isspace() or ch in [start_pattern, end_pattern]:
text_clean += ch
else:
text_clean += " " + ch + " "
text = text_clean
unchanged_count = 0
skip = False
for word in text.split():
if start_pattern == word:
skip = True
elif end_pattern == word:
skip = False
elif not skip:
unchanged_count += 1
return unchanged_count
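# Illustrative example (hypothetical text): in "i paid < twenty > dollars",
# only "i", "paid" and "dollars" lie outside the < > span, so
# _get_unchanged_count(...) returns 3.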
def _get_replacement_count(text):
"""
returns number of token replacements
"""
start_pattern = "<"
end_pattern = ">"
return min(text.count(start_pattern), text.count(end_pattern))
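# Illustrative example (hypothetical text):
# _get_replacement_count("i paid < twenty > dollars on < may first >") returns 2.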
def threshold(norm_texts_weights, unchanged=True, replacement=True):
"""
Reduces the number of WFST options before LM rescoring.
Args:
:param norm_texts_weights: WFST options with associated weight
:param unchanged: set to True to filter out examples based on number of words left unchanged
(punct is not taken into account)
:param replacement: set to True to filter out examples based on number of replacements made
(Given A and B are WFST options, if the number of unchanged for A and B are the same,
the option with a smaller number of replacements is preferable (i.e., larger span)).
:return: WFST options with associated weight (reduced)
"""
def __apply(norm_texts_weights, f, use_min=True):
inputs_filtered = []
for example in norm_texts_weights:
texts = example[0]
counts = [f(t) for t in texts]
[logging.debug(f"{c} -- {t}") for t, c in zip(texts, counts)]
target_count = min(counts) if use_min else max(counts)
filtered_texts = []
filtered_weights = []
for i, c in enumerate(counts):
if c == target_count:
filtered_texts.append(example[0][i])
filtered_weights.append(example[1][i])
inputs_filtered.append([filtered_texts, filtered_weights])
return inputs_filtered
logging.debug("BASIC THRESHOLDING INPUT:")
[logging.debug(x) for x in norm_texts_weights[0][0]]
if unchanged:
norm_texts_weights = __apply(norm_texts_weights, _get_unchanged_count)
logging.debug("AFTER UNCHANGED FILTER:")
[logging.debug(x) for x in norm_texts_weights[0][0]]
if replacement:
norm_texts_weights = __apply(norm_texts_weights, _get_replacement_count)
logging.debug("AFTER REPLACEMENT FILTER:")
[logging.debug(x) for x in norm_texts_weights[0][0]]
return norm_texts_weights
def main():
args = parser.parse_args()
logging.getLogger().setLevel(logging.INFO)
lang = args.lang
input_f = args.data
if "LibriTTS.json" in args.data:
args.dataset = "libritts"
elif "GoogleTN.json" in args.data:
args.dataset = "google"
else:
args.dataset = None
if not os.path.exists(args.data):
raise FileNotFoundError(f"{args.data} file not found")
print("Create Masked Language Model...")
models = model_utils.init_models(model_name_list=args.model_name)
input_fs = input_f.split(",")
print("LOAD DATA...")
inputs, targets, _, _ = utils.load_data(input_fs)
pre_inputs, pre_targets = utils.clean_pre_norm(dataset=args.dataset, inputs=inputs, targets=targets)
print("INIT WFST...")
normalizer = NormalizerWithAudio(
input_case="cased", lang=lang, cache_dir=args.cache_dir, lm=True, overwrite_cache=args.overwrite_cache
)
print("APPLYING NORMALIZATION RULES...")
p_file = (
f"norm_texts_weights_{args.n_tagged}_{os.path.basename(args.data)}_{args.context_len}_{args.threshold}.pkl"
)
if not os.path.exists(p_file) or args.regenerate_pkl:
print(f"Creating WFST and saving to {p_file}")
def __process_batch(batch_idx, batch, dir_name):
normalized = []
for x in tqdm(batch):
ns, ws = normalizer.normalize(x, n_tagged=args.n_tagged, punct_post_process=False)
ns = [re.sub(r"<(.+?)>", r"< \1 >", x) for x in ns]
normalized.append((ns, ws))
with open(f"{dir_name}/{batch_idx}.p", "wb") as handle:
pickle.dump(normalized, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(f"Batch -- {batch_idx} -- is complete")
return batch_idx
# to save intermediate results to a file
batch = min(len(pre_inputs), args.batch_size)
tmp_dir = f"/tmp/{os.path.basename(args.data)}"
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.makedirs(tmp_dir, exist_ok=True)
batch_ids = Parallel(n_jobs=args.n_jobs)(
delayed(__process_batch)(idx, pre_inputs[i : i + batch], tmp_dir)
for idx, i in enumerate(range(0, len(pre_inputs), batch))
)
# aggregate all intermediate results
norm_texts_weights = []
for batch_id in batch_ids:
batch_f = f"{tmp_dir}/{batch_id}.p"
norm_texts_weights.extend(pickle.load(open(batch_f, "rb")))
with open(p_file, "wb") as handle:
pickle.dump(norm_texts_weights, handle, protocol=pickle.HIGHEST_PROTOCOL)
else:
print(f"Loading WFST from {p_file}")
norm_texts_weights = pickle.load(open(p_file, "rb"))
print("THRESHOLDING...")
# apply weights threshold to reduce number of options
if args.threshold > 0:
norm_texts_weights = threshold_weights(norm_texts_weights, delta=args.threshold)
logging.debug("AFTER WEIGHTS THRESHOLDING:")
[logging.debug(x) for x in norm_texts_weights[0][0]]
# reduce number of options by selecting options with the smallest number of unchanged words
norm_texts_weights = threshold(norm_texts_weights)
print("POST PROCESSING...")
post_targets, post_norm_texts_weights = utils.clean_post_norm(
dataset=args.dataset, inputs=pre_inputs, targets=pre_targets, norm_texts=norm_texts_weights
)
print("GETTING LABELS...")
labels = utils.get_labels(targets=post_targets, norm_texts_weights=post_norm_texts_weights)
examples_with_no_labels_among_wfst = [i for i, x in enumerate(labels) if 1 not in x]
print("GATHERING STATS...")
model_stats = {m: 0 for m in models}
gt_in_options = 0
for i, example in tqdm(enumerate(zip(post_norm_texts_weights, labels))):
data, curr_labels = example
assert len(data[0]) == len(curr_labels)
df = rank(
sentences=data[0],
labels=curr_labels,
models=models,
context_len=args.context_len if args.context_len is not None and args.context_len >= 0 else None,
do_lower=True,
)
df['sent'] = df['sent'].apply(lambda x: utils.remove_whitelist_boundaries(x))
df["weights"] = data[1]
do_print = False
for model in models:
# one hot vector for predictions, 1 for the best score option
df[f"{model}_pred"] = (df[model] == min(df[model])).astype(int)
# add constrain when multiple correct labels per example
pred_is_correct = min(sum((df["labels"] == df[f"{model}_pred"]) & (df["labels"] == 1)), 1)
if not pred_is_correct or logging.getLogger().level <= logging.DEBUG:
do_print = True
if do_print:
print(f"{model} prediction is correct: {pred_is_correct == 1}")
model_stats[model] += pred_is_correct
gt_in_options += 1 in curr_labels
if do_print:
print(f"INPUT: {pre_inputs[i]}")
print(f"GT : {post_targets[i]}\n")
utils.print_df(df)
print("-" * 80 + "\n")
if gt_in_options != len(post_norm_texts_weights):
print("WFST options for some examples don't contain the ground truth:")
for i in examples_with_no_labels_among_wfst:
print(f"INPUT: {pre_inputs[i]}")
print(f"GT : {post_targets[i]}\n")
print(f"WFST:")
for x in post_norm_texts_weights[i]:
print(x)
print("=" * 40)
all_correct = True
for model, correct in model_stats.items():
print(
f"{model} -- correct: {correct}/{len(post_norm_texts_weights)} or ({round(correct/len(post_norm_texts_weights) * 100, 2)}%)"
)
all_correct = all_correct and (correct == len(post_norm_texts_weights))
print(f"examples_with_no_labels_among_wfst: {len(examples_with_no_labels_among_wfst)}")
return all_correct
if __name__ == "__main__":
all_correct = main()
print(f"all_correct: {all_correct}")
| NeMo-text-processing-main | nemo_text_processing/hybrid/wfst_lm_rescoring.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/hybrid/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import difflib
import json
import logging
import re
import string
from typing import List, Optional, Tuple, Union
import pandas as pd
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from pynini.lib.rewrite import top_rewrite
from tqdm import tqdm
DELIMITER = '~~'
cardinal_graph = CardinalFst(input_case="cased").graph_no_exception
cardinal_graph = (
pynini.closure(pynini.union("In ", "in ")) + cardinal_graph + pynini.closure(pynini.accep(" ") + cardinal_graph)
)
inverse_normalizer = InverseNormalizer()
def load_data(input_fs: List[str]):
"""
loads data from a list of absolute file paths
Returns:
inputs: List[str] list of raw input strings
targets: List[List[str]] list of targets, can contain multiple options for each target
sentences: List[List[str]] list of sentence options
labels: List[List[int]] list of labels (1,0)
"""
inputs = []
sentences = []
cur_sentences = []
labels = []
cur_labels = []
for input_f in input_fs:
if input_f.endswith(".json"):
with open(input_f, "r") as f:
for line in f:
line = json.loads(line)
try:
inputs.append(line['text'].strip())
sentences.append([line['gt_normalized'].strip()])
labels.append([1])
except Exception as e:
print(e)
raise ValueError(f"Check format for line {line}")
else:
with open(input_f, "r") as f:
for line in f:
if line != "\n":
try:
sent, label = line.strip().split(DELIMITER)
except Exception as e:
if line.startswith("#"):
continue
print(e)
raise ValueError(f"Check format for line {line}")
if label == "RAW":
inputs.append(sent)
elif label == "1":
cur_sentences.append(sent)
cur_labels.append(1)
elif label == "0":
cur_sentences.append(sent)
cur_labels.append(0)
else:
sentences.append(cur_sentences)
cur_sentences = []
labels.append(cur_labels)
cur_labels = []
if len(cur_sentences) > 0:
sentences.append(cur_sentences)
labels.append(cur_labels)
assert len(inputs) == len(sentences)
targets = [[x for i, x in enumerate(sents) if ls[i]] for (sents, ls) in zip(sentences, labels)]
return inputs, targets, sentences, labels
def remove_whitelist_boundaries(x):
# remove raw whitelist
x = re.sub(r"\|raw_start\|[^|]+\|raw_end\|", "", x)
# remove norm text boundaries
x = x.replace("|norm_start|", "").replace("|norm_end|", "")
return x
def _clean_pre_norm_libritts(inputs: List[str], targets: List[List[str]]):
"""
standardizes the format of inputs and targets before normalization, so that more rules apply.
This is specific to LibriTTS.
"""
for i in range(len(targets)):
for j in range(len(targets[i])):
targets[i][j] = clean_libri_tts(targets[i][j])
for i in range(len(inputs)):
for target in targets[i]:
diffs = get_diff(a=inputs[i].lower(), b=target.lower())
for diff in diffs[::-1]:
in_diff = inputs[i][diff[0][0] : diff[0][1]].lower()
tg_diff = target[diff[1][0] : diff[1][1]].lower()
replacement = inputs[i][: diff[0][0]] + tg_diff + inputs[i][diff[0][1] :]
if (in_diff == "s" and tg_diff == "z") or (in_diff == "z" and tg_diff == "s"):
inputs[i] = replacement
elif (in_diff == "re" and tg_diff == "er") or (in_diff == "er" and tg_diff == "re"):
inputs[i] = replacement
elif (in_diff == "me" and tg_diff == "") or (in_diff == "" and tg_diff == "me"):
inputs[i] = replacement
elif (in_diff == "ue" and tg_diff == "") or (in_diff == "" and tg_diff == "ue"):
inputs[i] = replacement
return inputs, targets
def _clean_pre_norm_google(inputs: List[str], targets: List[List[str]]):
"""
standardizes the format of inputs and targets before normalization, so that more rules apply.
This is specific to the Google dataset.
"""
for i in range(len(inputs)):
inputs[i] = re.sub(r"\$\s([0-9]{1,})", r"$\1", inputs[i])
inputs[i] = re.sub(r"\bmr ", r"Mr. ", inputs[i])
inputs[i] = re.sub(r"\bdr ", r"Dr. ", inputs[i])
inputs[i] = re.sub(r"\bdr$", r"Dr.", inputs[i])
inputs[i] = re.sub(r"\bmrs ", r"Mrs. ", inputs[i])
inputs[i] = re.sub(r"\bjr ", r"Jr. ", inputs[i])
inputs[i] = re.sub(r"\bjr$", r"Jr.", inputs[i])
inputs[i] = re.sub(r"\dsr ", r"Sr. ", inputs[i])
inputs[i] = re.sub(r"\dsr$", r"Sr.", inputs[i])
for target in targets[i]:
diffs = get_diff(a=inputs[i].lower(), b=target.lower())
for diff in diffs[::-1]:
in_diff = inputs[i][diff[0][0] : diff[0][1]].lower()
tg_diff = target[diff[1][0] : diff[1][1]].lower()
replacement = inputs[i][: diff[0][0]] + tg_diff + inputs[i][diff[0][1] :]
if (in_diff == "s" and tg_diff == "z") or (in_diff == "z" and tg_diff == "s"):
inputs[i] = replacement
elif (in_diff == "re" and tg_diff == "er") or (in_diff == "er" and tg_diff == "re"):
inputs[i] = replacement
elif (in_diff == "me" and tg_diff == "") or (in_diff == "" and tg_diff == "me"):
inputs[i] = replacement
elif (in_diff == "" and tg_diff == "u") or (in_diff == "u" and tg_diff == ""):
inputs[i] = replacement
elif (in_diff == "ue" and tg_diff == "") or (in_diff == "" and tg_diff == "ue"):
inputs[i] = replacement
elif re.sub(r"\.", "", in_diff) == re.sub(r"( |\.)", "", tg_diff):
inputs[i] = replacement
return inputs, targets
def clean_pre_norm(inputs: List[str], targets: List[List[str]], dataset: Optional[str] = None):
"""
standardizes the format of inputs and targets before normalization, so that more rules apply.
"""
# deep copy
pre_inputs = copy.deepcopy(inputs)
pre_targets = copy.deepcopy(targets)
# --- data specific pre cleaning ---
if dataset == "libritts":
pre_inputs, pre_targets = _clean_pre_norm_libritts(inputs=pre_inputs, targets=pre_targets)
elif dataset == "google":
pre_inputs, pre_targets = _clean_pre_norm_google(inputs=pre_inputs, targets=pre_targets)
else:
pass
# --- general pre cleaning ---
for i in range(len(pre_inputs)):
pre_inputs[i] = re.sub("librivox.org", "librivox dot org", pre_inputs[i])
pre_inputs[i] = re.sub(
rf"([0-9]?[0-9](\.|:)[0-9][0-9]\s?)(a|A|p|P)(\.?)\s(M|m)(\.?)", rf"\1\3\4\5\6", pre_inputs[i]
)
# pre_inputs[i] =re.sub(rf"\b(S|s)t\.", rf"saint", pre_inputs[i])
return pre_inputs, pre_targets
def _clean_post_norm_libritts(inputs: List[str], targets: List[List[str]], norm_texts):
return targets, norm_texts
def _clean_post_norm_google(inputs: List[str], targets: List[List[str]], norm_texts):
"""
standardizes the format of inputs, targets, and predicted normalizations for easier evaluation.
This is specific to the Google dataset.
"""
for i in range(len(targets)):
for target in targets[i]:
for j, norm in enumerate(norm_texts[i][0]):
diffs = get_diff(a=norm.lower(), b=target.lower())
for diff in diffs[::-1]:
norm_diff = norm[diff[0][0] : diff[0][1]].lower()
tg_diff = target[diff[1][0] : diff[1][1]].lower()
replacement = norm[: diff[0][0]] + tg_diff + norm[diff[0][1] :]
if norm_diff == re.sub(r" ", "", tg_diff):
norm_texts[i][0][j] = replacement
return targets, norm_texts
def _clean_post_general(text: str) -> str:
"""
standardizes the format of inputs, targets, and predicted normalizations for easier evaluation.
"""
text = re.sub(rf" oh ", " zero ", text)
text = re.sub(rf" oh$", " zero", text)
text = re.sub(rf"^oh ", "zero ", text)
# text = re.sub(rf" o ", " zero ", text)
text = re.sub(rf"\sO\b", " zero", text)
text = re.sub(rf" o$", " zero", text)
text = re.sub(rf"^o ", "zero ", text)
text = re.sub(rf"'o ", "'zero ", text)
text = text.replace("mountain", "mount")
return text
def _clean_targets(text: str) -> str:
"""Clean ground truth options."""
text = re.sub(rf" o ", " zero ", text)
return text
def adjust_pred(pred: str, gt: str, dataset: str, delim_present=True):
"""Standardize prediction format to make evaluation easier"""
orig_pred = pred
orig_gt = gt
if delim_present and not re.search(rf"< (.*?) >", pred):
return pred
pred = re.sub(rf"< ", "", pred)
pred = re.sub(rf" >", "", pred)
pred = pred.lower().strip()
gt = gt.lower().strip()
can_be_adjusted = False
if dataset in ["google", "libritts"] and pred != gt:
if is_date(pred=pred, gt=gt, cardinal_graph=cardinal_graph):
pred = gt
elif contains_month(pred, gt):
pred = re.sub(r",", "", pred)
gt = re.sub(r",", "", gt)
pred = re.sub(r" zero ", " o ", pred)
gt = re.sub(r" zero ", " o ", gt)
gt = re.sub(rf" +", " ", gt)
pred = re.sub(rf" +", " ", pred)
if pred != gt:
gt_itn = inverse_normalizer.normalize(gt, verbose=False)
pred_itn = inverse_normalizer.normalize(pred, verbose=False)
if len(gt_itn) == len(pred_itn) and set(gt_itn) == set(pred_itn):
can_be_adjusted = True
pred = gt
elif " of " in gt:
gt = re.sub(r"(^the | of)", "", gt)
idx = gt.index(" ")
idx2 = (gt[idx + 1 :].index(" ") if " " in gt[idx + 1 :] else len(gt[idx + 1 :])) + idx + 1
gt = gt[idx + 1 : idx2] + " " + gt[:idx] + gt[idx2:]
if dataset == "libritts" and pred != gt:
if "dollar" in gt:
gt = re.sub(rf"\band\b", "", gt)
pred = re.sub(rf"\band\b", "", pred)
if re.search(r"\bus dollar", pred) and not re.search(r"\bus dollar", gt):
pred = re.sub(rf"\bus dollar", "dollar", pred)
else:
gt = re.sub(rf"(\bthe\b|\.)", "", gt)
pred = re.sub(rf"\bone\b", "a", pred)
gt = re.sub(rf"\bmr\b", "mister", gt)
gt = re.sub(rf"\bmrs\b", "misses", gt)
gt = re.sub(rf"\bdr\b", "doctor", gt)
gt = re.sub(rf"\bco\b", "company", gt)
if gt != pred and dataset in ["google", "libritts"]:
if gt.replace("/", "").replace(" ", " ") == pred.replace("slash", "").replace(" ", " "):
pred = gt
elif gt in ["s", "z"] and pred in ["s", "z"]:
pred = gt
elif gt == "hash tag" and pred == "hash":
pred = "hash tag"
elif gt[:-2] == pred[:-2] and gt[-2:] in ["er", "re"] and pred[-2:] in ["er", "re"]:
pred = gt
# elif gt.replace("-", " ").replace(" ", " ") == pred.replace("minus", "").replace(" ", " "):
# pred = gt
elif gt.replace("to", "").replace("-", "") == pred.replace("to", "").replace("-", ""):
pred = gt
gt = re.sub(rf" +", " ", gt)
pred = re.sub(rf"(\.)", "", pred)
pred = re.sub(rf" +", " ", pred)
if gt == pred:
can_be_adjusted = True
if can_be_adjusted:
if delim_present:
res = f" < {orig_gt} > "
else:
res = orig_gt
return res
else:
return orig_pred
def clean_post_norm(
inputs: List[str],
targets: List[List[str]],
norm_texts,
dataset: Optional[str] = None,
delim_present: Optional[bool] = True,
):
"""
Args:
inputs (List[str]): inputs
targets (List[List[str]]): targets
norm_texts (List[(List[str], List[float])]): List of normalization options, weights
dataset (Optional[str], optional): _description_. Defaults to None.
delim_present (Optional[str], optional): The flag indicates whether normalization output contain delimiters "<>".
Set to False for NN baseline.
"""
# deep copy
post_norm_texts = copy.deepcopy(norm_texts)
post_targets = copy.deepcopy(targets)
# --- data specific pre cleaning ---
if dataset == "libritts":
post_targets, post_norm_texts = _clean_post_norm_libritts(
inputs=inputs, targets=post_targets, norm_texts=post_norm_texts
)
elif dataset == "google":
post_targets, post_norm_texts = _clean_post_norm_google(
inputs=inputs, targets=post_targets, norm_texts=post_norm_texts
)
else:
pass
# --- general pre cleaning ---
for i in range(len(targets)):
for j, x in enumerate(post_targets[i]):
post_targets[i][j] = _clean_post_general(x)
for j, x in enumerate(post_norm_texts[i][0]):
if x.count("< ") != x.count(" >"):
x = x.replace("<", "< ").replace(">", " >").replace(" ", " ")
post_norm_texts[i][0][j] = _clean_post_general(x)
if dataset in ["libritts", "google"]:
for i, _targets in enumerate(post_targets):
for jj, option in enumerate(post_norm_texts[i][0]):
for _, _target in enumerate(_targets):
if not delim_present:
# the NN output doesn't include punctuation marks, which leads to diff_pred_gt mismatches
_target = remove_punctuation(_target, remove_spaces=False, do_lower=True)
option = remove_punctuation(option, remove_spaces=False, do_lower=True)
diffs = diff_pred_gt(pred=option, gt=_target)
for diff in diffs[::-1]:
if diff[0][1] - diff[0][0] == 0 and diff[1][1] - diff[1][0] == 0:
continue
pred = option[diff[0][0] : diff[0][1]]
gt = _target[diff[1][0] : diff[1][1]]
logging.debug(f"pred: |{pred}|\tgt: |{gt}|")
new_pred = adjust_pred(pred=pred, gt=gt, dataset=dataset, delim_present=delim_present)
new_pred = (
post_norm_texts[i][0][jj][: diff[0][0]]
+ new_pred
+ post_norm_texts[i][0][jj][diff[0][1] :]
)
logging.debug(f"|{post_norm_texts[i][0][jj]}| -> |{new_pred}|")
post_norm_texts[i][0][jj] = new_pred
return post_targets, post_norm_texts
def clean_libri_tts(target: str):
"""
Replace abbreviations in LibriTTS dataset
"""
# In LibriTTS text normalized by Google, abbreviations from `libri_sometimes_converts_abbrs` were sometimes left unexpanded.
libri_sometimes_converts_abbrs = {"St.": "saint", "Rev.": "reverend"}
# In LibriTTS text normalized by Google, abbreviations from `libri_wo_changes_abbrs` were never expanded.
libri_wo_changes_abbrs = {"vs.": "versus"}
google_abbr2expand = {
"mr": "mister",
"Mr": "Mister",
"mrs": "misses",
"Mrs": "Misses",
"dr": "doctor",
"Dr": "Doctor",
"drs": "doctors",
"Drs": "Doctors",
"lt": "lieutenant",
"Lt": "Lieutenant",
"sgt": "sergeant",
"Sgt": "Sergeant",
"st": "saint",
"St": "Saint",
"jr": "junior",
"Jr": "Junior",
"maj": "major",
"Maj": "Major",
"hon": "honorable",
"Hon": "Honorable",
"gov": "governor",
"Gov": "Governor",
"capt": "captain",
"Capt": "Captain",
"esq": "esquire",
"Esq": "Esquire",
"gen": "general",
"Gen": "General",
"ltd": "limited",
"Ltd": "Limited",
"rev": "reverend",
"Rev": "Reverend",
"col": "colonel",
"Col": "Colonel",
"and co": "and Company",
"and Co": "and Company",
"mt": "mount",
"Mt": "Mount",
"ft": "fort",
"Ft": "Fort",
"tenn": "tennessee",
"Tenn": "Tennessee",
"vs": "versus",
"Vs": "Versus",
"&": "and",
"§": "section",
"#": "hash",
"=": "equals",
}
# let's normalize the abbreviations in `google_abbr2expand`, because google doesn't do it well
for abbr in google_abbr2expand.keys():
if abbr in target:
# replace the abbreviation via regex, using boundary groups to match only whole words; keep the original groups 1 and 2
target = re.sub(rf'(^|\s|\W){abbr}($|\s)', rf"\1{google_abbr2expand[abbr]}\2", target)
# let's normalize `libri_sometimes_converts_abbrs` abbreviations manually, google sometimes forgets to expand them
for abbr, t in libri_sometimes_converts_abbrs.items():
target = target.replace(abbr, t)
# let's normalize `libri_wo_changes_abbrs` abbreviations manually, google doesn't change, but they should be
for abbr, t in libri_wo_changes_abbrs.items():
target = target.replace(abbr, t)
return target
def remove_punctuation(text: str, remove_spaces=True, do_lower=True, lang="en", exclude=None):
"""Removes punctuation (and optionally spaces) in text for better evaluation"""
all_punct_marks = string.punctuation
if exclude is not None:
for p in exclude:
all_punct_marks = all_punct_marks.replace(p, "")
text = re.sub("[" + all_punct_marks + "]", " ", text)
if lang == "en":
# remove things like \x94 and \x93
text = re.sub(r"[^\x00-\x7f]", r" ", text)
text = re.sub(r" +", " ", text)
if remove_spaces:
text = text.replace(" ", "").replace("\u00A0", "").strip()
if do_lower:
text = text.lower()
return text.strip()
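# Illustrative examples (hypothetical inputs):
# remove_punctuation("Hello, World!")                      -> "helloworld"
# remove_punctuation("Hello, World!", remove_spaces=False) -> "hello world"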
def get_alternative_label(pred: str, targets: List[str]) -> bool:
"""Returns true if prediction matches target options"""
def _relax_diff(text):
text = text.replace("us dollars", "dollars")
text = text.replace("etcetera", "").replace("etc", "")
text = text.replace("one half ounce", "").replace("half an ounce", "")
text = text.replace("television", "").replace("t v ", " ").replace("tv", "")
text = text.replace("hundred", "")
text = text.replace("forty two", "").replace("four two", "")
text = text.replace("re", "").replace("er", "")
text = text.replace("ou", "").replace("o", "")
text = text.replace(" ", " ").strip()
return text
acceptable = False
pred = remove_punctuation(pred, remove_spaces=False, do_lower=True)
for target in targets:
target = _clean_post_general(remove_punctuation(target, remove_spaces=False, do_lower=True))
target = _clean_targets(remove_punctuation(target, remove_spaces=False, do_lower=True))
if _relax_diff(target) == _relax_diff(pred):
acceptable = True
break
return acceptable
def get_labels(targets: List[str], norm_texts_weights: List[Tuple[List[str], List[float]]], lang="en",) -> List[List[int]]:
"""
Assign labels to generated normalization options (1 - for ground truth, 0 - other options)
Args:
targets: ground truth normalization sentences
norm_texts_weights: List of tuples: (normalization options, weights of normalization options)
returns:
List of labels [1, 0] for every normalization option
"""
print("Assign labels to generated normalization options...")
labels = []
for i, cur_targets in tqdm(enumerate(targets)):
curr_labels = []
cur_targets = [_clean_targets(t) for t in cur_targets]
for norm_option in norm_texts_weights[i][0]:
norm_option = _clean_targets(norm_option)
norm_option = remove_whitelist_boundaries(norm_option)
if is_correct(pred=norm_option, targets=cur_targets, lang=lang):
curr_labels.append(1)
elif get_alternative_label(pred=norm_option, targets=cur_targets):
curr_labels.append(1)
else:
curr_labels.append(0)
labels.append(curr_labels)
return labels
def contains_month(pred, gt):
"""Check is the pred/gt contain month in the span"""
months = [
"january",
"february",
"march",
"april",
"may",
"june",
"july",
"august",
"september",
"october",
"november",
"december",
]
for mon in months:
if mon in gt and mon in pred:
return True
return False
def is_date(pred, gt, cardinal_graph):
"""Returns True is pred and gt are date format modifications and are equal."""
is_date_case = False
# for cases "1890" -> "one thousand eight hundred ninety" vs "eighteen ninety"
if "thousand" in pred and "hundred" in pred and pred.strip().split()[-2:] == gt.strip().split()[-2:]:
is_date_case = True
elif "thousand" in gt and "hundred" in gt and gt.strip().split()[-2:] == pred.strip().split()[-2:]:
is_date_case = True
else:
try:
if top_rewrite(gt.replace(" oh ", " zero ").replace(" o ", " zero "), cardinal_graph).replace(
" ", ""
) == top_rewrite(pred.replace(" oh ", " zero ").replace(" o ", " zero "), cardinal_graph).replace(" ", ""):
is_date_case = True
except Exception:
pass
return is_date_case
def is_correct(pred: str, targets: Union[List[str], str], lang: str) -> bool:
"""
returns True if prediction matches targets for language lang.
"""
if isinstance(targets, List):
targets = [remove_punctuation(x, remove_spaces=True, do_lower=True, lang=lang) for x in targets]
else:
targets = [remove_punctuation(targets, remove_spaces=True, do_lower=True, lang=lang)]
pred = remove_punctuation(pred, remove_spaces=True, do_lower=True)
return pred in targets
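# Illustrative example (hypothetical strings):
# is_correct("Twenty-two.", ["twenty two"], lang="en") -> True
# (both sides reduce to "twentytwo" after punctuation/space removal and lowering)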
def print_df(df):
"""
prints data frame
"""
with pd.option_context(
"display.max_rows", None, "display.max_columns", None, "display.width", 1000, "display.max_colwidth", 400,
):
print(df)
def get_diff(a: str, b: str):
"""returns list of different substrings between and b
Returns:
list of Tuple(pred start and end, gt start and end) subsections
"""
s = difflib.SequenceMatcher(None, a, b, autojunk=False)
# s contains a list of triples. Each triple is of the form (i, j, n), and means that a[i:i+n] == b[j:j+n].
# The triples are monotonically increasing in i and in j.
s = s.get_matching_blocks()
s = [x for x in s if x[2] != 1]
# get not matching blocks
matches = [[0, 0, 0]] + s
unmatches_l = []
unmatches_r = []
for l, r in zip(matches[:-1], matches[1:]):
unmatches_l.append([l[0] + l[2], r[0]])
unmatches_r.append([l[1] + l[2], r[1]])
result = list(zip(unmatches_l, unmatches_r))
for item in list(zip(unmatches_l, unmatches_r)):
logging.debug(f"a: {a[item[0][0]:item[0][1]]}")
logging.debug(f"b: {b[item[1][0]:item[1][1]]}")
logging.debug("=" * 20)
return result[1:]
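# Illustrative example (hypothetical strings):
# >>> get_diff("in 1990 he left", "in nineteen ninety he left")
# [([3, 7], [3, 18]), ([15, 15], [26, 26])]
# i.e. "1990" aligns with "nineteen ninety"; the final empty pair marks the string ends.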
def diff_pred_gt(pred: str, gt: str):
"""returns list of different substrings between prediction and gt
relies on that prediction uses '< ' ' >'
Args:
pred (str): prediction
gt (str): ground truth
Returns:
list of Tuple(pred start and end, gt start and end) subsections
e.g. pred="< Edward third >., king Our own . loss had been < two thousand two hundred >"
gt ="Edward III., king Our own loss had been twenty two hundred"
--> [([0, 16], [0, 10]), ([32, 34], [26, 26]), ([48, 76], [40, 58])]
"""
s = difflib.SequenceMatcher(None, pred, gt, autojunk=False)
# s contains a list of triples. Each triple is of the form (i, j, n), and means that a[i:i+n] == b[j:j+n].
# The triples are monotonically increasing in i and in j.
s = s.get_matching_blocks()
left = list(re.finditer("< ", pred))
left = [x.start() for x in left]
right = list(re.finditer(" >", pred))
right = [x.end() for x in right]
left = [-1] + left + [len(pred)]
right = [0] + right + [len(pred)]
matches = []
assert len(left) == len(right)
idx = 1
for i, seq in enumerate(s):
if i == len(s) - 1 and seq[2] == 0:
break
while idx < len(left) - 1 and (seq[0] >= right[idx]):
idx += 1
if right[idx - 1] <= seq[0] < left[idx] and (seq[0] + seq[2]) <= left[idx]:
matches.append(seq)
# get not matching blocks
matches = [[0, 0, 0]] + matches + [[len(pred), len(gt), 0]]
unmatches_l = []
unmatches_r = []
for l, r in zip(matches[:-1], matches[1:]):
unmatches_l.append([l[0] + l[2], r[0]])
unmatches_r.append([l[1] + l[2], r[1]])
result = list(zip(unmatches_l, unmatches_r))
for item in list(zip(unmatches_l, unmatches_r)):
logging.debug(f"pred: {pred[item[0][0]:item[0][1]]}")
logging.debug(f"gt : {gt[item[1][0]:item[1][1]]}")
logging.debug("=" * 20)
return result
| NeMo-text-processing-main | nemo_text_processing/hybrid/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2020 AWSLABS, AMAZON.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
import numpy as np
try:
import torch
from torch.nn.functional import softmax
except ImportError as e:
raise ImportError("torch is not installed")
try:
from transformers import AutoModelForMaskedLM, AutoTokenizer
except ImportError as e:
raise ImportError("transformers is not installed")
__all__ = ['MLMScorer']
class MLMScorer:
def __init__(self, model_name: str, device: str = 'cpu'):
"""
Creates MLM scorer from https://arxiv.org/abs/1910.14659.
Args:
model_name: HuggingFace pretrained model name
device: either 'cpu' or 'cuda'
"""
self.model = AutoModelForMaskedLM.from_pretrained(model_name).to(device).eval()
self.tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
self.device = device
self.MASK_LABEL = self.tokenizer.mask_token
def score_sentences(self, sentences: List[str]):
"""
returns list of MLM scores for each sentence in list.
"""
return [self.score_sentence(sentence) for sentence in sentences]
def score_sentence(self, sentence: str):
"""
returns MLM score for sentence.
"""
assert isinstance(sentence, str)
tokens = self.tokenizer.tokenize(sentence)
mask_idx = []
token_type = []
attn_mask = []
ids = []
for m_idx, _ in enumerate(tokens):
masked = self.__mask_text__(m_idx, tokens)
mask_idx.append(m_idx)
ids.append(self.tokenizer.encode(masked))
id_len = len(ids[-1])
token_type.append([0] * id_len)
attn_mask.append([1] * id_len)
data = {
'input_ids': torch.tensor(ids, device=self.device),
'attention_mask': torch.tensor(attn_mask, device=self.device),
'token_type_ids': torch.tensor(token_type, device=self.device),
}
with torch.no_grad():
outputs = self.model(**data)
logits = outputs.logits
scores = []
scores_log_prob = 0.0
for i, m_idx in enumerate(mask_idx):
preds = logits[i].squeeze(0)
probs = softmax(preds, dim=1)
token_id = self.tokenizer.convert_tokens_to_ids([tokens[m_idx]])[0]
log_prob = np.log(probs[m_idx + 1, token_id].cpu().numpy()).item()
scores.append(log_prob)
scores_log_prob += log_prob
return scores_log_prob
def __mask_text__(self, idx: int, tokens: List[str]):
"""
replaces string at index idx in list `tokens` with a masked token and returns the modified list.
"""
masked = tokens.copy()
masked[idx] = self.MASK_LABEL
return masked
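# Illustrative usage (downloads the checkpoint on first use; the model name is an example):
# scorer = MLMScorer(model_name="bert-base-uncased", device="cpu")
# scorer.score_sentences(["the first of august"])  # -> [summed token log-probs]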
| NeMo-text-processing-main | nemo_text_processing/hybrid/mlm_scorer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os.path
import shutil
import tarfile
import tempfile
import urllib.request
from os import mkdir
from os.path import dirname, exists, getsize, join
from pathlib import Path
from shutil import rmtree
import pytest
# Those variables probably should go to main NeMo configuration file (config.yaml).
__TEST_DATA_FILENAME = "test_data.tar.gz"
__TEST_DATA_URL = "https://github.com/NVIDIA/NeMo/releases/download/v1.0.0rc1/"
__TEST_DATA_SUBDIR = ".data"
def pytest_addoption(parser):
"""
Additional command-line arguments passed to pytest.
For now:
--cpu: use CPU during testing (DEFAULT: GPU)
--use_local_test_data: use local test data/skip downloading from URL/GitHub (DEFAULT: False)
"""
parser.addoption(
'--cpu', action='store_true', help="pass that argument to use CPU during testing (DEFAULT: False = GPU)"
)
parser.addoption(
'--use_local_test_data',
action='store_true',
help="pass that argument to use local test data/skip downloading from URL/GitHub (DEFAULT: False)",
)
parser.addoption(
'--with_downloads',
action='store_true',
help="pass this argument to active tests which download models from the cloud.",
)
parser.addoption(
'--tn_cache_dir',
type=str,
default=None,
help="path to a directory with .far grammars for CPU TN/ITN tests, (DEFAULT: None, i.e. no cache)",
)
parser.addoption(
'--run_audio_based', action='store_true', help="pass this argument to run audio-based TN tests",
)
@pytest.fixture
def device(request):
""" Simple fixture returning string denoting the device [CPU | GPU] """
if request.config.getoption("--cpu"):
return "CPU"
else:
return "GPU"
@pytest.fixture(autouse=True)
def run_only_on_device_fixture(request, device):
if request.node.get_closest_marker('run_only_on'):
if request.node.get_closest_marker('run_only_on').args[0] != device:
pytest.skip('skipped on this device: {}'.format(device))
@pytest.fixture(autouse=True)
def downloads_weights(request, device):
if request.node.get_closest_marker('with_downloads'):
if not request.config.getoption("--with_downloads"):
pytest.skip(
'To run this test, pass --with_downloads option. It will download (and cache) models from cloud.'
)
@pytest.fixture(autouse=True)
def cleanup_local_folder():
# Asserts in fixture are not recommended, but I'd rather stop users from deleting expensive training runs
assert not Path("./lightning_logs").exists()
assert not Path("./NeMo_experiments").exists()
assert not Path("./nemo_experiments").exists()
yield
if Path("./lightning_logs").exists():
rmtree('./lightning_logs', ignore_errors=True)
if Path("./NeMo_experiments").exists():
rmtree('./NeMo_experiments', ignore_errors=True)
if Path("./nemo_experiments").exists():
rmtree('./nemo_experiments', ignore_errors=True)
@pytest.fixture
def test_data_dir():
""" Fixture returns test_data_dir. """
# Test dir.
test_data_dir_ = join(dirname(__file__), __TEST_DATA_SUBDIR)
return test_data_dir_
def extract_data_from_tar(test_dir, test_data_archive, url=None, local_data=False):
# Remove .data folder.
if exists(test_dir):
if not local_data:
rmtree(test_dir)
else:
with tempfile.TemporaryDirectory() as temp_dir:
print("Copying local tarfile to temporary storage..")
shutil.copy2(test_data_archive, temp_dir)
print("Deleting test dir to cleanup old data")
rmtree(test_dir)
mkdir(test_dir)
print("Restoring local tarfile to test dir")
shutil.copy2(os.path.join(temp_dir, os.path.basename(test_data_archive)), test_data_archive)
# Create one .data folder.
if not exists(test_dir):
mkdir(test_dir)
# Download (if required)
if url is not None and not local_data:
urllib.request.urlretrieve(url, test_data_archive)
# Extract tar
print("Extracting the `{}` test archive, please wait...".format(test_data_archive))
tar = tarfile.open(test_data_archive)
tar.extractall(path=test_dir)
tar.close()
def pytest_configure(config):
"""
Initial configuration of conftest.
The function checks if test_data.tar.gz is present in tests/.data.
If so, compares its size with github's test_data.tar.gz.
If file absent or sizes not equal, function downloads the archive from github and unpacks it.
"""
config.addinivalue_line(
"markers", "run_only_on(device): runs the test only on a given device [CPU | GPU]",
)
config.addinivalue_line(
"markers", "with_downloads: runs the test using data present in tests/.data",
)
# Test dir and archive filepath.
test_dir = join(dirname(__file__), __TEST_DATA_SUBDIR)
test_data_archive = join(dirname(__file__), __TEST_DATA_SUBDIR, __TEST_DATA_FILENAME)
# Get size of local test_data archive.
try:
test_data_local_size = getsize(test_data_archive)
except OSError:
# File does not exist.
test_data_local_size = -1
if config.option.use_local_test_data:
if test_data_local_size == -1:
pytest.exit("Test data `{}` is not present in the system".format(test_data_archive))
else:
print(
"Using the local `{}` test archive ({}B) found in the `{}` folder.".format(
__TEST_DATA_FILENAME, test_data_local_size, test_dir
)
)
# Get size of remote test_data archive.
url = None
if not config.option.use_local_test_data:
try:
url = __TEST_DATA_URL + __TEST_DATA_FILENAME
u = urllib.request.urlopen(url)
except Exception:
# Couldn't access remote archive.
if test_data_local_size == -1:
pytest.exit("Test data not present in the system and cannot access the '{}' URL".format(url))
else:
print(
"Cannot access the '{}' URL, using the test data ({}B) found in the `{}` folder.".format(
url, test_data_local_size, test_dir
)
)
return
# Get metadata.
meta = u.info()
test_data_remote_size = int(meta["Content-Length"])
# Compare sizes.
if test_data_local_size != test_data_remote_size:
print(
"Downloading the `{}` test archive from `{}`, please wait...".format(
__TEST_DATA_FILENAME, __TEST_DATA_URL
)
)
extract_data_from_tar(test_dir, test_data_archive, url=url, local_data=config.option.use_local_test_data)
else:
print(
"A valid `{}` test archive ({}B) found in the `{}` folder.".format(
__TEST_DATA_FILENAME, test_data_local_size, test_dir
)
)
else:
# untar local test data
extract_data_from_tar(test_dir, test_data_archive, local_data=config.option.use_local_test_data)
# Set cache directory for TN/ITN tests
from .nemo_text_processing.utils import set_cache_dir, set_audio_based_tests
set_cache_dir(config.option.tn_cache_dir)
set_audio_based_tests(config.option.run_audio_based)
| NeMo-text-processing-main | tests/conftest.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
CACHE_DIR = None
RUN_AUDIO_BASED_TESTS = False
def set_cache_dir(path: str = None):
"""
Sets cache directory for TN/ITN unittests. Default is None, e.g. no cache during tests.
"""
global CACHE_DIR
CACHE_DIR = path
def set_audio_based_tests(run_audio_based: bool = False):
"""
Sets audio-based test mode for TN/ITN unittests. Default is False, e.g. audio-based tests will be skipped.
"""
global RUN_AUDIO_BASED_TESTS
RUN_AUDIO_BASED_TESTS = run_audio_based
def parse_test_case_file(file_name: str):
"""
Prepares test pairs for ITN and TN tests
"""
test_pairs = []
with open(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + file_name, 'r') as f:
for line in f:
components = line.strip("\n").split("~")
spoken = components[0]
"""
Some transformations can have multiple correct forms. Instead of
asserting against a single expected value, we assert that the
output matches any of the correct forms.
Example: 200 can be "doscientos" or "doscientas" in Spanish
Test data: 200~doscientos~doscientas
Evaluation: ASSERT "doscientos" in ["doscientos", "doscientas"]
"""
written = components[1] if len(components) == 2 else components[1:]
test_pairs.append((spoken, written))
return test_pairs
def get_test_cases_multiple(file_name: str = 'data_text_normalization/en/test_cases_normalize_with_audio.txt'):
"""
Prepares test pairs for audio-based TN tests
"""
test_pairs = []
with open(os.path.dirname(os.path.abspath(__file__)) + os.path.sep + file_name, 'r') as f:
written = None
normalized_options = []
for line in f:
if line.startswith('~'):
if written:
test_pairs.append((written, normalized_options))
normalized_options = []
written = line.strip().replace('~', '')
else:
normalized_options.append(line.strip())
test_pairs.append((written, normalized_options))
return test_pairs
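# Sketch of the file layout get_test_cases_multiple expects: a line starting
# with '~' carries the written form, and each following line is one accepted
# normalization. For example (hypothetical content):
#
#     ~$5
#     five dollars
#     five bucks
#
# parses to [("$5", ["five dollars", "five bucks"])].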
| NeMo-text-processing-main | tests/nemo_text_processing/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
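# The try/except above keeps this module importable when pynini is missing;
# the skipif marker on the test then reports a skip instead of a collection error.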
class TestOrdinal:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_ordinal.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestMeasure:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_measure.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestTime:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_time.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestDecimal:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_decimal.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestFraction:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_fraction.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/vi/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestElectronic:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_electronic.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestTelephone:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_telephone.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestCardinal:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_cardinal.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestDate:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_date.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestWord:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_word.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_word.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestMoney:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_money.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
try:
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
PYNINI_AVAILABLE = False
class TestWhitelist:
inverse_normalizer = (
InverseNormalizer(lang='vi', cache_dir=CACHE_DIR, overwrite_cache=False) if PYNINI_AVAILABLE else None
)
@parameterized.expand(parse_test_case_file('vi/data_inverse_text_normalization/test_cases_whitelist.txt'))
@pytest.mark.skipif(
not PYNINI_AVAILABLE,
reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
)
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/vi/test_whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestOrdinal:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_ordinal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_ordinal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=500, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestMeasure:
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_measure.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestTime:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_time.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_time.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=500, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestDecimal:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_decimal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_decimal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=50, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_decimal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, get_test_cases_multiple
class TestNormalizeWithAudio:
normalizer_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(get_test_cases_multiple('sv/data_text_normalization/test_cases_normalize_with_audio.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
if self.normalizer_with_audio:
pred = self.normalizer_with_audio.normalize(test_input, n_tagged=150, punct_post_process=False)
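            # The audio-based normalizer returns up to n_tagged candidate strings;
            # the check below requires every expected option to appear among them.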
assert len(set(pred).intersection(set(expected))) == len(
expected
), f'missing: {set(expected).difference(set(pred))}'
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_normalization_with_audio.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestFraction:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_fraction.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_fraction.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/sv/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestElectronic:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_electronic.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_electronic.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=50, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestTelephone:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_telephone.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_telephone.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=30, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestCardinal:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_cardinal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_cardinal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=50, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestDate:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_date.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_date.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=150, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestWord:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_word.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_word.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected, f"input: {test_input}"
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=150, punct_post_process=False
)
assert expected in pred_non_deterministic, f"input: {test_input}"
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_word.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestMoney:
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_money.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
from parameterized import parameterized
from ..utils import CACHE_DIR, RUN_AUDIO_BASED_TESTS, parse_test_case_file
class TestWhitelist:
inverse_normalizer_sv = InverseNormalizer(lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
inverse_normalizer_sv_cased = InverseNormalizer(
lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False, input_case="cased"
)
@parameterized.expand(parse_test_case_file('sv/data_inverse_text_normalization/test_cases_whitelist.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_denorm(self, test_input, expected):
pred = self.inverse_normalizer_sv.inverse_normalize(test_input, verbose=False)
assert pred == expected
pred = self.inverse_normalizer_sv_cased.inverse_normalize(test_input, verbose=False)
assert pred == expected
normalizer_sv = Normalizer(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
normalizer_sv_with_audio = (
NormalizerWithAudio(input_case='cased', lang='sv', cache_dir=CACHE_DIR, overwrite_cache=False)
if RUN_AUDIO_BASED_TESTS
else None
)
@parameterized.expand(parse_test_case_file('sv/data_text_normalization/test_cases_whitelist.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_norm(self, test_input, expected):
pred = self.normalizer_sv.normalize(test_input, verbose=False)
assert pred == expected
if self.normalizer_sv_with_audio:
pred_non_deterministic = self.normalizer_sv_with_audio.normalize(
test_input, n_tagged=10, punct_post_process=False
)
assert expected in pred_non_deterministic
| NeMo-text-processing-main | tests/nemo_text_processing/sv/test_whitelist.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/sv/data_text_normalization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/sv/data_inverse_text_normalization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.utils_audio_based import get_alignment
class TestAudioBasedTNUtils:
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
def test_default(self):
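        # The misspellings below ('iss', 'for', 'nubmer', the truncated 'g k p')
        # are intentional test data: they simulate ASR errors and normalization
        # mismatches that get_alignment is meant to localize.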
raw = 'This is #4 ranking on G.S.K.T.'
pred_text = 'this iss for ranking on g k p'
norm = 'This is nubmer four ranking on GSKT'
output = get_alignment(raw, norm, pred_text, True)
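        # One reading of the expected tuple, inferred from the values below:
        # (differing raw spans, the matching pred_text spans, the matching
        # normalized spans, the segmented text with [SEMIOTIC_SPAN] placeholders,
        # and the indices of those placeholders).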
reference = (
['is #4', 'G.S.K.T.'],
['iss for', 'g k p'],
['is nubmer four', 'GSKT'],
['This', '[SEMIOTIC_SPAN]', 'ranking', 'on', '[SEMIOTIC_SPAN]'],
[1, 4],
)
assert output == reference
| NeMo-text-processing-main | tests/nemo_text_processing/audio_based_utils/test_audio_based_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/audio_based_utils/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
class TestMeasure:
normalizer = Normalizer(lang='it', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')
@parameterized.expand(parse_test_case_file('it/data_text_normalization/test_cases_measure.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
    def test_norm(self, test_input, expected):
preds = self.normalizer.normalize(test_input, punct_post_process=True)
assert expected == preds
| NeMo-text-processing-main | tests/nemo_text_processing/it/test_measure.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized
from ..utils import CACHE_DIR, parse_test_case_file
class TestDecimal:
normalizer = Normalizer(lang='it', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')
@parameterized.expand(parse_test_case_file('it/data_text_normalization/test_cases_decimal.txt'))
@pytest.mark.run_only_on('CPU')
@pytest.mark.unit
    def test_norm(self, test_input, expected):
preds = self.normalizer.normalize(test_input)
assert expected == preds
| NeMo-text-processing-main | tests/nemo_text_processing/it/test_decimal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/it/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestElectronic:
    normalizer = Normalizer(lang='it', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')

    @parameterized.expand(parse_test_case_file('it/data_text_normalization/test_cases_electronic.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_electronic(self, test_input, expected):
        preds = self.normalizer.normalize(test_input, punct_post_process=True)
        assert expected == preds
| NeMo-text-processing-main | tests/nemo_text_processing/it/test_electronic.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestCardinal:
    normalizer = Normalizer(lang='it', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')

    @parameterized.expand(parse_test_case_file('it/data_text_normalization/test_cases_cardinal.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_cardinal(self, test_input, expected):
        preds = self.normalizer.normalize(test_input)
        assert expected == preds
| NeMo-text-processing-main | tests/nemo_text_processing/it/test_cardinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestMoney:
    normalizer = Normalizer(lang='it', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')

    @parameterized.expand(parse_test_case_file('it/data_text_normalization/test_cases_money.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_money(self, test_input, expected):
        preds = self.normalizer.normalize(test_input)
        assert expected == preds
| NeMo-text-processing-main | tests/nemo_text_processing/it/test_money.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.text_normalization.normalize import Normalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestWhitelist:
    normalizer = Normalizer(lang='it', cache_dir=CACHE_DIR, overwrite_cache=False, input_case='cased')

    @parameterized.expand(parse_test_case_file('it/data_text_normalization/test_cases_whitelist.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_whitelist(self, test_input, expected):
        preds = self.normalizer.normalize(test_input)
        assert expected == preds
| NeMo-text-processing-main | tests/nemo_text_processing/it/test_whitelist.py |
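Taken together, the six Italian modules share one pattern: a single cased Normalizer per class, with only the data file and the punct_post_process flag varying. Below is a sketch of the same call outside pytest, assuming the Italian grammars build successfully; the sample string and its expansion are illustrative, not taken from the repo's data files.

from nemo_text_processing.text_normalization.normalize import Normalizer

normalizer = Normalizer(lang='it', input_case='cased', cache_dir=None, overwrite_cache=False)
# punct_post_process cleans up punctuation spacing after verbalization
print(normalizer.normalize('23 kg', punct_post_process=True))  # e.g. "ventitré chilogrammi"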
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file

try:
    from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer

    PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    PYNINI_AVAILABLE = False


class TestRuInverseNormalize:
    normalizer = InverseNormalizer(lang='ru', cache_dir=CACHE_DIR) if PYNINI_AVAILABLE else None

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_cardinal.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_cardinal(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_ordinal.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_ordinal(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    # @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_ordinal_hard.txt'))
    # @pytest.mark.run_only_on('CPU')
    # @pytest.mark.unit
    # def test_denorm_ordinal_hard(self, test_input, expected):
    #     pred = self.normalizer.inverse_normalize(test_input, verbose=False)
    #     assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_decimal.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_decimal(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_electronic.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_electronic(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_date.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_date(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_measure.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_measure(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_money.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_money(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_time.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_time(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_whitelist.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_whitelist(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred

    @parameterized.expand(parse_test_case_file('ru/data_inverse_text_normalization/test_cases_word.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm_word(self, test_input, expected):
        pred = self.normalizer.inverse_normalize(test_input, verbose=False)
        assert expected == pred
| NeMo-text-processing-main | tests/nemo_text_processing/ru/test_ru_inverse_normalization.py |
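The Russian inverse-normalization tests above only exercise inverse_normalize through parameterized data files. For orientation, here is a hedged standalone sketch, assuming pynini and the Russian grammars are installed; the sample phrase and output are illustrative.

from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer

inverse_normalizer = InverseNormalizer(lang='ru', cache_dir=None)
# Spoken-form Russian in, written form out, e.g. "двадцать три" -> "23"
print(inverse_normalizer.inverse_normalize('двадцать три', verbose=False))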
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file

try:
    from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio

    PYNINI_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    PYNINI_AVAILABLE = False


class TestRuNormalizeWithAudio:
    normalizer = NormalizerWithAudio(input_case='cased', lang='ru', cache_dir=CACHE_DIR) if PYNINI_AVAILABLE else None
    N_TAGGED = 100

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_cardinal.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_cardinal(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=50)
        assert expected in preds

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_ordinal.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_ordinal(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_decimal.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_decimal(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_measure.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_measure(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_date.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_date(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds, f"{expected} not in {preds}"

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_telephone.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_telephone(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds, f"{expected} not in {preds}"

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_money.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_money(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds, f"{expected} not in {preds}"

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_time.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_time(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds, f"{expected} not in {preds}"

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_electronic.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_electronic(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_whitelist.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_whitelist(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds

    @parameterized.expand(parse_test_case_file('ru/data_text_normalization/test_cases_word.txt'))
    @pytest.mark.skipif(
        not PYNINI_AVAILABLE,
        reason="`pynini` not installed, please install via nemo_text_processing/pynini_install.sh",
    )
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_norm_word(self, expected, test_input):
        preds = self.normalizer.normalize(test_input, n_tagged=self.N_TAGGED)
        assert expected in preds
| NeMo-text-processing-main | tests/nemo_text_processing/ru/test_ru_normalization.py |
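Unlike the deterministic Normalizer, NormalizerWithAudio produces up to n_tagged candidate verbalizations, which is why the class above asserts membership ("expected in preds") rather than equality. A sketch, assuming the Russian grammars are available; the sample input is illustrative.

from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio

normalizer = NormalizerWithAudio(input_case='cased', lang='ru', cache_dir=None)
# Russian numerals inflect, so many readings of one written form are expected
candidates = normalizer.normalize('123', n_tagged=100)
print(len(candidates))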
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/ru/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/ru/data_text_normalization/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestOrdinal:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_ordinal.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_ordinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestMeasure:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_measure.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestTime:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_time.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_time.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestDecimal:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_decimal.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_decimal.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | tests/nemo_text_processing/pt/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestElectronic:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_electronic.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_electronic.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestTelephone:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_telephone.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestCardinal:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_cardinal.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestDate:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_date.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer
from parameterized import parameterized

from ..utils import CACHE_DIR, parse_test_case_file


class TestWord:
    inverse_normalizer = InverseNormalizer(lang='pt', cache_dir=CACHE_DIR, overwrite_cache=False)

    @parameterized.expand(parse_test_case_file('pt/data_inverse_text_normalization/test_cases_word.txt'))
    @pytest.mark.run_only_on('CPU')
    @pytest.mark.unit
    def test_denorm(self, test_input, expected):
        pred = self.inverse_normalizer.inverse_normalize(test_input, verbose=False)
        assert pred == expected
| NeMo-text-processing-main | tests/nemo_text_processing/pt/test_word.py |
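The Portuguese suite repeats the same fixture-free pattern nine times: build one InverseNormalizer per class, then feed it (input, expected) pairs. A compact equivalent without pytest, assuming the Portuguese grammars are available; the test pair below is hypothetical, not taken from the repo's data files.

from nemo_text_processing.inverse_text_normalization.inverse_normalize import InverseNormalizer

inv = InverseNormalizer(lang='pt', cache_dir=None, overwrite_cache=False)
for test_input, expected in [('vinte e três', '23')]:  # hypothetical pair
    pred = inv.inverse_normalize(test_input, verbose=False)
    assert pred == expected, f"{test_input!r} -> {pred!r}, expected {expected!r}"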