python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_preserve_order,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure tokens, e.g.
    measure { cardinal { integer: "zwei" units: "unzen" } } -> "zwei unzen"
    measure { cardinal { integer_part: "zwei" quantity: "millionen" units: "unzen" } } -> "zwei millionen unzen"

    Args:
        decimal: decimal GraphFst
        cardinal: cardinal GraphFst
        fraction: fraction GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst, deterministic: bool):
        super().__init__(name="measure", kind="verbalize", deterministic=deterministic)

        # units: "..." -> bare unit string (quotes and field name removed)
        unit_part = pynutil.delete("units: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")

        # number (cardinal/decimal/fraction) followed by the unit, e.g. "zwei unzen"
        measure_graph = (cardinal.fst | decimal.fst | fraction.fst) + pynini.accep(" ") + unit_part
        # unit followed by a number (fractions are not accepted in this order)
        measure_graph |= unit_part + delete_extra_space + (cardinal.fst | decimal.fst)
        measure_graph += delete_preserve_order

        self.fst = self.delete_tokens(measure_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_preserve_order,
insert_space,
)
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fraction
    e.g. fraction { integer: "drei" numerator: "eins" denominator: "zwei" }-> drei ein halb
    e.g. fraction { numerator: "vier" denominator: "zwei" } -> vier halbe
    e.g. fraction { numerator: "drei" denominator: "vier" } -> drei viertel

    Args:
        ordinal: ordinal GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="fraction", kind="verbalize", deterministic=deterministic)
        # Optional leading "minus " when the token carries negative: "true".
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "minus ") + pynutil.delete(" "), 0, 1)
        # Rewrite the whole token "eins" -> "ein"; the negative weight favors applying the rewrite.
        change_one = pynini.cdrewrite(
            pynutil.add_weight(pynini.cross("eins", "ein"), weight=-0.0001), "[BOS]", "[EOS]", NEMO_SIGMA
        )
        # Fix the regularly-formed "zweitel" to the irregular "halbe" (whole-token rewrite).
        change_numerator_two = pynini.cdrewrite(pynini.cross("zweitel", "halbe"), "[BOS]", "[EOS]", NEMO_SIGMA)
        integer = pynutil.delete("integer_part: \"") + change_one + pynutil.delete("\" ")
        numerator = pynutil.delete("numerator: \"") + change_one + pynutil.delete("\" ")
        # Denominator: strip the field wrapper, map the cardinal to its ordinal stem at
        # word end and append "tel", then patch "zweitel" -> "halbe".
        denominator = (
            pynutil.delete("denominator: \"")
            + pynini.closure(NEMO_NOT_QUOTE)
            @ (
                pynini.cdrewrite(pynini.closure(ordinal.ordinal_stem, 0, 1), "", "[EOS]", NEMO_SIGMA)
                + pynutil.insert("tel")
            )
            @ change_numerator_two
            + pynutil.delete("\"")
        )
        # Integer part may optionally be joined to the fraction with "und "
        # (small positive weight keeps the plain form preferred).
        integer += insert_space + pynini.closure(pynutil.insert("und ", weight=0.001), 0, 1)
        # "ein halbe" -> "ein halb" when it makes up the whole output.
        denominator_one_half = pynini.cdrewrite(pynini.cross("ein halbe", "ein halb"), "[BOS]", "[EOS]", NEMO_SIGMA)
        fraction_default = (numerator + insert_space + denominator) @ denominator_one_half
        self.graph = optional_sign + pynini.closure(integer, 0, 1) + fraction_default
        graph = self.graph + delete_preserve_order
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_preserve_order
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for verbalizing telephone, e.g.
    telephone { country_code: "plus neun und vierzig" number_part: "null eins eins eins null null null" }
    -> "plus neun und vierzig null eins eins eins null null null"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="telephone", kind="verbalize", deterministic=deterministic)

        def _unwrap(field_name):
            # field_name: "..." -> quoted content with the wrapper removed
            return pynutil.delete(f"{field_name}: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")

        # country code followed by the number, separated by a single kept space
        self.graph = _unwrap("country_code") + pynini.accep(" ") + _unwrap("number_part")
        self.fst = self.delete_tokens(self.graph + delete_preserve_order).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinals,
    e.g. ordinal { integer: "vier" } -> "vierter"
                                     -> "viertes" ...

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)

        # Inverted tsv maps: spoken cardinal stem -> ordinal stem.
        digit_stems = pynini.string_file(get_abs_path("data/ordinals/digit.tsv")).invert()
        ties_stems = pynini.string_file(get_abs_path("data/ordinals/ties.tsv")).invert()
        thousand_stems = pynini.string_file(get_abs_path("data/ordinals/thousands.tsv")).invert()
        self.ordinal_stem = digit_stems | ties_stems | thousand_stems

        # integer: "..." -> bare spoken cardinal
        unwrap_integer = pynutil.delete("integer: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")

        # Append one of the German adjective endings at the end of the word; the
        # optional stem rewrite handles irregular stems first.
        endings = pynini.union("ten", "tem", "ter", "tes", "te")
        append_ending = pynutil.insert(endings, weight=0.01)
        suffix = pynini.cdrewrite(
            pynini.closure(self.ordinal_stem, 0, 1) + append_ending, "", "[EOS]", NEMO_SIGMA,
        ).optimize()

        self.graph = pynini.compose(unwrap_integer, suffix)
        self.suffix = suffix
        self.fst = self.delete_tokens(self.graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.de.taggers.cardinal import CardinalFst as CardinalTagger
from nemo_text_processing.text_normalization.de.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.de.verbalizers.date import DateFst
from nemo_text_processing.text_normalization.de.verbalizers.decimal import DecimalFst
from nemo_text_processing.text_normalization.de.verbalizers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.de.verbalizers.fraction import FractionFst
from nemo_text_processing.text_normalization.de.verbalizers.measure import MeasureFst
from nemo_text_processing.text_normalization.de.verbalizers.money import MoneyFst
from nemo_text_processing.text_normalization.de.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.de.verbalizers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.de.verbalizers.time import TimeFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)

        # Sub-grammars that other verbalizers depend on are built first and shared.
        cardinal_tagger = CardinalTagger(deterministic=deterministic)
        cardinal = CardinalFst(deterministic=deterministic)
        ordinal = OrdinalFst(deterministic=deterministic)
        decimal = DecimalFst(deterministic=deterministic)
        fraction = FractionFst(ordinal=ordinal, deterministic=deterministic)
        date = DateFst(ordinal=ordinal)
        measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)

        # All semiotic-class verbalizers, united into a single grammar.
        sub_fsts = [
            cardinal.fst,
            measure.fst,
            decimal.fst,
            ordinal.fst,
            date.fst,
            ElectronicFst(deterministic=deterministic).fst,
            MoneyFst(decimal=decimal).fst,
            fraction.fst,
            WhiteListFst(deterministic=deterministic).fst,
            TelephoneFst(deterministic=deterministic).fst,
            TimeFst(cardinal_tagger=cardinal_tagger, deterministic=deterministic).fst,
        ]
        combined = sub_fsts[0]
        for sub_fst in sub_fsts[1:]:
            combined |= sub_fst
        self.fst = combined
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.de.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.text_normalization.en.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
    """

    def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False):
        super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)

        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"de_tn_{deterministic}_deterministic_verbalizer.far")

        if far_file and os.path.exists(far_file) and not overwrite_cache:
            # Restore the previously compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["verbalize"]
            logging.info(f'VerbalizeFinalFst graph was restored from {far_file}.')
        else:
            # A token is either a semiotic-class verbalizer match or a plain word.
            token_body = VerbalizeFst(deterministic=deterministic).fst | WordFst(deterministic=deterministic).fst
            one_token = (
                pynutil.delete("tokens")
                + delete_space
                + pynutil.delete("{")
                + delete_space
                + token_body
                + delete_space
                + pynutil.delete("}")
            )
            # A sentence is one or more tokens separated by single spaces.
            sentence = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
            self.fst = sentence.optimize()
            if far_file:
                generator_main(far_file, {"verbalize": self.fst})
                logging.info(f"VerbalizeFinalFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.taggers.decimal import quantities
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_preserve_order,
insert_space,
)
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
    decimal { negative: "true" integer_part: "elf" fractional_part: "vier null sechs" quantity: "billionen" } -> minus elf komma vier null sechs billionen
    decimal { integer_part: "eins" quantity: "billion" } -> eins billion
    """

    def __init__(self, deterministic: bool = True):
        # NOTE(review): kind="classify" looks inconsistent for a verbalizer — kept as-is
        # since it may affect FAR export naming; confirm before changing.
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        remove_space = pynutil.delete(" ")
        # Optional leading "minus " when negative: "true" is present.
        self.optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "minus ") + remove_space, 0, 1)
        # integer_part: "..." -> bare content
        self.integer = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # fractional_part: "..." -> bare content
        self.fractional_default = (
            pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        )
        # Fraction is read with a leading " komma ".
        self.fractional = pynutil.insert(" komma ") + self.fractional_default
        # quantity: "..." -> accepted quantity word, preceded by a space in the output.
        self.quantity = (
            remove_space + insert_space + pynutil.delete("quantity: \"") + quantities + pynutil.delete("\"")
        )
        self.optional_quantity = pynini.closure(self.quantity, 0, 1)

        # Either "<int> <quantity>" or "<int> komma <frac> [<quantity>]".
        with_quantity_only = self.integer + self.quantity
        with_fraction = self.integer + remove_space + self.fractional + self.optional_quantity
        self.numbers = self.optional_sign + (with_quantity_only | with_fraction)
        self.fst = self.delete_tokens(self.numbers + delete_preserve_order).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_preserve_order
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
    money { currency_maj: "euro" integer_part: "ein"} -> "ein euro"
    money { currency_maj: "euro" integer_part: "eins" fractional_part: "null null eins"} -> "eins komma null null eins euro"
    money { integer_part: "ein" currency_maj: "pfund" fractional_part: "vierzig" preserve_order: true} -> "ein pfund vierzig"
    money { integer_part: "ein" currency_maj: "pfund" fractional_part: "vierzig" currency_min: "pence" preserve_order: true} -> "ein pfund vierzig pence"
    money { fractional_part: "ein" currency_min: "penny" preserve_order: true} -> "ein penny"
    money { currency_maj: "pfund" integer_part: "null" fractional_part: "null eins" quantity: "million"} -> "null komma null eins million pfund"

    Args:
        decimal: GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="verbalize", deterministic=deterministic)
        space = pynini.accep(" ")

        def _unwrap(field_name):
            # field_name: "..." -> quoted content with the wrapper removed
            return pynutil.delete(f"{field_name}: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")

        major_currency = _unwrap("currency_maj")
        minor_currency = _unwrap("currency_min")  # renamed from `min` to avoid shadowing the builtin
        fractional = _unwrap("fractional_part")
        integer = _unwrap("integer_part")
        optional_und = pynini.closure(pynutil.insert("und "), 0, 1)

        # "<int> <maj>"
        graph_major = integer + space + major_currency
        # "<int> <maj> <frac>" or "<int> <maj> (und) <frac> <min>"
        graph_major_with_minor = (
            integer
            + space
            + major_currency
            + space
            + (fractional | (optional_und + fractional + space + minor_currency))
            + delete_preserve_order
        )
        # "<decimal> <maj>"
        graph_decimal_major = decimal.fst + space + major_currency
        # "<frac> <min>"
        graph_minor = fractional + space + minor_currency + delete_preserve_order

        graph = graph_major | graph_major_with_minor | graph_decimal_major | graph_minor
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinals,
    e.g. cardinal { integer: "zwei" } -> "zwei"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        # negative: "true" -> leading "minus " (optional)
        self.optional_sign = pynini.closure(pynini.cross("negative: \"true\" ", "minus "), 0, 1)
        # quoted content after the field name: " \"...\"" -> ...
        self.integer = pynutil.delete(" \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # integer: "..." -> bare spoken cardinal
        self.numbers = pynutil.delete("integer:") + self.integer
        self.fst = self.delete_tokens(self.optional_sign + self.numbers).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_preserve_order,
insert_space,
)
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic
    e.g. electronic { username: "abc" domain: "hotmail.com" } -> "a b c at hotmail punkt com"
    -> "a b c at h o t m a i l punkt c o m"
    -> "a b c at hotmail punkt c o m"
    -> "a b c at h o t m a i l punkt com"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="electronic", kind="verbalize", deterministic=deterministic)
        # Spoken forms for digits; "1" additionally maps to "eins".
        graph_digit_no_zero = pynini.invert(
            pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        ).optimize() | pynini.cross("1", "eins")
        graph_zero = pynini.invert(pynini.string_file(get_abs_path("data/numbers/zero.tsv"))).optimize()
        graph_digit = graph_digit_no_zero | graph_zero
        # Spoken names for symbols, plus tables of common server and domain names.
        graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv")).optimize()
        server_common = pynini.string_file(get_abs_path("data/electronic/server_name.tsv"))
        domain_common = pynini.string_file(get_abs_path("data/electronic/domain.tsv"))

        def add_space_after_char():
            # Insert a space between every pair of adjacent non-space characters,
            # i.e. spell the string out character by character.
            return pynini.closure(NEMO_NOT_QUOTE - pynini.accep(" ") + insert_space) + (
                NEMO_NOT_QUOTE - pynini.accep(" ")
            )

        # Replace symbols and digits anywhere with their spoken forms.
        verbalize_characters = pynini.cdrewrite(graph_symbols | graph_digit, "", "", NEMO_SIGMA)
        # Username is always spelled out character by character.
        user_name = pynutil.delete("username: \"") + add_space_after_char() + pynutil.delete("\"")
        user_name @= verbalize_characters
        # Domain tokens: prefer known domain/server names; any other character passes
        # through with a small penalty (so spelled-out readings are still possible).
        convert_defaults = pynutil.add_weight(NEMO_NOT_QUOTE, weight=0.0001) | domain_common | server_common
        domain = convert_defaults + pynini.closure(insert_space + convert_defaults)
        domain @= verbalize_characters
        domain = pynutil.delete("domain: \"") + domain + pynutil.delete("\"")
        # Protocol (e.g. a URL scheme) is spelled out with symbols verbalized.
        protocol = (
            pynutil.delete("protocol: \"")
            + add_space_after_char() @ pynini.cdrewrite(graph_symbols, "", "", NEMO_SIGMA)
            + pynutil.delete("\"")
        )
        # Either "[protocol ]domain" (URL) or "username at domain" (email).
        self.graph = (pynini.closure(protocol + pynini.accep(" "), 0, 1) + domain) | (
            user_name + pynini.accep(" ") + pynutil.insert("at ") + domain
        )
        delete_tokens = self.delete_tokens(self.graph + delete_preserve_order)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.utils import get_abs_path, load_labels
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_preserve_order,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing date, e.g.
    date { day: "vier" month: "april" year: "zwei tausend zwei" } -> "vierter april zwei tausend zwei"
    date { day: "vier" month: "mai" year: "zwei tausend zwei" } -> "vierter mai zwei tausend zwei"

    Args:
        ordinal: ordinal verbalizer GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="date", kind="verbalize", deterministic=deterministic)
        # day: "..." -> bare spoken cardinal
        day_cardinal = pynutil.delete("day: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # Map the cardinal to its ordinal stem at word end, then append "ter".
        # Note: `@` binds tighter than `+`, so "ter" is concatenated after the composition.
        day = day_cardinal @ pynini.cdrewrite(ordinal.ordinal_stem, "", "[EOS]", NEMO_SIGMA) + pynutil.insert("ter")
        # All full month names (second column of abbr_to_name.tsv).
        months_names = pynini.union(*[x[1] for x in load_labels(get_abs_path("data/months/abbr_to_name.tsv"))])
        month = pynutil.delete("month: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # A month that is already a known name passes through unchanged ...
        final_month = month @ months_names
        # ... anything else (presumably a month given as a spoken cardinal — confirm
        # against the tagger) is ordinalized the same way as the day.
        final_month |= month @ pynini.difference(NEMO_SIGMA, months_names) @ pynini.cdrewrite(
            ordinal.ordinal_stem, "", "[EOS]", NEMO_SIGMA
        ) + pynutil.insert("ter")
        year = pynutil.delete("year: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # day month year (year optional); or month year; or year alone
        graph_dmy = day + pynini.accep(" ") + final_month + pynini.closure(pynini.accep(" ") + year, 0, 1)
        graph_dmy |= final_month + pynini.accep(" ") + year
        self.graph = graph_dmy | year
        final_graph = self.graph + delete_preserve_order
        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/verbalizers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/months/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/numbers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/ordinals/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/electronic/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/time/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/money/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/de/data/measure/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
def get_abs_path(rel_path):
    """
    Return the absolute path of a resource relative to this module's directory.

    Args:
        rel_path: path relative to the directory containing this file

    Returns:
        Absolute path string.
    """
    # os.path.join handles the separator portably instead of hard-coding '/'.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
def load_labels(abs_path):
    """
    Load a tab-separated label file as a list of rows.

    Args:
        abs_path: absolute path to the TSV file

    Returns:
        List of rows, each row a list of the tab-separated fields.
    """
    # Context manager guarantees the handle is closed even if parsing raises;
    # explicit UTF-8 so accented French entries load identically on all platforms.
    with open(abs_path, encoding="utf-8") as label_tsv:
        return list(csv.reader(label_tsv, delimiter="\t"))
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
        "-4 1/3" ->
        tokens { fraction { negative: "true" integer_part: "quatre" numerator: "un" denominator: "troisième" } }
    The denominator is tagged via the ordinal grammar, so it carries the
    ordinal's verbalization (see OrdinalFst).

    Args:
        cardinal: CardinalFst
        ordinal: OrdinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction will be generated (used for audio-based normalization) - TBD
    """

    def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="fraction", kind="classify", deterministic=deterministic)
        cardinals = cardinal.all_nums_no_tokens
        # Numerator "1" keeps the denominator singular; any other numerator pluralizes it.
        sing_numerator = pynini.accep("1") @ cardinals
        pl_numerators = (pynini.closure(NEMO_DIGIT) - "1") @ cardinals
        # Denominators are read as ordinals: append "e" so the ordinal tagger
        # accepts the bare digits (e.g. "3" -> "3e").
        add_denom_suffix = pynini.closure(NEMO_DIGIT) + pynutil.insert("e")
        denominators = add_denom_suffix @ ordinal.graph
        # Rename the ordinal tagger's "integer" field to "denominator".
        change_denom_label = pynini.cross("integer", "denominator")
        # Plural numerators turn the ordinal ending "ième" into "ièmes".
        pluralize_denom = pynini.closure(NEMO_SIGMA) + pynini.cross("\"ième\"", "\"ièmes\"")
        sing_fraction_graph = (
            pynutil.insert("numerator: \"")
            + sing_numerator
            + pynutil.insert("\" ")
            + pynutil.delete("/")
            + (denominators @ (change_denom_label + pynini.closure(NEMO_SIGMA)))
        )
        pl_fraction_graph = (
            pynutil.insert("numerator: \"")
            + pl_numerators
            + pynutil.insert("\" ")
            + pynutil.delete("/")
            + (denominators @ (change_denom_label + pluralize_denom))
        )
        # Optional leading whole number, e.g. "4 1/3".
        integer_part = pynutil.insert("integer_part: \"") + cardinals + pynutil.insert("\"")
        optional_integer_part = pynini.closure(integer_part + pynini.accep(" "), 0, 1)
        # Optional leading minus sign.
        optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
        final_graph = optional_minus_graph + optional_integer_part + (sing_fraction_graph | pl_fraction_graph)
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/fraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst
from nemo_text_processing.text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
        "2e" -> ordinal { integer: "deux" morphosyntactic_features: "ième" }
    This grammar covers from single digits to hundreds of billions ("milliardième" in French).
    This FST also records the ending of the ordinal (called "morphosyntactic_features").

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        # Bug fix: `deterministic` was accepted but silently dropped instead of
        # being forwarded to the base class, unlike every sibling tagger.
        super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
        numbers = cardinal.all_nums_no_tokens
        # Verbalized cardinal wrapped in the `integer` field.
        numbers_graph = pynutil.insert("integer: \"") + numbers + pynutil.insert("\"")
        # Written ordinal suffixes mapped to their spoken ending (from suffixes.tsv).
        suffixes = pynini.string_file(get_abs_path("data/ordinals/suffixes.tsv"))
        suffixes_graph = pynutil.insert("morphosyntactic_features: \"") + suffixes + pynutil.insert("\"")
        final_graph = numbers_graph + pynutil.insert(NEMO_SPACE) + suffixes_graph
        # Expose the raw graph for reuse by other grammars (e.g. FractionFst).
        self.graph = final_graph
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/ordinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from nemo_text_processing.text_normalization.fr.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelist, e.g.
        "sr." -> tokens { name: "bonjour" }
    This class has highest priority among all classifier grammars. Whitelisted
    tokens are defined and loaded from "data/whitelist.tsv".

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        input_file: path to a file with whitelist replacements
    """

    def __init__(self, input_case: str, deterministic: bool = True, input_file: str = None):
        super().__init__(name="whitelist", kind="classify", deterministic=deterministic)

        def _build_graph(case, path):
            # Load TSV mappings; lower-case the written form when requested.
            rows = load_labels(path)
            if case == "lower_cased":
                rows = [[row[0].lower()] + row[1:] for row in rows]
            return pynini.string_map(rows)

        default_file = get_abs_path("data/whitelist.tsv")
        graph = _build_graph(input_case, default_file)

        # In non-deterministic cased mode, also accept lower-cased spellings at
        # a small weight penalty so cased matches remain preferred.
        if not deterministic and input_case != "lower_cased":
            graph |= pynutil.add_weight(_build_graph("lower_cased", default_file), weight=0.0001)

        if input_file:
            user_graph = _build_graph(input_case, input_file)
            # A user-provided file extends the defaults in non-deterministic
            # mode and replaces them otherwise.
            graph = (graph | user_graph) if not deterministic else user_graph

        # Measurement abbreviations are additionally whitelisted when
        # multiple transduction options are allowed.
        if not deterministic:
            graph |= _build_graph(input_case, get_abs_path("data/measures/measurements.tsv"))

        self.graph = graph
        self.final_graph = convert_space(self.graph).optimize()
        self.fst = (pynutil.insert("name: \"") + self.final_graph + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/whitelist.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.fr.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.fr.taggers.decimals import DecimalFst
from nemo_text_processing.text_normalization.fr.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.fr.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.fr.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.fr.taggers.word import WordFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details to deployment at NeMo-text-processing/tools/text_processing_deployment.

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
    """

    def __init__(
        self,
        input_case: str,
        deterministic: bool = False,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            # The cache file name encodes the options so that different
            # configurations do not collide in the same cache_dir.
            whitelist_file = os.path.basename(whitelist) if whitelist else ""
            far_file = os.path.join(
                cache_dir, f"_{input_case}_fr_tn_{deterministic}_deterministic{whitelist_file}.far"
            )
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: restore the pre-compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars. This might take some time...")
            # Build each semiotic-class tagger; ordinal/decimal/fraction reuse
            # the cardinal grammar, fraction also reuses the ordinal grammar.
            self.cardinal = CardinalFst(deterministic=deterministic)
            cardinal_graph = self.cardinal.fst
            self.ordinal = OrdinalFst(cardinal=self.cardinal, deterministic=deterministic)
            ordinal_graph = self.ordinal.fst
            self.decimal = DecimalFst(cardinal=self.cardinal, deterministic=deterministic)
            decimal_graph = self.decimal.fst
            self.fraction = FractionFst(cardinal=self.cardinal, ordinal=self.ordinal, deterministic=deterministic)
            fraction_graph = self.fraction.fst
            word_graph = WordFst(deterministic=deterministic).fst
            self.whitelist = WhiteListFst(input_case=input_case, deterministic=deterministic, input_file=whitelist)
            whitelist_graph = self.whitelist.fst
            punct_graph = PunctuationFst(deterministic=deterministic).fst
            # Weighted union of all classifiers: lower weight is preferred, so
            # whitelist (1.01) outranks the numeric grammars (~1.1) and plain
            # words (200) are the fallback of last resort.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.09)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(word_graph, 200)
            )
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=2.1) + pynutil.insert(" }")
            # Between tokens: either collapse a run of whitespace, or emit one
            # or more punctuation tokens.
            punct = pynini.closure(
                pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
                | (pynutil.insert(" ") + punct),
                1,
            )
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            # A token may carry leading and/or trailing punctuation tokens.
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            # A sentence is a sequence of tokens separated by whitespace or punctuation.
            graph = token_plus_punct + pynini.closure(
                (
                    pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
                    | (pynutil.insert(" ") + punct + pynutil.insert(" "))
                )
                + token_plus_punct
            )
            # Strip leading/trailing whitespace; also accept punctuation-only input.
            graph = delete_space + graph + delete_space
            graph |= punct
            self.fst = graph.optimize()
            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/tokenize_and_classify.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, NEMO_SIGMA, GraphFst, insert_space
from nemo_text_processing.text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals, e.g.
        "1000" -> cardinal { integer: "mille" }
        "2,000,000" -> cardinal { integer: "deux millions" }
    This grammar covers from single digits to hundreds of billions ("milliards" in French).

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="classify", deterministic=deterministic)
        # Single digits
        zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        digits = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))  # 1 to 9
        digits_no_one = (NEMO_DIGIT - "1") @ digits
        one = "1" @ digits
        # Double digits
        ten = pynini.string_file(get_abs_path("data/numbers/ten.tsv"))
        teens = pynini.string_file(get_abs_path("data/numbers/teens.tsv"))  # 11 to 19
        teens_no_one = (pynini.accep("1") + (NEMO_DIGIT - "1")) @ teens
        # Simple tens: hyphenated unit ("vingt-deux"), or "et un" for x1 ("vingt et un").
        tens = pynini.string_file(get_abs_path("data/numbers/tens_simple.tsv"))
        ties_simple_unit = pynutil.insert("-") + digits_no_one
        ties_simple_one = insert_space + pynini.cross("1", "et un")
        ties_simple = tens + (pynutil.delete("0") | (ties_simple_unit | ties_simple_one))  # 20 to 69
        # Complex tens: French 70s/90s are built on teens ("soixante-douze"),
        # 71 uses "et onze".
        seventy = pynini.string_file(get_abs_path("data/numbers/seventy.tsv"))
        seventy_unit = pynutil.insert("-") + ((pynutil.insert("1") + NEMO_DIGIT) @ teens_no_one)
        seventy_one = insert_space + pynini.cross("1", "et onze")
        seventies = seventy + (pynini.cross("0", "-dix") | (seventy_unit | seventy_one))  # 70 to 79
        eighty = pynini.string_file(get_abs_path("data/numbers/eighty.tsv"))
        # "quatre-vingts" takes a plural "s" only when no unit follows.
        eighties = eighty + (pynini.cross("0", "s") | (pynutil.insert("-") + digits))  # 80 to 89
        ninety = pynini.string_file(get_abs_path("data/numbers/ninety.tsv"))
        nineties_unit = pynutil.insert("-") + ((pynutil.insert("1") + NEMO_DIGIT) @ teens)
        nineties = ninety + (pynini.cross("0", "-dix") | nineties_unit)  # 90 to 99
        all_double_digits = ten | teens | ties_simple | seventies | eighties | nineties
        self.all_double_digits = all_double_digits
        one_to_all_tens = digits | all_double_digits
        self.one_to_all_tens = one_to_all_tens.optimize()
        # Hundreds: "cent" pluralizes to "cents" only for exact multiples ("deux cents").
        hundreds_parts = (pynutil.delete("0") + insert_space + digits) | (insert_space + all_double_digits)
        one_hundreds = pynini.cross("1", "cent") + (pynutil.delete("00") | hundreds_parts)
        multiple_hundreds = (digits_no_one + insert_space + pynutil.insert("cent")) + (
            pynini.cross("00", "s") | hundreds_parts
        )
        all_hundreds = one_hundreds | multiple_hundreds
        self.all_hundreds = all_hundreds
        # Thousands: an optional space may separate digit groups, e.g. "2 000".
        delete_separator = pynini.closure(pynutil.delete(" "), 0, 1)
        one_thousand = pynini.cross("1", "mille") + delete_separator
        other_thousands = (
            (digits_no_one | all_double_digits | all_hundreds)
            + insert_space
            + pynutil.insert("mille")
            + delete_separator
        )
        # Each branch below handles a different pattern of trailing zeros in
        # the last three digits ("x000", "x00y", "x0yy", "xyyy").
        all_thousands = (
            ((one_thousand | other_thousands) + pynutil.delete("000"))
            | (one_thousand + pynutil.delete("00") + insert_space + (pynini.cross("1", "et un") | digits_no_one))
            | (other_thousands + pynutil.delete("00") + insert_space + digits)
            | ((one_thousand | other_thousands) + pynutil.delete("0") + insert_space + all_double_digits)
            | ((one_thousand | other_thousands) + insert_space + all_hundreds)
        )
        # Millions: the millions count is followed by two groups of three
        # digits (each optionally space-separated); trailing zeros are dropped.
        digits_to_hundreds_no_one = digits_no_one | all_double_digits | all_hundreds
        digits_to_thousands_no_one = digits_no_one | all_double_digits | all_hundreds | all_thousands
        millions_components = pynini.closure(delete_separator + pynini.closure(NEMO_DIGIT, 3), 2)
        delete_zeros = pynini.closure(pynutil.delete("0"), 0, 6)
        all_millions = (
            one + insert_space + pynutil.insert("million")
            | (digits_to_hundreds_no_one + insert_space + pynutil.insert("millions"))
        ) + (
            millions_components
            @ (
                delete_zeros
                + pynini.closure(insert_space + (digits_to_thousands_no_one | pynini.cross("1", "et un")), 0, 1)
            )
        )
        # Billions: same construction with three trailing groups of three digits.
        digits_to_millions_no_one = digits_to_thousands_no_one | all_millions
        billions_components = pynini.closure(delete_separator + pynini.closure(NEMO_DIGIT, 3), 3)
        delete_zeros = pynini.closure(pynutil.delete("0"), 0, 9)
        all_billions = (
            one + insert_space + pynutil.insert("milliard")
            | (digits_to_hundreds_no_one + insert_space + pynutil.insert("milliards"))
        ) + (
            billions_components
            @ (
                delete_zeros
                + pynini.closure(insert_space + (digits_to_millions_no_one | pynini.cross("1", "et un")), 0, 1)
            )
        )
        # All Numbers Union
        final_graph_masc = zero | one_to_all_tens | all_hundreds | all_thousands | all_millions | all_billions
        # Adding adjustment for fem gender (choice of gender will be random):
        # "un" anywhere in the verbalization may also be produced as "une".
        final_graph_fem = final_graph_masc | (
            final_graph_masc @ (pynini.closure(NEMO_SIGMA) + pynini.cross("un", "une"))
        )
        final_graph = final_graph_fem | final_graph_masc
        # Raw number graph (no token wrapping) reused by ordinal/decimal/fraction taggers.
        self.all_nums_no_tokens = final_graph
        optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
        final_graph = optional_minus_graph + pynutil.insert("integer: \"") + final_graph + pynutil.insert("\"")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/cardinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
insert_space,
)
from nemo_text_processing.text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
quantities = pynini.string_file(get_abs_path("data/numbers/quantities.tsv"))
digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
def get_quantity(decimal_graph: "pynini.FstLike", cardinal_graph: "pynini.FstLike") -> "pynini.FstLike":
    """
    Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
    e.g. 2 millions -> integer_part: "deux" quantity: "millions"
    e.g. 2,4 millions -> integer_part: "deux" fractional_part: "quatre" quantity: "millions"

    Args:
        decimal_graph: DecimalFST
        cardinal_graph: CardinalFST
    """
    # Optional space used as a digit-group separator inside the number, e.g. "2 000".
    delete_separator = pynini.closure(pynutil.delete(" "), 0, 1)
    # Only numbers of up to six digits are read as the quantity's count.
    numbers = pynini.closure(NEMO_DIGIT, 1, 6) @ cardinal_graph
    # Strip group separators anywhere in the input before verbalizing.
    numbers = pynini.cdrewrite(pynutil.delete(delete_separator), "", "", NEMO_SIGMA) @ numbers
    # Cardinal + quantity word, e.g. "2 millions".
    res = (
        pynutil.insert('integer_part: "')
        + numbers
        + pynutil.insert('"')
        + NEMO_SPACE
        + pynutil.insert('quantity: "')
        + quantities
        + pynutil.insert('"')
    )
    # Decimal + quantity word, e.g. "2,4 millions".
    res |= decimal_graph + NEMO_SPACE + pynutil.insert('quantity: "') + quantities + pynutil.insert('"')
    return res
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal, e.g.
        -11,406 millions -> decimal { negative: "true" integer_part: "onze" fractional_part: "quatre cent six" quantity: "millions" preserve_order: true }
        114 billions -> decimal { integer_part: "cent quatorze" quantity: "billions" preserve_order: true }

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        graph_digit = digit | zero
        if not deterministic:
            # Allow any mix of digit / tens / hundreds readings for the fractional part.
            graph = pynini.union(graph_digit, cardinal.all_hundreds, cardinal.one_to_all_tens)
            graph += pynini.closure(insert_space + graph)
        else:
            # General pattern is 1-3 digits: map as cardinal, default to tens followed by digits otherwise.
            # Negative weights make the shorter cardinal readings preferred.
            graph = pynini.union(
                pynutil.add_weight(graph_digit + pynini.closure(insert_space + zero), -0.00001),
                pynutil.add_weight(cardinal.all_double_digits + pynini.closure(insert_space + zero), -0.00002),
                pynutil.add_weight(cardinal.all_hundreds + pynini.closure(insert_space + zero), 0.00001),
                pynutil.add_weight(
                    cardinal.all_double_digits
                    + pynini.closure(insert_space + cardinal.all_double_digits, 1)
                    + pynini.closure(insert_space + zero, 0, 1)
                    + (pynini.closure(insert_space + graph_digit, 0, 1) | pynini.closure(insert_space + zero, 0)),
                    -0.00002,
                ),  # Read out as tens and a possible trailing digit or zeroes
                zero
                + pynini.closure(insert_space + zero)
                + pynini.closure(insert_space + graph_digit),  # For cases such as "1,001"
            )
        graph = graph.optimize()
        # French decimal separator is a comma.
        delete_separator = pynutil.delete(",")
        optional_graph_negative = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1)
        graph_fractional = pynutil.insert('fractional_part: "') + graph + pynutil.insert('"')
        integers = cardinal.all_nums_no_tokens
        graph_integer = pynutil.insert('integer_part: "') + integers + pynutil.insert('"')
        final_graph_wo_sign = graph_integer + delete_separator + insert_space + graph_fractional
        # Also accept a cardinal or decimal followed by a quantity word ("millions", ...).
        final_graph_wo_negative = final_graph_wo_sign | get_quantity(final_graph_wo_sign, integers).optimize()
        self.final_graph_wo_negative = final_graph_wo_negative
        final_graph = optional_graph_negative + final_graph_wo_negative
        self.final_graph = final_graph
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/decimals.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying word.
        e.g. bonjour -> tokens { name: "bonjour" }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        # Bug fix: forward `deterministic` to the base class; it was previously
        # accepted but silently dropped (all sibling classifiers forward it).
        super().__init__(name="word", kind="classify", deterministic=deterministic)
        # Any run of non-space characters becomes a plain `name` token.
        word = pynutil.insert("name: \"") + pynini.closure(NEMO_NOT_SPACE, 1) + pynutil.insert("\"")
        self.fst = word.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/taggers/word.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
insert_space,
)
from nemo_text_processing.text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fraction tokens produced by the
    French fraction tagger: the optional integer part is joined with "et",
    the numerator is kept as-is, and the denominator is verbalized through the
    ordinal verbalizer, with irregular denominator forms (from
    data/fractions/irregular_denominators.tsv) preferred where defined and
    "et un demi" contracted to "et demi".
    NOTE(review): the previous docstring example here was copied from the
    Spanish grammar; see the French fraction tagger for the token format.

    Args:
        ordinal: ordinal verbalizer GraphFst, reused for denominators
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="fraction", kind="verbalize", deterministic=deterministic)
        numerator = pynutil.delete("numerator: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
        denominator = ordinal.graph
        # Prefer irregular denominator spellings via a negative weight.
        irregular_denominators = pynini.string_file(get_abs_path("data/fractions/irregular_denominators.tsv"))
        denominator = pynini.union(pynutil.add_weight(denominator @ irregular_denominators, -0.01), denominator)
        # The ordinal verbalizer reads an "integer:" field, so relabel "denominator:".
        denominator = (pynini.cross("denominator:", "integer:") + pynini.closure(NEMO_SIGMA)) @ denominator
        # "et un demi" contracts to "et demi".
        irregular_half = pynini.closure(NEMO_SIGMA) + pynini.cross("et un demi", "et demi")
        optional_integer = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
        # Whole part, when present, is joined to the fraction with "et".
        optional_integer = pynini.closure(
            optional_integer + pynini.accep(NEMO_SPACE) + pynutil.insert("et") + insert_space, 0, 1
        )
        optional_sign = pynini.closure(pynini.cross("negative: \"true\" ", "moins") + insert_space, 0, 1)
        final_graph = optional_sign + optional_integer + numerator + pynini.accep(NEMO_SPACE) + denominator
        # Apply the "et demi" contraction preferentially where it matches.
        final_graph = pynini.union(pynutil.add_weight(final_graph @ irregular_half, -0.01), final_graph)
        self.fst = self.delete_tokens(final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/fraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, NEMO_NOT_QUOTE, NEMO_SPACE, GraphFst
from nemo_text_processing.text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinals
        e.g. ordinal { integer: "deux" morphosyntactic_features: "ième" } -> "deuxième"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
        strip_dashes: if True, additionally accept variants where the hyphens of
            compound numbers are replaced by spaces (at a small weight preference)
    """

    def __init__(self, deterministic: bool = True, strip_dashes: bool = False):
        super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)
        # "un" + "ième" is irregular: rewrite "un" to the stem "prem" (-> "premième"?
        # NOTE(review): presumably yields "premier/première" via the suffix data — confirm).
        ones = pynini.cross("un", "prem")
        # Numbers whose final word has an irregular ordinal form (from
        # irregular_numbers.tsv), optionally preceded by other number words.
        irregular_numbers = pynini.string_file(get_abs_path("data/ordinals/irregular_numbers.tsv"))
        irregular_numbers = pynini.closure(pynini.closure(NEMO_NOT_QUOTE, 1) + NEMO_SPACE) + irregular_numbers
        exceptions = pynini.project(irregular_numbers, "input")
        # Irregular forms already embed their ending: drop the "ième" feature,
        # keeping only an optional plural "s".
        exception_suffix = (
            pynutil.delete(" morphosyntactic_features: \"ième")
            + pynini.closure(pynini.accep("s"), 0, 1)
            + pynutil.delete("\"")
        )
        irregular_numbers_graph = (
            pynutil.delete("integer: \"") + irregular_numbers + pynutil.delete("\"") + exception_suffix
        )
        # Regular case: any spelled number except the irregular ones.
        numbers = pynini.closure(NEMO_NOT_QUOTE, 1)
        numbers = pynini.difference(numbers, exceptions)
        if strip_dashes:
            # Also allow hyphens between number words to surface as spaces.
            remove_dashes = pynini.closure(NEMO_ALPHA, 1) + pynini.cross("-", " ") + pynini.closure(NEMO_ALPHA, 1)
            remove_dashes = pynini.closure(remove_dashes, 0)
            numbers = pynini.union(numbers, pynutil.add_weight(numbers @ remove_dashes, -0.0001))
        regular_ordinals = pynini.union(numbers, pynutil.add_weight(ones, -0.0001))
        # Regular ordinals concatenate the number with the suffix content kept
        # from the morphosyntactic_features field ("deux" + "ième").
        regular_ordinals_graph = (
            pynutil.delete("integer: \"")
            + regular_ordinals
            + pynutil.delete("\"")
            + pynutil.delete(" morphosyntactic_features: \"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        final_graph = pynini.union(regular_ordinals_graph, irregular_numbers_graph)
        # Expose the raw graph for reuse by the fraction verbalizer.
        self.graph = final_graph
        self.fst = self.delete_tokens(final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/ordinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.fr.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.fr.verbalizers.decimals import DecimalFst
from nemo_text_processing.text_normalization.fr.verbalizers.fraction import FractionFst
from nemo_text_processing.text_normalization.fr.verbalizers.ordinal import OrdinalFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to an OpenFst
    Finite State Archive (FAR) file.
    More details to deployment at NeMo-text-processing/tools/text_processing_deployment.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
        # Instantiate each semiotic-class verbalizer; the fraction verbalizer
        # reuses the ordinal verbalizer for its denominators.
        cardinal = CardinalFst(deterministic=deterministic)
        ordinal = OrdinalFst(deterministic=deterministic)
        decimal = DecimalFst(deterministic=deterministic)
        fraction = FractionFst(ordinal=ordinal, deterministic=deterministic)
        whitelist = WhiteListFst(deterministic=deterministic)
        # Any single class may verbalize a given token.
        self.fst = (
            cardinal.fst | decimal.fst | ordinal.fst | fraction.fst | whitelist.fst
        )
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/verbalize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.fr.verbalizers.verbalize import VerbalizeFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
    """

    def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False):
        super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)

        # Resolve the cache file location, if caching was requested.
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"fr_tn_{deterministic}_deterministic_verbalizer.far")

        restore_from_cache = far_file is not None and os.path.exists(far_file) and not overwrite_cache
        if restore_from_cache:
            self.fst = pynini.Far(far_file, mode="r")["verbalize"]
            logging.info(f'VerbalizeFinalFst graph was restored from {far_file}.')
        else:
            # A token is either a semiotic-class verbalization or a plain word.
            token_graph = VerbalizeFst(deterministic=deterministic).fst | WordFst(deterministic=deterministic).fst
            single_token = (
                pynutil.delete("tokens")
                + delete_space
                + pynutil.delete("{")
                + delete_space
                + token_graph
                + delete_space
                + pynutil.delete("}")
            )
            # A sentence is one or more tokens separated by single spaces.
            sentence = delete_space + pynini.closure(single_token + delete_extra_space) + single_token + delete_space
            self.fst = sentence.optimize()
            if far_file:
                generator_main(far_file, {"verbalize": self.fst})
                logging.info(f"VerbalizeFinalFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/verbalize_final.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, insert_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinals
        e.g. cardinal { negative: "true" integer: "un milliard et un" } -> "moins un milliard et un"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        # Render an explicit negative flag as the spoken prefix "moins ".
        minus_prefix = pynini.cross("negative: \"true\" ", "moins") + insert_space
        # Strip the protobuf-style field quoting around the verbalized integer.
        integer_words = (
            pynutil.delete("integer: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        )
        graph = pynini.closure(minus_prefix, 0, 1) + integer_words
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_preserve_order,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
        decimal { negative: "true" integer_part: "onze" fractional_part: "quatre cent six" quantity: "millions" preserve_order: true } -> moins onze virgule quatre cent six millions
        decimal { integer_part: "cent quatorze" quantity: "billions" preserve_order: true } -> cent quatorze billions

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        # NOTE(review): kind is "classify" although this class lives under verbalizers —
        # looks like a copy-paste from the tagger; confirm intended before changing.
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)

        sign = pynini.closure(pynini.cross("negative: \"true\"", "moins ") + delete_space, 0, 1)

        def _unquote(field: str):
            """Strips `field: "..."` markup, keeping only the quoted content."""
            return pynutil.delete(f"{field}: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")

        integer = _unquote("integer_part")
        # Decimal point is spoken as "virgule" between integer and fractional parts.
        fraction_words = pynutil.insert(" virgule ") + _unquote("fractional_part")
        quantity = delete_space + insert_space + _unquote("quantity")

        with_quantity = integer + quantity
        with_fraction = integer + delete_space + fraction_words
        graph = sign + pynini.union(with_quantity, with_fraction + pynini.closure(quantity, 0, 1))
        # Variant that always carries an explicit quantity (exposed for reuse by other grammars).
        self.numbers_only_quantity = sign + pynini.union(with_quantity, with_fraction + quantity).optimize()
        graph += delete_preserve_order
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/verbalizers/decimals.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/data/numbers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/data/ordinals/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/fr/data/fractions/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, NEMO_SPACE
from nemo_text_processing.text_normalization.es import LOCALIZATION
from nemo_text_processing.text_normalization.es.utils import get_abs_path, load_labels
from pynini.lib import pynutil
# Acceptors (input-side projections) over the written-out Spanish number words;
# used below as contexts for the gender/accent rewrite rules.
digits = pynini.project(pynini.string_file(get_abs_path("data/numbers/digit.tsv")), "input")
tens = pynini.project(pynini.string_file(get_abs_path("data/numbers/ties.tsv")), "input")
teens = pynini.project(pynini.string_file(get_abs_path("data/numbers/teen.tsv")), "input")
twenties = pynini.project(pynini.string_file(get_abs_path("data/numbers/twenties.tsv")), "input")
hundreds = pynini.project(pynini.string_file(get_abs_path("data/numbers/hundreds.tsv")), "input")
# Accented vowel -> unaccented vowel mapping (see strip_accent below).
accents = pynini.string_map([("á", "a"), ("é", "e"), ("í", "i"), ("ó", "o"), ("ú", "u")])
# Separator conventions depend on the regional LOCALIZATION flag
# (set in nemo_text_processing/text_normalization/es/__init__.py).
if LOCALIZATION == "am":  # Setting localization for central and northern america formatting
    cardinal_separator = pynini.string_map([",", NEMO_SPACE])
    decimal_separator = pynini.accep(".")
else:
    cardinal_separator = pynini.string_map([".", NEMO_SPACE])
    decimal_separator = pynini.accep(",")
# Apocopated masculine forms of "one".
ones = pynini.union("un", "ún")
# Rewrites any masculine form of "uno" to the feminine "una".
fem_ones = pynini.union(pynini.cross("un", "una"), pynini.cross("ún", "una"), pynini.cross("uno", "una"))
# Acceptor over number words below one hundred (e.g. "treinta y dos").
one_to_one_hundred = pynini.union(digits, "uno", tens, teens, twenties, tens + pynini.accep(" y ") + digits)
# Hundreds series with the masculine "-ientos" ending rewritten to feminine "-ientas".
fem_hundreds = hundreds @ pynini.cdrewrite(pynini.cross("ientos", "ientas"), "", "", NEMO_SIGMA)
def strip_accent(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Converts all accented vowels to non-accented equivalents

    Args:
        fst: Any fst. Composes vowel conversion onto fst's output strings
    """
    deaccent = pynini.cdrewrite(accents, "", "", NEMO_SIGMA)
    return fst @ deaccent
def shift_cardinal_gender(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Applies gender conversion rules to a cardinal string. These include: rendering all masculine forms of "uno" (including apocopated forms) as "una" and
    Converting all gendered numbers in the hundreds series (200,300,400...) to feminine equivalent (e.g. "doscientos" -> "doscientas"). Conversion only applies
    to value place for <1000 and multiple of 1000. (e.g. "doscientos mil doscientos" -> "doscientas mil doscientas".) For place values greater than the thousands, there
    is no gender shift as the higher powers of ten ("millones", "billones") are masculine nouns and any conversion would be formally
    ungrammatical.
    e.g.
        "doscientos" -> "doscientas"
        "doscientos mil" -> "doscientas mil"
        "doscientos millones" -> "doscientos millones"
        "doscientos mil millones" -> "doscientos mil millones"
        "doscientos millones doscientos mil doscientos" -> "doscientos millones doscientas mil doscientas"

    Args:
        fst: Any fst. Composes conversion onto fst's output strings
    """
    # Right context: a hundreds value followed by "mil"/"milésimo" plus an optional
    # sub-thousand remainder, ending at end-of-string, a closing quote, or the
    # decimal separator — i.e. the thousands block of the number.
    before_mil = (
        NEMO_SPACE
        + (pynini.accep("mil") | pynini.accep("milésimo"))
        + pynini.closure(NEMO_SPACE + hundreds, 0, 1)
        + pynini.closure(NEMO_SPACE + one_to_one_hundred, 0, 1)
        + pynini.union(pynini.accep("[EOS]"), pynini.accep("\""), decimal_separator)
    )
    # Right context: the final sub-thousand block at the very end of the string.
    before_double_digits = pynini.closure(NEMO_SPACE + one_to_one_hundred, 0, 1) + pynini.union(
        pynini.accep("[EOS]"), pynini.accep("\"")
    )
    # Apply feminine hundreds only in the two allowed positions, then fix "uno"
    # forms at string end (before quote/EOS/decimal separator).
    fem_allign = pynini.cdrewrite(fem_hundreds, "", before_mil, NEMO_SIGMA)  # doscientas mil dosciento
    fem_allign @= pynini.cdrewrite(fem_hundreds, "", before_double_digits, NEMO_SIGMA)  # doscientas mil doscienta
    fem_allign @= pynini.cdrewrite(
        fem_ones, "", pynini.union("[EOS]", "\"", decimal_separator), NEMO_SIGMA
    )  # If before a quote or EOS, we know it's the end of a string
    return fst @ fem_allign
def shift_number_gender(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Performs gender conversion on all verbalized numbers in output. All values in the hundreds series (200,300,400) are changed to
    feminine gender (e.g. "doscientos" -> "doscientas") and all forms of "uno" (including apocopated forms) are converted to "una".
    This has no boundary restriction and will perform shift across all values in output string.
    e.g.
        "doscientos" -> "doscientas"
        "doscientos millones" -> "doscientas millones"
        "doscientos millones doscientos" -> "doscientas millones doscientas"

    Args:
        fst: Any fst. Composes conversion onto fst's output strings
    """
    # Word boundary for the "uno" rewrite: space, end of string, or closing quote.
    word_boundary = pynini.union(NEMO_SPACE, pynini.accep("[EOS]"), pynini.accep("\""))
    to_feminine = pynini.cdrewrite(fem_hundreds, "", "", NEMO_SIGMA)
    to_feminine @= pynini.cdrewrite(fem_ones, "", word_boundary, NEMO_SIGMA)
    return fst @ to_feminine
def strip_cardinal_apocope(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Reverts apocope on cardinal strings in line with formation rules. e.g. "un" -> "uno". Due to cardinal formation rules, this in effect only
    affects strings where the final value is a variation of "un".
    e.g.
        "un" -> "uno"
        "veintiún" -> "veintiuno"

    Args:
        fst: Any fst. Composes conversion onto fst's output strings
    """
    # Cardinals keep apocope for internal large values ("un millón"), so only
    # the string-final instance (before EOS or a closing quote) is restored.
    restore_uno = pynini.union(pynini.cross("un", "uno"), pynini.cross("ún", "uno"))
    rule = pynini.cdrewrite(restore_uno, "", pynini.union("[EOS]", "\""), NEMO_SIGMA)
    return fst @ rule
def add_cardinal_apocope_fem(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Adds apocope on cardinal strings in line with stressing rules. e.g. "una" -> "un". This only occurs when "una" precedes a stressed "a" sound in formal speech. This is not predictable
    with text string, so is included for non-deterministic cases.
    e.g.
        "una" -> "un"
        "veintiuna" -> "veintiún"

    Args:
        fst: Any fst. Composes conversion onto fst's output strings
    """
    # The stress trigger follows the cardinal and affects only the preceding word,
    # so only the string-final instance (before EOS or a closing quote) is rewritten.
    apocope = pynini.union(pynini.cross("una", "un"), pynini.cross("veintiuna", "veintiún"))
    rule = pynini.cdrewrite(apocope, "", pynini.union("[EOS]", "\""), NEMO_SIGMA)
    return fst @ rule
def roman_to_int(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Alters given fst to convert Roman integers (lower and upper cased) into Arabic numerals. Valid for values up to 1000.
    e.g.
        "V" -> "5"
        "i" -> "1"

    Args:
        fst: Any fst. Composes fst onto Roman conversion outputs.
    """

    def _load_roman(file: str):
        """Loads a Roman-numeral TSV and accepts both lower- and upper-case spellings."""
        mappings = load_labels(get_abs_path(file))
        both_cases = [(x, y) for x, y in mappings] + [(x.upper(), y) for x, y in mappings]
        return pynini.string_map(both_cases)

    digit = _load_roman("data/roman/digit.tsv")
    ties = _load_roman("data/roman/ties.tsv")
    hundreds = _load_roman("data/roman/hundreds.tsv")
    thousands = _load_roman("data/roman/thousands.tsv")

    # A missing lower place value is filled with a lightly penalized "0".
    zero_fill = pynutil.add_weight(pynutil.insert("0"), 0.01)
    graph = pynini.union(
        digit,
        ties + (digit | zero_fill),
        hundreds + (ties | zero_fill) + (digit | zero_fill),
        thousands + (hundreds | zero_fill) + (ties | zero_fill) + (digit | zero_fill),
    ).optimize()
    return graph @ fst
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/graph_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Regional formatting switch for the Spanish TN grammars:
# "eu" -> European style (period/space thousands separator, comma decimal mark);
# "am" -> Central/North American style (comma/space thousands separator, period decimal mark).
# See the LOCALIZATION branch in es/graph_utils.py for where this takes effect.
LOCALIZATION = "eu"  # Set to am for alternate formatting
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
def get_abs_path(rel_path):
    """
    Get absolute path

    Args:
        rel_path: relative path to this file

    Returns absolute path
    """
    # os.path.join handles path separators portably; the original hand-built
    # the path with a hard-coded '/'.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
def load_labels(abs_path):
    """
    loads a tab-separated label file as a list of [input, output] rows

    Args:
        abs_path: absolute path

    Returns list of mappings
    """
    # Use a context manager so the file handle is always closed (the original
    # leaked it), and pin UTF-8 since the data files contain accented characters.
    with open(abs_path, encoding="utf-8") as label_tsv:
        return list(csv.reader(label_tsv, delimiter="\t"))
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
time_zones = pynini.string_file(get_abs_path("data/time/time_zone.tsv"))
suffix = pynini.string_file(get_abs_path("data/time/time_suffix.tsv"))
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time, e.g.
        "02:15 est" -> time { hours: "dos" minutes: "quince" zone: "e s t"}
        "2 h" -> time { hours: "dos" }
        "9 h" -> time { hours: "nueve" }
        "02:15:10 h" -> time { hours: "dos" minutes: "quince" seconds: "diez"}

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="time", kind="classify", deterministic=deterministic)

        # Either "." or ":" may separate the written time fields.
        delete_time_delimiter = pynutil.delete(pynini.union(".", ":"))

        # Hours agree with the feminine noun "hora(s)": rewrite "un"/"ún" -> "una".
        fem_one = pynini.string_map([("un", "una"), ("ún", "una")])
        change_one = pynini.cdrewrite(fem_one, "", "", NEMO_SIGMA)
        cardinal_graph_fem = cardinal.graph @ change_one
        cardinal_graph_masc = cardinal.graph

        # Day-period markers (from the suffix table), e.g. "a. m." / "p. m.".
        day_suffix = pynutil.insert("suffix: \"") + suffix + pynutil.insert("\"")
        day_suffix = delete_space + insert_space + day_suffix

        # Unit abbreviations following the numerals: "h", "min"/"m", "seg"/"s".
        delete_hora_suffix = delete_space + insert_space + pynutil.delete("h")
        delete_minute_suffix = delete_space + insert_space + (pynutil.delete("min") | pynutil.delete("m"))
        delete_second_suffix = delete_space + insert_space + (pynutil.delete("seg") | pynutil.delete("s"))

        labels_hour_24 = [
            str(x) for x in range(0, 25)
        ]  # Can see both systems. Twelve hour requires am/pm for ambiguity resolution
        labels_hour_12 = [str(x) for x in range(1, 13)]
        labels_minute_or_second = [str(x) for x in range(1, 60)]

        # Accept one- or two-digit fields; a single leading zero is dropped ("07" -> "7").
        delete_leading_zero_to_double_digit = (
            pynini.closure(pynutil.delete("0") | (NEMO_DIGIT - "0"), 0, 1) + NEMO_DIGIT
        )
        graph_24 = (
            pynini.closure(NEMO_DIGIT, 1, 2) @ delete_leading_zero_to_double_digit @ pynini.union(*labels_hour_24)
        )
        graph_12 = (
            pynini.closure(NEMO_DIGIT, 1, 2) @ delete_leading_zero_to_double_digit @ pynini.union(*labels_hour_12)
        )
        graph_minute_or_second = (
            pynini.closure(NEMO_DIGIT, 1, 2)
            @ delete_leading_zero_to_double_digit
            @ pynini.union(*labels_minute_or_second)
        )
        # Verbalize: hours are feminine, minutes/seconds masculine.
        graph_hour_24 = graph_24 @ cardinal_graph_fem
        graph_hour_12 = graph_12 @ cardinal_graph_fem
        graph_minute_or_second @= cardinal_graph_masc

        # Field-tagged variants used to assemble the token output below.
        final_graph_hour_only_24 = (
            pynutil.insert("hours: \"") + graph_hour_24 + pynutil.insert("\"") + delete_hora_suffix
        )
        final_graph_hour_only_12 = pynutil.insert("hours: \"") + graph_hour_12 + pynutil.insert("\"") + day_suffix
        final_graph_hour_24 = pynutil.insert("hours: \"") + graph_hour_24 + pynutil.insert("\"")
        final_graph_hour_12 = pynutil.insert("hours: \"") + graph_hour_12 + pynutil.insert("\"")
        final_graph_minute = pynutil.insert("minutes: \"") + graph_minute_or_second + pynutil.insert("\"")
        final_graph_second = pynutil.insert("seconds: \"") + graph_minute_or_second + pynutil.insert("\"")

        # handle suffixes like gmt or utc+1
        # Offsets keep the masculine "uno" (unlike hours, which are feminine).
        utc_one = pynini.string_map([("un", "uno"), ("ún", "uno")])
        change_utc_one = pynini.cdrewrite(utc_one, "", "", NEMO_SIGMA)
        utc_cardinal_graph = cardinal.graph @ change_utc_one
        # Offset is a single digit or an hour:minute pair.
        utc_or_gmt_numbers = (NEMO_DIGIT @ utc_cardinal_graph) | (
            graph_hour_24 + delete_time_delimiter + pynutil.insert(" ") + graph_minute_or_second
        )
        # "+"/"-" spoken as "más"/"menos"; surrounding spaces are optional.
        utc_or_gmt_diff = (
            pynutil.delete(pynini.closure(" ", 0, 1))
            + pynutil.insert(" ")
            + pynini.string_map([("+", "más"), ("-", "menos")])
            + pynutil.delete(pynini.closure(" ", 0, 1))
            + pynutil.insert(" ")
            + utc_or_gmt_numbers
        )
        time_zone_graph = time_zones + pynini.closure(utc_or_gmt_diff, 0, 1)
        final_time_zone_optional = pynini.closure(
            delete_space + insert_space + pynutil.insert("zone: \"") + time_zone_graph + pynutil.insert("\""), 0, 1,
        )

        # 02.30 h
        graph_hm = (
            final_graph_hour_24
            + delete_time_delimiter
            + (pynutil.delete("00") | (insert_space + final_graph_minute))
            + pynini.closure(
                (delete_time_delimiter + (pynutil.delete("00") | (insert_space + final_graph_second))), 0, 1
            )
            + pynini.closure(delete_hora_suffix, 0, 1)  # 2.30 is valid if unambiguous
            + final_time_zone_optional
        )
        # 2 h 30 min
        graph_hm |= (
            final_graph_hour_24
            + delete_hora_suffix
            + delete_space
            + (pynutil.delete("00") | (insert_space + final_graph_minute))
            + delete_minute_suffix
            + pynini.closure(
                (delete_space + (pynutil.delete("00") | (insert_space + final_graph_second)) + delete_second_suffix),
                0,
                1,
            )
            + final_time_zone_optional
        )
        # 2.30 a. m. (Only for 12 hour clock)
        graph_hm |= (
            final_graph_hour_12
            + delete_time_delimiter
            + (pynutil.delete("00") | (insert_space + final_graph_minute))
            + pynini.closure(
                (delete_time_delimiter + (pynutil.delete("00") | (insert_space + final_graph_second))), 0, 1
            )
            + day_suffix
            + final_time_zone_optional
        )
        graph_h = (
            pynini.union(final_graph_hour_only_24, final_graph_hour_only_12) + final_time_zone_optional
        )  # Should always have a time indicator, else we'll pass to cardinals

        if not deterministic:
            # This includes alternate vocalization (hour menos min, min para hour), here we shift the times and indicate a `style` tag
            hour_shift_24 = pynini.invert(pynini.string_file(get_abs_path("data/time/hour_to_24.tsv")))
            hour_shift_12 = pynini.invert(pynini.string_file(get_abs_path("data/time/hour_to_12.tsv")))
            minute_shift = pynini.string_file(get_abs_path("data/time/minute_to.tsv"))

            graph_hour_to_24 = graph_24 @ hour_shift_24 @ cardinal_graph_fem
            graph_hour_to_12 = graph_12 @ hour_shift_12 @ cardinal_graph_fem
            graph_minute_to = minute_shift @ graph_minute_or_second

            final_graph_hour_to_24 = pynutil.insert("hours: \"") + graph_hour_to_24 + pynutil.insert("\"")
            final_graph_hour_to_12 = pynutil.insert("hours: \"") + graph_hour_to_12 + pynutil.insert("\"")
            final_graph_minute_to = pynutil.insert("minutes: \"") + graph_minute_to + pynutil.insert("\"")

            # style "1" marks the "menos" phrasing, style "2" the "para" phrasing.
            graph_menos = pynutil.insert(" style: \"1\"")
            graph_para = pynutil.insert(" style: \"2\"")
            final_graph_style = graph_menos | graph_para

            # 02.30 h (omitting seconds since a bit awkward)
            graph_hm |= (
                final_graph_hour_to_24
                + delete_time_delimiter
                + insert_space
                + final_graph_minute_to
                + pynini.closure(delete_hora_suffix, 0, 1)  # 2.30 is valid if unambiguous
                + final_time_zone_optional
                + final_graph_style
            )
            # 2 h 30 min
            graph_hm |= (
                final_graph_hour_to_24
                + delete_hora_suffix
                + delete_space
                + insert_space
                + final_graph_minute_to
                + delete_minute_suffix
                + final_time_zone_optional
                + final_graph_style
            )
            # 2.30 a. m. (Only for 12 hour clock)
            graph_hm |= (
                final_graph_hour_to_12
                + delete_time_delimiter
                + insert_space
                + final_graph_minute_to
                + day_suffix
                + final_time_zone_optional
                + final_graph_style
            )

        final_graph = graph_hm | graph_h

        if deterministic:
            final_graph = final_graph + pynutil.insert(" preserve_order: true")
        final_graph = final_graph.optimize()

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/time.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_NON_BREAKING_SPACE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
convert_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# TSV-backed transducers for measurement units: `unit`/`unit_complex` map written
# abbreviations to spoken singular forms, and the `unit_plural_*` tables rewrite
# those singular forms to plurals by grammatical gender (applied via composition
# in MeasureFst below). `math_symbols` covers arithmetic operator names.
unit = pynini.string_file(get_abs_path("data/measures/measurements.tsv"))
unit_complex = pynini.string_file(get_abs_path("data/measures/measurements_complex.tsv"))
unit_plural_fem = pynini.string_file(get_abs_path("data/measures/measurements_plural_fem.tsv"))
unit_plural_masc = pynini.string_file(get_abs_path("data/measures/measurements_plural_masc.tsv"))
math_symbols = pynini.string_file(get_abs_path("data/measures/math_symbols.tsv"))
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure, e.g.
        "2,4 g" -> measure { cardinal { integer_part: "dos" fractional_part: "cuatro" units: "gramos" preserve_order: true } }
        "1 g" -> measure { cardinal { integer: "un" units: "gramo" preserve_order: true } }
        "1 millón g" -> measure { cardinal { integer: "un" quantity: "millón" units: "gramos" preserve_order: true } }
    This class also converts words containing numbers and letters
    e.g. "a-8" —> "a ocho"
    e.g. "1,2-a" —> "uno coma dos a"
    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        fraction: FractionFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """
    def __init__(self, cardinal: GraphFst, decimal: GraphFst, fraction: GraphFst, deterministic: bool = True):
        super().__init__(name="measure", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph
        # Plural unit forms are derived by composing the singular inventory with the
        # gendered plural rewrites (feminine or masculine).
        unit_singular = unit
        unit_plural = unit_singular @ (unit_plural_fem | unit_plural_masc)
        graph_unit_singular = convert_space(unit_singular)
        graph_unit_plural = convert_space(unit_plural)
        optional_graph_negative = pynini.closure("-", 0, 1)
        # Wrap unit expansions in the `units: "..."` token attribute.
        unit_plural = pynutil.insert('units: "') + graph_unit_plural + pynutil.insert('"')
        unit_singular_graph = pynutil.insert('units: "') + graph_unit_singular + pynutil.insert('"')
        # Complex (multi-word/compound) units from measurements_complex.tsv, pluralized the same way.
        unit_complex_plural = unit_complex @ (unit_plural_fem | unit_plural_masc)
        graph_unit_complex = convert_space(unit_complex)
        graph_unit_complex_plural = convert_space(unit_complex_plural)
        # "/" between units reads as "por" (e.g. "km/h" -> "kilómetros por hora");
        # a non-breaking space keeps the unit phrase together downstream.
        graph_unit_denominator = (
            pynini.cross("/", "por") + pynutil.insert(NEMO_NON_BREAKING_SPACE) + graph_unit_singular
        )
        optional_unit_denominator = pynini.closure(
            pynutil.insert(NEMO_NON_BREAKING_SPACE) + graph_unit_denominator, 0, 1,
        )
        # Singular complex units: plain unit + optional denominator, bare denominator, or compound unit.
        complex_unit_singular_graph = (
            pynutil.insert('units: "')
            + ((graph_unit_singular + optional_unit_denominator) | graph_unit_denominator | graph_unit_complex)
            + pynutil.insert('"')
        )
        complex_unit_plural_graph = (
            pynutil.insert('units: "')
            + ((graph_unit_plural + optional_unit_denominator) | graph_unit_denominator | graph_unit_complex_plural)
            + pynutil.insert('"')
        )
        # Decimal quantities always take plural units.
        subgraph_decimal = (
            decimal.fst + insert_space + pynini.closure(NEMO_SPACE, 0, 1) + (unit_plural | complex_unit_plural_graph)
        )
        # Any cardinal except the bare string "1" takes plural units.
        subgraph_cardinal = (
            (optional_graph_negative + (NEMO_SIGMA - "1")) @ cardinal.fst
            + insert_space
            + pynini.closure(delete_space, 0, 1)
            + (unit_plural | complex_unit_plural_graph)
        )
        # Exactly "1" (optionally negative) takes singular units.
        subgraph_cardinal |= (
            (optional_graph_negative + pynini.accep("1")) @ cardinal.fst
            + insert_space
            + pynini.closure(delete_space, 0, 1)
            + (unit_singular_graph | complex_unit_singular_graph)
        )
        subgraph_fraction = (
            fraction.fst
            + insert_space
            + pynini.closure(delete_space, 0, 1)
            + (unit_plural | complex_unit_singular_graph)
        )
        # Dimension notation like "2,4x3": "x"/"X" is kept as the unit of a decimal/cardinal.
        decimal_times = (
            pynutil.insert("decimal { ")
            + decimal.final_graph_wo_negative
            + pynutil.insert(' } units: "')
            + pynini.union("x", "X")
            + pynutil.insert('"')
        )
        cardinal_times = (
            pynutil.insert('cardinal { integer: "')
            + strip_cardinal_apocope(cardinal_graph)
            + pynutil.insert('" } units: "')
            + pynini.union("x", "X")
            + pynutil.insert('"')
        )
        # Alphanumeric compounds joined by "-", e.g. "1-a" / "1,2-a" / "a-8".
        cardinal_dash_alpha = (
            pynutil.insert('cardinal { integer: "')
            + strip_cardinal_apocope(cardinal_graph)
            + pynutil.delete("-")
            + pynutil.insert('" } units: "')
            + pynini.closure(NEMO_ALPHA, 1)
            + pynutil.insert('"')
        )
        decimal_dash_alpha = (
            pynutil.insert("decimal { ")
            + decimal.final_graph_wo_negative
            + pynutil.delete("-")
            + pynutil.insert(' } units: "')
            + pynini.closure(NEMO_ALPHA, 1)
            + pynutil.insert("\"")
        )
        alpha_dash_cardinal = (
            pynutil.insert('units: "')
            + pynini.closure(NEMO_ALPHA, 1)
            + pynutil.delete("-")
            + pynutil.insert('"')
            + pynutil.insert(' cardinal { integer: "')
            + cardinal_graph
            + pynutil.insert('" } preserve_order: true')
        )
        alpha_dash_decimal = (
            pynutil.insert('units: "')
            + pynini.closure(NEMO_ALPHA, 1)
            + pynutil.delete("-")
            + pynutil.insert('"')
            + pynutil.insert(" decimal { ")
            + decimal.final_graph_wo_negative
            + pynutil.insert(" } preserve_order: true")
        )
        # Math expressions like "2 + 3 = 5": operands may be separated by spaces or not.
        delimiter = pynini.accep(" ") | pynutil.insert(" ")
        # Prefer the full form "uno" over apocopated "un" when reading math operands.
        math_numbers = cardinal_graph | pynutil.add_weight(cardinal_graph @ pynini.cross("un", "uno"), -1) | NEMO_ALPHA
        math = (
            math_numbers
            + pynini.closure(delimiter + math_symbols + delimiter + math_numbers, 1)
            + delimiter
            + pynini.cross("=", "es igual a")
            + delimiter
            + math_numbers
        )
        # Also allow the "=" to come before the operator chain, e.g. "5 = 2 + 3".
        math |= (
            math_numbers
            + delimiter
            + pynini.cross("=", "es igual a")
            + math_numbers
            + pynini.closure(delimiter + math_symbols + delimiter + math_numbers, 1)
        )
        math = (
            pynutil.insert("units: \"math\" cardinal { integer: \"")
            + math
            + pynutil.insert("\" } preserve_order: true")
        )
        final_graph = (
            subgraph_decimal
            | subgraph_cardinal
            | cardinal_dash_alpha
            | alpha_dash_cardinal
            | decimal_dash_alpha
            | subgraph_fraction
            | decimal_times
            | cardinal_times
            | alpha_dash_decimal
            | math
        )
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
)
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Data files for fraction denominators: irregular ordinal rewrites, power-of-ten
# denominators (e.g. millionths), and symbolic fractions (e.g. "½").
ordinal_exceptions = pynini.string_file(get_abs_path("data/fractions/ordinal_exceptions.tsv"))
higher_powers_of_ten = pynini.string_file(get_abs_path("data/fractions/powers_of_ten.tsv"))
fraction_symbols = pynini.string_file(get_abs_path("data/fractions/fraction_symbols.tsv"))
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
        "23 4/5" ->
        tokens { fraction { integer: "veintitrés" numerator: "cuatro" denominator: "quinto" morphosyntactic_features: "ordinal" } }
    Args:
        cardinal: CardinalFst
        ordinal: OrdinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """
    def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="fraction", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph
        ordinal_graph = ordinal.graph
        # 2-10 are all ordinals
        three_to_ten = pynini.string_map(["2", "3", "4", "5", "6", "7", "8", "9", "10",])
        block_three_to_ten = pynutil.delete(three_to_ten)  # To block cardinal productions
        if not deterministic:  # Multiples of tens are sometimes rendered as ordinals
            three_to_ten |= pynini.string_map(["20", "30", "40", "50", "60", "70", "80", "90",])
        graph_three_to_ten = three_to_ten @ ordinal_graph
        # Apply irregular ordinal replacements (e.g. forms listed in ordinal_exceptions.tsv).
        graph_three_to_ten @= pynini.cdrewrite(ordinal_exceptions, "", "", NEMO_SIGMA)
        # Higher powers of tens (and multiples) are converted to ordinals.
        hundreds = pynini.string_map(["100", "200", "300", "400", "500", "600", "700", "800", "900",])
        graph_hundreds = hundreds @ ordinal_graph
        multiples_of_thousand = ordinal.multiples_of_thousand  # So we can have X milésimos
        graph_higher_powers_of_ten = (
            pynini.closure(ordinal.one_to_one_thousand + NEMO_SPACE, 0, 1)
            + pynini.closure("mil ", 0, 1)
            + pynini.closure(ordinal.one_to_one_thousand + NEMO_SPACE, 0, 1)
        )  # x millones / x mil millones / x mil z millones
        graph_higher_powers_of_ten += higher_powers_of_ten
        graph_higher_powers_of_ten = cardinal_graph @ graph_higher_powers_of_ten
        graph_higher_powers_of_ten @= pynini.cdrewrite(
            pynutil.delete("un "), pynini.accep("[BOS]"), pynini.project(higher_powers_of_ten, "output"), NEMO_SIGMA,
        )  # we drop 'un' from these ordinals (millionths, not one-millionths)
        graph_higher_powers_of_ten = multiples_of_thousand | graph_hundreds | graph_higher_powers_of_ten
        block_higher_powers_of_ten = pynutil.delete(
            pynini.project(graph_higher_powers_of_ten, "input")
        )  # For cardinal graph
        # Ordinal denominators are tagged so the verbalizer knows to apply ordinal morphology.
        graph_fractions_ordinals = graph_higher_powers_of_ten | graph_three_to_ten
        graph_fractions_ordinals += pynutil.insert(
            '" morphosyntactic_features: "ordinal"'
        )  # We note the root for processing later
        # Blocking the digits and hundreds from Cardinal graph
        graph_fractions_cardinals = pynini.cdrewrite(
            block_three_to_ten | block_higher_powers_of_ten, pynini.accep("[BOS]"), pynini.accep("[EOS]"), NEMO_SIGMA,
        )
        graph_fractions_cardinals @= NEMO_CHAR.plus @ pynini.cdrewrite(
            pynutil.delete("0"), pynini.accep("[BOS]"), pynini.accep("[EOS]"), NEMO_SIGMA,
        )  # Empty characters become '0' for NEMO_CHAR fst, so need to block
        graph_fractions_cardinals @= cardinal_graph
        graph_fractions_cardinals += pynutil.insert(
            '" morphosyntactic_features: "add_root"'
        )  # blocking these entries to reduce erroneous possibilities in debugging
        if deterministic:
            graph_fractions_cardinals = (
                pynini.closure(NEMO_DIGIT, 1, 2) @ graph_fractions_cardinals
            )  # Past hundreds the conventional scheme can be hard to read. For determinism we stop here
        graph_denominator = pynini.union(
            graph_fractions_ordinals,
            graph_fractions_cardinals,
            pynutil.add_weight(cardinal_graph + pynutil.insert('"'), 0.001),
        )  # Last form is simply recording the cardinal. Weighting so last resort
        integer = pynutil.insert('integer_part: "') + cardinal_graph + pynutil.insert('"') + NEMO_SPACE
        # Numerator ends at the "/" separator (with or without surrounding spaces).
        numerator = (
            pynutil.insert('numerator: "') + cardinal_graph + (pynini.cross("/", '" ') | pynini.cross(" / ", '" '))
        )
        denominator = pynutil.insert('denominator: "') + graph_denominator
        # Either a symbolic fraction (e.g. "½") expanded then parsed, or an explicit "n/d".
        fraction = (fraction_symbols @ (numerator + denominator)) | (numerator + denominator)
        optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1)
        self.graph = optional_minus_graph + pynini.closure(integer, 0, 1) + fraction
        final_graph = self.add_tokens(self.graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/fraction.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
insert_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import ones
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Spoken-form number inventories (spoken -> written in the TSVs; inverted below where needed).
graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers, e.g.
        123-123-5678 -> { number_part: "uno dos tres uno dos tres cinco seis siete ocho" }.
    In Spanish, digits are generally read individually, or as 2-digit numbers,
    eg. "123" = "uno dos tres",
        "1234" = "doce treinta y cuatro".
    This will verbalize sequences of 10 (3+3+4 e.g. 123-456-7890).
    9 (3+3+3 e.g. 123-456-789) and 8 (4+4 e.g. 1234-5678) digits.
    (we ignore more complicated cases such as "doscientos y dos" or "tres nueves").
    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """
    def __init__(self, deterministic: bool = True):
        # NOTE(review): `deterministic` is not forwarded to GraphFst here, unlike sibling
        # taggers (e.g. MeasureFst) — confirm whether this is intentional.
        super().__init__(name="telephone", kind="classify")
        # create `single_digits` and `double_digits` graphs as these will be
        # the building blocks of possible telephone numbers
        single_digits = pynini.invert(graph_digit).optimize() | pynini.cross("0", "cero")
        # Replace apocopated one-forms with "uno" when digits are read individually.
        single_digits @= pynini.cdrewrite(pynini.cross(ones, "uno"), "", "", NEMO_SIGMA)
        # Any double digit
        teen = pynini.invert(graph_teen)
        ties = pynini.invert(graph_ties)
        twenties = pynini.invert(graph_twenties)
        double_digits = teen
        # Ties alone ("40" -> "cuarenta") or with a unit digit ("45" -> "cuarenta y cinco").
        double_digits |= ties + (pynutil.delete('0') | (pynutil.insert(" y ") + single_digits))
        double_digits |= twenties
        # define separators
        separators = pynini.union("-", " ", ".")
        delete_separator = pynini.closure(pynutil.delete(separators), 0, 1)
        # process country codes as '+1' -> country_code: "one"
        triple_number = pynini.closure(single_digits + insert_space, 2, 2) + single_digits
        country_code = pynini.closure(pynini.cross("+", "más "), 0, 1) + (
            single_digits | double_digits | triple_number
        )
        # add ip and telephone prompts to this tag (as is in EN)
        ip_prompts = pynini.string_file(get_abs_path("data/telephone/ip_prompt.tsv"))
        telephone_prompts = pynini.string_file(get_abs_path("data/telephone/telephone_prompt.tsv"))
        tel_prompt_sequence = telephone_prompts + NEMO_SPACE + pynini.closure(country_code, 0, 1)
        country_code_graph = (
            pynutil.insert("country_code: \"")
            + (country_code | ip_prompts | tel_prompt_sequence)
            + delete_separator
            + pynutil.insert("\"")
        )
        # process IP addresses: four dot-separated groups of up to three spoken digits.
        digit_to_str_graph = single_digits + pynini.closure(pynutil.insert(" ") + single_digits, 0, 2)
        ip_graph = digit_to_str_graph + (pynini.cross(".", " punto ") + digit_to_str_graph) ** 3
        # process area codes with or without parentheses i.e. "212" or (212)
        area_code = (
            pynini.closure(pynutil.delete("("), 0, 1)
            + pynini.closure(single_digits + insert_space, 3, 3)
            + pynini.closure(pynutil.delete(")"), 0, 1)
        )
        # process extensions: "ext." marker is dropped and read as "extensión <digits>".
        delete_ext = pynini.closure(pynutil.delete("ext."), 0, 1)
        ext_graph = (
            pynutil.insert("extension: \"")
            + delete_separator
            + delete_ext
            + delete_separator
            + pynutil.insert("extensión ")
            + pynini.closure(single_digits + insert_space, 1, 3)
            + single_digits
            + pynutil.insert("\"")
        )
        # define `ten_digit_graph`, `nine_digit_graph`, `eight_digit_graph`
        # which produces telephone numbers spoken (1) only with single digits,
        # or (2) spoken with double digits (and sometimes single digits)
        # 10-digit option (1): all single digits
        ten_digit_graph = (
            area_code
            + delete_separator
            + pynini.closure(single_digits + insert_space, 3, 3)
            + delete_separator
            + pynini.closure(single_digits + insert_space, 3, 3)
            + single_digits
        )
        # 9-digit option (1): all single digits
        nine_digit_graph = (
            area_code
            + delete_separator
            + pynini.closure(single_digits + insert_space, 3, 3)
            + delete_separator
            + pynini.closure(single_digits + insert_space, 2, 2)
            + single_digits
        )
        # 8-digit option (1): all single digits
        eight_digit_graph = (
            pynini.closure(area_code, 0, 1)
            + delete_separator
            + pynini.closure(single_digits + insert_space, 4, 4)
            + delete_separator
            + pynini.closure(single_digits + insert_space, 3, 3)
            + single_digits
        )
        if not deterministic:
            # 10-digit option (2): (1+2) + (1+2) + (2+2) digits
            ten_digit_graph |= (
                pynini.closure(single_digits + insert_space + double_digits + insert_space + delete_separator, 2, 2)
                + double_digits
                + insert_space
                + double_digits
            )
            # 9-digit option (2): (1+2) + (1+2) + (1+2) digits
            nine_digit_graph |= (
                pynini.closure(single_digits + insert_space + double_digits + insert_space + delete_separator, 2, 2)
                + single_digits
                + insert_space
                + double_digits
            )
            # 8-digit option (2): (2+2) + (2+2) digits
            eight_digit_graph |= (
                double_digits
                + insert_space
                + double_digits
                + insert_space
                + delete_separator
                + double_digits
                + insert_space
                + double_digits
            )
        # handle numbers with letters like "1-800-go-u-haul"
        # "800" area codes are read as "ochocientos" in this pattern.
        num_letter_area_code = area_code @ pynini.cross("ocho cero cero ", "ochocientos ")
        number_word = pynini.closure(single_digits | NEMO_ALPHA, 1, 8)
        number_words = pynini.closure(number_word + pynini.cross(separators, " "), 0, 2) + number_word
        nums_w_letters_graph = pynutil.add_weight(num_letter_area_code + delete_separator + number_words, 0.01)
        number_part = pynini.union(
            ten_digit_graph, nine_digit_graph, eight_digit_graph, nums_w_letters_graph, ip_graph
        )
        number_part = pynutil.insert("number_part: \"") + number_part + pynutil.insert("\"")
        # Full token: optional country code + number + optional extension.
        graph = (
            pynini.closure(country_code_graph + delete_separator + insert_space, 0, 1)
            + number_part
            + pynini.closure(delete_separator + insert_space + ext_graph, 0, 1)
        )
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import roman_to_int, strip_accent
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Ordinal inventories (TSVs map spoken -> written; inverted here to written -> spoken).
digit = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/digit.tsv")))
teens = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/teen.tsv")))
twenties = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/twenties.tsv")))
ties = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/ties.tsv")))
hundreds = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/hundreds.tsv")))
# Maps ordinal suffixes (".º", ".ª", ...) to morphosyntactic feature labels.
gender_suffix = pynini.string_file(get_abs_path("data/ordinals/gender_suffix.tsv"))
def get_one_to_one_thousand(cardinal: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Produces an acceptor for verbalizations of all numbers from 1 to 1000. Needed for ordinals and fractions.

    Args:
        cardinal: CardinalFst

    Returns:
        fst: A pynini.FstLike object
    """
    # NOTE: range(1, 1000) enumerates 1..999; "1000" itself is handled separately by callers.
    written_forms = [str(number) for number in range(1, 1000)]
    spoken = pynini.string_map(written_forms) @ cardinal
    return pynini.project(spoken, "output").optimize()
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
        "21.º" -> ordinal { integer: "vigésimo primero" morphosyntactic_features: "gender_masc" }
    This class converts ordinal up to the millionth (millonésimo) order (exclusive).
    This FST also records the ending of the ordinal (called "morphosyntactic_features"):
    either as gender_masc, gender_fem, or apocope. Also introduces plural feature for non-deterministic graphs.
    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """
    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        # NOTE(review): `deterministic` is not forwarded to GraphFst here, unlike sibling
        # taggers — confirm whether this is intentional.
        super().__init__(name="ordinal", kind="classify")
        cardinal_graph = cardinal.graph
        graph_digit = digit.optimize()
        graph_teens = teens.optimize()
        graph_ties = ties.optimize()
        graph_twenties = twenties.optimize()
        graph_hundreds = hundreds.optimize()
        if not deterministic:
            # Some alternative derivations
            graph_ties = graph_ties | pynini.cross("sesenta", "setuagésimo")
            graph_teens = graph_teens | pynini.cross("once", "decimoprimero")
            graph_teens |= pynini.cross("doce", "decimosegundo")
            graph_digit = graph_digit | pynini.cross("nueve", "nono")
            graph_digit |= pynini.cross("siete", "sétimo")
        # Tens: teens, "ties [y digit]" (e.g. "treinta y uno"), or the twenties series.
        graph_tens_component = (
            graph_teens
            | (graph_ties + pynini.closure(pynini.cross(" y ", NEMO_SPACE) + graph_digit, 0, 1))
            | graph_twenties
        )
        graph_hundred_component = pynini.union(
            graph_hundreds + pynini.closure(NEMO_SPACE + pynini.union(graph_tens_component, graph_digit), 0, 1),
            graph_tens_component,
            graph_digit,
        )
        # Need to go up to thousands for fractions
        self.one_to_one_thousand = get_one_to_one_thousand(cardinal_graph)
        thousands = pynini.cross("mil", "milésimo")
        graph_thousands = (
            strip_accent(self.one_to_one_thousand) + NEMO_SPACE + thousands
        )  # Cardinals become prefix for thousands series. Since accent on the powers of ten we strip accent from leading words
        graph_thousands @= pynini.cdrewrite(delete_space, "", "milésimo", NEMO_SIGMA)  # merge as a prefix
        graph_thousands |= thousands
        self.multiples_of_thousand = (cardinal_graph @ graph_thousands).optimize()
        if (
            not deterministic
        ):  # Formally the words preceding the power of ten should be a prefix, but some maintain word boundaries.
            graph_thousands |= (self.one_to_one_thousand @ graph_hundred_component) + NEMO_SPACE + thousands
        graph_thousands += pynini.closure(NEMO_SPACE + graph_hundred_component, 0, 1)
        ordinal_graph = graph_thousands | graph_hundred_component
        # Compose with the cardinal graph so written digits drive the ordinal rendering.
        ordinal_graph = cardinal_graph @ ordinal_graph
        if not deterministic:
            # The 10's and 20's series can also be two words
            split_words = pynini.cross("decimo", "décimo ") | pynini.cross("vigesimo", "vigésimo ")
            split_words = pynini.cdrewrite(split_words, "", NEMO_CHAR, NEMO_SIGMA)
            ordinal_graph |= ordinal_graph @ split_words
        # If "octavo" is preceded by the "o" within string, it needs deletion
        ordinal_graph @= pynini.cdrewrite(pynutil.delete("o"), "", "octavo", NEMO_SIGMA)
        self.graph = ordinal_graph.optimize()
        delete_period = pynini.closure(pynutil.delete("."), 0, 1)  # Sometimes the period is omitted
        convert_gender_suffix = delete_period + gender_suffix
        # Managing Romanization, excluding words that may be ambiguous
        roman_ordinals = roman_to_int(ordinal_graph)
        # exceptions = pynini.accep("vi") | pynini.accep("di") | pynini.accep("mi")
        exceptions = pynini.string_file(get_abs_path("data/ordinals/roman_exceptions.tsv"))
        graph_exception = pynini.project(exceptions, 'input')
        # Subtract ambiguous Roman strings from the accepted inputs before composing.
        roman_ordinals = (pynini.project(roman_ordinals, "input") - graph_exception.arcsort()) @ roman_ordinals
        graph_roman = pynutil.insert("integer: \"") + roman_ordinals + pynutil.insert("\"")
        if not deterministic:
            # Introduce plural
            plural = pynini.closure(pynutil.insert("/plural"), 0, 1)
            convert_gender_suffix += plural
            # Romanizations have no morphology marker, so in non-deterministic case we provide option for all
            insert_morphology = pynutil.insert(convert_gender_suffix) + plural
            insert_morphology = (
                pynutil.insert(" morphosyntactic_features: \"") + insert_morphology + pynutil.insert("\"")
            )
            graph_roman += insert_morphology
        else:
            # We insert both genders as default
            graph_roman += pynutil.insert(" morphosyntactic_features: \"gender_masc\"") | pynutil.insert(
                " morphosyntactic_features: \"gender_fem\""
            )
        graph = (
            pynutil.insert("integer: \"")
            + ordinal_graph
            + pynutil.insert("\"")
            + pynutil.insert(" morphosyntactic_features: \"")
            + convert_gender_suffix
            + pynutil.insert("\"")
        )
        graph = pynini.union(graph, graph_roman)
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/ordinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from nemo_text_processing.text_normalization.es.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelist, e.g.
        "sr." -> tokens { name: "señor" }
    This class has highest priority among all classifier grammars. Whitelisted tokens are defined and loaded from "data/whitelist.tsv".

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        input_file: path to a file with whitelist replacements
    """

    def __init__(self, input_case: str, deterministic: bool = True, input_file: str = None):
        super().__init__(name="whitelist", kind="classify", deterministic=deterministic)

        def _read_whitelist(case, path):
            # Load (written, spoken) label pairs; lower-case the written side when requested.
            entries = load_labels(path)
            if case == "lower_cased":
                entries = [[entry[0].lower()] + entry[1:] for entry in entries]
            return pynini.string_map(entries)

        default_path = get_abs_path("data/whitelist.tsv")
        graph = _read_whitelist(input_case, default_path)
        if not deterministic and input_case != "lower_cased":
            # Additionally accept lower-cased written forms, lightly penalized.
            graph |= pynutil.add_weight(_read_whitelist("lower_cased", default_path), weight=0.0001)
        if input_file:
            # A user-supplied whitelist replaces the default one in deterministic mode,
            # and is unioned with it otherwise.
            user_graph = _read_whitelist(input_case, input_file)
            graph = (graph | user_graph) if not deterministic else user_graph
        if not deterministic:
            # Measurement abbreviations are also accepted as whitelist entries.
            graph |= _read_whitelist(input_case, get_abs_path("data/measures/measurements.tsv"))
        self.graph = graph
        self.final_graph = convert_space(self.graph).optimize()
        self.fst = (pynutil.insert('name: "') + self.final_graph + pynutil.insert('"')).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/whitelist.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.es.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.es.taggers.date import DateFst
from nemo_text_processing.text_normalization.es.taggers.decimals import DecimalFst
from nemo_text_processing.text_normalization.es.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.es.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.es.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.es.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.es.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.es.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.es.taggers.time import TimeFst
from nemo_text_processing.text_normalization.es.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.es.taggers.word import WordFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State aRchive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
    """
    def __init__(
        self,
        input_case: str,
        deterministic: bool = False,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
        # Resolve the cache location; the FAR file name encodes input case, determinism,
        # and the (optional) whitelist file so incompatible caches are never reused.
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            whitelist_file = os.path.basename(whitelist) if whitelist else ""
            far_file = os.path.join(
                cache_dir, f"_{input_case}_es_tn_{deterministic}_deterministic{whitelist_file}.far"
            )
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: restore the pre-compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars. This might take some time...")
            # Build each semiotic-class tagger; several depend on previously built ones.
            self.cardinal = CardinalFst(deterministic=deterministic)
            cardinal_graph = self.cardinal.fst
            self.ordinal = OrdinalFst(cardinal=self.cardinal, deterministic=deterministic)
            ordinal_graph = self.ordinal.fst
            self.decimal = DecimalFst(cardinal=self.cardinal, deterministic=deterministic)
            decimal_graph = self.decimal.fst
            self.fraction = FractionFst(cardinal=self.cardinal, ordinal=self.ordinal, deterministic=deterministic)
            fraction_graph = self.fraction.fst
            self.measure = MeasureFst(
                cardinal=self.cardinal, decimal=self.decimal, fraction=self.fraction, deterministic=deterministic
            )
            measure_graph = self.measure.fst
            self.date = DateFst(cardinal=self.cardinal, deterministic=deterministic)
            date_graph = self.date.fst
            word_graph = WordFst(deterministic=deterministic).fst
            self.time = TimeFst(self.cardinal, deterministic=deterministic)
            time_graph = self.time.fst
            self.telephone = TelephoneFst(deterministic=deterministic)
            telephone_graph = self.telephone.fst
            self.electronic = ElectronicFst(deterministic=deterministic)
            electronic_graph = self.electronic.fst
            self.money = MoneyFst(cardinal=self.cardinal, decimal=self.decimal, deterministic=deterministic)
            money_graph = self.money.fst
            self.whitelist = WhiteListFst(input_case=input_case, deterministic=deterministic, input_file=whitelist)
            whitelist_graph = self.whitelist.fst
            punct_graph = PunctuationFst(deterministic=deterministic).fst
            # Union of all taggers; lower weights win, so whitelist (1.01) has highest
            # priority and the fallback word tagger (200) only fires when nothing else matches.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.09)
                | pynutil.add_weight(measure_graph, 1.08)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.09)
                | pynutil.add_weight(date_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.09)
                | pynutil.add_weight(telephone_graph, 1.11)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 200)
            )
            # Punctuation becomes its own token; whitespace runs collapse to a single space.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=2.1) + pynutil.insert(" }")
            punct = pynini.closure(
                pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
                | (pynutil.insert(" ") + punct),
                1,
            )
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            # A sentence is one or more tokens separated by whitespace or punctuation.
            graph = token_plus_punct + pynini.closure(
                (
                    pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
                    | (pynutil.insert(" ") + punct + pynutil.insert(" "))
                )
                + token_plus_punct
            )
            graph = delete_space + graph + delete_space
            graph |= punct
            self.fst = graph.optimize()
            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/tokenize_and_classify.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import decimal_separator
from nemo_text_processing.text_normalization.es.utils import get_abs_path, load_labels
from pynini.lib import pynutil
# Currency data files. `maj_singular_labels` keeps the raw (symbol, name) rows for
# per-symbol iteration in MoneyFst; the rest are FST maps: symbol -> major unit,
# symbol -> minor unit, and singular -> gendered plural rewrites.
maj_singular_labels = load_labels(get_abs_path("data/money/currency_major.tsv"))
maj_singular = pynini.string_file((get_abs_path("data/money/currency_major.tsv")))
min_singular = pynini.string_file(get_abs_path("data/money/currency_minor.tsv"))
fem_plural = pynini.string_file((get_abs_path("data/money/currency_plural_fem.tsv")))
masc_plural = pynini.string_file((get_abs_path("data/money/currency_plural_masc.tsv")))
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money, e.g.
        "€1" -> money { currency_maj: "euro" integer_part: "un"}
        "€1,000" -> money { currency_maj: "euro" integer_part: "un" }
        "€1,001" -> money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un" }
        "£1,4" -> money { integer_part: "una" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true }
               -> money { integer_part: "una" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "penique" preserve_order: true }
        "0,01 £" -> money { fractional_part: "un" currency_min: "penique" preserve_order: true }
        "0,02 £" -> money { fractional_part: "dos" currency_min: "peniques" preserve_order: true }
        "£0,01 million" -> money { currency_maj: "libra" integer_part: "cero" fractional_part: "cero un" quantity: "million" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="classify", deterministic=deterministic)

        cardinal_graph = cardinal.graph
        graph_decimal_final = decimal.final_graph_wo_negative

        maj_singular_graph = maj_singular
        min_singular_graph = min_singular
        # Plural names are derived by rewriting the singular through the gendered plural maps.
        maj_plural_graph = maj_singular @ (fem_plural | masc_plural)
        min_plural_graph = min_singular @ (fem_plural | masc_plural)

        graph_maj_singular = pynutil.insert("currency_maj: \"") + maj_singular_graph + pynutil.insert("\"")
        graph_maj_plural = pynutil.insert("currency_maj: \"") + maj_plural_graph + pynutil.insert("\"")

        graph_integer_one = pynutil.insert("integer_part: \"") + pynini.cross("1", "un") + pynutil.insert("\"")

        # Decimals whose input ends in an alphabetic quantity word (e.g. "million").
        decimal_with_quantity = (NEMO_SIGMA + NEMO_ALPHA) @ graph_decimal_final

        # Currency symbol may precede or follow the amount.
        graph_decimal_plural = pynini.union(
            graph_maj_plural + pynini.closure(delete_space, 0, 1) + insert_space + graph_decimal_final,  # $1,05
            graph_decimal_final + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_plural,  # 1,05 $
        )
        graph_decimal_plural = (
            (NEMO_SIGMA - "1") + decimal_separator + NEMO_SIGMA
        ) @ graph_decimal_plural  # Can't have "un euros"

        graph_decimal_singular = pynini.union(
            graph_maj_singular + pynini.closure(delete_space, 0, 1) + insert_space + graph_decimal_final,  # $1,05
            graph_decimal_final + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_singular,  # 1,05 $
        )
        # Singular currency only when the integer part is exactly "1".
        graph_decimal_singular = (pynini.accep("1") + decimal_separator + NEMO_SIGMA) @ graph_decimal_singular

        graph_decimal = pynini.union(
            graph_decimal_singular,
            graph_decimal_plural,
            graph_maj_plural + pynini.closure(delete_space, 0, 1) + insert_space + decimal_with_quantity,
        )

        # Integer amounts other than "1" (the "1" case is graph_integer_one).
        graph_integer = (
            pynutil.insert("integer_part: \"") + ((NEMO_SIGMA - "1") @ cardinal_graph) + pynutil.insert("\"")
        )

        graph_integer_only = pynini.union(
            graph_maj_singular + pynini.closure(delete_space, 0, 1) + insert_space + graph_integer_one,
            graph_integer_one + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_singular,
        )
        graph_integer_only |= pynini.union(
            graph_maj_plural + pynini.closure(delete_space, 0, 1) + insert_space + graph_integer,
            graph_integer + pynini.closure(delete_space, 0, 1) + insert_space + graph_maj_plural,
        )

        graph = graph_integer_only | graph_decimal

        # remove trailing zeros of non zero number in the first 2 digits and fill up to 2 digits
        # e.g. 2000 -> 20, 0200->02, 01 -> 01, 10 -> 10
        # not accepted: 002, 00, 0,
        two_digits_fractional_part = (
            pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(pynutil.delete("0"))
        ) @ (
            (pynutil.delete("0") + (NEMO_DIGIT - "0"))
            | ((NEMO_DIGIT - "0") + pynutil.insert("0"))
            | ((NEMO_DIGIT - "0") + NEMO_DIGIT)
        )

        graph_min_singular = pynutil.insert("currency_min: \"") + min_singular_graph + pynutil.insert("\"")
        graph_min_plural = pynutil.insert("currency_min: \"") + min_plural_graph + pynutil.insert("\"")

        # format ** euro ** cent
        # Built per currency symbol so the (deleted) symbol can select both the
        # major- and minor-unit names of the same currency.
        decimal_graph_with_minor = None
        for curr_symbol, _ in maj_singular_labels:
            preserve_order = pynutil.insert(" preserve_order: true")
            integer_plus_maj = pynini.union(
                graph_integer + insert_space + pynutil.insert(curr_symbol) @ graph_maj_plural,
                graph_integer_one + insert_space + pynutil.insert(curr_symbol) @ graph_maj_singular,
            )
            # non zero integer part
            integer_plus_maj = (pynini.closure(NEMO_DIGIT) - "0") @ integer_plus_maj

            graph_fractional_one = (
                pynutil.insert("fractional_part: \"")
                + two_digits_fractional_part @ pynini.cross("1", "un")
                + pynutil.insert("\"")
            )
            graph_fractional = (
                two_digits_fractional_part @ (pynini.closure(NEMO_DIGIT, 1, 2) - "1") @ cardinal.two_digit_non_zero
            )
            graph_fractional = pynutil.insert("fractional_part: \"") + graph_fractional + pynutil.insert("\"")

            fractional_plus_min = pynini.union(
                graph_fractional + insert_space + pynutil.insert(curr_symbol) @ graph_min_plural,
                graph_fractional_one + insert_space + pynutil.insert(curr_symbol) @ graph_min_singular,
            )

            decimal_graph_with_minor_curr = (
                integer_plus_maj + pynini.cross(decimal_separator, NEMO_SPACE) + fractional_plus_min
            )
            # Slightly dispreferred alternative: read the fraction as a plain number
            # without the minor-unit name.
            decimal_graph_with_minor_curr |= pynutil.add_weight(
                integer_plus_maj
                + pynini.cross(decimal_separator, NEMO_SPACE)
                + pynutil.insert("fractional_part: \"")
                + two_digits_fractional_part @ cardinal.two_digit_non_zero
                + pynutil.insert("\""),
                weight=0.0001,
            )
            # Zero integer part (e.g. "0,01 £") keeps only the minor-unit reading.
            decimal_graph_with_minor_curr |= pynutil.delete("0,") + fractional_plus_min

            # The symbol itself is deleted from input; it may sit on either side.
            decimal_graph_with_minor_curr = pynini.union(
                pynutil.delete(curr_symbol)
                + pynini.closure(delete_space, 0, 1)
                + decimal_graph_with_minor_curr
                + preserve_order,
                decimal_graph_with_minor_curr
                + preserve_order
                + pynini.closure(delete_space, 0, 1)
                + pynutil.delete(curr_symbol),
            )
            decimal_graph_with_minor = (
                decimal_graph_with_minor_curr
                if decimal_graph_with_minor is None
                else pynini.union(decimal_graph_with_minor, decimal_graph_with_minor_curr)
            )

        # Negative weight prefers the minor-currency reading when both parse.
        final_graph = graph | pynutil.add_weight(decimal_graph_with_minor, -0.001)

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/money.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import cardinal_separator
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Digit-to-word lookup tables. The TSV maps are inverted so the resulting FSTs
# transduce digit strings to their Spanish spoken forms.
zero = pynini.invert(pynini.string_file(get_abs_path("data/numbers/zero.tsv")))
digit = pynini.invert(pynini.string_file(get_abs_path("data/numbers/digit.tsv")))
teen = pynini.invert(pynini.string_file(get_abs_path("data/numbers/teen.tsv")))
ties = pynini.invert(pynini.string_file(get_abs_path("data/numbers/ties.tsv")))
twenties = pynini.invert(pynini.string_file(get_abs_path("data/numbers/twenties.tsv")))
hundreds = pynini.invert(pynini.string_file(get_abs_path("data/numbers/hundreds.tsv")))
def filter_punctuation(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Helper function for parsing number strings. Converts common cardinal strings (groups of three digits delineated by 'cardinal_separator' - see graph_utils)
    and converts to a string of digits:
        "1 000" -> "1000"
        "1.000.000" -> "1000000"

    Args:
        fst: Any pynini.FstLike object. Function composes fst onto string parser fst

    Returns:
        fst: A pynini.FstLike object
    """
    triple_block = NEMO_DIGIT ** 3  # interior groups are exactly three digits
    leading_block = pynini.closure(NEMO_DIGIT, 1, 3)  # leading group may be shorter
    drop_separator = pynutil.delete(cardinal_separator)

    # Bare digit runs (page numbers, thousand series) are accepted untouched.
    parser = pynini.closure(NEMO_DIGIT, 1)
    # Separator-grouped form: leading group, then one or more separated triples.
    parser |= (
        leading_block
        + drop_separator
        + pynini.closure(triple_block + drop_separator)
        + triple_block
    )
    return parser @ fst
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals, e.g.
        "1000" -> cardinal { integer: "mil" }
        "2.000.000" -> cardinal { integer: "dos millones" }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="classify", deterministic=deterministic)

        # Any single digit
        graph_digit = digit
        digits_no_one = (NEMO_DIGIT - "1") @ graph_digit

        # Any double digit
        graph_tens = teen
        # e.g. "40" -> "cuarenta", "42" -> "cuarenta y dos"
        graph_tens |= ties + (pynutil.delete('0') | (pynutil.insert(" y ") + graph_digit))
        graph_tens |= twenties

        self.tens = graph_tens.optimize()

        self.two_digit_non_zero = pynini.union(
            graph_digit, graph_tens, (pynini.cross("0", NEMO_SPACE) + graph_digit)
        ).optimize()

        # Three digit strings
        graph_hundreds = hundreds + pynini.union(
            pynutil.delete("00"), (insert_space + graph_tens), (pynini.cross("0", NEMO_SPACE) + graph_digit)
        )
        # "100" is "cien"; other 1xx values use "ciento".
        graph_hundreds |= pynini.cross("100", "cien")
        graph_hundreds |= (
            pynini.cross("1", "ciento") + insert_space + pynini.union(graph_tens, pynutil.delete("0") + graph_digit)
        )

        self.hundreds = graph_hundreds.optimize()

        # For all three digit strings with leading zeroes (graph appends '0's to manage place in string)
        graph_hundreds_component = pynini.union(graph_hundreds, pynutil.delete("0") + graph_tens)

        graph_hundreds_component_at_least_one_none_zero_digit = graph_hundreds_component | (
            pynutil.delete("00") + graph_digit
        )
        # "no_one" variant avoids "un mil"/"un millón" style agreement errors.
        graph_hundreds_component_at_least_one_none_zero_digit_no_one = graph_hundreds_component | (
            pynutil.delete("00") + digits_no_one
        )

        graph_thousands_component_at_least_one_none_zero_digit = pynini.union(
            pynutil.delete("000") + graph_hundreds_component_at_least_one_none_zero_digit,
            graph_hundreds_component_at_least_one_none_zero_digit_no_one
            + pynutil.insert(" mil")
            + ((insert_space + graph_hundreds_component_at_least_one_none_zero_digit) | pynutil.delete("000")),
            pynini.cross("001", "mil")  # "1000" is "mil", never "un mil"
            + ((insert_space + graph_hundreds_component_at_least_one_none_zero_digit) | pynutil.delete("000")),
        )

        graph_thousands_component_at_least_one_none_zero_digit_no_one = pynini.union(
            pynutil.delete("000") + graph_hundreds_component_at_least_one_none_zero_digit_no_one,
            graph_hundreds_component_at_least_one_none_zero_digit_no_one
            + pynutil.insert(" mil")
            + ((insert_space + graph_hundreds_component_at_least_one_none_zero_digit) | pynutil.delete("000")),
            pynini.cross("001", "mil")
            + ((insert_space + graph_hundreds_component_at_least_one_none_zero_digit) | pynutil.delete("000")),
        )

        # Each power-of-million block consumes six digits; the weighted "000001"
        # cross prefers "un millón" over composing it from smaller parts.
        graph_million = pynutil.add_weight(pynini.cross("000001", "un millón"), -0.001)
        graph_million |= graph_thousands_component_at_least_one_none_zero_digit_no_one + pynutil.insert(" millones")
        graph_million |= pynutil.delete("000000")
        graph_million += insert_space

        graph_billion = pynutil.add_weight(pynini.cross("000001", "un billón"), -0.001)
        graph_billion |= graph_thousands_component_at_least_one_none_zero_digit_no_one + pynutil.insert(" billones")
        graph_billion |= pynutil.delete("000000")
        graph_billion += insert_space

        graph_trillion = pynutil.add_weight(pynini.cross("000001", "un trillón"), -0.001)
        graph_trillion |= graph_thousands_component_at_least_one_none_zero_digit_no_one + pynutil.insert(" trillones")
        graph_trillion |= pynutil.delete("000000")
        graph_trillion += insert_space

        graph = (
            graph_trillion
            + graph_billion
            + graph_million
            + (graph_thousands_component_at_least_one_none_zero_digit | pynutil.delete("000000"))
        )

        # Left-pad the input with zeros to a fixed 24-digit width, verbalize,
        # then strip leading/trailing spaces and collapse runs of whitespace
        # between words.
        self.graph = (
            ((NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 0))
            @ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", NEMO_SIGMA)
            @ NEMO_DIGIT ** 24
            @ graph
            @ pynini.cdrewrite(delete_space, "[BOS]", "", NEMO_SIGMA)
            @ pynini.cdrewrite(delete_space, "", "[EOS]", NEMO_SIGMA)
            @ pynini.cdrewrite(
                pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 2), NEMO_SPACE), NEMO_ALPHA, NEMO_ALPHA, NEMO_SIGMA
            )
        )
        self.graph |= zero

        # Also accept separator-grouped inputs such as "2.000.000".
        self.graph = filter_punctuation(self.graph).optimize()

        optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)

        final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import (
cardinal_separator,
decimal_separator,
strip_cardinal_apocope,
)
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Quantity words (e.g. "millones") plus single-digit lookups; digit/zero are
# inverted so they transduce digit characters to Spanish words.
quantities = pynini.string_file(get_abs_path("data/numbers/quantities.tsv"))
digit = pynini.invert(pynini.string_file(get_abs_path("data/numbers/digit.tsv")))
zero = pynini.invert(pynini.string_file(get_abs_path("data/numbers/zero.tsv")))
def get_quantity(decimal_graph: "pynini.FstLike", cardinal_graph: "pynini.FstLike") -> "pynini.FstLike":
    """
    Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
    e.g. 2 millones -> integer_part: "dos" quantity: "millones"
    e.g. 2,4 millones -> integer_part: "dos" fractional_part: "cuatro" quantity: "millones"

    Args:
        decimal_graph: DecimalFST
        cardinal_graph: CardinalFST
    """
    # The cardinal we're passing only produces 'un' for one, so gender agreement
    # is safe (all quantities are masculine). Limit to 10^6 power.
    integer_values = pynini.closure(NEMO_DIGIT, 1, 6) @ cardinal_graph
    # Strip grouping separators before verbalizing.
    integer_values = pynini.cdrewrite(pynutil.delete(cardinal_separator), "", "", NEMO_SIGMA) @ integer_values

    quantity_field = pynutil.insert('quantity: "') + quantities + pynutil.insert('"')
    integer_field = pynutil.insert('integer_part: "') + integer_values + pynutil.insert('"')

    graph = integer_field + NEMO_SPACE + quantity_field
    graph |= decimal_graph + NEMO_SPACE + quantity_field
    return graph
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal, e.g.
        -11,4006 billones -> decimal { negative: "true" integer_part: "once" fractional_part: "cuatro cero cero seis" quantity: "billones" preserve_order: true }
        1 billón -> decimal { integer_part: "un" quantity: "billón" preserve_order: true }

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)

        graph_digit = digit | zero

        if not deterministic:
            # Permissive: any mix of digit/tens/hundreds readings.
            graph = pynini.union(graph_digit, cardinal.hundreds, cardinal.tens)
            graph += pynini.closure(insert_space + graph)
        else:
            # General pattern is 1-3 digits: map as cardinal, default to tens followed by digits otherwise \
            # Negative weights prefer the shorter cardinal readings.
            graph = pynini.union(
                pynutil.add_weight(graph_digit + pynini.closure(insert_space + zero), -0.00001),
                pynutil.add_weight(cardinal.tens + pynini.closure(insert_space + zero), -0.00002),
                pynutil.add_weight(cardinal.hundreds + pynini.closure(insert_space + zero), 0.00001),
                pynutil.add_weight(
                    cardinal.tens
                    + pynini.closure(insert_space + cardinal.tens, 1)
                    + pynini.closure(insert_space + zero, 0, 1)
                    + (pynini.closure(insert_space + graph_digit, 0, 1) | pynini.closure(insert_space + zero, 0)),
                    -0.00002,
                ),  # Read out as tens and a possible trailing digit or zeroes
                zero
                + pynini.closure(insert_space + zero)
                + pynini.closure(insert_space + graph_digit),  # For cases such as "1,010"
            )

        # Need to strip apocope everywhere BUT end of string
        reverse_apocope = pynini.string_map([("un", "uno"), ("ún", "uno")])
        apply_reverse_apocope = pynini.cdrewrite(reverse_apocope, "", NEMO_SPACE, NEMO_SIGMA)
        graph @= apply_reverse_apocope

        # Technically decimals should be space delineated groups of three, e.g. (1,333 333). This removes any possible spaces
        strip_formatting = pynini.cdrewrite(delete_space, "", "", NEMO_SIGMA)
        graph = strip_formatting @ graph

        self.graph = graph.optimize()

        graph_separator = pynutil.delete(decimal_separator)
        optional_graph_negative = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", '"true" '), 0, 1)

        self.graph_fractional = pynutil.insert('fractional_part: "') + self.graph + pynutil.insert('"')

        # Integer graph maintains apocope except for ones place
        graph_integer = (
            strip_cardinal_apocope(cardinal.graph)
            if deterministic
            else pynini.union(cardinal.graph, strip_cardinal_apocope(cardinal.graph))
        )  # Gives us forms w/ and w/o apocope
        self.graph_integer = pynutil.insert('integer_part: "') + graph_integer + pynutil.insert('"')
        final_graph_wo_sign = self.graph_integer + graph_separator + insert_space + self.graph_fractional

        # Also allow quantity suffixes ("2,4 millones"); see get_quantity above.
        self.final_graph_wo_negative = (
            final_graph_wo_sign | get_quantity(final_graph_wo_sign, cardinal.graph).optimize()
        )
        final_graph = optional_graph_negative + self.final_graph_wo_negative

        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/decimals.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, NEMO_DIGIT, GraphFst, insert_space
from nemo_text_processing.text_normalization.es.utils import get_abs_path, load_labels
from pynini.lib import pynutil
# First column of each data file: recognized domain endings and the symbols
# accepted inside addresses/URLs.
common_domains = [x[0] for x in load_labels(get_abs_path("data/electronic/domain.tsv"))]
symbols = [x[0] for x in load_labels(get_abs_path("data/electronic/symbols.tsv"))]
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying electronic: email addresses
        e.g. "[email protected]" -> electronic { username: "abc" domain: "hotmail.com" preserve_order: true }
        e.g. "www.abc.com/123" -> electronic { protocol: "www." domain: "abc.com/123" preserve_order: true }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="electronic", kind="classify", deterministic=deterministic)

        dot = pynini.accep(".")
        accepted_common_domains = pynini.union(*common_domains)
        accepted_symbols = pynini.union(*symbols) - dot
        accepted_characters = pynini.closure(NEMO_ALPHA | NEMO_DIGIT | accepted_symbols)
        # Fix: renamed from the misspelled `acceepted_characters_with_dot`.
        accepted_characters_with_dot = pynini.closure(NEMO_ALPHA | NEMO_DIGIT | accepted_symbols | dot)

        # email: username may contain dots; the '@' becomes a field separator
        username = (
            pynutil.insert("username: \"")
            + accepted_characters_with_dot
            + pynutil.insert("\"")
            + pynini.cross('@', ' ')
        )
        # Generic domain: characters on both sides of at least one dot.
        domain_graph = accepted_characters + dot + accepted_characters
        domain_graph = pynutil.insert("domain: \"") + domain_graph + pynutil.insert("\"")
        # Domain ending in a known TLD, optionally followed by a path segment.
        domain_common_graph = (
            pynutil.insert("domain: \"")
            + accepted_characters
            + accepted_common_domains
            + pynini.closure((accepted_symbols | dot) + pynini.closure(accepted_characters, 1), 0, 1)
            + pynutil.insert("\"")
        )
        graph = (username + domain_graph) | domain_common_graph

        # url
        protocol_start = pynini.accep("https://") | pynini.accep("http://")
        protocol_end = (
            pynini.accep("www.")
            if deterministic
            else pynini.accep("www.") | pynini.cross("www.", "doble ve doble ve doble ve.")
        )
        protocol = protocol_start | protocol_end | (protocol_start + protocol_end)
        protocol = pynutil.insert("protocol: \"") + protocol + pynutil.insert("\"")
        graph |= protocol + insert_space + (domain_graph | domain_common_graph)
        self.graph = graph

        final_graph = self.add_tokens(self.graph + pynutil.insert(" preserve_order: true"))
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/electronic.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, NEMO_SPACE, GraphFst, delete_extra_space
from nemo_text_processing.text_normalization.es.graph_utils import roman_to_int, strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Connector words that may introduce a year, e.g. "4 de marzo del año 2000".
articles = pynini.union("de", "del", "el", "del año", "año")

# Two-digit day/month with an optional leading zero removed ("04" -> "4").
delete_leading_zero = (pynutil.delete("0") | (NEMO_DIGIT - "0")) + NEMO_DIGIT

# Date lookup tables: month number -> name, abbreviation -> name, weekday
# abbreviations, and era suffixes (contents defined by the TSV files).
month_numbers = pynini.string_file(get_abs_path("data/dates/months.tsv"))
month_abbr = pynini.string_file(get_abs_path("data/dates/months_abbr.tsv"))
weekdays = pynini.string_file(get_abs_path("data/dates/days_abbr.tsv"))
year_suffix = pynini.string_file(get_abs_path("data/dates/year_suffix.tsv"))
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date, e.g.
        "01.04.2010" -> date { day: "un" month: "enero" year: "dos mil diez" preserve_order: true }
        "marzo 4 2000" -> date { month: "marzo" day: "cuatro" year: "dos mil" }
        "1990-20-01" -> date { year: "mil novecientos noventa" day: "veinte" month: "enero" }

    Args:
        cardinal: cardinal GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool):
        super().__init__(name="date", kind="classify", deterministic=deterministic)

        number_to_month = month_numbers.optimize()
        # Accept full month names (output side of the number map) and abbreviations.
        month_graph = pynini.project(number_to_month, "output")
        abbr_to_month = month_abbr.optimize()
        month_graph |= abbr_to_month

        numbers = cardinal.graph
        optional_leading_zero = delete_leading_zero | NEMO_DIGIT

        # 01, 31, 1
        digit_day = optional_leading_zero @ pynini.union(*[str(x) for x in range(1, 32)]) @ numbers
        # weekdays
        optional_weekdays = pynini.closure(weekdays + NEMO_SPACE, 0, 1)

        day = (pynutil.insert("day: \"") + optional_weekdays + digit_day + pynutil.insert("\"")).optimize()

        # Months restricted to 1-12, then mapped to names.
        digit_month = optional_leading_zero @ pynini.union(*[str(x) for x in range(1, 13)])
        number_to_month = digit_month @ number_to_month

        month_name = (pynutil.insert("month: \"") + month_graph + pynutil.insert("\"")).optimize()
        month_number = (pynutil.insert("month: \"") + number_to_month + pynutil.insert("\"")).optimize()

        # prefer cardinal over year
        year = (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 1, 3)  # 90, 990, 1990
        year = strip_cardinal_apocope(year @ numbers)

        self.year = year

        # handle roman centuries and years
        roman_numbers = roman_to_int(numbers)
        roman_centuries = pynini.union("siglo ", "año ") + roman_numbers
        roman_centuries_graph = (
            pynutil.insert("year: \"")
            + roman_centuries
            + pynutil.insert("\"")
            + pynutil.insert(" preserve_order: true")
        ).optimize()

        year_only = pynutil.insert("year: \"") + year + pynutil.insert("\"")
        year_with_articles = (
            pynutil.insert("year: \"") + pynini.closure(articles + NEMO_SPACE, 0, 1) + year + pynutil.insert("\"")
        )

        # handle years with eras
        year_suffix_graph = (
            pynutil.insert("year: \"")
            + (numbers | roman_centuries)
            + pynini.closure(NEMO_SPACE, 0, 1)
            + year_suffix
            + pynutil.insert("\"")
        )

        # day-month-year with optional "de" connectors: "4 de marzo de 2000"
        graph_dmy = (
            day
            + pynini.closure(pynutil.delete(" de"))
            + NEMO_SPACE
            + month_name
            + pynini.closure(NEMO_SPACE + year_with_articles, 0, 1)
        )

        graph_mdy = (  # English influences on language
            month_name + delete_extra_space + day + pynini.closure(NEMO_SPACE + year_with_articles, 0, 1)
        )

        # Numeric dates with ".", "-", "/" separators; MDY order only offered
        # in non-deterministic mode since it is ambiguous with DMY.
        separators = [".", "-", "/"]
        for sep in separators:
            year_optional = pynini.closure(pynini.cross(sep, NEMO_SPACE) + year_only, 0, 1)
            new_graph = day + pynini.cross(sep, NEMO_SPACE) + month_number + year_optional
            graph_dmy |= new_graph
            if not deterministic:
                new_graph = month_number + pynini.cross(sep, NEMO_SPACE) + day + year_optional
                graph_mdy |= new_graph

        # ISO-style YYYY-MM(-DD)
        dash = "-"
        day_optional = pynini.closure(pynini.cross(dash, NEMO_SPACE) + day, 0, 1)
        graph_ymd = NEMO_DIGIT ** 4 @ year_only + pynini.cross(dash, NEMO_SPACE) + month_number + day_optional

        final_graph = graph_dmy + pynutil.insert(" preserve_order: true")
        final_graph |= graph_ymd
        final_graph |= graph_mdy
        final_graph |= roman_centuries_graph
        final_graph |= year_suffix_graph

        self.final_graph = final_graph.optimize()
        self.fst = self.add_tokens(self.final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying word.
        e.g. dormir -> tokens { name: "dormir" }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        # Fix: forward `deterministic` to the base class for consistency with the
        # other taggers in this package (it was previously accepted but dropped).
        super().__init__(name="word", kind="classify", deterministic=deterministic)
        # Any non-empty run of non-space characters becomes a `name` token.
        word = pynutil.insert("name: \"") + pynini.closure(NEMO_NOT_SPACE, 1) + pynutil.insert("\"")
        self.fst = word.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/taggers/word.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_preserve_order,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Verbalizer lookup tables: alternate minute phrasings, and hour sets used to
# pick the part-of-day phrase (contents defined by the TSV files).
alt_minutes = pynini.string_file(get_abs_path("data/time/alt_minutes.tsv"))

morning_times = pynini.string_file(get_abs_path("data/time/morning_times.tsv"))
afternoon_times = pynini.string_file(get_abs_path("data/time/afternoon_times.tsv"))
evening_times = pynini.string_file(get_abs_path("data/time/evening_times.tsv"))
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time, e.g.
        time { hours: "doce" minutes: "media" suffix: "a m" } -> doce y media de la noche
        time { hours: "doce" } -> twelve o'clock

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="time", kind="verbalize", deterministic=deterministic)

        # Rewrite an entire minutes string into its alternative wording
        # (table-driven, e.g. alt_minutes.tsv entries); anchored at BOS/EOS so
        # only a full match is replaced.
        change_minutes = pynini.cdrewrite(alt_minutes, pynini.accep("[BOS]"), pynini.accep("[EOS]"), NEMO_SIGMA)

        # am/pm suffixes verbalized as Spanish day-part phrases.
        morning_phrases = pynini.cross("am", "de la mañana")
        afternoon_phrases = pynini.cross("pm", "de la tarde")
        evening_phrases = pynini.cross("pm", "de la noche")

        # For the 12's: "doce" needs its own noon/midnight phrases.
        mid_times = pynini.accep("doce")
        mid_phrases = (
            pynini.string_map([("pm", "del mediodía"), ("am", "de la noche")])
            if deterministic
            else pynini.string_map(
                [
                    ("pm", "de la mañana"),
                    ("pm", "del día"),
                    ("pm", "del mediodía"),
                    ("am", "de la noche"),
                    ("am", "de la medianoche"),
                ]
            )
        )

        # Extract the quoted value of each serialized token field.
        hour = (
            pynutil.delete("hours:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        minute = (
            pynutil.delete("minutes:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Deterministic mode always applies the alternative minute wording;
        # otherwise both the literal and alternative forms are produced.
        minute = (minute @ change_minutes) if deterministic else pynini.union(minute, minute @ change_minutes)
        suffix = (
            pynutil.delete("suffix:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        zone = (
            pynutil.delete("zone:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        optional_zone = pynini.closure(delete_space + insert_space + zone, 0, 1)
        second = (
            pynutil.delete("seconds:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )

        # hours + minutes + seconds, fully spelled out with unit words.
        graph_hms = (
            hour
            + pynutil.insert(" horas ")
            + delete_space
            + minute
            + pynutil.insert(" minutos y ")
            + delete_space
            + second
            + pynutil.insert(" segundos")
        )

        # hours + minutes, with or without an am/pm suffix; when a suffix is
        # present the hour is constrained to the matching part of day so the
        # correct phrase ("de la mañana" etc.) is chosen.
        graph_hm = hour + delete_space + pynutil.insert(" y ") + minute
        graph_hm |= pynini.union(
            (hour @ morning_times)
            + delete_space
            + pynutil.insert(" y ")
            + minute
            + delete_space
            + insert_space
            + (suffix @ morning_phrases),
            (hour @ afternoon_times)
            + delete_space
            + pynutil.insert(" y ")
            + minute
            + delete_space
            + insert_space
            + (suffix @ afternoon_phrases),
            (hour @ evening_times)
            + delete_space
            + pynutil.insert(" y ")
            + minute
            + delete_space
            + insert_space
            + (suffix @ evening_phrases),
            (hour @ mid_times)
            + delete_space
            + pynutil.insert(" y ")
            + minute
            + delete_space
            + insert_space
            + (suffix @ mid_phrases),
        )

        # hour only, again optionally disambiguated by the am/pm suffix.
        graph_h = pynini.union(
            hour,
            (hour @ morning_times) + delete_space + insert_space + (suffix @ morning_phrases),
            (hour @ afternoon_times) + delete_space + insert_space + (suffix @ afternoon_phrases),
            (hour @ evening_times) + delete_space + insert_space + (suffix @ evening_phrases),
            (hour @ mid_times) + delete_space + insert_space + (suffix @ mid_phrases),
        )

        graph = (graph_hms | graph_hm | graph_h) + optional_zone

        if not deterministic:
            # Non-deterministic extras: "menos" (style 1, e.g. "las dos menos
            # cuarto") and "para" (style 2, e.g. "cuarto para las dos")
            # phrasings, selected by the tagger's style feature.
            graph_style_1 = pynutil.delete(" style: \"1\"")
            graph_style_2 = pynutil.delete(" style: \"2\"")

            graph_menos = hour + delete_space + pynutil.insert(" menos ") + minute + graph_style_1
            graph_menos |= (
                (hour @ morning_times)
                + delete_space
                + pynutil.insert(" menos ")
                + minute
                + delete_space
                + insert_space
                + (suffix @ morning_phrases)
                + graph_style_1
            )
            graph_menos |= (
                (hour @ afternoon_times)
                + delete_space
                + pynutil.insert(" menos ")
                + minute
                + delete_space
                + insert_space
                + (suffix @ afternoon_phrases)
                + graph_style_1
            )
            graph_menos |= (
                (hour @ evening_times)
                + delete_space
                + pynutil.insert(" menos ")
                + minute
                + delete_space
                + insert_space
                + (suffix @ evening_phrases)
                + graph_style_1
            )
            graph_menos |= (
                (hour @ mid_times)
                + delete_space
                + pynutil.insert(" menos ")
                + minute
                + delete_space
                + insert_space
                + (suffix @ mid_phrases)
                + graph_style_1
            )
            graph_menos += optional_zone

            graph_para = minute + pynutil.insert(" para las ") + delete_space + hour + graph_style_2
            graph_para |= (
                minute
                + pynutil.insert(" para las ")
                + delete_space
                + (hour @ morning_times)
                + delete_space
                + insert_space
                + (suffix @ morning_phrases)
                + graph_style_2
            )
            graph_para |= (
                minute
                + pynutil.insert(" para las ")
                + delete_space
                + (hour @ afternoon_times)
                + delete_space
                + insert_space
                + (suffix @ afternoon_phrases)
                + graph_style_2
            )
            graph_para |= (
                minute
                + pynutil.insert(" para las ")
                + delete_space
                + (hour @ evening_times)
                + delete_space
                + insert_space
                + (suffix @ evening_phrases)
                + graph_style_2
            )
            graph_para |= (
                minute
                + pynutil.insert(" para las ")
                + delete_space
                + (hour @ mid_times)
                + delete_space
                + insert_space
                + (suffix @ mid_phrases)
                + graph_style_2
            )
            graph_para += optional_zone
            graph_para @= pynini.cdrewrite(
                pynini.cross(" las ", " la "), "para", "una", NEMO_SIGMA
            )  # Need agreement with one

            graph |= graph_menos | graph_para

        delete_tokens = self.delete_tokens(graph + delete_preserve_order)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/time.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_preserve_order,
delete_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import ones
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Unit lookup tables: each TSV maps a singular unit spelling (input side) to
# its plural spoken form (output side), split by grammatical gender.
unit_plural_fem = pynini.string_file(get_abs_path("data/measures/measurements_plural_fem.tsv"))
unit_plural_masc = pynini.string_file(get_abs_path("data/measures/measurements_plural_masc.tsv"))

# Singular forms are the input projection of the mapping; plural forms are the
# output projection (reusing the same names after projection).
unit_singular_fem = pynini.project(unit_plural_fem, "input")
unit_singular_masc = pynini.project(unit_plural_masc, "input")
unit_plural_fem = pynini.project(unit_plural_fem, "output")
unit_plural_masc = pynini.project(unit_plural_masc, "output")
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure, e.g.
        measure { cardinal { integer: "dos" units: "gramos" } } -> "dos gramos"
        measure { decimal { integer_part: "dos" quantity: "millones" units: "gramos" } } -> "dos millones de gramos"

    Args:
        decimal: DecimalFst
        cardinal: CardinalFst
        fraction: FractionFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst, deterministic: bool):
        super().__init__(name="measure", kind="verbalize", deterministic=deterministic)

        # Reuse the gendered number grammars of the component verbalizers,
        # stripped of their token wrappers.
        graph_decimal_masc = decimal.delete_tokens(decimal.graph_masc)
        graph_decimal_fem = decimal.delete_tokens(decimal.graph_fem)

        graph_cardinal_masc = cardinal.delete_tokens(cardinal.graph_masc)
        graph_cardinal_fem = cardinal.delete_tokens(cardinal.graph_fem)

        graph_fraction_fem = fraction.delete_tokens(fraction.graph_fem)
        graph_fraction_masc = fraction.delete_tokens(fraction.graph_masc)

        # A masculine unit, optionally followed by a "por ..." rate phrase
        # (e.g. "kilómetros por hora"), or a bare "por ..." phrase alone.
        unit_masc = (unit_plural_masc | unit_singular_masc) + pynini.closure(
            NEMO_WHITE_SPACE + "por" + pynini.closure(NEMO_NOT_QUOTE, 1), 0, 1
        )
        unit_masc |= "por" + pynini.closure(NEMO_NOT_QUOTE, 1)
        unit_masc = pynutil.delete("units: \"") + (pynini.closure(NEMO_NOT_QUOTE) @ unit_masc) + pynutil.delete("\"")

        # Feminine counterpart (no bare "por" branch here).
        unit_fem = (unit_plural_fem | unit_singular_fem) + pynini.closure(
            NEMO_WHITE_SPACE + "por" + pynini.closure(NEMO_NOT_QUOTE, 1), 0, 1
        )
        unit_fem = pynutil.delete("units: \"") + (pynini.closure(NEMO_NOT_QUOTE) @ unit_fem) + pynutil.delete("\"")

        # number + unit; fractions take "de" ("dos tercios de gramo"), except
        # "medio(s)" which attaches directly and is preferred via a small
        # negative weight.
        graph_masc = (graph_cardinal_masc | graph_decimal_masc) + NEMO_WHITE_SPACE + unit_masc
        graph_masc |= graph_fraction_masc + NEMO_WHITE_SPACE + pynutil.insert("de ") + unit_masc
        graph_masc |= pynutil.add_weight(
            graph_fraction_masc @ (NEMO_SIGMA + pynini.union("medio", "medios")) + NEMO_WHITE_SPACE + unit_masc, -0.001
        )  # "medio litro" not "medio de litro"

        graph_fem = (graph_cardinal_fem | graph_decimal_fem) + NEMO_WHITE_SPACE + unit_fem
        graph_fem |= graph_fraction_fem + NEMO_WHITE_SPACE + pynutil.insert("de ") + unit_fem
        graph_fem |= pynutil.add_weight(
            graph_fraction_fem @ (NEMO_SIGMA + pynini.union("media", "medias")) + NEMO_WHITE_SPACE + unit_fem, -0.001
        )

        graph = graph_masc | graph_fem

        # Insert "de" after a quantity field: "billones de ***".
        graph = (
            pynini.cdrewrite(
                pynutil.insert(" de"), "quantity: \"" + pynini.closure(NEMO_NOT_QUOTE, 1), "\"", NEMO_SIGMA
            )
            @ graph
        )  # billones de xyz

        # Restore apocopated "un" to "uno" before a "por" phrase.
        graph @= pynini.cdrewrite(pynini.cross(ones, "uno"), "", NEMO_WHITE_SPACE + "por", NEMO_SIGMA)

        # To manage alphanumeric combinations ("a-8, 5x"), we let them use a weighted default path.
        alpha_num_unit = pynutil.delete("units: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
        graph_alpha_num = pynini.union(
            (graph_cardinal_masc | graph_decimal_masc) + NEMO_SPACE + alpha_num_unit,
            alpha_num_unit + delete_extra_space + (graph_cardinal_masc | graph_decimal_masc),
        )

        # Math expressions carry a literal "math" unit marker; strongly preferred
        # (negative weight) when present.
        math = pynutil.add_weight(
            pynutil.delete("units: \"math\"") + delete_space + graph_cardinal_masc + delete_space, -1
        )

        graph |= pynutil.add_weight(graph_alpha_num, 0.01)
        graph |= math

        graph += delete_preserve_order

        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_NOT_QUOTE,
NEMO_NOT_SPACE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es.graph_utils import (
accents,
shift_cardinal_gender,
strip_cardinal_apocope,
)
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fraction
        e.g. tokens { fraction { integer: "treinta y tres" numerator: "cuatro" denominator: "quinto" } } ->
            treinta y tres y cuatro quintos

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="fraction", kind="verbalize", deterministic=deterministic)

        optional_sign = pynini.closure(pynini.cross('negative: "true" ', "menos "), 0, 1)
        self.optional_sign = optional_sign

        # Derivational strings append 'avo' as a suffix. Adding space for processing aid
        fraction_stem = pynutil.insert(" avo")
        plural = pynutil.insert("s")
        conjunction = pynutil.insert(" y ")

        # Integer part of a mixed number; apocope ("un" -> "uno") is stripped.
        integer = (
            pynutil.delete('integer_part: "')
            + strip_cardinal_apocope(pynini.closure(NEMO_NOT_QUOTE))
            + pynutil.delete('"')
        )

        # Numerator "un" takes the singular denominator; any other numerator
        # takes the plural, so the two are kept disjoint.
        numerator_one = pynutil.delete('numerator: "') + pynini.accep("un") + pynutil.delete('" ')
        numerator = (
            pynutil.delete('numerator: "')
            + pynini.difference(pynini.closure(NEMO_NOT_QUOTE), "un")
            + pynutil.delete('" ')
        )

        # Three denominator renderings, driven by the tagger's
        # morphosyntactic_features: cardinal root + "avo" stem, a true ordinal,
        # or a plain cardinal (used with "sobre").
        denominator_add_stem = pynutil.delete('denominator: "') + (
            pynini.closure(NEMO_NOT_QUOTE) + fraction_stem + pynutil.delete('" morphosyntactic_features: "add_root"')
        )
        denominator_ordinal = pynutil.delete('denominator: "') + (
            pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete('" morphosyntactic_features: "ordinal"')
        )
        denominator_cardinal = pynutil.delete('denominator: "') + (
            pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete('"')
        )

        denominator_singular = pynini.union(denominator_add_stem, denominator_ordinal)
        if not deterministic:
            # Occasional exceptions
            denominator_singular |= denominator_add_stem @ pynini.string_map(
                [("once avo", "undécimo"), ("doce avo", "duodécimo")]
            )
        denominator_plural = denominator_singular + plural

        # Merging operations
        merge = pynini.cdrewrite(
            pynini.cross(" y ", "i"), "", "", NEMO_SIGMA
        )  # The denominator must be a single word, with the conjunction "y" replaced by i
        merge @= pynini.cdrewrite(delete_space, "", pynini.difference(NEMO_CHAR, "parte"), NEMO_SIGMA)

        # The merger can produce duplicate vowels. This is not allowed in orthography
        delete_duplicates = pynini.string_map([("aa", "a"), ("oo", "o")])  # Removes duplicated vowels
        delete_duplicates = pynini.cdrewrite(delete_duplicates, "", "", NEMO_SIGMA)

        # Drop the accent of the (formerly word-final) root when it is followed
        # by a fraction suffix within the merged word.
        remove_accents = pynini.cdrewrite(
            accents,
            pynini.union(NEMO_SPACE, pynini.accep("[BOS]")) + pynini.closure(NEMO_NOT_SPACE),
            pynini.closure(NEMO_NOT_SPACE) + pynini.union("avo", "ava", "ésimo", "ésima"),
            NEMO_SIGMA,
        )
        merge_into_single_word = merge @ remove_accents @ delete_duplicates

        fraction_default = numerator + delete_space + insert_space + (denominator_plural @ merge_into_single_word)

        fraction_with_one = (
            numerator_one + delete_space + insert_space + (denominator_singular @ merge_into_single_word)
        )

        # "x sobre y" rendering with a plain cardinal denominator.
        fraction_with_cardinal = strip_cardinal_apocope(numerator | numerator_one)
        fraction_with_cardinal += (
            delete_space + pynutil.insert(" sobre ") + strip_cardinal_apocope(denominator_cardinal)
        )

        if not deterministic:
            # There is an alternative rendering where ordinals act as adjectives for 'parte'. This requires use of the feminine
            # Other rules will manage use of "un" at end, so just worry about endings
            exceptions = pynini.string_map([("tercia", "tercera")])
            apply_exceptions = pynini.cdrewrite(exceptions, "", "", NEMO_SIGMA)
            vowel_change = pynini.cdrewrite(pynini.cross("o", "a"), "", pynini.accep("[EOS]"), NEMO_SIGMA)

            denominator_singular_fem = shift_cardinal_gender(denominator_singular) @ vowel_change @ apply_exceptions
            denominator_plural_fem = denominator_singular_fem + plural

            numerator_one_fem = shift_cardinal_gender(numerator_one)
            numerator_fem = shift_cardinal_gender(numerator)

            fraction_with_cardinal |= (
                (numerator_one_fem | numerator_fem)
                + delete_space
                + pynutil.insert(" sobre ")
                + shift_cardinal_gender(denominator_cardinal)
            )

            # Still need to manage stems
            merge_stem = pynini.cdrewrite(
                delete_space, "", pynini.union("avo", "ava", "avos", "avas"), NEMO_SIGMA
            )  # For managing alternative spacing
            merge_stem @= remove_accents @ delete_duplicates

            fraction_with_one_fem = numerator_one_fem + delete_space + insert_space
            fraction_with_one_fem += pynini.union(
                denominator_singular_fem @ merge_stem, denominator_singular_fem @ merge_into_single_word,
            )  # Both forms exists
            fraction_with_one_fem += pynutil.insert(" parte")
            fraction_with_one_fem @= pynini.cdrewrite(
                pynini.cross("una media", "media"), "", "", NEMO_SIGMA
            )  # "media" not "una media"

            fraction_default_fem = numerator_fem + delete_space + insert_space
            fraction_default_fem += pynini.union(
                denominator_plural_fem @ merge_stem, denominator_plural_fem @ merge_into_single_word,
            )
            fraction_default_fem += pynutil.insert(" partes")

            fraction_default |= (
                numerator + delete_space + insert_space + denominator_plural @ merge_stem
            )  # Case of no merger
            fraction_default |= fraction_default_fem

            fraction_with_one |= numerator_one + delete_space + insert_space + denominator_singular @ merge_stem
            fraction_with_one |= fraction_with_one_fem

        fraction_with_one @= pynini.cdrewrite(
            pynini.cross("un medio", "medio"), "", "", NEMO_SIGMA
        )  # "medio" not "un medio"

        fraction = fraction_with_one | fraction_default | fraction_with_cardinal
        graph_masc = optional_sign + pynini.closure(integer + delete_space + conjunction, 0, 1) + fraction

        # Manage cases of fem gender (only shows on integer except for "medio")
        integer_fem = shift_cardinal_gender(integer)
        fraction_default |= (
            shift_cardinal_gender(numerator)
            + delete_space
            + insert_space
            + (denominator_plural @ pynini.cross("medios", "medias"))
        )
        fraction_with_one |= (
            pynutil.delete(numerator_one) + delete_space + (denominator_singular @ pynini.cross("medio", "media"))
        )
        fraction_fem = fraction_with_one | fraction_default | fraction_with_cardinal
        graph_fem = optional_sign + pynini.closure(integer_fem + delete_space + conjunction, 0, 1) + fraction_fem

        # Expose both gendered graphs for reuse (e.g. by MeasureFst).
        self.graph_masc = pynini.optimize(graph_masc)
        self.graph_fem = pynini.optimize(graph_fem)

        self.graph = graph_masc | graph_fem

        delete_tokens = self.delete_tokens(self.graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/fraction.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space, insert_space
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for verbalizing telephone, e.g.
        telephone { number_part: "uno dos tres uno dos tres cinco seis siete ocho" }
        -> uno dos tres uno dos tres cinco seis siete ocho

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        # Pass `deterministic` through to the base class for consistency with the
        # other verbalizers in this package (the original dropped it).
        super().__init__(name="telephone", kind="verbalize", deterministic=deterministic)

        # Optional country code, verbalized first and followed by a space.
        optional_country_code = pynini.closure(
            pynutil.delete("country_code: \"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
            + delete_space
            + insert_space,
            0,
            1,
        )

        # The main number; a single trailing space inside the quotes is dropped,
        # with a tiny negative weight so deletion wins when both parses exist.
        number_part = (
            pynutil.delete("number_part: \"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynini.closure(pynutil.add_weight(pynutil.delete(" "), -0.0001), 0, 1)
            + pynutil.delete("\"")
        )

        # Optional extension, appended after a space.
        optional_extension = pynini.closure(
            delete_space
            + insert_space
            + pynutil.delete("extension: \"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\""),
            0,
            1,
        )

        graph = optional_country_code + number_part + optional_extension
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst
from nemo_text_processing.text_normalization.es.graph_utils import shift_number_gender
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinals
        e.g. ordinal { integer: "tercer" } } -> "tercero"
                                             -> "tercera"
                                             -> "tercer"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)

        # Extract the quoted ordinal string from the serialized token.
        graph = pynutil.delete("integer: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")

        # masculine gender we leave as is
        graph_masc = graph + pynutil.delete(" morphosyntactic_features: \"gender_masc")

        # shift gender: final "-o" of each word becomes "-a", then table-driven
        # number-gender adjustments are applied.
        graph_fem_ending = graph @ pynini.cdrewrite(
            pynini.cross("o", "a"), "", NEMO_SPACE | pynini.accep("[EOS]"), NEMO_SIGMA
        )
        graph_fem = shift_number_gender(graph_fem_ending) + pynutil.delete(" morphosyntactic_features: \"gender_fem")

        # Apocope just changes tercero and primero. May occur if someone wrote 11.er (uncommon)
        graph_apocope = (
            pynini.cross("tercero", "tercer")
            | pynini.cross("primero", "primer")
            | pynini.cross("undécimo", "decimoprimer")
        )  # In case someone wrote 11.er with deterministic
        graph_apocope = (graph @ pynini.cdrewrite(graph_apocope, "", "", NEMO_SIGMA)) + pynutil.delete(
            " morphosyntactic_features: \"apocope"
        )

        graph = graph_apocope | graph_masc | graph_fem

        if not deterministic:
            # Plural graph: append "s" after a final "o"/"a" of each word.
            graph_plural = pynini.cdrewrite(
                pynutil.insert("s"), pynini.union("o", "a"), NEMO_SPACE | pynini.accep("[EOS]"), NEMO_SIGMA
            )

            graph |= (graph @ graph_plural) + pynutil.delete("/plural")

        self.graph = graph + pynutil.delete("\"")

        delete_tokens = self.delete_tokens(self.graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/ordinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.es.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.es.verbalizers.date import DateFst
from nemo_text_processing.text_normalization.es.verbalizers.decimals import DecimalFst
from nemo_text_processing.text_normalization.es.verbalizers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.es.verbalizers.fraction import FractionFst
from nemo_text_processing.text_normalization.es.verbalizers.measure import MeasureFst
from nemo_text_processing.text_normalization.es.verbalizers.money import MoneyFst
from nemo_text_processing.text_normalization.es.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.es.verbalizers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.es.verbalizers.time import TimeFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)

        # Grammars that other grammars depend on are built first and shared:
        # measure reuses cardinal/decimal/fraction, money reuses decimal.
        cardinal = CardinalFst(deterministic=deterministic)
        decimal = DecimalFst(deterministic=deterministic)
        fraction = FractionFst(deterministic=deterministic)
        measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)

        # Collect every component grammar in the same order as the original union.
        component_fsts = [
            cardinal.fst,
            measure.fst,
            decimal.fst,
            OrdinalFst(deterministic=deterministic).fst,
            DateFst(deterministic=deterministic).fst,
            ElectronicFst(deterministic=deterministic).fst,
            MoneyFst(decimal=decimal, deterministic=deterministic).fst,
            fraction.fst,
            WhiteListFst(deterministic=deterministic).fst,
            TelephoneFst(deterministic=deterministic).fst,
            TimeFst(deterministic=deterministic).fst,
        ]

        # The final verbalizer accepts any one of the component grammars.
        graph = component_fsts[0]
        for component in component_fsts[1:]:
            graph |= component

        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/verbalize.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.es.verbalizers.verbalize import VerbalizeFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
    """

    def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False):
        super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)

        # Resolve the on-disk FAR cache path, if caching is enabled.
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"es_tn_{deterministic}_deterministic_verbalizer.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: load the previously compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["verbalize"]
            logging.info(f'VerbalizeFinalFst graph was restored from {far_file}.')
        else:
            # Slow path: compose the grammar from scratch.
            verbalize = VerbalizeFst(deterministic=deterministic).fst
            word = WordFst(deterministic=deterministic).fst
            types = verbalize | word

            # Unwrap each serialized "tokens { ... }" group around a token.
            graph = (
                pynutil.delete("tokens")
                + delete_space
                + pynutil.delete("{")
                + delete_space
                + types
                + delete_space
                + pynutil.delete("}")
            )
            # A sentence is one or more tokens separated by single spaces,
            # with surrounding whitespace removed.
            graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space

            self.fst = graph.optimize()
            if far_file:
                # Persist the compiled grammar so later runs can take the fast path.
                generator_main(far_file, {"verbalize": self.fst})
                logging.info(f"VerbalizeFinalFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/verbalize_final.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_preserve_order,
)
from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Currency lookup tables: each TSV maps a singular currency name (input side)
# to its plural spoken form (output side), split by grammatical gender.
fem = pynini.string_file(get_abs_path("data/money/currency_plural_fem.tsv"))
masc = pynini.string_file(get_abs_path("data/money/currency_plural_masc.tsv"))

# Split each mapping into its singular (input) and plural (output) projections.
fem_singular = pynini.project(fem, "input")
fem_plural = pynini.project(fem, "output")
masc_singular = pynini.project(masc, "input")
masc_plural = pynini.project(masc, "output")
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
        money { currency_maj: "euro" integer_part: "un"} -> "un euro"
        money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros"
        money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta"
        money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques"
        money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique"

    Args:
        decimal: GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="verbalize", deterministic=deterministic)

        # Major currency name, constrained to the singular or plural entries of
        # the gendered lookup tables.
        maj_singular_masc = (
            pynutil.delete("currency_maj: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)
            + pynutil.delete("\"")
        )
        maj_singular_fem = (
            pynutil.delete("currency_maj: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)
            + pynutil.delete("\"")
        )

        maj_plural_masc = (
            pynutil.delete("currency_maj: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)
            + pynutil.delete("\"")
        )
        maj_plural_fem = (
            pynutil.delete("currency_maj: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)
            + pynutil.delete("\"")
        )

        maj_masc = maj_plural_masc | maj_singular_masc  # Tagger kept quantity resolution stable
        maj_fem = maj_plural_fem | maj_singular_fem

        # Minor currency name (céntimos, peniques, ...), same structure.
        min_singular_masc = (
            pynutil.delete("currency_min: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)
            + pynutil.delete("\"")
        )
        min_singular_fem = (
            pynutil.delete("currency_min: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)
            + pynutil.delete("\"")
        )

        min_plural_masc = (
            pynutil.delete("currency_min: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)
            + pynutil.delete("\"")
        )
        min_plural_fem = (
            pynutil.delete("currency_min: \"")
            + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)
            + pynutil.delete("\"")
        )

        min_masc = min_plural_masc | min_singular_masc
        min_fem = min_plural_fem | min_singular_fem

        # Quoted number strings emitted by the tagger.
        fractional_part = (
            pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        )
        integer_part = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # Optional connective between major and minor amounts.
        optional_add_and = pynini.closure(pynutil.insert(pynini.union("con ", "y ")), 0, 1)

        #  *** currency_maj
        graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
        graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem

        graph_integer = graph_integer_fem | graph_integer_masc

        #  *** currency_maj + (***) | ((con) *** current_min)
        graph_integer_with_minor_masc = (
            graph_integer_masc
            + NEMO_SPACE
            + pynini.union(
                optional_add_and + strip_cardinal_apocope(fractional_part),
                (optional_add_and + fractional_part + NEMO_SPACE + min_masc),
                (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),
            )  # Could be minor currency that is different gender
            + delete_preserve_order
        )

        graph_integer_with_minor_fem = (
            graph_integer_fem
            + NEMO_SPACE
            + pynini.union(
                optional_add_and + shift_cardinal_gender(fractional_part),
                (optional_add_and + fractional_part + NEMO_SPACE + min_masc),
                (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),
            )  # Could be minor currency that is different gender
            + delete_preserve_order
        )

        graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc

        ## *** coma *** currency_maj
        graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc

        graph_decimal_fem = decimal.graph_fem
        graph_decimal_fem |= decimal.numbers_only_quantity  # can still have "x billions" with fem currency
        graph_decimal_fem += NEMO_SPACE + maj_fem

        graph_decimal = graph_decimal_fem | graph_decimal_masc
        graph_decimal = (
            pynini.cdrewrite(
                pynutil.insert(" de"), "quantity: \"" + pynini.closure(NEMO_NOT_QUOTE, 1), "\"", NEMO_SIGMA
            )
            @ graph_decimal
        )  # formally it's millones/billones de ***

        #  *** current_min
        graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order
        graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order

        graph_minor = graph_minor_fem | graph_minor_masc

        graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor

        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/money.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from nemo_text_processing.text_normalization.es.graph_utils import (
add_cardinal_apocope_fem,
shift_cardinal_gender,
strip_cardinal_apocope,
)
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing Spanish cardinals,
        e.g. cardinal { integer: "dos" } -> "dos"

    Handles the optional negative sign and produces both masculine and
    feminine gender renderings of the number string.

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        # Optional leading "negative: \"true\"" field is rendered as "menos ".
        optional_sign = pynini.closure(pynini.cross("negative: \"true\" ", "menos "), 0, 1)
        self.optional_sign = optional_sign
        # Accept any non-empty quoted value as the integer content.
        integer = pynini.closure(NEMO_NOT_QUOTE, 1)
        # self.integer strips the surrounding quotes; reused by other verbalizers.
        self.integer = pynutil.delete(" \"") + integer + pynutil.delete("\"")
        integer = pynutil.delete("integer:") + self.integer
        graph_masc = optional_sign + integer
        # Feminine variant is derived by rewriting gendered word forms (e.g. "un" -> "una").
        graph_fem = shift_cardinal_gender(graph_masc)
        self.graph_masc = pynini.optimize(graph_masc)
        self.graph_fem = pynini.optimize(graph_fem)
        # Adding adjustment for fem gender (choice of gender will be random)
        graph = graph_masc | graph_fem
        if not deterministic:
            # For alternate renderings when apocope is omitted (i.e. cardinal stands alone)
            graph |= strip_cardinal_apocope(graph_masc)
            # "una" will drop to "un" in unique contexts
            graph |= add_cardinal_apocope_fem(graph_fem)
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_preserve_order,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.es import LOCALIZATION
from nemo_text_processing.text_normalization.es.graph_utils import (
add_cardinal_apocope_fem,
shift_cardinal_gender,
shift_number_gender,
strip_cardinal_apocope,
)
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing Spanish decimals, e.g.
        decimal { negative: "true" integer_part: "dos" fractional_part: "cuatro cero" quantity: "billones" } -> menos dos coma cuatro cero billones
        decimal { integer_part: "un" quantity: "billón" } -> un billón

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        # NOTE(review): kind="classify" looks like a copy-paste from the tagger —
        # this class lives with the verbalizers; confirm "verbalize" was intended.
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "menos ") + delete_space, 0, 1)
        integer = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        fractional_default = (
            pynutil.delete("fractional_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        )
        # Decimal separator word differs by locale: "punto" (Latin America) vs "coma" (Spain).
        conjunction = pynutil.insert(" punto ") if LOCALIZATION == "am" else pynutil.insert(" coma ")
        if not deterministic:
            # Alternate spoken separators and apocope-free fractional readings.
            conjunction |= pynutil.insert(pynini.union(" con ", " y "))
            fractional_default |= strip_cardinal_apocope(fractional_default)
        fractional = conjunction + fractional_default
        quantity = (
            delete_space
            + insert_space
            + pynutil.delete("quantity: \"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        optional_quantity = pynini.closure(quantity, 0, 1)
        graph_masc = optional_sign + pynini.union(
            (integer + quantity), (integer + delete_space + fractional + optional_quantity)
        )
        # Allowing permutation for fem gender, don't include quantity since "million","billion", etc.. are masculine
        graph_fem = optional_sign + (shift_cardinal_gender(integer) + delete_space + shift_number_gender(fractional))
        if not deterministic:  # "una" will drop to "un" in certain cases
            graph_fem |= add_cardinal_apocope_fem(graph_fem)
        # Variant that always carries a quantity ("x millones"); reused by money verbalizer.
        self.numbers_only_quantity = (
            optional_sign
            + pynini.union((integer + quantity), (integer + delete_space + fractional + quantity)).optimize()
        )
        self.graph_masc = (graph_masc + delete_preserve_order).optimize()
        self.graph_fem = (graph_fem + delete_preserve_order).optimize()
        graph = graph_masc | graph_fem
        graph += delete_preserve_order
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/decimals.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
delete_preserve_order,
insert_space,
)
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
# Module-level transducers shared by ElectronicFst, built from TSV data files.
# pynini.invert flips the digit maps so they transduce written digits to spoken words.
digit_no_zero = pynini.invert(pynini.string_file(get_abs_path("data/numbers/digit.tsv")))
zero = pynini.invert(pynini.string_file(get_abs_path("data/numbers/zero.tsv")))
graph_symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv"))
server_common = pynini.string_file(get_abs_path("data/electronic/server_name.tsv"))
domain_common = pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic (emails, URLs), e.g.
        electronic { username: "abc" domain: "hotmail.com" } -> "a b c arroba hotmail punto com"
                                                             -> "a b c arroba h o t m a i l punto c o m"
                                                             -> "a b c arroba hotmail punto c o m"
                                                             -> "a b c at h o t m a i l punto com"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="electronic", kind="verbalize", deterministic=deterministic)
        # Rewrite apocopated "un" to full "uno" when digits are spelled out.
        graph_digit_no_zero = (
            digit_no_zero @ pynini.cdrewrite(pynini.cross("un", "uno"), "", "", NEMO_SIGMA).optimize()
        )
        graph_digit = graph_digit_no_zero | zero

        def add_space_after_char():
            # Insert a space between every pair of non-space, non-quote characters
            # so they are later verbalized one by one.
            return pynini.closure(NEMO_NOT_QUOTE - pynini.accep(" ") + insert_space) + (
                NEMO_NOT_QUOTE - pynini.accep(" ")
            )

        # Replace symbols and digits anywhere with their spoken forms.
        verbalize_characters = pynini.cdrewrite(graph_symbols | graph_digit, "", "", NEMO_SIGMA)
        user_name = pynutil.delete("username: \"") + add_space_after_char() + pynutil.delete("\"")
        user_name @= verbalize_characters
        # Small weight penalizes the char-by-char default so known server/domain
        # names (e.g. "hotmail", "com") are preferred when they match.
        convert_defaults = pynutil.add_weight(NEMO_NOT_QUOTE, weight=0.0001) | domain_common | server_common
        domain = convert_defaults + pynini.closure(insert_space + convert_defaults)
        domain @= verbalize_characters
        domain = pynutil.delete("domain: \"") + domain + pynutil.delete("\"")
        protocol = (
            pynutil.delete("protocol: \"")
            + add_space_after_char() @ pynini.cdrewrite(graph_symbols, "", "", NEMO_SIGMA)
            + pynutil.delete("\"")
        )
        # Either a URL (optional protocol + domain) or an email
        # (username + "arroba" + domain).
        self.graph = (pynini.closure(protocol + pynini.accep(" "), 0, 1) + domain) | (
            user_name + pynini.accep(" ") + pynutil.insert("arroba ") + domain
        )
        delete_tokens = self.delete_tokens(self.graph + delete_preserve_order)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/electronic.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_preserve_order,
)
from nemo_text_processing.text_normalization.es.graph_utils import strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.taggers.date import articles
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing Spanish dates, e.g.
        date { day: "treinta y uno" month: "marzo" year: "dos mil" } -> "treinta y uno de marzo de dos mil"
        date { day: "uno" month: "mayo" year: "del mil novecientos noventa" } -> "primero de mayo del mil novecientos noventa"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="date", kind="verbalize", deterministic=deterministic)
        day_cardinal = pynutil.delete("day: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # Restore full cardinal form (e.g. "un" -> "uno") for standalone days.
        day = strip_cardinal_apocope(day_cardinal)
        # Rewrite day "uno" to ordinal "primero" when it is the whole day string.
        primero = pynini.cdrewrite(pynini.cross("uno", "primero"), "[BOS]", "[EOS]", NEMO_SIGMA)
        day = (
            (day @ primero) if deterministic else pynini.union(day, day @ primero)
        )  # Primero for first day is traditional, but will vary depending on region
        month = pynutil.delete("month: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # Year that already carries an article (e.g. "del ...") in its value.
        year = (
            pynutil.delete("year: \"")
            + articles
            + NEMO_SPACE
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Insert preposition if wasn't originally with the year. This would mean a space was present
        year = pynutil.add_weight(year, -0.001)
        year |= (
            pynutil.delete("year: \"")
            + pynutil.insert("de ")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Roman centuries
        graph_roman_centuries = pynutil.delete("year: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        # day month year
        graph_dmy = day + pynini.cross(NEMO_SPACE, " de ") + month + pynini.closure(pynini.accep(" ") + year, 0, 1)
        graph_mdy = month + NEMO_SPACE + day + pynini.closure(NEMO_SPACE + year, 0, 1)
        if deterministic:
            graph_mdy += pynutil.delete(" preserve_order: true")  # Only accepts this if was explicitly passed
        self.graph = graph_dmy | graph_mdy | graph_roman_centuries
        final_graph = self.graph + delete_preserve_order
        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/verbalizers/date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/dates/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/numbers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/ordinals/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/electronic/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/telephone/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/time/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/money/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/measures/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/roman/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/es/data/fractions/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
from pathlib import Path
from typing import Dict
import pynini
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
from pynini import Far
from pynini.examples import plurals
from pynini.export import export
from pynini.lib import byte, pynutil, utf8
# --- Basic character classes used throughout the grammars ---
NEMO_CHAR = utf8.VALID_UTF8_CHAR
NEMO_DIGIT = byte.DIGIT
NEMO_LOWER = pynini.union(*string.ascii_lowercase).optimize()
NEMO_UPPER = pynini.union(*string.ascii_uppercase).optimize()
NEMO_ALPHA = pynini.union(NEMO_LOWER, NEMO_UPPER).optimize()
NEMO_ALNUM = pynini.union(NEMO_DIGIT, NEMO_ALPHA).optimize()
NEMO_HEX = pynini.union(*string.hexdigits).optimize()
NEMO_NON_BREAKING_SPACE = u"\u00A0"
NEMO_SPACE = " "
NEMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", u"\u00A0").optimize()
NEMO_NOT_SPACE = pynini.difference(NEMO_CHAR, NEMO_WHITE_SPACE).optimize()
NEMO_NOT_QUOTE = pynini.difference(NEMO_CHAR, r'"').optimize()
NEMO_PUNCT = pynini.union(*map(pynini.escape, string.punctuation)).optimize()
NEMO_GRAPH = pynini.union(NEMO_ALNUM, NEMO_PUNCT).optimize()
# Sigma-star: accepts any string (used as the context for cdrewrite rules).
NEMO_SIGMA = pynini.closure(NEMO_CHAR)
# Lowercase letters excluding "a" (used by grammars that special-case "a").
NEMO_LOWER_NOT_A = pynini.union(
    "b",
    "c",
    "d",
    "e",
    "f",
    "g",
    "h",
    "i",
    "j",
    "k",
    "l",
    "m",
    "n",
    "o",
    "p",
    "q",
    "r",
    "s",
    "t",
    "u",
    "v",
    "w",
    "x",
    "y",
    "z",
).optimize()
# --- Whitespace helpers ---
delete_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE))
delete_zero_or_one_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE, 0, 1))
insert_space = pynutil.insert(" ")
delete_extra_space = pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 1), " ")
# Removes the optional "preserve_order"/"field_order" bookkeeping fields
# emitted by taggers, so verbalizers can ignore them.
delete_preserve_order = pynini.closure(
    pynutil.delete(" preserve_order: true")
    | (pynutil.delete(" field_order: \"") + NEMO_NOT_QUOTE + pynutil.delete("\""))
)
# --- English pluralization rules ---
# Irregular plurals loaded from data (e.g. suppletive forms).
suppletive = pynini.string_file(get_abs_path("data/suppletive.tsv"))
# _v = pynini.union("a", "e", "i", "o", "u")
_c = pynini.union(
    "b", "c", "d", "f", "g", "h", "j", "k", "l", "m", "n", "p", "q", "r", "s", "t", "v", "w", "x", "y", "z"
)
_ies = NEMO_SIGMA + _c + pynini.cross("y", "ies")  # consonant + y -> ies
_es = NEMO_SIGMA + pynini.union("s", "sh", "ch", "x", "z") + pynutil.insert("es")  # sibilant endings -> es
_s = NEMO_SIGMA + pynutil.insert("s")  # default: append s
# Priority: suppletive > -ies > -es > -s.
graph_plural = plurals._priority_union(
    suppletive, plurals._priority_union(_ies, plurals._priority_union(_es, _s, NEMO_SIGMA), NEMO_SIGMA), NEMO_SIGMA
).optimize()
SINGULAR_TO_PLURAL = graph_plural
PLURAL_TO_SINGULAR = pynini.invert(graph_plural)
# --- Case conversion ---
TO_LOWER = pynini.union(*[pynini.cross(x, y) for x, y in zip(string.ascii_uppercase, string.ascii_lowercase)])
TO_UPPER = pynini.invert(TO_LOWER)
# --- Weights and input-case constants ---
MIN_NEG_WEIGHT = -0.0001
MIN_POS_WEIGHT = 0.0001
INPUT_CASED = "cased"
INPUT_LOWER_CASED = "lower_cased"
MINUS = pynini.union("minus", "Minus").optimize()
def capitalized_input_graph(
    graph: 'pynini.FstLike', original_graph_weight: float = None, capitalized_graph_weight: float = None
) -> 'pynini.FstLike':
    """
    Extend ``graph`` so that input with a capitalized first letter is also
    accepted (e.g. for ITN).

    Args:
        graph: FstGraph
        original_graph_weight: optional weight to add to the original ``graph``
        capitalized_graph_weight: optional weight to add to the capitalized variant

    Returns:
        the union of the (optionally weighted) original and capitalized graphs
    """
    # Lower-case the first input character, then apply the original graph.
    cap_variant = pynini.compose(TO_LOWER + NEMO_SIGMA, graph).optimize()
    if original_graph_weight is not None:
        graph = pynutil.add_weight(graph, weight=original_graph_weight)
    if capitalized_graph_weight is not None:
        cap_variant = pynutil.add_weight(cap_variant, weight=capitalized_graph_weight)
    return graph | cap_variant
def generator_main(file_name: str, graphs: Dict[str, 'pynini.FstLike']):
    """
    Exports graphs as an OpenFst finite state archive (FAR) file.

    Args:
        file_name: path of the FAR file to create
        graphs: mapping from rule name to the pynini WFST graph to export
    """
    far_exporter = export.Exporter(file_name)
    for rule_name, rule_graph in graphs.items():
        far_exporter[rule_name] = rule_graph.optimize()
    far_exporter.close()
    logging.info(f'Created {file_name}')
def get_plurals(fst):
    """
    Given singular forms, returns the corresponding plurals.

    Args:
        fst: Fst accepting singular forms

    Returns plurals of the given singular forms
    """
    return pynini.compose(SINGULAR_TO_PLURAL, fst)
def get_singulars(fst):
    """
    Given plural forms, returns the corresponding singulars.

    Args:
        fst: Fst accepting plural forms

    Returns singulars of the given plural forms
    """
    return pynini.compose(PLURAL_TO_SINGULAR, fst)
def convert_space(fst) -> 'pynini.FstLike':
    """
    Converts breaking spaces to non-breaking spaces.

    Used only in tagger grammars for transducing token values within quotes,
    e.g. name: "hello kitty". This makes the transducer significantly slower,
    so only use it when there could be spaces within quotes.

    Args:
        fst: input fst

    Returns output fst where breaking spaces are converted to non-breaking spaces
    """
    space_rewrite = pynini.cdrewrite(pynini.cross(NEMO_SPACE, NEMO_NON_BREAKING_SPACE), "", "", NEMO_SIGMA)
    return fst @ space_rewrite
def string_map_cased(input_file: str, input_case: str = INPUT_LOWER_CASED):
    """
    Builds an inverted string-map FST from a TSV whitelist file.

    For INPUT_CASED, augments the labels with capitalized variants
    (first-letter capitalized and fully upper-cased spoken forms) and,
    for space-separated letter abbreviations (e.g. "B M W"), adds
    space-free variants ("BMW").

    Args:
        input_file: path to a TSV file of (written, spoken[, weight]) rows
        input_case: INPUT_LOWER_CASED or INPUT_CASED

    Returns:
        inverted, optimized pynini string map
    """
    labels = load_labels(input_file)
    if input_case == INPUT_CASED:
        additional_labels = []
        for written, spoken, *weight in labels:
            # Skip malformed/empty fields; indexing below would raise IndexError.
            if not written or not spoken:
                continue
            written_capitalized = written[0].upper() + written[1:]
            additional_labels.extend(
                [
                    [written_capitalized, spoken.capitalize()],  # first letter capitalized
                    [
                        written_capitalized,
                        spoken.upper().replace(" AND ", " and "),
                    ],  # add pairs with all letters capitalized
                ]
            )
            spoken_no_space = spoken.replace(" ", "")
            # add abbreviations without spaces (both lower and upper case), i.e. "BMW" not "B M W";
            # the length check holds exactly when every character was space-separated.
            if len(spoken) == (2 * len(spoken_no_space) - 1):
                # Lazy %-style args: avoid f-string formatting cost when DEBUG is off.
                logging.debug("This is weight %s", weight)
                if len(weight) == 0:
                    additional_labels.extend(
                        [[written, spoken_no_space], [written_capitalized, spoken_no_space.upper()]]
                    )
                else:
                    additional_labels.extend(
                        [
                            [written, spoken_no_space, weight[0]],
                            [written_capitalized, spoken_no_space.upper(), weight[0]],
                        ]
                    )
        labels += additional_labels
    whitelist = pynini.string_map(labels).invert().optimize()
    return whitelist
class GraphFst:
    """
    Base class for all grammar fsts.

    On construction, attempts to load a pre-compiled FAR file for this
    grammar (grammars/<kind>/<name>.far next to this module); if found,
    self.fst is initialized from it.

    Args:
        name: name of grammar class
        kind: either 'classify' or 'verbalize'
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, name: str, kind: str, deterministic: bool = True):
        self.name = name
        self.kind = kind
        self._fst = None
        self.deterministic = deterministic
        # Location of an optional pre-compiled FAR cache for this grammar.
        self.far_path = Path(os.path.dirname(__file__) + '/grammars/' + kind + '/' + name + '.far')
        if self.far_exist():
            self._fst = Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst()

    def far_exist(self) -> bool:
        """
        Returns true if FAR can be loaded
        """
        return self.far_path.exists()

    @property
    def fst(self) -> 'pynini.FstLike':
        return self._fst

    @fst.setter
    def fst(self, fst):
        self._fst = fst

    def add_tokens(self, fst) -> 'pynini.FstLike':
        """
        Wraps class name around to given fst, e.g. "cardinal { ... }"

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }")

    def delete_tokens(self, fst) -> 'pynini.FstLike':
        """
        Deletes class name wrap around output of given fst

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        res = (
            pynutil.delete(f"{self.name}")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + fst
            + delete_space
            + pynutil.delete("}")
        )
        # Restore non-breaking spaces (inserted by convert_space) back to
        # ordinary spaces in the final output.
        return res @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/graph_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
def get_abs_path(rel_path):
    """
    Resolve a path relative to this module's directory.

    Args:
        rel_path: relative path to this file

    Returns absolute path
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return base_dir + '/' + rel_path
def load_labels(abs_path):
    """
    Loads a tab-separated file as a list of rows.

    Args:
        abs_path: absolute path to a TSV file

    Returns:
        list of rows, each row a list of column strings
    """
    # Context manager closes the handle promptly; the original left the
    # file open until garbage collection (resource leak).
    with open(abs_path, encoding="utf-8") as label_tsv:
        return list(csv.reader(label_tsv, delimiter="\t"))
def augment_labels_with_punct_at_end(labels):
    """
    Augments labels: if a written form ends in "." and its spoken form does not,
    add a new label where the spoken form keeps the final period.

    Args:
        labels: input labels, each a [written, spoken, ...] list

    Returns:
        additional labels
    """
    res = []
    for label in labels:
        if len(label) > 1:
            written, spoken = label[0], label[1]
            # Guard against empty fields; the original [-1] indexing would
            # raise IndexError on a malformed row.
            if written and spoken and written[-1] == "." and spoken[-1] != ".":
                res.append([written, spoken + "."] + label[2:])
    return res
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from typing import List
import regex as re
from nemo_text_processing.text_normalization.data_loader_utils import (
EOS_TYPE,
Instance,
load_files,
training_data_to_sentences,
)
"""
This file is for evaluation purposes.
filter_loaded_data() cleans data (list of instances) for text normalization. Filters and cleaners can be specified for each semiotic class individually.
For example, normalized text should only include characters and whitespace characters but no punctuation.
Cardinal unnormalized instances should contain at least one integer and all other characters are removed.
"""
class Filter:
    """
    Pairs a semiotic class with a filter predicate and a processing function.

    Args:
        class_type: semiotic class used in dataset
        process_func: function to transform an instance of that class
        filter_func: predicate deciding whether an instance should be kept
    """

    def __init__(self, class_type: str, process_func: object, filter_func: object):
        self.class_type = class_type
        self.process_func = process_func
        self.filter_func = filter_func

    def filter(self, instance: Instance) -> bool:
        """
        Applies the filter predicate to the given instance.

        Args:
            instance: instance to check

        Returns:
            True if the instance fulfills the criteria or does not belong to this class type
        """
        # Instances of other semiotic classes are not this filter's business: pass them through.
        if instance.token_type != self.class_type:
            return True
        return self.filter_func(instance)

    def process(self, instance: Instance) -> Instance:
        """
        Applies the processing function to the given instance.

        Args:
            instance: instance to transform

        Returns:
            processed instance if it belongs to this class type, otherwise the original instance
        """
        if instance.token_type != self.class_type:
            return instance
        return self.process_func(instance)
def filter_cardinal_1(instance: Instance) -> bool:
    """Keep CARDINAL instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_cardinal_1(instance: Instance) -> Instance:
    """Keep only digits on the unnormalized side and only [a-z ] on the normalized side."""
    digits_only = re.sub(r"[^0-9]", "", instance.un_normalized)
    letters_only = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=digits_only, normalized=letters_only)
def filter_ordinal_1(instance: Instance) -> bool:
    """Keep ORDINAL instances whose unnormalized text ends with an ordinal suffix (st/nd/rd/th)."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"(st|nd|rd|th)\s*$", instance.un_normalized) is not None
def process_ordinal_1(instance: Instance) -> Instance:
    """Drop commas/whitespace from the unnormalized side; keep only [a-z ] on the normalized side."""
    compact = re.sub(r"[,\s]", "", instance.un_normalized)
    letters_only = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=compact, normalized=letters_only)
def filter_decimal_1(instance: Instance) -> bool:
    """Keep DECIMAL instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_decimal_1(instance: Instance) -> Instance:
    """Strip commas from the unnormalized side; keep only [a-z ] on the normalized side."""
    no_commas = re.sub(r",", "", instance.un_normalized)
    letters_only = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=no_commas, normalized=letters_only)
def filter_measure_1(instance: Instance) -> bool:
    """Accept every MEASURE instance (no filtering criterion)."""
    return True
def process_measure_1(instance: Instance) -> Instance:
    """Clean a MEASURE pair: tidy the raw side (commas, m², digit/unit split), letters-only spoken side."""
    raw = instance.un_normalized
    raw = re.sub(r",", "", raw)                     # drop commas
    raw = re.sub(r"m2", "m²", raw)                  # "m2" -> superscript form
    raw = re.sub(r"(\d)([^\d.\s])", r"\1 \2", raw)  # split digit glued to unit: "5kg" -> "5 kg"
    spoken = instance.normalized
    spoken = re.sub(r"[^a-z\s]", "", spoken)
    spoken = re.sub(r"per ([a-z\s]*)s$", r"per \1", spoken)  # drop trailing "s" after "per ..."
    spoken = re.sub(r"[^a-z ]", "", spoken)
    return Instance(token_type=instance.token_type, un_normalized=raw, normalized=spoken)
def filter_money_1(instance: Instance) -> bool:
    """Keep MONEY instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_money_1(instance: Instance) -> Instance:
    """Clean a MONEY pair: unify currency symbols, expand m/bn suffixes, letters-only spoken side."""
    raw = instance.un_normalized
    raw = re.sub(r",", "", raw)
    raw = re.sub(r"a\$", r"$", raw)   # "a$"  -> "$"
    raw = re.sub(r"us\$", r"$", raw)  # "us$" -> "$"
    raw = re.sub(r"(\d)m\s*$", r"\1 million", raw)
    raw = re.sub(r"(\d)bn?\s*$", r"\1 billion", raw)
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=raw, normalized=spoken)
def filter_time_1(instance: Instance) -> bool:
    """Keep TIME instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_time_1(instance: Instance) -> Instance:
    """Clean a TIME pair: tighten "H: MM", canonicalize a.m./p.m. spellings, letters-only spoken side."""
    raw = instance.un_normalized
    raw = re.sub(r": ", ":", raw)
    raw = re.sub(r"(\d)\s?a\s?m\s?", r"\1 a.m.", raw)
    raw = re.sub(r"(\d)\s?p\s?m\s?", r"\1 p.m.", raw)
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=raw, normalized=spoken)
def filter_plain_1(instance: Instance) -> bool:
    """Accept every PLAIN instance (no filtering criterion)."""
    return True
def process_plain_1(instance: Instance) -> Instance:
    """Return the PLAIN instance unchanged (rebuilt with identical fields)."""
    return Instance(
        token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=instance.normalized
    )
def filter_punct_1(instance: Instance) -> bool:
    """Accept every PUNCT instance (no filtering criterion)."""
    return True
def process_punct_1(instance: Instance) -> Instance:
    """Return the PUNCT instance unchanged (rebuilt with identical fields)."""
    return Instance(
        token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=instance.normalized
    )
def filter_date_1(instance: Instance) -> bool:
    """Accept every DATE instance (no filtering criterion)."""
    return True
def process_date_1(instance: Instance) -> Instance:
    """Drop commas from the raw date; keep only [a-z ] on the spoken side."""
    raw = re.sub(r",", "", instance.un_normalized)
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=raw, normalized=spoken)
def filter_letters_1(instance: Instance) -> bool:
    """Accept every LETTERS instance (no filtering criterion)."""
    return True
def process_letters_1(instance: Instance) -> Instance:
    """Keep the raw side as-is; restrict the spoken side to [a-z ]."""
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=spoken)
def filter_verbatim_1(instance: Instance) -> bool:
    """Accept every VERBATIM instance (no filtering criterion)."""
    return True
def process_verbatim_1(instance: Instance) -> Instance:
    """Return the VERBATIM instance unchanged (rebuilt with identical fields)."""
    return Instance(
        token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=instance.normalized
    )
def filter_digit_1(instance: Instance) -> bool:
    """Keep DIGIT instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_digit_1(instance: Instance) -> Instance:
    """Keep the raw side as-is; restrict the spoken side to [a-z ]."""
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=spoken)
def filter_telephone_1(instance: Instance) -> bool:
    """Keep TELEPHONE instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_telephone_1(instance: Instance) -> Instance:
    """Keep the raw side as-is; restrict the spoken side to [a-z ]."""
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=spoken)
def filter_electronic_1(instance: Instance) -> bool:
    """Keep ELECTRONIC instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_electronic_1(instance: Instance) -> Instance:
    """Keep the raw side as-is; restrict the spoken side to [a-z ]."""
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=spoken)
def filter_fraction_1(instance: Instance) -> bool:
    """Keep FRACTION instances whose unnormalized text contains at least one digit."""
    # Return an actual bool to honor the annotated return type (was Optional[Match]).
    return re.search(r"[0-9]", instance.un_normalized) is not None
def process_fraction_1(instance: Instance) -> Instance:
    """Keep the raw side as-is; restrict the spoken side to [a-z ]."""
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=spoken)
def filter_address_1(instance: Instance) -> bool:
    """Accept every ADDRESS instance (no filtering criterion)."""
    return True
def process_address_1(instance: Instance) -> Instance:
    """Keep the raw side as-is; restrict the spoken side to [a-z ]."""
    spoken = re.sub(r"[^a-z ]", "", instance.normalized)
    return Instance(token_type=instance.token_type, un_normalized=instance.un_normalized, normalized=spoken)
# Registry of per-class filters: (semiotic class, processor, predicate).
# Order mirrors the original append sequence.
_FILTER_SPECS = [
    ("CARDINAL", process_cardinal_1, filter_cardinal_1),
    ("ORDINAL", process_ordinal_1, filter_ordinal_1),
    ("DECIMAL", process_decimal_1, filter_decimal_1),
    ("MEASURE", process_measure_1, filter_measure_1),
    ("MONEY", process_money_1, filter_money_1),
    ("TIME", process_time_1, filter_time_1),
    ("DATE", process_date_1, filter_date_1),
    ("PLAIN", process_plain_1, filter_plain_1),
    ("PUNCT", process_punct_1, filter_punct_1),
    ("LETTERS", process_letters_1, filter_letters_1),
    ("VERBATIM", process_verbatim_1, filter_verbatim_1),
    ("DIGIT", process_digit_1, filter_digit_1),
    ("TELEPHONE", process_telephone_1, filter_telephone_1),
    ("ELECTRONIC", process_electronic_1, filter_electronic_1),
    ("FRACTION", process_fraction_1, filter_fraction_1),
    ("ADDRESS", process_address_1, filter_address_1),
]
filters = [
    Filter(class_type=class_type, process_func=process_func, filter_func=filter_func)
    for class_type, process_func, filter_func in _FILTER_SPECS
]
# End-of-sentence marker passes through untouched.
filters.append(Filter(class_type=EOS_TYPE, process_func=lambda x: x, filter_func=lambda x: True))
def filter_loaded_data(data: List[Instance], verbose: bool = False) -> List[Instance]:
    """
    Filters and transforms a list of instances using the module-level `filters` registry.

    Args:
        data: list of instances
        verbose: if True, print every instance that was kept

    Returns:
        filtered and transformed list of instances
    """
    updated_instances = []  # renamed from misspelled "updates_instances"
    for instance in data:
        updated = False
        for fil in filters:
            # Only the filter registered for this instance's semiotic class applies.
            if fil.class_type == instance.token_type and fil.filter(instance):
                instance = fil.process(instance)
                updated = True
        if updated:
            if verbose:
                print(instance)
            updated_instances.append(instance)
    return updated_instances
def parse_args():
    """Build and parse the command-line arguments for this script."""
    parser = ArgumentParser()
    parser.add_argument(
        "--input", type=str, default='./en_with_types/output-00001-of-00100', help="input file path"
    )
    parser.add_argument("--verbose", action='store_true', help="print filtered instances")
    return parser.parse_args()
if __name__ == "__main__":
    # Script entry point: load the training-data file, clean/filter it per
    # semiotic class, and regroup the surviving instances into sentences.
    args = parse_args()
    file_path = args.input
    print("Loading training data: " + file_path)
    instance_list = load_files([file_path])  # List of instances
    filtered_instance_list = filter_loaded_data(instance_list, args.verbose)
    training_data_to_sentences(filtered_instance_list)
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/clean_eval_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
GraphFst,
convert_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.en.utils import (
augment_labels_with_punct_at_end,
get_abs_path,
load_labels,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time, e.g.
        12:30 a.m. est -> time { hours: "twelve" minutes: "thirty" suffix: "a m" zone: "e s t" }
        2.30 a.m. -> time { hours: "two" minutes: "thirty" suffix: "a m" }
        02.30 a.m. -> time { hours: "two" minutes: "thirty" suffix: "a m" }
        2.00 a.m. -> time { hours: "two" suffix: "a m" }
        2 a.m. -> time { hours: "two" suffix: "a m" }
        02:00 -> time { hours: "two" }
        2:00 -> time { hours: "two" }
        10:00:05 a.m. -> time { hours: "ten" minutes: "zero" seconds: "five" suffix: "a m" }

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="time", kind="classify", deterministic=deterministic)
        # a.m./p.m.-style suffixes; also accept variants that keep a trailing period.
        suffix_labels = load_labels(get_abs_path("data/time/suffix.tsv"))
        suffix_labels.extend(augment_labels_with_punct_at_end(suffix_labels))
        suffix_graph = pynini.string_map(suffix_labels)

        time_zone_graph = pynini.string_file(get_abs_path("data/time/zone.tsv"))

        # only used for < 1000 thousand -> 0 weight
        cardinal = cardinal.graph

        labels_hour = [str(x) for x in range(0, 24)]
        labels_minute_single = [str(x) for x in range(1, 10)]
        labels_minute_double = [str(x) for x in range(10, 60)]

        # Accept two digits verbatim, or a single digit with an optional leading zero removed.
        delete_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (
            pynini.closure(pynutil.delete("0"), 0, 1) + NEMO_DIGIT
        )
        graph_hour = delete_leading_zero_to_double_digit @ pynini.union(*labels_hour) @ cardinal
        graph_minute_single = pynini.union(*labels_minute_single) @ cardinal
        graph_minute_double = pynini.union(*labels_minute_double) @ cardinal

        final_graph_hour = pynutil.insert("hours: \"") + graph_hour + pynutil.insert("\"")
        # "05" -> "o five"; "30" -> "thirty"
        final_graph_minute = (
            pynutil.insert("minutes: \"")
            + (pynini.cross("0", "o") + insert_space + graph_minute_single | graph_minute_double)
            + pynutil.insert("\"")
        )
        final_graph_second = (
            pynutil.insert("seconds: \"")
            + (pynini.cross("0", "o") + insert_space + graph_minute_single | graph_minute_double)
            + pynutil.insert("\"")
        )
        final_suffix = pynutil.insert("suffix: \"") + convert_space(suffix_graph) + pynutil.insert("\"")
        final_suffix_optional = pynini.closure(delete_space + insert_space + final_suffix, 0, 1)
        final_time_zone_optional = pynini.closure(
            delete_space
            + insert_space
            + pynutil.insert("zone: \"")
            + convert_space(time_zone_graph)
            + pynutil.insert("\""),
            0,
            1,
        )

        # 2:30 pm, 02:30, 2:00
        graph_hm = (
            final_graph_hour
            + pynutil.delete(":")
            + (pynutil.delete("00") | insert_space + final_graph_minute)
            + final_suffix_optional
            + final_time_zone_optional
        )

        # 10:30:05 pm,
        graph_hms = (
            final_graph_hour
            + pynutil.delete(":")
            + (pynini.cross("00", " minutes: \"zero\"") | insert_space + final_graph_minute)
            + pynutil.delete(":")
            + (pynini.cross("00", " seconds: \"zero\"") | insert_space + final_graph_second)
            + final_suffix_optional
            + final_time_zone_optional
        )

        # 2.xx pm/am — the dot form requires an explicit suffix, otherwise it would
        # collide with decimal numbers.
        graph_hm2 = (
            final_graph_hour
            + pynutil.delete(".")
            + (pynutil.delete("00") | insert_space + final_graph_minute)
            + delete_space
            + insert_space
            + final_suffix
            + final_time_zone_optional
        )
        # 2 pm est
        graph_h = final_graph_hour + delete_space + insert_space + final_suffix + final_time_zone_optional

        final_graph = (graph_hm | graph_h | graph_hm2 | graph_hms).optimize()
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, NEMO_SIGMA, GraphFst
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class RomanFst(GraphFst):
    """
    Finite state transducer for classifying roman numbers:
        e.g. "IV" -> tokens { roman { integer: "four" } }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
        lm: if True, the grammar is tailored for language-model rescoring
            (NOTE(review): inferred from usage below — confirm against callers)
    """

    def __init__(self, deterministic: bool = True, lm: bool = False):
        super().__init__(name="roman", kind="classify", deterministic=deterministic)
        # roman_to_spoken.tsv maps roman numerals to their spoken cardinal form.
        roman_dict = load_labels(get_abs_path("data/roman/roman_to_spoken.tsv"))
        default_graph = pynini.string_map(roman_dict).optimize()
        default_graph = pynutil.insert("integer: \"") + default_graph + pynutil.insert("\"")
        ordinal_limit = 19

        if deterministic:
            # exclude "I"
            start_idx = 1
        else:
            start_idx = 0

        graph_teens = pynini.string_map([x[0] for x in roman_dict[start_idx:ordinal_limit]]).optimize()

        # roman numerals up to ordinal_limit with a preceding name are converted to ordinal form,
        # e.g. "Henry VIII".
        names = get_names()
        graph = (
            pynutil.insert("key_the_ordinal: \"")
            + names
            + pynutil.insert("\"")
            + pynini.accep(" ")
            + graph_teens @ default_graph
        ).optimize()

        # single symbol roman numerals with preceding key words (multiple formats) are converted
        # to cardinal form; each key word is accepted lowercase, Capitalized, and UPPERCASE.
        key_words = []
        for k_word in load_labels(get_abs_path("data/roman/key_word.tsv")):
            key_words.append(k_word)
            key_words.append([k_word[0][0].upper() + k_word[0][1:]])
            key_words.append([k_word[0].upper()])
        key_words = pynini.string_map(key_words).optimize()
        graph |= (
            pynutil.insert("key_cardinal: \"") + key_words + pynutil.insert("\"") + pynini.accep(" ") + default_graph
        ).optimize()

        if deterministic or lm:
            # two digit roman numerals up to 49
            roman_to_cardinal = pynini.compose(
                pynini.closure(NEMO_ALPHA, 2),
                (
                    pynutil.insert("default_cardinal: \"default\" ")
                    + (pynini.string_map([x[0] for x in roman_dict[:50]]).optimize()) @ default_graph
                ),
            )
            graph |= roman_to_cardinal
        elif not lm:
            # two or more digit roman numerals
            roman_to_cardinal = pynini.compose(
                pynini.difference(NEMO_SIGMA, "I"),
                (
                    pynutil.insert("default_cardinal: \"default\" integer: \"")
                    + pynini.string_map(roman_dict).optimize()
                    + pynutil.insert("\"")
                ),
            ).optimize()
            graph |= roman_to_cardinal

            # convert three digit roman or up with suffix to ordinal, e.g. "XVIIIth"
            roman_to_ordinal = pynini.compose(
                pynini.closure(NEMO_ALPHA, 3),
                (pynutil.insert("default_ordinal: \"default\" ") + graph_teens @ default_graph + pynutil.delete("th")),
            )
            graph |= roman_to_ordinal

        graph = self.add_tokens(graph.optimize())
        self.fst = graph.optimize()
def get_names():
    """
    Returns the graph that matched common male and female names.
    """
    name_graph = None
    # Each name list is matched both as written and fully upper-cased.
    for tsv in ("data/roman/male.tsv", "data/roman/female.tsv"):
        labels = load_labels(get_abs_path(tsv))
        labels.extend([[entry[0].upper()] for entry in labels])
        fst = pynini.string_map(labels).optimize()
        name_graph = fst if name_graph is None else pynini.union(name_graph, fst)
    return name_graph
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/roman.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_NON_BREAKING_SPACE,
NEMO_SIGMA,
NEMO_SPACE,
NEMO_UPPER,
SINGULAR_TO_PLURAL,
TO_LOWER,
GraphFst,
convert_space,
delete_space,
delete_zero_or_one_space,
insert_space,
)
from nemo_text_processing.text_normalization.en.taggers.ordinal import OrdinalFst as OrdinalTagger
from nemo_text_processing.text_normalization.en.taggers.whitelist import get_formats
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst as OrdinalVerbalizer
from pynini.examples import plurals
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure, suppletive aware, e.g.
        -12kg -> measure { negative: "true" cardinal { integer: "twelve" } units: "kilograms" }
        1kg -> measure { cardinal { integer: "one" } units: "kilogram" }
        .5kg -> measure { decimal { fractional_part: "five" } units: "kilograms" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        fraction: FractionFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst, fraction: GraphFst, deterministic: bool = True):
        super().__init__(name="measure", kind="classify", deterministic=deterministic)
        # Plain cardinals plus range forms such as "2-3" / "2x3" (see get_range).
        cardinal_graph = cardinal.graph_with_and | self.get_range(cardinal.graph_with_and)

        graph_unit = pynini.string_file(get_abs_path("data/measure/unit.tsv"))
        if not deterministic:
            graph_unit |= pynini.string_file(get_abs_path("data/measure/unit_alternatives.tsv"))

        # Also accept capitalized unit spellings by lower-casing before matching.
        graph_unit |= pynini.compose(
            pynini.closure(TO_LOWER, 1) + (NEMO_ALPHA | TO_LOWER) + pynini.closure(NEMO_ALPHA | TO_LOWER), graph_unit
        ).optimize()

        graph_unit_plural = convert_space(graph_unit @ SINGULAR_TO_PLURAL)
        graph_unit = convert_space(graph_unit)

        optional_graph_negative = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)

        # "/unit" -> "per unit", e.g. "km/h" -> "... per hour".
        graph_unit2 = (
            pynini.cross("/", "per") + delete_zero_or_one_space + pynutil.insert(NEMO_NON_BREAKING_SPACE) + graph_unit
        )

        optional_graph_unit2 = pynini.closure(
            delete_zero_or_one_space + pynutil.insert(NEMO_NON_BREAKING_SPACE) + graph_unit2, 0, 1,
        )

        unit_plural = (
            pynutil.insert("units: \"")
            + (graph_unit_plural + optional_graph_unit2 | graph_unit2)
            + pynutil.insert("\"")
        )

        unit_singular = (
            pynutil.insert("units: \"") + (graph_unit + optional_graph_unit2 | graph_unit2) + pynutil.insert("\"")
        )

        # decimal quantity + (plural) unit
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + decimal.final_graph_wo_negative
            + delete_space
            + pynutil.insert(" } ")
            + unit_plural
        )

        # support radio FM/AM
        subgraph_decimal |= (
            pynutil.insert("decimal { ")
            + decimal.final_graph_wo_negative
            + delete_space
            + pynutil.insert(" } ")
            + pynutil.insert("units: \"")
            + pynini.union("AM", "FM")
            + pynutil.insert("\"")
        )

        # any cardinal other than exactly "1" takes the plural unit
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + ((NEMO_SIGMA - "1") @ cardinal_graph)
            + delete_space
            + pynutil.insert("\"")
            + pynutil.insert(" } ")
            + unit_plural
        )

        # "1" takes the singular unit
        subgraph_cardinal |= (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + pynini.cross("1", "one")
            + delete_space
            + pynutil.insert("\"")
            + pynutil.insert(" } ")
            + unit_singular
        )

        # bare "/unit" or "per unit" with no numeric part; integer "-" is a placeholder
        unit_graph = (
            pynutil.insert("cardinal { integer: \"-\" } units: \"")
            + ((pynini.cross("/", "per") + delete_zero_or_one_space) | (pynini.accep("per") + pynutil.delete(" ")))
            + pynutil.insert(NEMO_NON_BREAKING_SPACE)
            + graph_unit
            + pynutil.insert("\" preserve_order: true")
        )

        # e.g. "2.5-inch": decimal joined to an alphabetic unit by a dash
        decimal_dash_alpha = (
            pynutil.insert("decimal { ")
            + decimal.final_graph_wo_negative
            + pynini.cross('-', '')
            + pynutil.insert(" } units: \"")
            + pynini.closure(NEMO_ALPHA, 1)
            + pynutil.insert("\"")
        )

        # e.g. "2.5x" -> "two point five x" / "two point five times"
        decimal_times = (
            pynutil.insert("decimal { ")
            + decimal.final_graph_wo_negative
            + pynutil.insert(" } units: \"")
            + (pynini.cross(pynini.union('x', "X"), 'x') | pynini.cross(pynini.union('x', "X"), ' times'))
            + pynutil.insert("\"")
        )

        # e.g. "mm-2.5": unit before the number
        alpha_dash_decimal = (
            pynutil.insert("units: \"")
            + pynini.closure(NEMO_ALPHA, 1)
            + pynini.accep('-')
            + pynutil.insert("\"")
            + pynutil.insert(" decimal { ")
            + decimal.final_graph_wo_negative
            + pynutil.insert(" } preserve_order: true")
        )

        subgraph_fraction = (
            pynutil.insert("fraction { ") + fraction.graph + delete_space + pynutil.insert(" } ") + unit_plural
        )

        address = self.get_address_graph(cardinal)
        address = (
            pynutil.insert("units: \"address\" cardinal { integer: \"")
            + address
            + pynutil.insert("\" } preserve_order: true")
        )

        # arithmetic expressions with an "=" sign, e.g. "2 + 3 = 5"
        math_operations = pynini.string_file(get_abs_path("data/measure/math_operation.tsv"))
        delimiter = pynini.accep(" ") | pynutil.insert(" ")

        math = (
            (cardinal_graph | NEMO_ALPHA)
            + delimiter
            + math_operations
            + (delimiter | NEMO_ALPHA)
            + cardinal_graph
            + delimiter
            + pynini.cross("=", "equals")
            + delimiter
            + (cardinal_graph | NEMO_ALPHA)
        )

        # "=" before the operation, e.g. "x = 2 + 3" style orderings
        math |= (
            (cardinal_graph | NEMO_ALPHA)
            + delimiter
            + pynini.cross("=", "equals")
            + delimiter
            + (cardinal_graph | NEMO_ALPHA)
            + delimiter
            + math_operations
            + delimiter
            + cardinal_graph
        )

        math = (
            pynutil.insert("units: \"math\" cardinal { integer: \"")
            + math
            + pynutil.insert("\" } preserve_order: true")
        )

        final_graph = (
            subgraph_decimal
            | subgraph_cardinal
            | unit_graph
            | decimal_dash_alpha
            | decimal_times
            | alpha_dash_decimal
            | subgraph_fraction
            | address
            | math
        )
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()

    def get_range(self, cardinal: GraphFst):
        """
        Returns range forms for measure tagger, e.g. 2-3, 2x3, 2*2

        Args:
            cardinal: cardinal GraphFst
        """
        # "2-3" -> "two to three"
        range_graph = cardinal + pynini.cross(pynini.union("-", " - "), " to ") + cardinal

        # "2x3" -> "two by three" (non-deterministic also allows "two times three")
        for x in [" x ", "x"]:
            range_graph |= cardinal + pynini.cross(x, " by ") + cardinal
            if not self.deterministic:
                range_graph |= cardinal + pynini.cross(x, " times ") + pynini.closure(cardinal, 0, 1)

        # "2*2" -> "two times two"
        for x in ["*", " * "]:
            range_graph |= cardinal + pynini.cross(x, " times ") + cardinal
        return range_graph.optimize()

    def get_address_graph(self, cardinal):
        """
        Finite state transducer for classifying serial.
            The serial is a combination of digits, letters and dashes, e.g.:
            2788 San Tomas Expy, Santa Clara, CA 95051 ->
                units: "address" cardinal
                { integer: "two seven eight eight San Tomas Expressway Santa Clara California nine five zero five one" }
                preserve_order: true
        """
        ordinal_verbalizer = OrdinalVerbalizer().graph
        ordinal_tagger = OrdinalTagger(cardinal=cardinal).graph
        ordinal_num = pynini.compose(
            pynutil.insert("integer: \"") + ordinal_tagger + pynutil.insert("\""), ordinal_verbalizer
        )

        # 3-4 digit house numbers are read in two pairs, e.g. "2788" -> "twenty seven eighty eight"
        address_num = NEMO_DIGIT ** (1, 2) @ cardinal.graph_hundred_component_at_least_one_none_zero_digit
        address_num += insert_space + NEMO_DIGIT ** 2 @ (
            pynini.closure(pynini.cross("0", "zero "), 0, 1)
            + cardinal.graph_hundred_component_at_least_one_none_zero_digit
        )
        # to handle the rest of the numbers
        address_num = pynini.compose(NEMO_DIGIT ** (3, 4), address_num)
        address_num = plurals._priority_union(address_num, cardinal.graph, NEMO_SIGMA)

        # expand compass abbreviations, optionally followed by a period
        direction = (
            pynini.cross("E", "East")
            | pynini.cross("S", "South")
            | pynini.cross("W", "West")
            | pynini.cross("N", "North")
        ) + pynini.closure(pynutil.delete("."), 0, 1)

        direction = pynini.closure(pynini.accep(NEMO_SPACE) + direction, 0, 1)
        address_words = get_formats(get_abs_path("data/address/address_word.tsv"))
        # street name: optional ordinal (e.g. "5th") or capitalized words, ending in a
        # street-type word ("Expy", "Ave", ...)
        address_words = (
            pynini.accep(NEMO_SPACE)
            + (pynini.closure(ordinal_num, 0, 1) | NEMO_UPPER + pynini.closure(NEMO_ALPHA, 1))
            + NEMO_SPACE
            + pynini.closure(NEMO_UPPER + pynini.closure(NEMO_ALPHA) + NEMO_SPACE)
            + address_words
        )

        city = pynini.closure(NEMO_ALPHA | pynini.accep(NEMO_SPACE), 1)
        city = pynini.closure(pynini.accep(",") + pynini.accep(NEMO_SPACE) + city, 0, 1)

        states = load_labels(get_abs_path("data/address/state.tsv"))

        # also accept dotted abbreviations, e.g. "CA" and "C.A" forms derived below
        additional_options = []
        for x, y in states:
            additional_options.append((x, f"{y[0]}.{y[1:]}"))
        states.extend(additional_options)
        state_graph = pynini.string_map(states)
        state = pynini.invert(state_graph)
        state = pynini.closure(pynini.accep(",") + pynini.accep(NEMO_SPACE) + state, 0, 1)

        # 5-digit ZIP code is read digit by digit
        zip_code = pynini.compose(NEMO_DIGIT ** 5, cardinal.single_digits_graph)
        zip_code = pynini.closure(pynini.closure(pynini.accep(","), 0, 1) + pynini.accep(NEMO_SPACE) + zip_code, 0, 1,)

        address = address_num + direction + address_words + pynini.closure(city + state + zip_code, 0, 1)

        address |= address_num + direction + address_words + pynini.closure(pynini.cross(".", ""), 0, 1)

        return address
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, get_abs_path
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
        "23 4/5" ->
        tokens { fraction { integer_part: "twenty three" numerator: "four" denominator: "five" } }
        "23 4/5th" ->
        tokens { fraction { integer_part: "twenty three" numerator: "four" denominator: "five" } }

    Args:
        cardinal: cardinal GraphFst, provides the number grammar for integer part,
            numerator and denominator
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal, deterministic: bool = True):
        super().__init__(name="fraction", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph
        integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
        # numerator ends at the slash (with or without surrounding spaces)
        numerator = (
            pynutil.insert("numerator: \"") + cardinal_graph + (pynini.cross("/", "\" ") | pynini.cross(" / ", "\" "))
        )

        # silently drop ordinal suffixes on the denominator, e.g. "4/5th" -> denominator "five"
        endings = ["rd", "th", "st", "nd"]
        endings += [x.upper() for x in endings]
        optional_end = pynini.closure(pynini.cross(pynini.union(*endings), ""), 0, 1)

        denominator = pynutil.insert("denominator: \"") + cardinal_graph + optional_end + pynutil.insert("\"")

        graph = pynini.closure(integer + pynini.accep(" "), 0, 1) + (numerator + denominator)
        # alternative fraction notations mapped through data/number/fraction.tsv
        # (presumably single-glyph fractions — TODO confirm against the data file)
        graph |= pynini.closure(integer + (pynini.accep(" ") | pynutil.insert(" ")), 0, 1) + pynini.compose(
            pynini.string_file(get_abs_path("data/number/fraction.tsv")), (numerator + denominator)
        )

        self.graph = graph
        final_graph = self.add_tokens(self.graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
plurals,
)
from nemo_text_processing.text_normalization.en.utils import get_abs_path
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone, and IP, and SSN which includes country code, number part and extension
    country code optional: +***
    number part: ***-***-****, or (***) ***-****
    extension optional: 1-9999
    E.g
    +1 123-123-5678-1 -> telephone { country_code: "one" number_part: "one two three, one two three, five six seven eight" extension: "one" }
    1-800-GO-U-HAUL -> telephone { country_code: "one" number_part: "one, eight hundred GO U HAUL" }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="telephone", kind="classify", deterministic=deterministic)
        add_separator = pynutil.insert(", ")  # between components
        # "0" is verbalized as "zero"; non-deterministic mode additionally allows "o"/"oh".
        zero = pynini.cross("0", "zero")
        if not deterministic:
            zero |= pynini.cross("0", pynini.union("o", "oh"))
        # Single digit -> spoken form ("1" -> "one"); digit.tsv maps word -> digit,
        # hence the invert().
        digit = pynini.invert(pynini.string_file(get_abs_path("data/number/digit.tsv"))).optimize() | zero
        # Optional lead-in words (entries listed in telephone_prompt.tsv).
        telephone_prompts = pynini.string_file(get_abs_path("data/telephone/telephone_prompt.tsv"))
        # Country code: optional prompt, optional "+" (read "plus"), then 1-3 digits.
        country_code = (
            pynini.closure(telephone_prompts + delete_extra_space, 0, 1)
            + pynini.closure(pynini.cross("+", "plus "), 0, 1)
            + pynini.closure(digit + insert_space, 0, 2)
            + digit
            + pynutil.insert(",")
        )
        country_code |= telephone_prompts
        country_code = pynutil.insert("country_code: \"") + country_code + pynutil.insert("\"")
        country_code = country_code + pynini.closure(pynutil.delete("-"), 0, 1) + delete_space + insert_space
        # Area code: three digits read digit-by-digit, except "800" -> "eight hundred".
        area_part_default = pynini.closure(digit + insert_space, 2, 2) + digit
        area_part = pynini.cross("800", "eight hundred") | pynini.compose(
            pynini.difference(NEMO_SIGMA, "800"), area_part_default
        )
        # Accept surrounding forms "123-", "123.", "(123) ", "(123)-"; the separators
        # themselves are deleted and replaced by ", ".
        area_part = (
            (area_part + (pynutil.delete("-") | pynutil.delete(".")))
            | (
                pynutil.delete("(")
                + area_part
                + ((pynutil.delete(")") + pynini.closure(pynutil.delete(" "), 0, 1)) | pynutil.delete(")-"))
            )
        ) + add_separator
        del_separator = pynini.closure(pynini.union("-", " ", "."), 0, 1)
        # Remainder of the number: exactly 7 digits or letters, each optionally
        # followed by a separator (letters cover vanity numbers like "GO-U-HAUL").
        number_length = ((NEMO_DIGIT + del_separator) | (NEMO_ALPHA + del_separator)) ** 7
        # Digits are spelled out; letters pass through; "-" between groups becomes ", ".
        number_words = pynini.closure(
            (NEMO_DIGIT @ digit) + (insert_space | (pynini.cross("-", ', ')))
            | NEMO_ALPHA
            | (NEMO_ALPHA + pynini.cross("-", ' '))
        )
        # Same pattern with "." as the group separator.
        number_words |= pynini.closure(
            (NEMO_DIGIT @ digit) + (insert_space | (pynini.cross(".", ', ')))
            | NEMO_ALPHA
            | (NEMO_ALPHA + pynini.cross(".", ' '))
        )
        number_words = pynini.compose(number_length, number_words)
        number_part = area_part + number_words
        number_part = pynutil.insert("number_part: \"") + number_part + pynutil.insert("\"")
        # Optional extension: 1-4 digits, spelled out.
        extension = (
            pynutil.insert("extension: \"") + pynini.closure(digit + insert_space, 0, 3) + digit + pynutil.insert("\"")
        )
        extension = pynini.closure(insert_space + extension, 0, 1)
        # Prefer more specific parses: full form with country code and extension wins
        # over the bare number part.
        graph = plurals._priority_union(country_code + number_part, number_part, NEMO_SIGMA).optimize()
        graph = plurals._priority_union(country_code + number_part + extension, graph, NEMO_SIGMA).optimize()
        graph = plurals._priority_union(number_part + extension, graph, NEMO_SIGMA).optimize()
        # ip: four dot-separated groups of 1-3 digits; "." is read "dot".
        ip_prompts = pynini.string_file(get_abs_path("data/telephone/ip_prompt.tsv"))
        digit_to_str_graph = digit + pynini.closure(pynutil.insert(" ") + digit, 0, 2)
        ip_graph = digit_to_str_graph + (pynini.cross(".", " dot ") + digit_to_str_graph) ** 3
        graph |= (
            pynini.closure(
                pynutil.insert("country_code: \"") + ip_prompts + pynutil.insert("\"") + delete_extra_space, 0, 1
            )
            + pynutil.insert("number_part: \"")
            + ip_graph.optimize()
            + pynutil.insert("\"")
        )
        # ssn: 3-2-4 digit groups separated by "-"; each group is spelled digit-by-digit
        # and groups are joined with ", ".
        ssn_prompts = pynini.string_file(get_abs_path("data/telephone/ssn_prompt.tsv"))
        three_digit_part = digit + (pynutil.insert(" ") + digit) ** 2
        two_digit_part = digit + pynutil.insert(" ") + digit
        four_digit_part = digit + (pynutil.insert(" ") + digit) ** 3
        ssn_separator = pynini.cross("-", ", ")
        ssn_graph = three_digit_part + ssn_separator + two_digit_part + ssn_separator + four_digit_part
        graph |= (
            pynini.closure(
                pynutil.insert("country_code: \"") + ssn_prompts + pynutil.insert("\"") + delete_extra_space, 0, 1
            )
            + pynutil.insert("number_part: \"")
            + ssn_graph.optimize()
            + pynutil.insert("\"")
        )
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal, e.g.
    13th -> ordinal { integer: "thirteen" }

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph
        # Digit string with optional thousands separators, e.g. "1,000".
        cardinal_format = pynini.closure(NEMO_DIGIT | pynini.accep(","))
        # Numbers ending in "1" whose preceding digit is not "1" (so "21st" matches
        # but "11st" does not); the suffix (incl. upper-case/superscript) is deleted.
        st_format = (
            pynini.closure(cardinal_format + (NEMO_DIGIT - "1"), 0, 1)
            + pynini.accep("1")
            + pynutil.delete(pynini.union("st", "ST", "ˢᵗ"))
        )
        # Numbers ending in "2" with preceding digit != "1" ("22nd", not "12nd").
        nd_format = (
            pynini.closure(cardinal_format + (NEMO_DIGIT - "1"), 0, 1)
            + pynini.accep("2")
            + pynutil.delete(pynini.union("nd", "ND", "ⁿᵈ"))
        )
        # Numbers ending in "3" with preceding digit != "1" ("23rd", not "13rd").
        rd_format = (
            pynini.closure(cardinal_format + (NEMO_DIGIT - "1"), 0, 1)
            + pynini.accep("3")
            + pynutil.delete(pynini.union("rd", "RD", "ʳᵈ"))
        )
        # Everything else takes "th": single digits other than 1/2/3, the teens
        # "11"/"12"/"13" (ending "1" + digit), and endings like "x4"-"x9"/"x0".
        th_format = pynini.closure(
            (NEMO_DIGIT - "1" - "2" - "3")
            | (cardinal_format + "1" + NEMO_DIGIT)
            | (cardinal_format + (NEMO_DIGIT - "1") + (NEMO_DIGIT - "1" - "2" - "3")),
            1,
        ) + pynutil.delete(pynini.union("th", "TH", "ᵗʰ"))
        # Strip the suffix, then verbalize the remaining digits via the cardinal grammar.
        self.graph = (st_format | nd_format | rd_format | th_format) @ cardinal_graph
        final_graph = pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_UPPER, GraphFst, insert_space
from pynini.lib import pynutil
class AbbreviationFst(GraphFst):
    """
    Finite state transducer for classifying abbreviations,
    e.g. "ABC" -> tokens { abbreviation { value: "A B C" } }

    Args:
        whitelist: whitelist FST
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, whitelist: 'pynini.FstLike', deterministic: bool = True):
        super().__init__(name="abbreviation", kind="classify", deterministic=deterministic)
        period = pynini.accep(".")
        dotted_letter = NEMO_UPPER + period
        # A.B.C. -> A. B. C. (spaces inserted between dotted letters)
        graph = dotted_letter + pynini.closure(insert_space + dotted_letter, 1)
        # A.B.C. -> A.B.C. (kept as is)
        graph |= dotted_letter + pynini.closure(dotted_letter, 1)
        # ABC -> A B C
        graph |= NEMO_UPPER + pynini.closure(insert_space + NEMO_UPPER, 1)
        # Remove any input string that the whitelist grammar already handles.
        abbreviation_inputs = pynini.project(graph, "input")
        whitelisted_inputs = pynini.project(whitelist.graph, "input")
        graph = pynini.compose(pynini.difference(abbreviation_inputs, whitelisted_inputs), graph)
        graph = pynutil.insert("value: \"") + graph.optimize() + pynutil.insert("\"")
        graph = self.add_tokens(graph)
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/abbreviation.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
NEMO_CHAR,
NEMO_NOT_SPACE,
NEMO_SIGMA,
NEMO_UPPER,
SINGULAR_TO_PLURAL,
GraphFst,
convert_space,
)
from nemo_text_processing.text_normalization.en.taggers.roman import get_names
from nemo_text_processing.text_normalization.en.utils import (
augment_labels_with_punct_at_end,
get_abs_path,
load_labels,
)
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelist, e.g.
    misses -> tokens { name: "mrs" }
    for non-deterministic case: "Dr. Abc" ->
    tokens { name: "drive" } tokens { name: "Abc" }
    tokens { name: "doctor" } tokens { name: "Abc" }
    tokens { name: "Dr." } tokens { name: "Abc" }
    This class has highest priority among all classifier grammars. Whitelisted tokens are defined and loaded from "data/whitelist.tsv".

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        input_file: path to a file with whitelist replacements
    """

    def __init__(self, input_case: str, deterministic: bool = True, input_file: str = None):
        super().__init__(name="whitelist", kind="classify", deterministic=deterministic)

        def _get_whitelist_graph(input_case, file, keep_punct_add_end: bool = False):
            # Build a string-map FST from a two-column TSV; lower-case the input
            # side when the normalizer operates on lower-cased text.
            whitelist = load_labels(file)
            if input_case == INPUT_LOWER_CASED:
                whitelist = [[x.lower(), y] for x, y in whitelist]
            else:
                whitelist = [[x, y] for x, y in whitelist]
            if keep_punct_add_end:
                # Also accept each entry followed by trailing punctuation.
                whitelist.extend(augment_labels_with_punct_at_end(whitelist))
            graph = pynini.string_map(whitelist)
            return graph

        graph = _get_whitelist_graph(input_case, get_abs_path("data/whitelist/tts.tsv"))
        # Symbol replacements; the standalone string "/" is excluded from this map.
        graph |= pynini.compose(
            pynini.difference(NEMO_SIGMA, pynini.accep("/")).optimize(),
            _get_whitelist_graph(input_case, get_abs_path("data/whitelist/symbol.tsv")),
        ).optimize()
        if deterministic:
            names = get_names()
            # "St. <Name>" -> "Saint <Name>", only when followed by a known name.
            graph |= (
                pynini.cross(pynini.union("st", "St", "ST"), "Saint")
                + pynini.closure(pynutil.delete("."))
                + pynini.accep(" ")
                + names
            )
        else:
            graph |= _get_whitelist_graph(
                input_case, get_abs_path("data/whitelist/alternatives.tsv"), keep_punct_add_end=True
            )
            # Collapse dotted all-caps abbreviations, e.g. "A.B.C." -> "ABC"
            # (needs at least three letters; final "." optional).
            for x in [".", ". "]:
                graph |= (
                    NEMO_UPPER
                    + pynini.closure(pynutil.delete(x) + NEMO_UPPER, 2)
                    + pynini.closure(pynutil.delete("."), 0, 1)
                )
        if not deterministic:
            multiple_forms_whitelist_graph = get_formats(get_abs_path("data/whitelist/alternatives_all_format.tsv"))
            graph |= multiple_forms_whitelist_graph
            # Measurement units and their plural forms; composed with NEMO_CHAR**(3,...)
            # so only inputs of at least 3 characters are accepted.
            graph_unit = pynini.string_file(get_abs_path("data/measure/unit.tsv")) | pynini.string_file(
                get_abs_path("data/measure/unit_alternatives.tsv")
            )
            graph_unit_plural = graph_unit @ SINGULAR_TO_PLURAL
            units_graph = pynini.compose(NEMO_CHAR ** (3, ...), convert_space(graph_unit | graph_unit_plural))
            graph |= units_graph
        # convert to states only if comma is present before the abbreviation to avoid converting all caps words,
        # e.g. "IN", "OH", "OK"
        # TODO or only exclude above?
        states = load_labels(get_abs_path("data/address/state.tsv"))
        additional_options = []
        for x, y in states:
            if input_case == INPUT_LOWER_CASED:
                x = x.lower()
            # Add dotted variants of the second column, e.g. "CA" -> "C.A"
            # (and "C.A." in the non-deterministic case).
            additional_options.append((x, f"{y[0]}.{y[1:]}"))
            if not deterministic:
                additional_options.append((x, f"{y[0]}.{y[1:]}."))
        states.extend(additional_options)
        state_graph = pynini.string_map(states)
        # "<word>, <abbr>" -> "<word>, <expanded form>" via the inverted state map.
        # NOTE(review): assumes state.tsv columns are (full name, abbreviation) so
        # that invert() maps the abbreviation back to the name -- confirm against
        # the data file.
        graph |= pynini.closure(NEMO_NOT_SPACE, 1) + pynini.union(", ", ",") + pynini.invert(state_graph).optimize()
        if input_file:
            whitelist_provided = _get_whitelist_graph(input_case, input_file)
            if not deterministic:
                graph |= whitelist_provided
            else:
                # In deterministic mode a user-provided whitelist fully replaces
                # the built-in graphs.
                graph = whitelist_provided
        self.graph = (convert_space(graph)).optimize()
        self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
def get_formats(input_f, input_case=INPUT_CASED, is_default=True):
    """
    Adds various abbreviation format options to the list of acceptable input forms
    """
    labels = load_labels(input_f)
    extra_forms = []
    for abbr, expansion in labels:
        if input_case == INPUT_LOWER_CASED:
            abbr = abbr.lower()
        cap_abbr = abbr[0].upper() + abbr[1:]
        cap_expansion = expansion[0].upper() + expansion[1:]
        # default "dr" -> doctor, this includes period "dr." -> doctor
        extra_forms.append((abbr + ".", expansion))
        # "Dr" -> Doctor
        extra_forms.append((cap_abbr, cap_expansion))
        # "Dr." -> Doctor
        extra_forms.append((cap_abbr + ".", cap_expansion))
    labels.extend(extra_forms)
    if not is_default:
        # Wrap both sides with raw/norm markers for non-default (LM) usage.
        labels = [(x, f"|raw_start|{x}|raw_end||norm_start|{y}|norm_end|") for (x, y) in labels]
    return pynini.string_map(labels)
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/whitelist.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, GraphFst, convert_space
from pynini.lib import pynutil
class RangeFst(GraphFst):
    """
    This class is a composite class of two other class instances

    Args:
        time: composed tagger and verbalizer
        date: composed tagger and verbalizer
        cardinal: tagger
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
        lm: whether to use for hybrid LM
    """

    def __init__(
        self, time: GraphFst, date: GraphFst, cardinal: GraphFst, deterministic: bool = True, lm: bool = False,
    ):
        super().__init__(name="range", kind="classify", deterministic=deterministic)
        delete_space = pynini.closure(pynutil.delete(" "), 0, 1)  # at most one space
        approx = pynini.cross("~", "approximately")
        # TIME: "10:00-11:00" -> "... to ..." (the `time` argument already verbalizes
        # each endpoint); "~<time>" -> "approximately <time>".
        time_graph = time + delete_space + pynini.cross("-", " to ") + delete_space + time
        self.graph = time_graph | (approx + time)
        cardinal = cardinal.graph_with_and
        # YEAR: 4-digit (optionally "...s") year ranges; the right endpoint may be
        # 4-digit, 2-digit, or a plain 2-digit cardinal ("1990-95").
        date_year_four_digit = (NEMO_DIGIT ** 4 + pynini.closure(pynini.accep("s"), 0, 1)) @ date
        date_year_two_digit = (NEMO_DIGIT ** 2 + pynini.closure(pynini.accep("s"), 0, 1)) @ date
        year_to_year_graph = (
            date_year_four_digit
            + delete_space
            + pynini.cross("-", " to ")
            + delete_space
            + (date_year_four_digit | date_year_two_digit | (NEMO_DIGIT ** 2 @ cardinal))
        )
        # "mid-1990s" -> "mid 1990s"
        mid_year_graph = pynini.accep("mid") + pynini.cross("-", " ") + (date_year_four_digit | date_year_two_digit)
        self.graph |= year_to_year_graph
        self.graph |= mid_year_graph
        # ADDITION: "2+3" -> "two plus three"; also "~2" and "2 ... 3".
        range_graph = cardinal + pynini.closure(pynini.cross("+", " plus ") + cardinal, 1)
        range_graph |= cardinal + pynini.closure(pynini.cross(" + ", " plus ") + cardinal, 1)
        range_graph |= approx + cardinal
        range_graph |= cardinal + (pynini.cross("...", " ... ") | pynini.accep(" ... ")) + cardinal
        if not deterministic or lm:
            # cardinal ranges: "2-3" -> "two to three" / "two minus three";
            # "2:3" -> "two to three".
            cardinal_to_cardinal_graph = (
                cardinal + delete_space + pynini.cross("-", pynini.union(" to ", " minus ")) + delete_space + cardinal
            )
            range_graph |= cardinal_to_cardinal_graph | (
                cardinal + delete_space + pynini.cross(":", " to ") + delete_space + cardinal
            )
            # MULTIPLY: "2x3" -> "two by three" / "two times three"
            for x in [" x ", "x"]:
                range_graph |= cardinal + pynini.cross(x, pynini.union(" by ", " times ")) + cardinal
            # 40x -> "40 times" ("40 x" cases is covered in serial)
            for x in [" x", "x"]:
                range_graph |= cardinal + pynini.cross(x, " times")
                # 5x to 7x-> five to seven x/times
                range_graph |= (
                    cardinal
                    + pynutil.delete(x)
                    + pynini.union(" to ", "-", " - ")
                    + cardinal
                    + pynini.cross(x, pynini.union(" x", " times"))
                )
            for x in ["*", " * "]:
                range_graph |= cardinal + pynini.closure(pynini.cross(x, " times ") + cardinal, 1)
            # supports "No. 12" -> "Number 12"
            range_graph |= (
                (pynini.cross(pynini.union("NO", "No"), "Number") | pynini.cross("no", "number"))
                + pynini.closure(pynini.union(". ", " "), 0, 1)
                + cardinal
            )
            # DIVISION: "2/3" -> "two divided by three"
            for x in ["/", " / "]:
                range_graph |= cardinal + pynini.closure(pynini.cross(x, " divided by ") + cardinal, 1)
            # 10% to 20% -> ten to twenty percent (the first "%" may be dropped)
            range_graph |= (
                cardinal
                + pynini.closure(pynini.cross("%", " percent") | pynutil.delete("%"), 0, 1)
                + pynini.union(" to ", "-", " - ")
                + cardinal
                + pynini.cross("%", " percent")
            )
        self.graph |= range_graph
        self.graph = self.graph.optimize()
        graph = pynutil.insert("name: \"") + convert_space(self.graph).optimize() + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/range.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.abbreviation import AbbreviationFst
from nemo_text_processing.text_normalization.en.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.en.taggers.date import DateFst
from nemo_text_processing.text_normalization.en.taggers.decimal import DecimalFst
from nemo_text_processing.text_normalization.en.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.en.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.en.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.en.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.en.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.en.taggers.range import RangeFst as RangeFst
from nemo_text_processing.text_normalization.en.taggers.roman import RomanFst
from nemo_text_processing.text_normalization.en.taggers.serial import SerialFst
from nemo_text_processing.text_normalization.en.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.en.taggers.time import TimeFst
from nemo_text_processing.text_normalization.en.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.verbalizers.date import DateFst as vDateFst
from nemo_text_processing.text_normalization.en.verbalizers.ordinal import OrdinalFst as vOrdinalFst
from nemo_text_processing.text_normalization.en.verbalizers.time import TimeFst as vTimeFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        input_case: accepting either "lower_cased" or "cased" input.
        deterministic: if True will provide a single transduction option,
            for False multiple options (used for audio-based normalization)
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
    """

    def __init__(
        self,
        input_case: str,
        deterministic: bool = True,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
        far_file = None
        # The FAR cache file name encodes all parameters that affect the grammar.
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            whitelist_file = os.path.basename(whitelist) if whitelist else ""
            far_file = os.path.join(
                cache_dir, f"en_tn_{deterministic}_deterministic_{input_case}_{whitelist_file}_tokenize.far"
            )
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Restore the pre-compiled grammar instead of rebuilding it (much faster).
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f'ClassifyFst.fst was restored from {far_file}.')
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            # Each grammar build is timed and its state count logged for profiling.
            start_time = time.time()
            cardinal = CardinalFst(deterministic=deterministic)
            cardinal_graph = cardinal.fst
            logging.debug(f"cardinal: {time.time() - start_time: .2f}s -- {cardinal_graph.num_states()} nodes")
            start_time = time.time()
            ordinal = OrdinalFst(cardinal=cardinal, deterministic=deterministic)
            ordinal_graph = ordinal.fst
            logging.debug(f"ordinal: {time.time() - start_time: .2f}s -- {ordinal_graph.num_states()} nodes")
            start_time = time.time()
            decimal = DecimalFst(cardinal=cardinal, deterministic=deterministic)
            decimal_graph = decimal.fst
            logging.debug(f"decimal: {time.time() - start_time: .2f}s -- {decimal_graph.num_states()} nodes")
            start_time = time.time()
            fraction = FractionFst(deterministic=deterministic, cardinal=cardinal)
            fraction_graph = fraction.fst
            logging.debug(f"fraction: {time.time() - start_time: .2f}s -- {fraction_graph.num_states()} nodes")
            start_time = time.time()
            measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)
            measure_graph = measure.fst
            logging.debug(f"measure: {time.time() - start_time: .2f}s -- {measure_graph.num_states()} nodes")
            start_time = time.time()
            date_graph = DateFst(cardinal=cardinal, deterministic=deterministic).fst
            logging.debug(f"date: {time.time() - start_time: .2f}s -- {date_graph.num_states()} nodes")
            start_time = time.time()
            time_graph = TimeFst(cardinal=cardinal, deterministic=deterministic).fst
            logging.debug(f"time: {time.time() - start_time: .2f}s -- {time_graph.num_states()} nodes")
            start_time = time.time()
            telephone_graph = TelephoneFst(deterministic=deterministic).fst
            logging.debug(f"telephone: {time.time() - start_time: .2f}s -- {telephone_graph.num_states()} nodes")
            start_time = time.time()
            # NOTE(review): variable name has a typo ("electonic"); used consistently below.
            electonic_graph = ElectronicFst(cardinal=cardinal, deterministic=deterministic).fst
            logging.debug(f"electronic: {time.time() - start_time: .2f}s -- {electonic_graph.num_states()} nodes")
            start_time = time.time()
            money_graph = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=deterministic).fst
            logging.debug(f"money: {time.time() - start_time: .2f}s -- {money_graph.num_states()} nodes")
            start_time = time.time()
            whitelist_graph = WhiteListFst(
                input_case=input_case, deterministic=deterministic, input_file=whitelist
            ).fst
            logging.debug(f"whitelist: {time.time() - start_time: .2f}s -- {whitelist_graph.num_states()} nodes")
            start_time = time.time()
            punctuation = PunctuationFst(deterministic=deterministic)
            punct_graph = punctuation.fst
            logging.debug(f"punct: {time.time() - start_time: .2f}s -- {punct_graph.num_states()} nodes")
            start_time = time.time()
            word_graph = WordFst(punctuation=punctuation, deterministic=deterministic).fst
            logging.debug(f"word: {time.time() - start_time: .2f}s -- {word_graph.num_states()} nodes")
            start_time = time.time()
            serial_graph = SerialFst(cardinal=cardinal, ordinal=ordinal, deterministic=deterministic).fst
            logging.debug(f"serial: {time.time() - start_time: .2f}s -- {serial_graph.num_states()} nodes")
            start_time = time.time()
            # Range grammar needs fully composed (tagger + verbalizer) time/date FSTs.
            v_time_graph = vTimeFst(deterministic=deterministic).fst
            v_ordinal_graph = vOrdinalFst(deterministic=deterministic)
            v_date_graph = vDateFst(ordinal=v_ordinal_graph, deterministic=deterministic).fst
            time_final = pynini.compose(time_graph, v_time_graph)
            date_final = pynini.compose(date_graph, v_date_graph)
            range_graph = RangeFst(
                time=time_final, date=date_final, cardinal=cardinal, deterministic=deterministic
            ).fst
            logging.debug(f"range: {time.time() - start_time: .2f}s -- {range_graph.num_states()} nodes")
            # Union of all classifiers; lower weight = higher priority, so the
            # whitelist (1.01) wins over everything else.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.1)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electonic_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.1)
                | pynutil.add_weight(range_graph, 1.1)
                | pynutil.add_weight(serial_graph, 1.1001)  # should be higher than the rest of the classes
            )
            # roman_graph = RomanFst(deterministic=deterministic).fst
            # classify |= pynutil.add_weight(roman_graph, 1.1)
            if not deterministic:
                # NOTE(review): AbbreviationFst's __init__ (taggers/abbreviation.py)
                # declares a required `whitelist` parameter which is not passed here --
                # confirm this call against the current AbbreviationFst signature.
                abbreviation_graph = AbbreviationFst(deterministic=deterministic).fst
                classify |= pynutil.add_weight(abbreviation_graph, 100)
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=2.1) + pynutil.insert(" }")
            punct = pynini.closure(
                pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
                | (pynutil.insert(" ") + punct),
                1,
            )
            # Fallback: any token can be a plain word (very high weight = last resort).
            classify |= pynutil.add_weight(word_graph, 100)
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            # A token may carry punctuation before and/or after it.
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            # A sentence is a sequence of tokens separated by whitespace or punctuation.
            graph = token_plus_punct + pynini.closure(
                (
                    pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
                    | (pynutil.insert(" ") + punct + pynutil.insert(" "))
                )
                + token_plus_punct
            )
            graph = delete_space + graph + delete_space
            graph |= punct
            self.fst = graph.optimize()
            if far_file:
                # Persist the compiled grammar so later instantiations can reuse it.
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unicodedata import category
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, NEMO_SIGMA, GraphFst
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
from pynini.examples import plurals
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
    e.g. a, -> tokens { name: "a" } tokens { name: "," }

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="punctuation", kind="classify", deterministic=deterministic)
        # ASCII punctuation handled in addition to the Unicode punctuation categories.
        s = "!#%&\'()*+,-./:;<=>?@^_`{|}~\""
        punct_symbols_to_exclude = ["[", "]"]
        # All Unicode characters in general category "P*" (punctuation), minus the
        # excluded brackets. Scans the full Unicode range once at construction time.
        punct_unicode = [
            chr(i)
            for i in range(sys.maxunicode)
            if category(chr(i)).startswith("P") and chr(i) not in punct_symbols_to_exclude
        ]
        # Characters already handled by the symbol whitelist must not be treated
        # as punctuation (first TSV column holds the written symbol).
        whitelist_symbols = load_labels(get_abs_path("data/whitelist/symbol.tsv"))
        whitelist_symbols = [x[0] for x in whitelist_symbols]
        self.punct_marks = [p for p in punct_unicode + list(s) if p not in whitelist_symbols]
        punct = pynini.union(*self.punct_marks)
        punct = pynini.closure(punct, 1)
        # Accept markup-like tags unchanged: "<tag>", "<tag/>", "</tag>".
        emphasis = (
            pynini.accep("<")
            + (
                (pynini.closure(NEMO_NOT_SPACE - pynini.union("<", ">"), 1) + pynini.closure(pynini.accep("/"), 0, 1))
                | (pynini.accep("/") + pynini.closure(NEMO_NOT_SPACE - pynini.union("<", ">"), 1))
            )
            + pynini.accep(">")
        )
        # Prefer the tag reading when both the tag and plain punctuation match.
        punct = plurals._priority_union(emphasis, punct, NEMO_SIGMA)
        self.graph = punct
        self.fst = (pynutil.insert("name: \"") + self.graph + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/punctuation.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, TO_UPPER, GraphFst, get_abs_path
from pynini.lib import pynutil
# Deletes exactly one space (e.g. between a number and its quantity word).
delete_space = pynutil.delete(" ")
# Quantity words loaded from thousand.tsv (includes "thousand"-class entries;
# see the "k"/"K"/"thousand" exclusion in get_quantity below).
quantities = pynini.string_file(get_abs_path("data/number/thousand.tsv"))
# Abbreviated quantity forms; composing with TO_UPPER additionally accepts the
# upper-cased variant of each abbreviation.
quantities_abbr = pynini.string_file(get_abs_path("data/number/quantity_abbr.tsv"))
quantities_abbr |= TO_UPPER @ quantities_abbr
def get_quantity(
    decimal: 'pynini.FstLike', cardinal_up_to_hundred: 'pynini.FstLike', include_abbr: bool
) -> 'pynini.FstLike':
    """
    Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
    e.g. 1 million -> integer_part: "one" quantity: "million"
    e.g. 1.5 million -> integer_part: "one" fractional_part: "five" quantity: "million"

    Args:
        decimal: decimal FST
        cardinal_up_to_hundred: cardinal FST
        include_abbr: whether to also accept abbreviated quantity forms (quantity_abbr.tsv)
    """
    # Bare cardinals are only combined with quantities other than "thousand"
    # ("k"/"K"/"thousand" forms are excluded here).
    quantity_wo_thousand = pynini.project(quantities, "input") - pynini.union("k", "K", "thousand")
    if include_abbr:
        quantity_wo_thousand |= pynini.project(quantities_abbr, "input") - pynini.union("k", "K", "thousand")
    # Cardinal + quantity: the separating space (if any) is deleted; the quantity
    # input is re-mapped through the full quantity tables to produce its verbalization.
    res = (
        pynutil.insert("integer_part: \"")
        + cardinal_up_to_hundred
        + pynutil.insert("\"")
        + pynini.closure(pynutil.delete(" "), 0, 1)
        + pynutil.insert(" quantity: \"")
        + (quantity_wo_thousand @ (quantities | quantities_abbr))
        + pynutil.insert("\"")
    )
    if include_abbr:
        quantity = quantities | quantities_abbr
    else:
        quantity = quantities
    # Decimal + quantity: here "thousand" is allowed as well.
    res |= (
        decimal
        + pynini.closure(pynutil.delete(" "), 0, 1)
        + pynutil.insert("quantity: \"")
        + quantity
        + pynutil.insert("\"")
    )
    return res
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal, e.g.
    -12.5006 billion -> decimal { negative: "true" integer_part: "12" fractional_part: "five o o six" quantity: "billion" }
    1 billion -> decimal { integer_part: "one" quantity: "billion" }

    Args:
        cardinal: CardinalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """
    def __init__(self, cardinal: GraphFst, deterministic: bool):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph_with_and
        cardinal_graph_hundred_component_at_least_one_none_zero_digit = (
            cardinal.graph_hundred_component_at_least_one_none_zero_digit
        )
        # Fractional digits are verbalized one at a time (e.g. "5006" -> "five o o six").
        self.graph = cardinal.single_digits_graph.optimize()
        if not deterministic:
            # Non-deterministic mode also allows the fractional part as a full cardinal.
            self.graph = self.graph | cardinal_graph
        point = pynutil.delete(".")
        # Optional leading "-" -> negative: "true"
        optional_graph_negative = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
        self.graph_fractional = pynutil.insert("fractional_part: \"") + self.graph + pynutil.insert("\"")
        self.graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
        # Integer part is optional, so inputs like ".5" are accepted as well as "1.5".
        final_graph_wo_sign = (
            pynini.closure(self.graph_integer + pynutil.insert(" "), 0, 1)
            + point
            + pynutil.insert(" ")
            + self.graph_fractional
        )
        # Decimal/cardinal followed by a quantity word ("million", ...), with and
        # without abbreviated quantity forms.
        quantity_w_abbr = get_quantity(
            final_graph_wo_sign, cardinal_graph_hundred_component_at_least_one_none_zero_digit, include_abbr=True
        )
        quantity_wo_abbr = get_quantity(
            final_graph_wo_sign, cardinal_graph_hundred_component_at_least_one_none_zero_digit, include_abbr=False
        )
        self.final_graph_wo_negative_w_abbr = final_graph_wo_sign | quantity_w_abbr
        self.final_graph_wo_negative = final_graph_wo_sign | quantity_wo_abbr
        # reduce options for non_deterministic and allow either "oh" or "zero", but not combination
        if not deterministic:
            # Filter rejecting any output that contains both "oh" and "zero" (either order).
            no_oh_zero = pynini.difference(
                NEMO_SIGMA,
                (NEMO_SIGMA + "oh" + NEMO_SIGMA + "zero" + NEMO_SIGMA)
                | (NEMO_SIGMA + "zero" + NEMO_SIGMA + "oh" + NEMO_SIGMA),
            ).optimize()
            no_zero_oh = pynini.difference(
                NEMO_SIGMA, NEMO_SIGMA + pynini.accep("zero") + NEMO_SIGMA + pynini.accep("oh") + NEMO_SIGMA
            ).optimize()
            # Add the variant where a "zero" integer part is verbalized as "oh"
            # (e.g. "0.5" -> "oh point five").
            self.final_graph_wo_negative |= pynini.compose(
                self.final_graph_wo_negative,
                pynini.cdrewrite(
                    pynini.cross("integer_part: \"zero\"", "integer_part: \"oh\""), NEMO_SIGMA, NEMO_SIGMA, NEMO_SIGMA
                ),
            )
            self.final_graph_wo_negative = pynini.compose(self.final_graph_wo_negative, no_oh_zero).optimize()
            self.final_graph_wo_negative = pynini.compose(self.final_graph_wo_negative, no_zero_oh).optimize()
        final_graph = optional_graph_negative + self.final_graph_wo_negative
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
SINGULAR_TO_PLURAL,
GraphFst,
convert_space,
insert_space,
)
from nemo_text_processing.text_normalization.en.utils import get_abs_path, load_labels
from pynini.lib import pynutil
# Minor-currency unit names (e.g. cents), singular and plural forms
# (presumably keyed by currency symbol — verify against the data files).
min_singular = pynini.string_file(get_abs_path("data/money/currency_minor_singular.tsv"))
min_plural = pynini.string_file(get_abs_path("data/money/currency_minor_plural.tsv"))
# Major-currency unit names in singular form (e.g. "$" -> "dollar").
maj_singular = pynini.string_file((get_abs_path("data/money/currency_major.tsv")))
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money, suppletive aware, e.g.
    $12.05 -> money { integer_part: "twelve" currency_maj: "dollars" fractional_part: "five" currency_min: "cents" preserve_order: true }
    $12.0500 -> money { integer_part: "twelve" currency_maj: "dollars" fractional_part: "five" currency_min: "cents" preserve_order: true }
    $1 -> money { currency_maj: "dollar" integer_part: "one" }
    $1.00 -> money { currency_maj: "dollar" integer_part: "one" }
    $0.05 -> money { fractional_part: "five" currency_min: "cents" preserve_order: true }
    $1 million -> money { currency_maj: "dollars" integer_part: "one" quantity: "million" }
    $1.2 million -> money { currency_maj: "dollars" integer_part: "one" fractional_part: "two" quantity: "million" }
    $1.2320 -> money { currency_maj: "dollars" integer_part: "one" fractional_part: "two three two" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """
    def __init__(self, cardinal: GraphFst, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="classify", deterministic=deterministic)
        cardinal_graph = cardinal.graph_with_and
        graph_decimal_final = decimal.final_graph_wo_negative_w_abbr
        # (symbol, name) label pairs for the per-currency loop below.
        maj_singular_labels = load_labels(get_abs_path("data/money/currency_major.tsv"))
        # Major unit verbalizations: "dollar" (singular) / "dollars" (plural).
        maj_unit_plural = convert_space(maj_singular @ SINGULAR_TO_PLURAL)
        maj_unit_singular = convert_space(maj_singular)
        graph_maj_singular = pynutil.insert("currency_maj: \"") + maj_unit_singular + pynutil.insert("\"")
        graph_maj_plural = pynutil.insert("currency_maj: \"") + maj_unit_plural + pynutil.insert("\"")
        # Drops an all-zero fractional part, e.g. "$1.00" -> "$1".
        optional_delete_fractional_zeros = pynini.closure(
            pynutil.delete(".") + pynini.closure(pynutil.delete("0"), 1), 0, 1
        )
        graph_integer_one = pynutil.insert("integer_part: \"") + pynini.cross("1", "one") + pynutil.insert("\"")
        # only for decimals where third decimal after comma is non-zero or with quantity
        decimal_delete_last_zeros = (
            pynini.closure(NEMO_DIGIT | pynutil.delete(","))
            + pynini.accep(".")
            + pynini.closure(NEMO_DIGIT, 2)
            + (NEMO_DIGIT - "0")
            + pynini.closure(pynutil.delete("0"))
        )
        # A trailing alphabetic character signals a quantity word (e.g. "million").
        decimal_with_quantity = NEMO_SIGMA + NEMO_ALPHA
        graph_decimal = (
            graph_maj_plural + insert_space + (decimal_delete_last_zeros | decimal_with_quantity) @ graph_decimal_final
        )
        # Any integer other than the literal "1" (which takes the singular unit).
        graph_integer = (
            pynutil.insert("integer_part: \"") + ((NEMO_SIGMA - "1") @ cardinal_graph) + pynutil.insert("\"")
        )
        graph_integer_only = graph_maj_singular + insert_space + graph_integer_one
        graph_integer_only |= graph_maj_plural + insert_space + graph_integer
        final_graph = (graph_integer_only + optional_delete_fractional_zeros) | graph_decimal
        # remove trailing zeros of non zero number in the first 2 digits and fill up to 2 digits
        # e.g. 2000 -> 20, 0200->02, 01 -> 01, 10 -> 10
        # not accepted: 002, 00, 0,
        two_digits_fractional_part = (
            pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(pynutil.delete("0"))
        ) @ (
            (pynutil.delete("0") + (NEMO_DIGIT - "0"))
            | ((NEMO_DIGIT - "0") + pynutil.insert("0"))
            | ((NEMO_DIGIT - "0") + NEMO_DIGIT)
        )
        graph_min_singular = pynutil.insert(" currency_min: \"") + min_singular + pynutil.insert("\"")
        graph_min_plural = pynutil.insert(" currency_min: \"") + min_plural + pynutil.insert("\"")
        # format ** dollars ** cent
        # Built per currency symbol so the symbol can be deleted from the input
        # and re-inserted to look up the matching unit names (note: `@` binds
        # tighter than `+`, so `pynutil.insert(sym) @ graph` composes first).
        decimal_graph_with_minor = None
        integer_graph_reordered = None
        decimal_default_reordered = None
        for curr_symbol, _ in maj_singular_labels:
            preserve_order = pynutil.insert(" preserve_order: true")
            # Integer part followed by the (re-inserted) major unit name.
            integer_plus_maj = graph_integer + insert_space + pynutil.insert(curr_symbol) @ graph_maj_plural
            integer_plus_maj |= graph_integer_one + insert_space + pynutil.insert(curr_symbol) @ graph_maj_singular
            # Accept thousands separators, e.g. "1,000" (leading digit must be non-zero).
            integer_plus_maj_with_comma = pynini.compose(
                NEMO_DIGIT - "0" + pynini.closure(NEMO_DIGIT | pynutil.delete(",")), integer_plus_maj
            )
            integer_plus_maj = pynini.compose(pynini.closure(NEMO_DIGIT) - "0", integer_plus_maj)
            integer_plus_maj |= integer_plus_maj_with_comma
            # Fractional part "01" -> "one" takes the singular minor unit ("cent").
            graph_fractional_one = two_digits_fractional_part @ pynini.cross("1", "one")
            graph_fractional_one = pynutil.insert("fractional_part: \"") + graph_fractional_one + pynutil.insert("\"")
            graph_fractional = (
                two_digits_fractional_part
                @ (pynini.closure(NEMO_DIGIT, 1, 2) - "1")
                @ cardinal.graph_hundred_component_at_least_one_none_zero_digit
            )
            graph_fractional = pynutil.insert("fractional_part: \"") + graph_fractional + pynutil.insert("\"")
            fractional_plus_min = graph_fractional + insert_space + pynutil.insert(curr_symbol) @ graph_min_plural
            fractional_plus_min |= (
                graph_fractional_one + insert_space + pynutil.insert(curr_symbol) @ graph_min_singular
            )
            # "$12.05" -> "twelve dollars five cents" style output.
            decimal_graph_with_minor_curr = integer_plus_maj + pynini.cross(".", " ") + fractional_plus_min
            if not deterministic:
                # Lightly-penalized variant without the minor unit name.
                decimal_graph_with_minor_curr |= pynutil.add_weight(
                    integer_plus_maj
                    + pynini.cross(".", " ")
                    + pynutil.insert("fractional_part: \"")
                    + two_digits_fractional_part @ cardinal.graph_hundred_component_at_least_one_none_zero_digit
                    + pynutil.insert("\""),
                    weight=0.0001,
                )
            default_fraction_graph = (decimal_delete_last_zeros | decimal_with_quantity) @ graph_decimal_final
            # "$0.05" / "$.05" -> minor-unit-only output ("five cents").
            decimal_graph_with_minor_curr |= (
                pynini.closure(pynutil.delete("0"), 0, 1) + pynutil.delete(".") + fractional_plus_min
            )
            decimal_graph_with_minor_curr = (
                pynutil.delete(curr_symbol) + decimal_graph_with_minor_curr + preserve_order
            )
            decimal_graph_with_minor = (
                decimal_graph_with_minor_curr
                if decimal_graph_with_minor is None
                else pynini.union(decimal_graph_with_minor, decimal_graph_with_minor_curr).optimize()
            )
            if not deterministic:
                # Reordered variants (number before unit name) for audio-based normalization.
                integer_graph_reordered_curr = (
                    pynutil.delete(curr_symbol) + integer_plus_maj + preserve_order
                ).optimize()
                integer_graph_reordered = (
                    integer_graph_reordered_curr
                    if integer_graph_reordered is None
                    else pynini.union(integer_graph_reordered, integer_graph_reordered_curr).optimize()
                )
                decimal_default_reordered_curr = (
                    pynutil.delete(curr_symbol)
                    + default_fraction_graph
                    + insert_space
                    + pynutil.insert(curr_symbol) @ graph_maj_plural
                )
                decimal_default_reordered = (
                    decimal_default_reordered_curr
                    if decimal_default_reordered is None
                    else pynini.union(decimal_default_reordered, decimal_default_reordered_curr)
                ).optimize()
        # weight for SH
        final_graph |= pynutil.add_weight(decimal_graph_with_minor, -0.0001)
        if not deterministic:
            final_graph |= integer_graph_reordered | decimal_default_reordered
            # to handle "$2.00" cases
            final_graph |= pynini.compose(
                NEMO_SIGMA + pynutil.delete(".") + pynini.closure(pynutil.delete("0"), 1), integer_graph_reordered
            )
        final_graph = self.add_tokens(final_graph.optimize())
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/en/taggers/money.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.