python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure, e.g.
    "два килограма" -> measure { cardinal { integer: "2 кг" } }

    Args:
        tn_measure: Text normalization Measure graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_measure, deterministic: bool = True):
        super().__init__(name="measure", kind="classify", deterministic=deterministic)
        # Chain the TN tagger with its verbalizer (written -> spoken), then
        # invert so the ITN graph maps spoken Russian back to the written form.
        spoken_to_written = (tn_measure.tagger_graph_default @ tn_measure.verbalizer_graph).invert().optimize()
        # Wrap the written output in the nested "cardinal { integer: ... }" token.
        tagged = pynutil.insert('cardinal { integer: "') + spoken_to_written + pynutil.insert('" }')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone, e.g.
    "восемь девятьсот тринадцать девятьсот восемьдесят три пятьдесят шесть ноль один" -> telephone { number_part: "8-913-983-56-01" }

    Args:
        tn_telephone: Text normalization telephone graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_telephone: GraphFst, deterministic: bool = True):
        super().__init__(name="telephone", kind="classify", deterministic=deterministic)
        # Invert the TN telephone grammar: spoken words -> written phone number.
        spoken_to_written = tn_telephone.final_graph.invert().optimize()
        tagged = pynutil.insert('number_part: "') + spoken_to_written + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinals, e.g.
    "второе" -> ordinal { integer: "2" }

    Args:
        tn_ordinal: Text normalization Ordinal graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_ordinal: GraphFst, deterministic: bool = True):
        super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
        # Invert the TN ordinal grammar: spoken Russian -> digits.
        inverse = tn_ordinal.ordinal_numbers.invert().optimize()
        self.graph = inverse
        # Restrict the tagger to outputs of two or more digits so that
        # ordinals below 10 are left verbalized as words.
        multi_digit = pynini.compose(inverse, NEMO_DIGIT ** (2, ...))
        tagged = pynutil.insert('integer: "') + multi_digit + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from nemo_text_processing.text_normalization.ru.utils import get_abs_path
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelist, e.g.
    "квартира" -> name: "кв."

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
        input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/en/data/whitelist.tsv
    """

    def __init__(self, deterministic: bool = True, input_file: str = None):
        super().__init__(name="whitelist", kind="classify", deterministic=deterministic)
        # Fall back to the bundled Russian whitelist when no file is supplied.
        # The TSV maps written -> spoken, so invert it for ITN.
        path = input_file if input_file else get_abs_path("data/whitelist.tsv")
        whitelist = pynini.string_file(path).invert()
        tagged = pynutil.insert('name: "') + convert_space(whitelist) + pynutil.insert('"')
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.en.taggers.word import WordFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.decimals import DecimalFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.ru.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.ru.taggers.tokenize_and_classify import ClassifyFst as TNClassifyFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")

        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"ru_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: restore the previously compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars. This might take some time...")
            # Russian ITN grammars are built by inverting the TN grammars, so
            # first construct the full TN classifier and hand its sub-graphs
            # to the individual ITN taggers below.
            tn_classify = TNClassifyFst(
                input_case='cased', deterministic=False, cache_dir=cache_dir, overwrite_cache=True
            )

            cardinal = CardinalFst(tn_cardinal=tn_classify.cardinal)
            cardinal_graph = cardinal.fst

            ordinal = OrdinalFst(tn_ordinal=tn_classify.ordinal)
            ordinal_graph = ordinal.fst

            decimal = DecimalFst(tn_decimal=tn_classify.decimal)
            decimal_graph = decimal.fst

            measure_graph = MeasureFst(tn_measure=tn_classify.measure).fst
            date_graph = DateFst(tn_date=tn_classify.date).fst
            word_graph = WordFst().fst
            time_graph = TimeFst(tn_time=tn_classify.time).fst
            money_graph = MoneyFst(tn_money=tn_classify.money).fst
            whitelist_graph = WhiteListFst(input_file=whitelist).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst(tn_electronic=tn_classify.electronic).fst
            telephone_graph = TelephoneFst(tn_telephone=tn_classify.telephone).fst

            # Weighted union of all taggers: lower weight = preferred parse.
            # Whitelist wins over everything; date beats the generic semiotic
            # classes; the catch-all word graph has a prohibitively high weight
            # so it only fires when nothing else matches.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.1)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )

            # Tokens may be surrounded by punctuation tokens on either side.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )

            # A sentence is one or more tokens separated by (collapsed) spaces,
            # with leading/trailing whitespace removed.
            graph = token_plus_punct + pynini.closure(pynutil.add_weight(delete_extra_space, 1.1) + token_plus_punct)
            graph = delete_space + graph + delete_space

            self.fst = graph.optimize()

            if far_file:
                # Persist the compiled grammar so subsequent runs can restore it.
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money, e.g.
    "два рубля" -> money { integer_part: "2 руб." }

    Args:
        tn_money: Text normalization Money graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_money, deterministic: bool = True):
        super().__init__(name="money", kind="classify", deterministic=deterministic)
        # Invert the TN money grammar: spoken amount -> written amount.
        spoken_to_written = tn_money.final_graph.invert().optimize()
        tagged = pynutil.insert('integer_part: "') + spoken_to_written + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, GraphFst, insert_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals, e.g.
    "тысяча один" -> cardinal { integer: "1 001" }

    Args:
        tn_cardinal: Text normalization Cardinal graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="cardinal", kind="classify", deterministic=deterministic)
        # Invert the TN cardinal grammar: spoken Russian -> digits.
        inverse = tn_cardinal.cardinal_numbers_default.invert().optimize()
        self.graph = inverse
        # Optional leading "минус" becomes the negative sign attribute.
        optional_sign = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("минус ", '"-"') + insert_space, 0, 1
        )
        # Restrict to outputs of two or more digits so that numbers below 10
        # stay verbalized as words.
        multi_digit = pynini.compose(inverse, NEMO_DIGIT ** (2, ...))
        tagged = optional_sign + pynutil.insert('integer: "') + multi_digit + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SPACE, GraphFst, delete_extra_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
    e.g. "минус три целых две десятых" -> decimal { negative: "true" integer_part: "3," fractional_part: "2" }

    Args:
        tn_decimal: Text normalization Decimal graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_decimal, deterministic: bool = False):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        # Optional leading "минус" becomes negative: "true".
        optional_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("минус", '"true"') + delete_extra_space, 0, 1
        )

        # Invert the TN sub-graphs (written -> spoken becomes spoken -> written).
        fractional_inv = pynini.invert(tn_decimal.graph_fractional).optimize()
        integer_inv = pynini.invert(tn_decimal.integer_part).optimize()
        quantity_inv = pynini.invert(tn_decimal.optional_quantity).optimize()

        fractional = pynutil.insert('fractional_part: "') + fractional_inv + pynutil.insert('"')
        integer = pynutil.insert('integer_part: "') + integer_inv + pynutil.insert('"')
        quantity = pynutil.insert('quantity: "') + quantity_inv + pynutil.insert('"')
        # Quantity ("миллиона" etc.) is optional and preceded by a space.
        optional_quantity = pynini.closure(pynini.accep(NEMO_SPACE) + quantity, 0, 1)

        self.final_graph_wo_sign = (
            integer + pynini.accep(NEMO_SPACE) + fractional + optional_quantity
        )
        final_graph = self.add_tokens(optional_negative + self.final_graph_wo_sign)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/decimals.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying electronic, e.g.
    "эй би собака эн ди точка ру" -> electronic { username: "[email protected]" }

    Args:
        tn_electronic: Text normalization Electronic graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_electronic, deterministic: bool = True):
        super().__init__(name="electronic", kind="classify", deterministic=deterministic)
        # Invert the TN electronic grammar: spoken address -> written address.
        spoken_to_written = tn_electronic.final_graph.invert().optimize()
        tagged = pynutil.insert('username: "') + spoken_to_written + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date, e.g.
    восемнадцатое июня две тысячи второго -> tokens { date { day: "18.06.2002" } }

    Args:
        tn_date: Text normalization Date graph
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, tn_date: GraphFst, deterministic: bool = True):
        super().__init__(name="date", kind="classify", deterministic=deterministic)
        # pynini.invert returns an inverted copy, leaving tn_date.final_graph intact.
        spoken_to_written = pynini.invert(tn_date.final_graph).optimize()
        tagged = pynutil.insert('day: "') + spoken_to_written + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time
    e.g. time { hours: "02:15" } -> "02:15"
    """

    def __init__(self):
        super().__init__(name="time", kind="verbalize")
        # Strips surrounding double quotes and keeps the quoted content.
        unquoted = pynutil.delete('"') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        hour = pynutil.delete("hours: ") + unquoted
        minutes = pynutil.delete("minutes: ") + unquoted

        # Case 1: hours already holds the full written time ("02:15").
        graph_preserve_order = pynutil.delete('hours: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        # Case 2: separate hour/minute fields joined with ":" (requires permutation).
        graph_reverse_order = hour + delete_space + pynutil.insert(":") + minutes + delete_space

        graph = graph_preserve_order | graph_reverse_order
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure
    e.g. measure { cardinal { integer: "2 кг" } } -> "2 кг"
    """

    def __init__(self):
        super().__init__(name="measure", kind="verbalize")
        # Peel off the nested cardinal wrapper and keep only the quoted value.
        value = pynini.closure(NEMO_NOT_QUOTE, 1)
        graph = (
            pynutil.delete(' cardinal { integer: "')
            + value
            + pynutil.delete('"')
            + delete_space
            + pynutil.delete("}")
        )
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for verbalizing telephone
    e.g. telephone { number_part: "8-913-983-56-01" } -> "8-913-983-56-01"
    """

    def __init__(self):
        super().__init__(name="telephone", kind="verbalize")
        # Drop the field name and quotes, keep the written number.
        number = pynini.closure(NEMO_NOT_QUOTE, 1)
        graph = pynutil.delete('number_part: "') + number + pynutil.delete('"')
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinal numbers
    e.g. ordinal { integer: "2" } -> "2"

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)
        # Drop the field name and quotes, keep the written ordinal.
        graph = pynutil.delete('integer: "') + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete('"')
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.en.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.time import TimeFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")
        # Union of every semiotic-class verbalizer (order has no effect on the
        # unweighted union; kept for readability).
        component_fsts = [
            WhiteListFst().fst,
            CardinalFst().fst,
            OrdinalFst().fst,
            DecimalFst().fst,
            ElectronicFst().fst,
            DateFst().fst,
            MoneyFst().fst,
            MeasureFst().fst,
            TelephoneFst().fst,
            TimeFst().fst,
        ]
        graph = component_fsts[0]
        for component in component_fsts[1:]:
            graph |= component
        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.inverse_text_normalization.ru.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # A token body is either a semiotic-class verbalization or a plain word.
        token_body = VerbalizeFst().fst | WordFst().fst
        # Strip the "tokens { ... }" wrapper around each token.
        single_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_body
            + delete_space
            + pynutil.delete("}")
        )
        # One or more tokens, single-spaced, with outer whitespace removed.
        sentence = delete_space + pynini.closure(single_token + delete_extra_space) + single_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SPACE, GraphFst, delete_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
        decimal { negative: "true" integer_part: "3," fractional_part: "2" } -> -3,2

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)
        # Optional leading negative flag is rewritten as a minus sign.
        sign = pynini.closure(pynini.cross("negative: \"true\" ", "-"), 0, 1)
        # Extracts a quoted field value, dropping the surrounding quotes.
        quoted_value = pynutil.delete(" \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        int_field = pynutil.delete("integer_part:") + quoted_value
        frac_field = pynutil.delete("fractional_part:") + quoted_value
        # Optional trailing quantity (e.g. magnitude word) keeps its leading space.
        quantity_field = pynini.closure(pynini.accep(NEMO_SPACE) + pynutil.delete("quantity:") + quoted_value, 0, 1)
        graph = sign + int_field + delete_space + frac_field + quantity_field
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
        money { integer_part: "2 руб." } -> "2 руб."
    """

    def __init__(self):
        super().__init__(name="money", kind="verbalize")
        # Unwrap the quoted integer_part value verbatim; per the example above the
        # tagger already stores the fully formatted amount (number + currency) there.
        graph = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinals, e.g.
        cardinal { integer: "1 001" } -> 1 001

    Args:
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        # Optional negative field: drop the field name and quotes but keep the
        # single sign character inside the quotes (NEMO_NOT_QUOTE, exactly one).
        optional_sign = pynini.closure(
            pynutil.delete("negative:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
            + delete_space,
            0,
            1,
        )
        # Unwrap the quoted integer value verbatim (spaces inside it are preserved).
        graph = (
            optional_sign + pynutil.delete("integer: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        )
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic
        e.g. electronic { username: "[email protected]" } -> "[email protected]"
    """

    def __init__(self):
        super().__init__(name="electronic", kind="verbalize")
        # Pass the quoted username through unchanged, stripping the field wrapper.
        address = pynini.closure(NEMO_NOT_QUOTE, 1)
        graph = pynutil.delete("username: \"") + address + pynutil.delete("\"")
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing date, e.g.
        date { day: "02.03.89" } -> "02.03.89"
    """

    def __init__(self):
        super().__init__(name="date", kind="verbalize")
        # The fully formatted date is stored in the quoted "day" field;
        # verbalization only unwraps it.
        day_value = pynini.closure(NEMO_NOT_QUOTE, 1)
        graph = pynutil.delete("day: \"") + day_value + pynutil.delete("\"")
        self.fst = self.delete_tokens(graph.optimize()).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ru/verbalizers/date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.pt.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_abs_path(rel_path):
    """
    Get the absolute path of a file located relative to this module's directory.

    Args:
        rel_path: path relative to the directory containing this file

    Returns:
        The absolute path as a string.
    """
    # os.path.join is portable across path separators, unlike manual '/' concatenation.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_space, insert_space
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
        e.g. quinze pro meio dia -> time { hours: "11" minutes: "45" }
        e.g. quinze pra meia noite -> time { hours: "23" minutes: "45" }
        e.g. quinze pra uma -> time { hours: "12" minutes: "45" }
        e.g. dez pras duas -> time { hours: "1" minutes: "50" }
        e.g. quinze pras duas -> time { hours: "1" minutes: "45" }
        e.g. ao meio dia -> time { hours: "12" minutes: "00" morphosyntactic_features: "ao" }
        e.g. ao meio dia e meia -> time { hours: "12" minutes: "30" morphosyntactic_features: "ao" }
        e.g. ao meio dia e meio -> time { hours: "12" minutes: "30" morphosyntactic_features: "ao" }
        e.g. à meia noite e quinze -> time { hours: "0" minutes: "15" morphosyntactic_features: "à" }
        e.g. à meia noite e meia -> time { hours: "0" minutes: "30" morphosyntactic_features: "à" }
        e.g. à uma e trinta -> time { hours: "1" minutes: "30" morphosyntactic_features: "à" }
        e.g. às onze e trinta -> time { hours: "11" minutes: "30" morphosyntactic_features: "às" }
        e.g. às três horas e trinta minutos -> time { hours: "3" minutes: "30" morphosyntactic_features: "às" }
    """

    def __init__(self):
        super().__init__(name="time", kind="classify")

        # graph_hour_to_am = pynini.string_file(get_abs_path("data/time/hour_to_am.tsv"))
        # graph_hour_to_pm = pynini.string_file(get_abs_path("data/time/hour_to_pm.tsv"))
        # Complement maps for "minutes to the hour" readings — presumably
        # hours_to maps H -> H-1 and minutes_to maps M -> 60-M (TODO confirm against the TSVs).
        graph_hours_to = pynini.string_file(get_abs_path("data/time/hours_to.tsv"))
        graph_minutes_to = pynini.string_file(get_abs_path("data/time/minutes_to.tsv"))
        graph_suffix_am = pynini.string_file(get_abs_path("data/time/time_suffix_am.tsv"))
        graph_suffix_pm = pynini.string_file(get_abs_path("data/time/time_suffix_pm.tsv"))

        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))

        # Spoken numbers: single digits, teens, twenties, round tens, and "tens e digit".
        graph_1_to_100 = pynini.union(
            graph_digit,
            graph_twenties,
            graph_teen,
            (graph_ties + pynutil.insert("0")),
            (graph_ties + pynutil.delete(" e ") + graph_digit),
        )

        # note that graph_hour will start from 2 hours
        # "1 o'clock" will be treated differently because it
        # is singular
        digits_2_to_23 = [str(digits) for digits in range(2, 24)]
        digits_1_to_59 = [str(digits) for digits in range(1, 60)]
        # Restrict the general number grammar to valid hour/minute ranges by composition.
        graph_2_to_23 = graph_1_to_100 @ pynini.union(*digits_2_to_23)
        graph_1_to_59 = graph_1_to_100 @ pynini.union(*digits_1_to_59)
        graph_uma = pynini.cross("uma", "1")

        # Mapping 'horas' (optional plural 's'); the unit word itself is deleted.
        graph_hour = pynutil.delete(pynini.accep("hora") + pynini.accep("s").ques)
        graph_minute = pynutil.delete(pynini.accep("minuto") + pynini.accep("s").ques)

        # Mapping 'meio dia' (noon) and 'meia noite' (midnight)
        graph_meio_dia = pynini.cross("meio dia", "12")
        graph_meia_noite = pynini.cross("meia noite", "0")

        # Mapping 'e meia' / 'e meio' ("and a half") to 30 minutes
        graph_e = delete_space + pynutil.delete(" e ") + delete_space
        graph_e_meia = graph_e + pynini.cross("meia", "30")
        graph_e_meio = graph_e + pynini.cross("meio", "30")

        # à uma e meia -> 1:30
        # às três e meia -> 3:30
        # Singular preposition ("à"/"a") introduces one o'clock.
        graph_hours_at_prefix_singular = (
            pynutil.insert("morphosyntactic_features: \"")
            + (pynini.cross("à", "à") | pynini.cross("a", "à"))
            + pynutil.insert("\" ")
            + delete_space
        )
        graph_hours_at_singular = (
            graph_hours_at_prefix_singular
            + pynutil.insert("hours: \"")
            + graph_uma
            + pynutil.insert("\"")
            + (delete_space + graph_hour).ques
        )
        # Plural preposition ("às"/"as") introduces hours 2-23.
        graph_hours_at_prefix_plural = (
            pynutil.insert("morphosyntactic_features: \"")
            + (pynini.cross("às", "às") | pynini.cross("as", "às"))
            + pynutil.insert("\" ")
            + delete_space
        )
        graph_hours_at_plural = (
            graph_hours_at_prefix_plural
            + pynutil.insert("hours: \"")
            + graph_2_to_23
            + pynutil.insert("\"")
            + (delete_space + graph_hour).ques
        )
        final_graph_hour_at = graph_hours_at_singular | graph_hours_at_plural

        # Minutes component: "e <1-59> [minutos]" or "e meia [hora]".
        graph_minutes_component_without_zero = graph_e + graph_1_to_59 + (delete_space + graph_minute).ques
        graph_minutes_component_without_zero |= graph_e_meia + pynutil.delete(delete_space + pynini.accep("hora")).ques
        final_graph_minute = (
            pynutil.insert(" minutes: \"") + graph_minutes_component_without_zero + pynutil.insert("\"")
        )
        graph_hm = final_graph_hour_at + final_graph_minute

        # à uma hora -> 1:00 (explicit "hora(s)" with no minutes part)
        graph_hours_at_singular_with_hour = (
            graph_hours_at_prefix_singular
            + pynutil.insert("hours: \"")
            + graph_uma
            + pynutil.insert("\"")
            + delete_space
            + graph_hour
        )
        graph_hours_at_plural_with_hour = (
            graph_hours_at_prefix_plural
            + pynutil.insert("hours: \"")
            + graph_2_to_23
            + pynutil.insert("\"")
            + delete_space
            + graph_hour
        )
        # Weighted so this default ":00" reading loses to an explicit minutes reading.
        graph_hm |= (graph_hours_at_singular_with_hour | graph_hours_at_plural_with_hour) + pynutil.insert(
            " minutes: \"00\"", weight=0.2
        )

        # meio dia e meia -> 12:30
        # meia noite e meia -> 0:30
        graph_minutes_without_zero = (
            pynutil.insert(" minutes: \"") + graph_minutes_component_without_zero + pynutil.insert("\"")
        )
        graph_meio_min = (
            pynutil.insert("hours: \"")
            + (graph_meio_dia | graph_meia_noite)
            + pynutil.insert("\"")
            + graph_minutes_without_zero
        )
        # "meio dia e meio" — masculine "meio" only combines with noon.
        graph_meio_min |= (
            pynutil.insert("hours: \"")
            + graph_meio_dia
            + pynutil.insert("\" minutes: \"")
            + graph_e_meio
            + pynutil.insert("\"")
        )
        graph_hm |= graph_meio_min

        # às quinze para as quatro -> às 3:45
        # NOTE: case 'para à uma' ('to one') could be either 0:XX or 12:XX
        # leading to wrong reading ('meio dia e ...' or 'meia noite e ...')
        graph_para_a = (
            pynutil.delete("para")
            | pynutil.delete("para a")
            | pynutil.delete("para as")
            | pynutil.delete("pra")
            | pynutil.delete("pras")
        )
        graph_para_o = pynutil.delete("para") | pynutil.delete("para o") | pynutil.delete("pro")

        # Minutes spoken first ("quinze para ..."), mapped to their complement.
        graph_pra_min = (
            pynutil.insert("morphosyntactic_features: \"")
            + (pynini.cross("à", "à") | pynini.cross("às", "às") | pynini.cross("a", "à") | pynini.cross("as", "às"))
            + pynutil.insert("\" ")
            + delete_space
        )
        graph_pra_min += (
            pynutil.insert("minutes: \"")
            + (graph_1_to_59 @ graph_minutes_to)
            + pynutil.insert("\" ")
            + (delete_space + graph_minute).ques
        )
        # Target hour, mapped to the preceding hour (e.g. "quatro" -> 3).
        graph_pra_hour = (
            pynutil.insert("hours: \"")
            + (graph_2_to_23 @ graph_hours_to)
            + pynutil.insert("\"")
            + (delete_space + graph_hour).ques
        )
        graph_pra_hour |= pynutil.insert("hours: \"") + (graph_meia_noite @ graph_hours_to) + pynutil.insert("\"")

        graph_pra = graph_pra_min + delete_space + graph_para_a + delete_space + graph_pra_hour

        # às quinze pro meio dia -> às 11:45
        graph_pro = graph_pra_min + delete_space + graph_para_o + delete_space
        graph_pro += pynutil.insert(" hours: \"") + (graph_meio_dia @ graph_hours_to) + pynutil.insert("\"")

        graph_mh = graph_pra | graph_pro

        # optional suffix (am/pm-style period markers)
        final_suffix = pynutil.insert("suffix: \"") + (graph_suffix_am | graph_suffix_pm) + pynutil.insert("\"")
        final_suffix_optional = pynini.closure(delete_space + insert_space + final_suffix, 0, 1)

        final_graph = pynini.union((graph_hm | graph_mh) + final_suffix_optional).optimize()
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/time.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure
        e.g. menos doze quilogramas -> measure { cardinal { negative: "true" integer: "12" } units: "kg" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="measure", kind="classify")
        cardinal_graph = cardinal.graph_no_exception

        singular_units = pynini.string_file(get_abs_path("data/measurements_singular.tsv")).invert()
        plural_units = pynini.string_file(get_abs_path("data/measurements_plural.tsv")).invert()

        # "menos" marks a negative quantity.
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"true\"") + delete_extra_space, 0, 1
        )

        unit_singular_fst = convert_space(singular_units)
        unit_plural_fst = convert_space(plural_units)
        # "por <unit>" -> "/<unit>", e.g. rates such as km/h.
        unit_misc = pynutil.insert("/") + pynutil.delete("por") + delete_space + convert_space(singular_units)

        # Tagged unit field; a unit followed by a "por" rate is slightly penalized.
        unit_singular_tagged = (
            pynutil.insert("units: \"")
            + (unit_singular_fst | unit_misc | pynutil.add_weight(unit_singular_fst + delete_space + unit_misc, 0.01))
            + pynutil.insert("\"")
        )
        unit_plural_tagged = (
            pynutil.insert("units: \"")
            + (unit_plural_fst | unit_misc | pynutil.add_weight(unit_plural_fst + delete_space + unit_misc, 0.01))
            + pynutil.insert("\"")
        )

        # Decimal quantities always take the plural unit form.
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + decimal.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_plural_tagged
        )
        # Cardinals other than "um"/"uma" ("one") take plural units.
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + ((NEMO_SIGMA - "um" - "uma") @ cardinal_graph)
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_plural_tagged
        )
        # "um"/"uma" takes the singular unit form.
        subgraph_cardinal |= (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + (pynini.cross("um", "1") | pynini.cross("uma", "1"))
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_singular_tagged
        )

        final_graph = self.add_tokens(subgraph_decimal | subgraph_cardinal)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_space, insert_space
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers, e.g.
        um dois um dois três quatro cinco seis sete oito nove -> { number_part: "(12) 12345-6789" }.
    If 11 digits are spoken, they are grouped as 2+5+4 (eg. (12) 34567-8901).
    If 10 digits are spoken, they are grouped as 2+4+4 (eg. (12) 3456-7890).
    If 9 digits are spoken, they are grouped as 5+4 (eg. 12345-6789).
    If 8 digits are spoken, they are grouped as 4+4 (eg. 1234-5678).
    In portuguese, digits are generally spoken individually, or as 2-digit numbers,
    eg. "trinta e quatro oitenta e dois" = "3482",
        "meia sete vinte" = "6720".
    """

    def __init__(self):
        super().__init__(name="telephone", kind="classify")

        # create `single_digits` and `double_digits` graphs as these will be
        # the building blocks of possible telephone numbers
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        # "meia" is used for the digit 6 in spoken Brazilian phone numbers.
        graph_half = pynini.cross("meia", "6")

        graph_all_digits = pynini.union(graph_digit, graph_half, graph_zero)

        single_digits = pynini.invert(graph_all_digits).optimize()
        # Two written digits realized as: a teen/twenty word, a round ten,
        # "tens e digit", or two individually spoken digits.
        double_digits = (
            pynini.union(
                graph_teen | graph_twenties,
                (graph_ties + pynutil.insert("0")),
                (graph_ties + delete_space + pynutil.delete("e") + delete_space + graph_digit),
                (graph_all_digits + delete_space + graph_all_digits),
            )
            .invert()
            .optimize()
        )

        # define `eleven_digit_graph`, `ten_digit_graph`, `nine_digit_graph`, `eight_digit_graph`
        # which accept telephone numbers spoken (1) only with single digits,
        # or (2) spoken with double digits (and sometimes single digits)
        # NOTE: the graphs below are written against the FORMATTED side
        # ("(12) 34567-8901") and inverted once at the end.

        # 11-digit option (2): (2) + (1+2+2) + (2+2) digits
        eleven_digit_graph = (
            pynutil.delete("(")
            + double_digits
            + insert_space
            + pynutil.delete(") ")
            + single_digits
            + insert_space
            + double_digits
            + insert_space
            + double_digits
            + insert_space
            + pynutil.delete("-")
            + double_digits
            + insert_space
            + double_digits
        )

        # 10-digit option (2): (2) + (2+2) + (2+2) digits
        ten_digit_graph = (
            pynutil.delete("(")
            + double_digits
            + insert_space
            + pynutil.delete(") ")
            + double_digits
            + insert_space
            + double_digits
            + insert_space
            + pynutil.delete("-")
            + double_digits
            + insert_space
            + double_digits
        )

        # 9-digit option (2): (1+2+2) + (2+2) digits
        nine_digit_graph = (
            single_digits
            + insert_space
            + double_digits
            + insert_space
            + double_digits
            + insert_space
            + pynutil.delete("-")
            + double_digits
            + insert_space
            + double_digits
        )

        # 8-digit option (2): (2+2) + (2+2) digits
        eight_digit_graph = (
            double_digits
            + insert_space
            + double_digits
            + insert_space
            + pynutil.delete("-")
            + double_digits
            + insert_space
            + double_digits
        )

        number_part = pynini.union(eleven_digit_graph, ten_digit_graph, nine_digit_graph, eight_digit_graph)
        # Invert so the final transducer maps spoken -> formatted.
        number_part = pynutil.insert("number_part: \"") + pynini.invert(number_part) + pynutil.insert("\"")

        graph = number_part

        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
        vigésimo primeiro -> ordinal { integer: "21" morphosyntactic_features: "o" }
    This class converts ordinal up to "milésimo" (one thousandth) exclusive.
    Cardinals below ten are not converted (in order to avoid
    e.g. "primero fez ..." -> "1º fez...", "segunda guerra mundial" -> "2ª guerra mundial"
    and any other odd conversions.)
    This FST also records the ending of the ordinal (called "morphosyntactic_features"):
    either "o" or "a".
    """

    def __init__(self):
        super().__init__(name="ordinal", kind="classify")
        graph_digit = pynini.string_file(get_abs_path("data/ordinals/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/ordinals/ties.tsv"))
        graph_hundreds = pynini.string_file(get_abs_path("data/ordinals/hundreds.tsv"))

        # Combine digit/tens/hundreds components; missing lower components
        # are padded with "0". Weights prefer the shortest reading.
        ordinal_graph_union = pynini.union(
            pynutil.add_weight(graph_digit, 0.4),
            pynutil.add_weight(graph_ties + ((delete_space + graph_digit) | pynutil.insert("0")), 0.2),
            graph_hundreds
            + ((delete_space + graph_ties) | pynutil.insert("0"))
            + ((delete_space + graph_digit) | pynutil.insert("0")),
        )

        # Split by grammatical gender of the final ordinal word ("-o" vs "-a").
        accept_o_endings = NEMO_SIGMA + pynini.accep("o")
        accept_a_endings = NEMO_SIGMA + pynini.accep("a")
        ordinal_graph_o = accept_o_endings @ ordinal_graph_union
        ordinal_graph_a = accept_a_endings @ ordinal_graph_union

        # 'optional_numbers_in_front' have negative weight so we always
        # include them if they're there
        optional_in_front = (pynutil.add_weight(ordinal_graph_union, -0.1) + delete_space.closure()).closure()
        graph_o_suffix = optional_in_front + ordinal_graph_o
        graph_a_suffix = optional_in_front + ordinal_graph_a

        # don't convert ordinals from one to nine inclusive
        graph_exception = pynini.project(pynini.union(graph_digit), 'input')
        graph_o_suffix = (pynini.project(graph_o_suffix, "input") - graph_exception.arcsort()) @ graph_o_suffix
        graph_a_suffix = (pynini.project(graph_a_suffix, "input") - graph_exception.arcsort()) @ graph_a_suffix

        # Record the gender ending as "morphosyntactic_features" ("o" or "a").
        graph = (
            pynutil.insert("integer: \"")
            + graph_o_suffix
            + pynutil.insert("\"")
            + pynutil.insert(" morphosyntactic_features: \"o\"")
        )
        graph |= (
            pynutil.insert("integer: \"")
            + graph_a_suffix
            + pynutil.insert("\"")
            + pynutil.insert(" morphosyntactic_features: \"a\"")
        )

        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/ordinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens
        e.g. usted -> tokens { name: "ud." }
    This class has highest priority among all classifier grammars.
    Whitelisted tokens are defined and loaded from "data/whitelist.tsv" (unless input_file specified).

    Args:
        input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/pt/data/whitelist.tsv
    """

    def __init__(self, input_file: str = None):
        super().__init__(name="whitelist", kind="classify")
        # Fall back to the bundled whitelist when no custom file is supplied.
        source = input_file if input_file else get_abs_path("data/whitelist.tsv")
        whitelist = pynini.string_file(source).invert()
        graph = pynutil.insert("name: \"") + convert_space(whitelist) + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/whitelist.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.pt.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.pt.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")

        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"pt_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Restore the pre-compiled grammar from the FAR cache instead of rebuilding it.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            # Build every semiotic-class tagger; cardinal/decimal instances are shared
            # with the measure, date and money grammars that depend on them.
            cardinal = CardinalFst(use_strict_e=True)
            cardinal_graph = cardinal.fst

            ordinal_graph = OrdinalFst().fst

            decimal = DecimalFst(cardinal)
            decimal_graph = decimal.fst

            measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal).fst
            date_graph = DateFst(cardinal=cardinal).fst
            word_graph = WordFst().fst
            time_graph = TimeFst().fst
            money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst
            whitelist_graph = WhiteListFst(input_file=whitelist).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst().fst
            telephone_graph = TelephoneFst().fst

            # Union of all taggers; lower weight = preferred path. The plain-word
            # fallback carries weight 100 so it only fires when nothing else matches.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.09)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.09)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.1)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )

            # Wrap each classification (and any surrounding punctuation) in "tokens { ... }".
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )

            # Sentence = one or more tokens separated by (collapsed) whitespace.
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space

            self.fst = graph.optimize()

            if far_file:
                # Persist the compiled grammar so subsequent runs can skip compilation.
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/tokenize_and_classify.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
        e.g. a, -> tokens { name: "a" } tokens { name: "," }
    """

    def __init__(self):
        super().__init__(name="punctuation", kind="classify")

        # Every character in this set is accepted as a standalone punctuation token.
        punct_chars = "!#$%&\'()*+,-./:;<=>?@^_`{|}~"
        punct_acceptor = pynini.union(*punct_chars)

        # Emit the accepted character wrapped as a token name: name: "<char>"
        tagged = pynutil.insert("name: \"") + punct_acceptor + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/punctuation.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
def get_quantity(decimal: 'pynini.FstLike', cardinal_up_to_million: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
    e.g. one million -> integer_part: "1" quantity: "million"
    e.g. one point five million -> integer_part: "1" fractional_part: "5" quantity: "million"

    Args:
        decimal: decimal FST
        cardinal_up_to_million: cardinal FST
    """
    # Drop leading zeros from the cardinal output (a bare "0" is not accepted here).
    strip_leading_zeros = (
        pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
    )
    numbers = cardinal_up_to_million @ strip_leading_zeros

    # Portuguese large-scale quantity words, singular and plural forms.
    quantity_words = (
        "milhão",
        "milhões",
        "bilhão",
        "bilhões",
        "trilhão",
        "trilhões",
        "quatrilhão",
        "quatrilhões",
        "quintilhão",
        "quintilhões",
        "sextilhão",
        "sextilhões",
    )
    suffix = pynini.union(*quantity_words)

    # Case 1: a plain cardinal followed by a quantity word.
    cardinal_case = (
        pynutil.insert("integer_part: \"")
        + numbers
        + pynutil.insert("\"")
        + delete_extra_space
        + pynutil.insert("quantity: \"")
        + suffix
        + pynutil.insert("\"")
    )
    # Case 2: a decimal followed by a quantity word.
    decimal_case = decimal + delete_extra_space + pynutil.insert("quantity: \"") + suffix + pynutil.insert("\"")
    return cardinal_case | decimal_case
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
    Decimal point is either "." or ",", determined by whether "ponto" or "vírgula" is spoken.
        e.g. menos um vírgula dois seis -> decimal { negative: "true" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" }
        e.g. menos um ponto dois seis -> decimal { negative: "true" integer_part: "1" morphosyntactic_features: "." fractional_part: "26" }

    This decimal rule assumes that decimals can be pronounced as:
    (a cardinal) + ('vírgula' or 'ponto') plus (any sequence of cardinals <1000, including 'zero')

    Also writes large numbers in shortened form, e.g.
        e.g. um vírgula dois seis milhões -> decimal { negative: "false" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" quantity: "milhões" }
        e.g. dois milhões -> decimal { negative: "false" integer_part: "2" quantity: "milhões" }
        e.g. mil oitocentos e vinte e quatro milhões -> decimal { negative: "false" integer_part: "1824" quantity: "milhões" }

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="decimal", kind="classify")

        # number after decimal point can be any series of cardinals <1000, including 'zero'
        graph_decimal = cardinal.numbers_up_to_thousand
        graph_decimal = pynini.closure(graph_decimal + delete_space) + graph_decimal
        self.graph = graph_decimal

        # decimal point can be denoted by 'vírgula' (-> ",") or 'ponto' (-> ".")
        decimal_point = pynini.cross("vírgula", "morphosyntactic_features: \",\"")
        decimal_point |= pynini.cross("ponto", "morphosyntactic_features: \".\"")

        # "menos" marks a negative number; the sign is optional.
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"true\"") + delete_extra_space, 0, 1
        )

        graph_fractional = pynutil.insert("fractional_part: \"") + graph_decimal + pynutil.insert("\"")

        # Integer part accepts any cardinal, plus a standalone "zero".
        cardinal_graph = cardinal.graph_no_exception | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")

        # The integer part is optional ("vírgula cinco" is a valid decimal).
        final_graph_wo_sign = (
            pynini.closure(graph_integer + delete_extra_space, 0, 1)
            + decimal_point
            + delete_extra_space
            + graph_fractional
        )
        final_graph = optional_graph_negative + final_graph_wo_sign

        # Expose the unsigned graph (with quantity support) for reuse by MoneyFst.
        self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
            final_graph_wo_sign, cardinal.numbers_up_to_million
        )
        final_graph |= optional_graph_negative + get_quantity(final_graph_wo_sign, cardinal.numbers_up_to_million)
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/decimal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money
        e.g. doze dólares e cinco centavos -> money { integer_part: "12" fractional_part: "05" currency: "$" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="money", kind="classify")
        # quantity, integer_part, fractional_part, currency
        cardinal_graph = cardinal.graph_no_exception
        graph_decimal_final = decimal.final_graph_wo_negative

        # Currency word -> symbol mappings (inverted TSVs map symbol -> word on disk).
        unit_singular = pynini.string_file(get_abs_path("data/currency_singular.tsv")).invert()
        unit_plural = pynini.string_file(get_abs_path("data/currency_plural.tsv")).invert()

        graph_unit_singular = pynutil.insert("currency: \"") + convert_space(unit_singular) + pynutil.insert("\"")
        graph_unit_plural = pynutil.insert("currency: \"") + convert_space(unit_plural) + pynutil.insert("\"")

        # Pad a single digit to two digits so cents render as e.g. "05".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)

        # twelve dollars (and) fifty cents, zero cents
        # Negative weight makes this path preferred over competing parses.
        cents_standalone = (
            pynutil.insert("morphosyntactic_features: \",\"")  # always use a comma in the decimal
            + insert_space
            + pynutil.insert("fractional_part: \"")
            + pynini.union(
                pynutil.add_weight(((NEMO_SIGMA - "um" - "uma") @ cardinal_graph), -0.7)
                @ add_leading_zero_to_double_digit
                + delete_space
                + pynutil.delete(pynini.union("centavos")),
                pynini.cross("um", "01") + delete_space + pynutil.delete(pynini.union("centavo")),
            )
            + pynutil.insert("\"")
        )

        # Optional cents after the currency word, joined by "com" or "e".
        optional_cents_standalone = pynini.closure(
            delete_space
            + pynini.closure((pynutil.delete("com") | pynutil.delete('e')) + delete_space, 0, 1)
            + insert_space
            + cents_standalone,
            0,
            1,
        )
        # twelve dollars fifty, only after integer
        # setenta e cinco dólares com sessenta e três ~ $75,63
        optional_cents_suffix = pynini.closure(
            delete_extra_space
            + pynutil.insert("morphosyntactic_features: \",\"")  # always use a comma in the decimal
            + insert_space
            + pynutil.insert("fractional_part: \"")
            + pynini.closure((pynutil.delete("com") | pynutil.delete('e')) + delete_space, 0, 1)
            + pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
            + pynutil.insert("\""),
            0,
            1,
        )

        # Plural amounts: anything except "um"/"uma" takes a plural currency word.
        graph_integer = (
            pynutil.insert("integer_part: \"")
            + ((NEMO_SIGMA - "um" - "uma") @ cardinal_graph)
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_unit_plural
            + (optional_cents_standalone | optional_cents_suffix)
        )
        # Singular amount "um"/"uma" takes a singular currency word.
        graph_integer |= (
            pynutil.insert("integer_part: \"")
            + (pynini.cross("um", "1") | pynini.cross("uma", "1"))
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_unit_singular
            + (optional_cents_standalone | optional_cents_suffix)
        )

        # Cents alone: default to R$ with a zero integer part, or "centavos de <moeda>".
        graph_cents_standalone = pynini.union(
            pynutil.insert("currency: \"R$\" integer_part: \"0\" ") + cents_standalone,
            pynutil.add_weight(
                pynutil.insert("integer_part: \"0\" ")
                + cents_standalone
                + delete_extra_space
                + pynutil.delete("de")
                + delete_space
                + graph_unit_singular,
                -0.1,
            ),
        )

        # Decimal amounts, e.g. "dois vírgula cinco milhões de reais".
        graph_decimal = (
            graph_decimal_final + delete_extra_space + (pynutil.delete("de") + delete_space).ques + graph_unit_plural
        )
        graph_decimal |= graph_cents_standalone

        final_graph = graph_integer | graph_decimal
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/money.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals
        e.g. menos veintitrés -> cardinal { negative: "-" integer: "23"}

    This class converts cardinals up to (but not including) "un cuatrillón",
    i.e up to "one septillion" in English (10^{24}).
    Cardinals below ten are not converted (in order to avoid
    "vivo em uma casa" --> "vivo em 1 casa" and any other odd conversions.)

    Although technically Portuguese grammar requires that "e" only comes after
    "10s" numbers (ie. "trinta", ..., "noventa"), these rules will convert
    numbers even with "e" in an ungrammatical place (because "e" is ignored
    inside cardinal numbers).
        e.g. "mil e uma" -> cardinal { integer: "1001"}
        e.g. "cento e uma" -> cardinal { integer: "101"}
    """

    def __init__(self, use_strict_e: bool = False):
        """
        :param use_strict_e: When True forces to have the separator "e" in the right places
        """
        super().__init__(name="cardinal", kind="classify")

        # Base word->digit mappings loaded from TSV data files.
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
        graph_one_hundred = pynini.string_file(get_abs_path("data/numbers/onehundred.tsv"))
        graph_hundreds = pynini.string_file(get_abs_path("data/numbers/hundreds.tsv"))

        graph = None
        if not use_strict_e:
            # Lenient mode: "e" separators are stripped up-front by a cdrewrite
            # (see below), so the grammar only has to match bare number words.
            # Each 3-digit group is built as hundreds + tens/teens + units, with
            # "0" padding inserted for missing positions.
            graph_hundred_component = graph_hundreds | pynutil.insert("0")
            graph_hundred_component += delete_space
            graph_hundred_component += pynini.union(
                graph_twenties | graph_teen | pynutil.insert("00"),
                (graph_ties | pynutil.insert("0")) + delete_space + (graph_digit | pynutil.insert("0")),
            )
            graph_hundred_component = pynini.union(graph_hundred_component, graph_one_hundred)

            # Restrict to groups containing at least one non-zero digit, so empty
            # scale slots don't match spurious input.
            graph_hundred_component_at_least_one_none_zero_digit = graph_hundred_component @ (
                pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
            )

            graph_thousands = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("mil"),
                pynutil.insert("001") + pynutil.delete("mil"),  # because we say 'mil', not 'hum mil'
                pynutil.insert("000", weight=0.01),
            )

            # One union per scale word ("milhões", "bilhões", ...); the small
            # positive weight on the "000" padding makes real matches preferred.
            graph_milhoes = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit
                + delete_space
                + (pynutil.delete("milhão") | pynutil.delete("milhões")),
                pynutil.insert("000", weight=0.01),
            )

            graph_bilhoes = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit
                + delete_space
                + (pynutil.delete("bilhão") | pynutil.delete("bilhões")),
                pynutil.insert("000", weight=0.01),
            )

            graph_trilhoes = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit
                + delete_space
                + (pynutil.delete("trilhão") | pynutil.delete("trilhões")),
                pynutil.insert("000", weight=0.01),
            )

            graph_quatrilhoes = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit
                + delete_space
                + (pynutil.delete("quatrilhão") | pynutil.delete("quatrilhões")),
                pynutil.insert("000", weight=0.01),
            )

            graph_quintilhoes = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit
                + delete_space
                + (pynutil.delete("quintilhão") | pynutil.delete("quintilhões")),
                pynutil.insert("000", weight=0.01),
            )

            graph_sextilhoes = pynini.union(
                graph_hundred_component_at_least_one_none_zero_digit
                + delete_space
                + (pynutil.delete("sextilhão") | pynutil.delete("sextilhões")),
                pynutil.insert("000", weight=0.01),
            )

            # Concatenate all scales from largest to smallest; each missing scale
            # contributes a "000" group, yielding a fixed-width digit string.
            graph = pynini.union(
                graph_sextilhoes
                + delete_space
                + graph_quintilhoes
                + delete_space
                + graph_quatrilhoes
                + delete_space
                + graph_trilhoes
                + delete_space
                + graph_bilhoes
                + delete_space
                + graph_milhoes
                + delete_space
                + graph_thousands
                + delete_space
                + graph_hundred_component,
                graph_zero,
            )

            # Strip leading zeros from the padded digit string (keep a lone "0").
            graph = graph @ pynini.union(
                pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT),
                "0",
            )

            # Delete standalone "e" words between number words before matching;
            # composing with (NEMO_ALPHA + NEMO_SIGMA) forbids leading whitespace.
            graph = (
                pynini.cdrewrite(pynutil.delete("e"), NEMO_SPACE, NEMO_SPACE, NEMO_SIGMA)
                @ (NEMO_ALPHA + NEMO_SIGMA)
                @ graph
            )
        else:
            # Strict mode: "e" must appear exactly where Portuguese grammar puts it.
            # Consumes surrounding whitespace plus the literal "e" separator.
            graph_e = (
                pynutil.delete(NEMO_WHITE_SPACE.plus) + pynutil.delete("e") + pynutil.delete(NEMO_WHITE_SPACE.plus)
            )

            # Two-digit component (teens/twenties, "X e Y", or a bare digit);
            # restricted to contain a non-zero digit.
            graph_ties_component = pynini.union(
                graph_teen | graph_twenties,
                graph_ties + ((graph_e + graph_digit) | pynutil.insert("0")),
                pynutil.add_weight(pynutil.insert("0") + graph_digit, 0.1),
            ) @ (pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT))

            # "cento" alone is excluded here; it only occurs inside graph_one_hundred.
            graph_hundreds_except_hundred = (pynini.project(graph_hundreds, "input") - "cento") @ graph_hundreds

            # "prefix_e" variants are components that may be introduced by an "e"
            # separator; "no_prefix" variants follow plain whitespace.
            graph_hundred_component_prefix_e = pynini.union(
                graph_one_hundred,
                pynutil.add_weight(graph_hundreds_except_hundred + pynutil.insert("00"), 0.1),
                pynutil.insert("0") + graph_ties_component,
            ) @ (pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT))
            graph_hundred_component_prefix_e = graph_hundred_component_prefix_e.optimize()

            graph_hundred_component_no_prefix = pynini.union(graph_hundreds + graph_e + graph_ties_component,) @ (
                pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
            )
            graph_hundred_component_no_prefix = graph_hundred_component_no_prefix.optimize()

            # Each scale level below pairs a "<scale> prefix_e" and "<scale> no_prefix"
            # rule; the tail may connect via "e" (prefix_e) or a space (no_prefix),
            # and "000" padding (weight 0.1) fills absent groups.
            graph_mil_prefix_e = pynini.union(
                # because we say 'mil', not 'hum mil'
                (
                    (graph_hundred_component_prefix_e + delete_space + pynutil.delete("mil"))
                    | (pynutil.insert("001", weight=0.1) + pynutil.delete("mil"))
                )
                + (
                    (graph_e + graph_hundred_component_prefix_e)
                    | (delete_space + graph_hundred_component_no_prefix)
                    | pynutil.insert("000", weight=0.1)
                )
            )

            graph_mil_no_prefix = pynini.union(
                (
                    (graph_hundred_component_no_prefix + delete_space + pynutil.delete("mil"))
                    | pynutil.insert("000", weight=0.1)
                )
                + (
                    (graph_e + graph_hundred_component_prefix_e)
                    | (delete_space + graph_hundred_component_no_prefix)
                    | pynutil.insert("000", weight=0.1)
                )
            )

            graph_milhao_prefix_e = pynini.union(
                (
                    graph_hundred_component_prefix_e
                    + delete_space
                    + (pynutil.delete("milhão") | pynutil.delete("milhões"))
                )
                + ((graph_e + graph_mil_prefix_e) | (delete_space + graph_mil_no_prefix))
            )

            graph_milhao_no_prefix = pynini.union(
                (
                    (
                        graph_hundred_component_no_prefix
                        + delete_space
                        + (pynutil.delete("milhão") | pynutil.delete("milhões"))
                    )
                    | pynutil.insert("000", weight=0.1)
                )
                + ((graph_e + graph_mil_prefix_e) | (delete_space + graph_mil_no_prefix))
            )

            graph_bilhao_prefix_e = pynini.union(
                (
                    graph_hundred_component_prefix_e
                    + delete_space
                    + (pynutil.delete("bilhão") | pynutil.delete("bilhões"))
                )
                + ((graph_e + graph_milhao_prefix_e) | (delete_space + graph_milhao_no_prefix))
            )

            graph_bilhao_no_prefix = pynini.union(
                (
                    (
                        graph_hundred_component_no_prefix
                        + delete_space
                        + (pynutil.delete("bilhão") | pynutil.delete("bilhões"))
                    )
                    | pynutil.insert("000", weight=0.1)
                )
                + ((graph_e + graph_milhao_prefix_e) | (delete_space + graph_milhao_no_prefix))
            )

            graph_trilhao_prefix_e = pynini.union(
                (
                    graph_hundred_component_prefix_e
                    + delete_space
                    + (pynutil.delete("trilhão") | pynutil.delete("trilhões"))
                )
                + ((graph_e + graph_bilhao_prefix_e) | (delete_space + graph_bilhao_no_prefix))
            )

            graph_trilhao_no_prefix = pynini.union(
                (
                    (
                        graph_hundred_component_no_prefix
                        + delete_space
                        + (pynutil.delete("trilhão") | pynutil.delete("trilhões"))
                    )
                    | pynutil.insert("000", weight=0.1)
                )
                + ((graph_e + graph_bilhao_prefix_e) | (delete_space + graph_bilhao_no_prefix))
            )

            graph_quatrilhao_prefix_e = pynini.union(
                (
                    graph_hundred_component_prefix_e
                    + delete_space
                    + (pynutil.delete("quatrilhão") | pynutil.delete("quatrilhões"))
                )
                + ((graph_e + graph_trilhao_prefix_e) | (delete_space + graph_trilhao_no_prefix))
            )

            graph_quatrilhao_no_prefix = pynini.union(
                (
                    (
                        graph_hundred_component_no_prefix
                        + delete_space
                        + (pynutil.delete("quatrilhão") | pynutil.delete("quatrilhões"))
                    )
                    | pynutil.insert("000", weight=0.1)
                )
                + ((graph_e + graph_trilhao_prefix_e) | (delete_space + graph_trilhao_no_prefix))
            )

            graph_quintilhao_prefix_e = pynini.union(
                (
                    graph_hundred_component_prefix_e
                    + delete_space
                    + (pynutil.delete("quintilhão") | pynutil.delete("quintilhões"))
                )
                + ((graph_e + graph_quatrilhao_prefix_e) | (delete_space + graph_quatrilhao_no_prefix))
            )

            graph_quintilhao_no_prefix = pynini.union(
                (
                    (
                        graph_hundred_component_no_prefix
                        + delete_space
                        + (pynutil.delete("quintilhão") | pynutil.delete("quintilhões"))
                    )
                    | pynutil.insert("000", weight=0.1)
                )
                + ((graph_e + graph_quatrilhao_prefix_e) | (delete_space + graph_quatrilhao_no_prefix))
            )

            graph_sextilhao_prefix_e = pynini.union(
                (
                    graph_hundred_component_prefix_e
                    + delete_space
                    + (pynutil.delete("sextilhão") | pynutil.delete("sextilhões"))
                )
                + ((graph_e + graph_quintilhao_prefix_e) | (delete_space + graph_quintilhao_no_prefix))
            )

            graph_sextilhao_no_prefix = pynini.union(
                (
                    (
                        graph_hundred_component_no_prefix
                        + delete_space
                        + (pynutil.delete("sextilhão") | pynutil.delete("sextilhões"))
                    )
                    | pynutil.insert("000", weight=0.1)
                )
                + ((graph_e + graph_quintilhao_prefix_e) | (delete_space + graph_quintilhao_no_prefix))
            )

            # Union over every entry point: a cardinal may start at any scale.
            graph = pynini.union(
                graph_sextilhao_no_prefix,
                graph_sextilhao_prefix_e,
                graph_quintilhao_prefix_e,
                graph_quatrilhao_prefix_e,
                graph_trilhao_prefix_e,
                graph_bilhao_prefix_e,
                graph_milhao_prefix_e,
                graph_mil_prefix_e,
                graph_hundred_component_prefix_e,
                graph_ties_component,
                graph_zero,
            ).optimize()

            # Strip leading zeros (keeping a lone "0").
            graph = graph @ pynini.union(
                pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT),
                "0",
            )
            graph = graph.optimize()

        self.graph_no_exception = graph

        # save self.numbers_up_to_thousand for use in DecimalFst
        digits_up_to_thousand = NEMO_DIGIT | (NEMO_DIGIT ** 2) | (NEMO_DIGIT ** 3)
        numbers_up_to_thousand = pynini.compose(graph, digits_up_to_thousand).optimize()
        self.numbers_up_to_thousand = numbers_up_to_thousand

        # save self.numbers_up_to_million for use in DecimalFst
        digits_up_to_million = (
            NEMO_DIGIT
            | (NEMO_DIGIT ** 2)
            | (NEMO_DIGIT ** 3)
            | (NEMO_DIGIT ** 4)
            | (NEMO_DIGIT ** 5)
            | (NEMO_DIGIT ** 6)
        )
        numbers_up_to_million = pynini.compose(graph, digits_up_to_million).optimize()
        self.numbers_up_to_million = numbers_up_to_million

        # save self.digits_from_year for use in DateFst (years 1..2099)
        digits_1_2099 = [str(digits) for digits in range(1, 2100)]
        digits_from_year = (numbers_up_to_million @ pynini.union(*digits_1_2099)).optimize()
        self.digits_from_year = digits_from_year

        # don't convert cardinals from zero to nine inclusive
        graph_exception = pynini.project(pynini.union(graph_digit, graph_zero), 'input')
        self.graph = (pynini.project(graph, "input") - graph_exception.arcsort()) @ graph

        # Optional leading "menos" -> negative sign.
        optional_minus_graph = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"-\"") + NEMO_SPACE, 0, 1
        )

        final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, GraphFst, insert_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying 'electronic' semiotic classes, i.e.
    email address (which get converted to "username" and "domain" fields),
    and URLS (which get converted to a "protocol" field).
        e.g. c d f um arroba a b c ponto e d u -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
        e.g. dáblio dáblio dáblio a b c ponto e d u -> tokens { electronic { protocol: "www.abc.edu" } }
    """

    def __init__(self):
        super().__init__(name="electronic", kind="classify")

        # Local helper: deletes exactly one space between spelled-out characters.
        delete_extra_space = pynutil.delete(" ")
        alpha_num = (
            NEMO_ALPHA
            | pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
            | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        )

        symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv")).invert()

        accepted_username = alpha_num | symbols
        # Spoken "ponto" becomes a literal dot.
        process_dot = pynini.cross("ponto", ".")
        # Username: starts and ends with an alphanumeric, symbols allowed inside.
        username = (
            pynutil.insert("username: \"")
            + alpha_num
            + delete_extra_space
            + pynini.closure(accepted_username + delete_extra_space)
            + alpha_num
            + pynutil.insert("\"")
        )
        single_alphanum = pynini.closure(alpha_num + delete_extra_space) + alpha_num
        # Server/domain: either spelled out letter-by-letter or a known name from TSV.
        server = single_alphanum | pynini.string_file(get_abs_path("data/electronic/server_name.tsv")).invert()
        domain = single_alphanum | pynini.string_file(get_abs_path("data/electronic/domain.tsv")).invert()
        domain_graph = (
            pynutil.insert("domain: \"")
            + server
            + delete_extra_space
            + process_dot
            + delete_extra_space
            + domain
            + pynutil.insert("\"")
        )
        # Email: <username> "arroba" <domain>
        graph = (
            username + delete_extra_space + pynutil.delete("arroba") + insert_space + delete_extra_space + domain_graph
        )

        ############# url ###
        # Spoken spellings of "www", "http"/"https" and "://".
        protocol_end = pynini.cross(pynini.union("www", "w w w", "dáblio dáblio dáblio"), "www")
        protocol_start = pynini.cross(pynini.union("http", "h t t p", "agá tê tê pê"), "http")
        protocol_start |= pynini.cross(pynini.union("https", "h t t p s", "agá tê tê pê ésse"), "https")
        protocol_start += pynini.cross(" dois pontos barra barra ", "://")
        # e.g. .com, .es
        ending = (
            delete_extra_space
            + symbols
            + delete_extra_space
            + (domain | pynini.closure(accepted_username + delete_extra_space) + accepted_username)
        )

        protocol = (
            pynini.closure(protocol_start, 0, 1)
            + protocol_end
            + delete_extra_space
            + process_dot
            + delete_extra_space
            + (pynini.closure(delete_extra_space + accepted_username, 1) | server)
            + pynini.closure(ending, 1)
        )
        protocol = pynutil.insert("protocol: \"") + protocol + pynutil.insert("\"")
        graph |= protocol
        ########

        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/electronic.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date,
        e.g. primeiro de janeiro -> date { day: "1" month: "janeiro" }
        e.g. um de janeiro -> date { day: "1" month: "janeiro" }

    Args:
        cardinal: CardinalFst (provides the year acceptor digits_from_year)
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="date", kind="classify")

        digits_from_year = cardinal.digits_from_year

        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))

        # 1..99 as zero-padded two-digit strings ("01", "02", ... "99").
        graph_1_to_100 = pynini.union(
            pynutil.insert("0") + graph_digit,
            graph_twenties,
            graph_teen,
            (graph_ties + pynutil.insert("0")),
            (graph_ties + pynutil.delete(" e ") + graph_digit),
        )

        # Restrict the day to 01..31.
        digits_1_to_31 = [str("{:0>2d}").format(digits) for digits in range(1, 32)]
        graph_1_to_31 = graph_1_to_100 @ pynini.union(*digits_1_to_31)
        # can use "primeiro" for 1st day of the month
        graph_1_to_31 = pynini.union(graph_1_to_31, pynini.cross("primeiro", "01"))

        day_graph = pynutil.insert("day: \"") + graph_1_to_31 + pynutil.insert("\"")

        month_name_graph = pynini.string_file(get_abs_path("data/months.tsv"))
        month_name_graph = pynutil.insert("month: \"") + month_name_graph + pynutil.insert("\"")

        # vinte do oito -> 20/08  (numeric month, restricted to 01..12)
        digits_1_to_12 = [str("{:0>2d}").format(digits) for digits in range(1, 13)]
        graph_1_to_12 = graph_1_to_100 @ pynini.union(*digits_1_to_12)
        month_number_graph = pynutil.insert("month: \"") + graph_1_to_12 + pynutil.insert("\"")

        # "<day> de <month name>" or "<day> do <month number>" (slash-formatted).
        graph_dm = day_graph + delete_space + pynutil.delete("de") + delete_extra_space + month_name_graph
        graph_dm |= (
            day_graph
            + delete_space
            + pynutil.delete("do")
            + delete_extra_space
            + month_number_graph
            + pynutil.insert(" morphosyntactic_features: \"/\"")
        )

        # Optional year: "de <year>" with year in 1..2099.
        graph_year = (
            delete_space
            + pynutil.delete("de")
            + delete_extra_space
            + pynutil.insert("year: \"")
            + digits_from_year
            + pynutil.insert("\"")
        )

        graph_dmy = graph_dm + graph_year.ques

        final_graph = graph_dmy
        # Keep day-month(-year) order when verbalizing.
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying plain tokens that do not belong to any
    special semiotic class; this acts as the default fallback class.
    e.g. sleep -> tokens { name: "sleep" }
    """

    def __init__(self):
        super().__init__(name="word", kind="classify")
        # Any non-empty run of non-space characters becomes the token's name value.
        token_chars = pynini.closure(NEMO_NOT_SPACE, 1)
        graph = pynutil.insert("name: \"") + token_chars + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/taggers/word.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time,
    e.g. time { hours: "à 1" minutes: "10" } -> à 1:10
    e.g. time { hours: "às 2" minutes: "45" } -> às 2:45

    NOTE(review): the examples show the "à"/"às" words inside the hours field, but this
    grammar reads them from a separate morphosyntactic_features field while hours only
    accepts digits — confirm against the tagger's serialized output format.
    """

    def __init__(self):
        super().__init__(name="time", kind="verbalize")
        # Pass two-digit minutes through unchanged, or left-pad a single digit with "0".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # Optional leading text stored in morphosyntactic_features, re-emitted verbatim
        # and followed by one inserted space.
        prefix = (
            pynutil.delete("morphosyntactic_features:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
            + delete_space
            + insert_space
        )
        optional_prefix = pynini.closure(prefix, 0, 1)
        # Hour value: the digits inside the quoted hours field, kept as-is.
        hour = (
            pynutil.delete("hours:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete("\"")
        )
        # Minute value: the digits inside the quoted minutes field.
        minute = (
            pynutil.delete("minutes:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete("\"")
        )
        # Optional trailing text (suffix field), re-emitted after an inserted space.
        suffix = (
            delete_space
            + insert_space
            + pynutil.delete("suffix:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        optional_suffix = pynini.closure(suffix, 0, 1)
        # Final shape: [prefix ]H:MM[ suffix] — the minute is rewritten through the
        # zero-padding acceptor so single digits come out as two digits.
        graph = (
            optional_prefix
            + hour
            + delete_space
            + pynutil.insert(":")
            + (minute @ add_leading_zero_to_double_digit)
            + optional_suffix
        )
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/time.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure, e.g.
    measure { cardinal { negative: "true" integer: "12" } units: "kg" } -> -12 kg

    Args:
        decimal: DecimalFst
        cardinal: CardinalFst
    """

    def __init__(self, decimal: GraphFst, cardinal: GraphFst):
        super().__init__(name="measure", kind="verbalize")
        # Render negative: "true" as a leading minus sign; absent field -> no sign.
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-"), 0, 1)
        # The units field: any non-empty run of non-space characters inside the quotes.
        unit = (
            pynutil.delete("units:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete("\"")
            + delete_space
        )
        # Nested decimal token: unwrap "decimal { ... }" and delegate the number body
        # to the decimal verbalizer's bare-number graph.
        graph_decimal = (
            pynutil.delete("decimal {")
            + delete_space
            + optional_sign
            + delete_space
            + decimal.numbers
            + delete_space
            + pynutil.delete("}")
        )
        # Nested cardinal token, analogous to the decimal branch.
        graph_cardinal = (
            pynutil.delete("cardinal {")
            + delete_space
            + optional_sign
            + delete_space
            + cardinal.numbers
            + delete_space
            + pynutil.delete("}")
        )
        # Output shape: "<number> <unit>" with exactly one inserted space between them.
        graph = (graph_cardinal | graph_decimal) + delete_space + pynutil.insert(" ") + unit
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for verbalizing telephone tokens, e.g.
    telephone { number_part: "123-123-5678" } -> 123-123-5678
    """

    def __init__(self):
        super().__init__(name="telephone", kind="verbalize")
        # Emit the quoted number_part value verbatim; quotes and field name are dropped.
        number_value = pynini.closure(NEMO_NOT_QUOTE, 1)
        number_part = pynutil.delete("number_part: \"") + number_value + pynutil.delete("\"")
        self.fst = self.delete_tokens(number_part).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinal, e.g.
    ordinal { integer: "13" morphosyntactic_features: "o" } -> 13º
    """

    def __init__(self):
        super().__init__(name="ordinal", kind="verbalize")
        # Emit the bare integer value from the quoted integer field.
        integer_part = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Map the recorded gender feature onto the matching ordinal indicator.
        gender_suffix = pynini.union(
            pynini.cross(" morphosyntactic_features: \"o\"", "º"),
            pynini.cross(" morphosyntactic_features: \"a\"", "ª"),
        )
        self.fst = self.delete_tokens(integer_part + gender_suffix).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/ordinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")
        # Measure and money reuse the cardinal/decimal verbalizers, so build those once.
        cardinal = CardinalFst()
        decimal = DecimalFst()
        # Union of every semiotic-class verbalizer; union order matches the original
        # grammar (time/date first, plain number classes later).
        graph = (
            TimeFst().fst
            | DateFst().fst
            | MoneyFst(decimal=decimal).fst
            | MeasureFst(decimal=decimal, cardinal=cardinal).fst
            | OrdinalFst().fst
            | decimal.fst
            | cardinal.fst
            | WhiteListFst().fst
            | TelephoneFst().fst
            | ElectronicFst().fst
        )
        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/verbalize.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for verbalizing whitelist entries,
    e.g. tokens { name: "sexta feira" } -> "sexta-feira"
    """

    def __init__(self):
        super().__init__(name="whitelist", kind="verbalize")
        # The whitelisted written form: a non-empty run of non-space characters.
        written_form = pynini.closure(NEMO_CHAR - " ", 1)
        graph = (
            pynutil.delete("name:")
            + delete_space
            + pynutil.delete("\"")
            + written_form
            + pynutil.delete("\"")
        )
        # Map non-breaking spaces (used inside quoted values) back to regular spaces.
        graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/whitelist.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.pt.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # A token body is either a semiotic-class verbalizer or the plain-word fallback.
        token_types = VerbalizeFst().fst | WordFst().fst
        # Strip the "tokens { ... }" wrapper around each token.
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_types
            + delete_space
            + pynutil.delete("}")
        )
        # One or more tokens separated by a single space; outer whitespace is dropped.
        sentence = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/verbalize_final.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal,
    e.g. decimal { negative: "true" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" } -> -1,26
    e.g. decimal { negative: "true" integer_part: "1" morphosyntactic_features: "." fractional_part: "26" } -> -1.26
    e.g. decimal { negative: "false" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" quantity: "millón" } -> 1,26 millón
    e.g. decimal { negative: "false" integer_part: "2" quantity: "millones" } -> 2 millones
    """

    def __init__(self):
        super().__init__(name="decimal", kind="verbalize")

        def quoted_field(field_name: str):
            """Match `field_name: "<value>"` and keep only the value."""
            return (
                pynutil.delete(field_name + ":")
                + delete_space
                + pynutil.delete("\"")
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete("\"")
            )

        # negative: "true" becomes a leading minus sign.
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
        optional_integer = pynini.closure(quoted_field("integer_part") + delete_space, 0, 1)
        # The decimal separator is recorded in morphosyntactic_features ("," or ".").
        decimal_point = pynini.cross("morphosyntactic_features: \",\"", ",") | pynini.cross(
            "morphosyntactic_features: \".\"", "."
        )
        fractional = decimal_point + delete_space + quoted_field("fractional_part")
        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
        # Quantity words ("millón", "millones", ...) follow the number after one space.
        optional_quantity = pynini.closure(pynutil.insert(" ") + quoted_field("quantity") + delete_space, 0, 1)
        graph = optional_integer + optional_fractional + optional_quantity
        # Exposed without the sign so composite grammars (measure, money) can reuse it.
        self.numbers = graph
        graph = optional_sign + graph
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/decimal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space, insert_space
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
    money { integer_part: "12" morphosyntactic_features: "," fractional_part: "05" currency: "$" } -> $12,05

    Args:
        decimal: DecimalFst
    """

    def __init__(self, decimal: GraphFst):
        super().__init__(name="money", kind="verbalize")
        # The currency symbol: a non-empty run of non-space characters inside the quotes.
        currency = (
            pynutil.delete("currency:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete("\"")
        )
        # Currency first, then the numeric part rendered by the decimal verbalizer.
        graph = currency + delete_space + insert_space + decimal.numbers
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/money.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal,
    e.g. cardinal { negative: "-" integer: "23" } -> -23
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="verbalize")
        # Optional sign: the single character stored inside the quotes is passed through.
        optional_sign = pynini.closure(
            pynutil.delete("negative:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
            + delete_space,
            0,
            1,
        )
        # Emit the digits of the quoted integer field verbatim.
        integer = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Exposed without the sign so composite grammars (measure) can reuse the bare number.
        self.numbers = integer
        self.fst = self.delete_tokens(optional_sign + integer).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic tokens,
    e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> [email protected]
    e.g. tokens { electronic { protocol: "www.abc.edu" } } -> www.abc.edu
    """

    def __init__(self):
        super().__init__(name="electronic", kind="verbalize")

        def field(field_name: str):
            """Match `field_name: "<value>"` and keep only the value."""
            return (
                pynutil.delete(field_name + ":")
                + delete_space
                + pynutil.delete("\"")
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete("\"")
            )

        # Email form: username and domain joined by an inserted "@".
        email = field("username") + delete_space + pynutil.insert("@") + field("domain")
        # URL form: the protocol field emitted verbatim.
        graph = email | field("protocol")
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/electronic.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing date, e.g.
    date { day: "1" month: "enero" preserve_order: true } -> 1 de enero

    NOTE(review): the example uses a Spanish month name although this module lives in the
    Portuguese grammar package — likely copied from the Spanish grammar; confirm against
    the tagger's months.tsv values.
    """

    def __init__(self):
        super().__init__(name="date", kind="verbalize")
        # Quoted month field: a month name or a zero-padded month number.
        month = (
            pynutil.delete("month:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Quoted day field.
        day = (
            pynutil.delete("day:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Quoted year field.
        year = (
            pynutil.delete("year:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Spelled-out form: "<day> de <month>[ de <year>]".
        graph_dmy = (
            day
            + delete_extra_space
            + pynutil.insert("de")
            + insert_space
            + month
            + (delete_extra_space + pynutil.insert("de") + insert_space + year).ques
        )
        # Numeric form, selected by the "/" morphosyntactic feature set by the tagger:
        # "<day>/<month>[/<year>]".
        graph_dmy |= (
            day
            + delete_space
            + pynutil.insert("/")
            + month
            + pynutil.delete(" morphosyntactic_features: \"/\"")
            + (delete_space + pynutil.insert("/") + year).ques
        )
        # Silently consume the ordering attributes the tagger appends.
        optional_preserve_order = pynini.closure(
            pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
            | pynutil.delete("field_order:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
            + delete_space
        )
        final_graph = graph_dmy + delete_space + optional_preserve_order
        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/date.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for verbalizing plain tokens,
    e.g. tokens { name: "sleep" } -> sleep
    """

    def __init__(self):
        super().__init__(name="word", kind="verbalize")
        # Emit the quoted name value verbatim.
        token_value = pynini.closure(NEMO_CHAR - " ", 1)
        graph = pynutil.delete("name:") + delete_space + pynutil.delete("\"") + token_value + pynutil.delete("\"")
        # Map non-breaking spaces (used inside quoted values) back to regular spaces.
        graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/verbalizers/word.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/data/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/data/numbers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/data/ordinals/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/data/electronic/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/pt/data/time/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import string
from pathlib import Path
from typing import Dict
import pynini
from pynini.export import export
from pynini.lib import byte, pynutil, utf8
# Basic character classes shared by all grammars.
NEMO_CHAR = utf8.VALID_UTF8_CHAR
NEMO_DIGIT = byte.DIGIT
NEMO_HEX = pynini.union(*string.hexdigits).optimize()
NEMO_NON_BREAKING_SPACE = "\u00A0"
NEMO_SPACE = " "
NEMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", "\u00A0").optimize()
NEMO_NOT_SPACE = pynini.difference(NEMO_CHAR, NEMO_WHITE_SPACE).optimize()
NEMO_NOT_QUOTE = pynini.difference(NEMO_CHAR, r'"').optimize()
NEMO_PUNCT = pynini.union(*map(pynini.escape, string.punctuation)).optimize()
NEMO_SIGMA = pynini.closure(NEMO_CHAR)  # sigma-star: accepts any string

# Whitespace helpers shared by taggers and verbalizers.
delete_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE))  # drop any (possibly empty) run of whitespace
delete_zero_or_one_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE, 0, 1))
insert_space = pynutil.insert(" ")
delete_extra_space = pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 1), " ")  # collapse 1+ whitespace to one space

# Consumes the ordering attributes that taggers append to serialized tokens.
delete_preserve_order = pynini.closure(
    pynutil.delete(" preserve_order: true")
    | (pynutil.delete(' field_order: "') + NEMO_NOT_QUOTE + pynutil.delete('"'))
)

# Accepted values for the `input_case` option of cased grammars.
INPUT_CASED = "cased"
INPUT_LOWER_CASED = "lower_cased"
def generator_main(file_name: str, graphs: Dict[str, 'pynini.FstLike']):
    """
    Exports graphs as an OpenFst finite state archive (FAR) file.

    Args:
        file_name: path of the FAR file to create
        graphs: mapping from rule name to the Pynini WFST graph stored under that name
    """
    far_exporter = export.Exporter(file_name)
    for rule_name, rule_graph in graphs.items():
        # Optimize each graph before it is written into the archive.
        far_exporter[rule_name] = rule_graph.optimize()
    far_exporter.close()
    print(f"Created {file_name}")
def convert_space(fst) -> "pynini.FstLike":
    """
    Converts spaces to non-breaking spaces on the output side of *fst*.

    Used only in tagger grammars when transducing token values within quotes,
    e.g. name: "hello kitty". This makes the transducer significantly slower,
    so apply it only when quoted values may actually contain spaces.

    Args:
        fst: input fst

    Returns:
        The fst composed with a rewrite that replaces every breaking space
        with a non-breaking space.
    """
    space_to_nbsp = pynini.cross(NEMO_SPACE, NEMO_NON_BREAKING_SPACE)
    return fst @ pynini.cdrewrite(space_to_nbsp, "", "", NEMO_SIGMA)
def string_map_cased(input_file: str, input_case: str = INPUT_LOWER_CASED):
    """
    Builds a spoken->written string-map FST from a TSV file of
    written<TAB>spoken(<TAB>weight) label rows.

    When input_case == INPUT_CASED, additional capitalized variants are added for
    every row: the written form with its first letter upper-cased paired with
    (a) the capitalized spoken form and (b) the fully upper-cased spoken form
    (keeping " and " lowercase). For spelled-out abbreviations whose spoken form
    is single characters separated by spaces (e.g. "b m w"), space-free variants
    ("bmw"/"BMW") are added as well, preserving any per-row weight.

    Args:
        input_file: path to the TSV label file
        input_case: INPUT_LOWER_CASED (default) or INPUT_CASED

    Returns:
        The inverted, optimized string-map FST.
    """
    # NOTE(review): load_labels is expected to come from this module's siblings —
    # it is not defined in this file's visible scope.
    labels = load_labels(input_file)
    if input_case == INPUT_CASED:
        additional_labels = []
        for written, spoken, *weight in labels:
            written_capitalized = written[0].upper() + written[1:]
            additional_labels.extend(
                [
                    # first letter capitalized
                    [written_capitalized, spoken.capitalize()],
                    # all letters capitalized (" AND " kept lowercase)
                    [written_capitalized, spoken.upper().replace(" AND ", " and ")],
                ]
            )
            spoken_no_space = spoken.replace(" ", "")
            # add abbreviations without spaces (both lower and upper case),
            # i.e. "BMW" not "B M W"; the length check detects single chars
            # separated by single spaces. (Removed leftover debug print here.)
            if len(spoken) == (2 * len(spoken_no_space) - 1):
                if len(weight) == 0:
                    additional_labels.extend(
                        [[written, spoken_no_space], [written_capitalized, spoken_no_space.upper()]]
                    )
                else:
                    additional_labels.extend(
                        [
                            [written, spoken_no_space, weight[0]],
                            [written_capitalized, spoken_no_space.upper(), weight[0]],
                        ]
                    )
        labels += additional_labels
    whitelist = pynini.string_map(labels).invert().optimize()
    return whitelist
class GraphFst:
    """
    Base class for all grammar fsts.

    Args:
        name: name of grammar class
        kind: either 'classify' or 'verbalize'
        deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
    """

    def __init__(self, name: str, kind: str, deterministic: bool = True):
        self.name = name
        # Bug fix: this previously read `self.kind = str`, storing the builtin
        # `str` type instead of the actual kind argument.
        self.kind = kind
        self._fst = None
        self.deterministic = deterministic

        # If a pre-compiled FAR exists for this grammar, load it instead of rebuilding.
        self.far_path = Path(os.path.dirname(__file__) + "/grammars/" + kind + "/" + name + ".far")
        if self.far_exist():
            self._fst = pynini.Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst()

    def far_exist(self) -> bool:
        """
        Returns true if FAR can be loaded
        """
        return self.far_path.exists()

    @property
    def fst(self) -> "pynini.FstLike":
        return self._fst

    @fst.setter
    def fst(self, fst):
        self._fst = fst

    def add_tokens(self, fst) -> "pynini.FstLike":
        """
        Wraps class name around given fst, e.g. `cardinal { ... }`.

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }")

    def delete_tokens(self, fst) -> "pynini.FstLike":
        """
        Deletes class name wrap around output of given fst.

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        res = (
            pynutil.delete(f"{self.name}")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + fst
            + delete_space
            + pynutil.delete("}")
        )
        # Convert non-breaking spaces (inserted by convert_space) back to plain spaces.
        return res @ pynini.cdrewrite(pynini.cross("\u00A0", " "), "", "", NEMO_SIGMA)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/graph_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.zh.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import inflect
# Module-level inflect engine shared by num_to_word for number-to-words conversion.
_inflect = inflect.engine()
def num_to_word(x: Union[str, int]):
    """
    Converts an integer (or its string form) to its spoken representation.

    Args:
        x: integer or numeric string

    Returns: spoken representation, with hyphens replaced by spaces and commas removed
    """
    spoken = _inflect.number_to_words(str(x))
    return spoken.replace("-", " ").replace(",", "")
def get_abs_path(rel_path):
    """
    Get absolute path for a path relative to this file's directory.

    Args:
        rel_path: relative path to this file

    Returns absolute path
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return base_dir + "/" + rel_path
def get_various_formats(text: str) -> List[str]:
    """
    Return various formats for text, e.g., all caps, the first letter upper cased,
    space separated, etc.
    """
    if not text:
        return []
    variants: List[str] = []
    spaced = " ".join(text)
    for candidate in (text, spaced):
        variants.extend([candidate, candidate.upper(), candidate.capitalize()])
    return variants
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from nemo_text_processing.inverse_text_normalization.zh.utils import get_abs_path
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
        e.g., 五点十分 -> time { hours: "05" minutes: "10" }
        e.g., 五时十五分 -> time { hours: "05" minutes: "15" }
        e.g., 十五点十分 -> time { hours: "15" minutes: "10" }
        e.g., 十五点十分二十秒 -> time { hours: "15" minutes: "10" seconds: "20" }
        e.g., 两点一刻 -> time { hours: "2点" minutes: "1刻" }
        e.g., 五点 -> time { hours: "5点" }
        e.g., 五小时 -> time { hours: "5小时" }
        e.g., 五分 -> time { minutes: "5分" }
        e.g., 五分钟 -> time { minutes: "5分钟" }
        e.g., 五秒 -> time { seconds: "5秒" }
        e.g., 五秒钟 -> time { seconds: "5秒钟" }
    """

    def __init__(self):
        super().__init__(name="time", kind="classify")
        # TSV files map spoken numerals to Arabic digits for each time field.
        hours = pynini.string_file(get_abs_path("data/time/time_hours.tsv"))
        minutes = pynini.string_file(get_abs_path("data/time/time_minutes.tsv"))
        seconds = pynini.string_file(get_abs_path("data/time/time_seconds.tsv"))
        quarters = pynini.string_file(get_abs_path("data/time/time_quarters.tsv"))
        for_mandarin = pynini.string_file(get_abs_path("data/time/time_mandarin.tsv"))

        # Standard clock form: unit characters (点/時/时/分/秒, simplified and
        # traditional) are deleted and only digit values are kept.
        graph_delete_hours = pynutil.delete("点") | pynutil.delete("點") | pynutil.delete("时") | pynutil.delete("時")
        graph_hours = hours + graph_delete_hours
        graph_hours_component = pynutil.insert('hours: "') + graph_hours + pynutil.insert('"')

        graph_minutes = pynutil.delete("分")
        graph_minutes = minutes + graph_minutes
        graph_minutes_component = pynutil.insert('minutes: "') + graph_minutes + pynutil.insert('"')

        graph_seconds = pynutil.delete("秒")
        graph_seconds = seconds + graph_seconds
        graph_seconds_component = pynutil.insert('seconds: "') + graph_seconds + pynutil.insert('"')

        # hour+minute, or hour+minute+second
        graph_time_standard = (graph_hours_component + pynutil.insert(" ") + graph_minutes_component) | (
            graph_hours_component
            + pynutil.insert(" ")
            + graph_minutes_component
            + pynutil.insert(" ")
            + graph_seconds_component
        )

        # Duration/colloquial forms: unit characters are kept in the output;
        # traditional variants are normalized to simplified.
        quarter_mandarin = (
            quarters + pynini.accep("刻") | pynini.cross("刻鈡", "刻钟") | pynini.accep("刻钟") | pynini.accep("半")
        )
        hour_mandarin = (
            pynini.accep("点")
            | pynini.accep("时")
            | pynini.cross("點", "点")
            | pynini.cross("時", "时")
            | pynini.accep("小时")
            | pynini.cross("小時", "小时")
            | pynini.cross("個點", "个点")
            | pynini.accep("个点")
            | pynini.accep("个钟头")
            | pynini.cross("個鐘頭", "个钟头")
            | pynini.accep("个小时")
            | pynini.cross("個小時", "个小时")
        )
        minute_mandarin = pynini.accep("分") | pynini.cross("分鐘", "分钟") | pynini.accep("分钟")
        second_mandarin = pynini.accep("秒") | pynini.cross("秒鐘", "秒钟") | pynini.accep("秒钟")

        hours_only = for_mandarin + hour_mandarin
        minutes_only = for_mandarin + minute_mandarin
        seconds_only = for_mandarin + second_mandarin

        graph_mandarin_hour = pynutil.insert('hours: "') + hours_only + pynutil.insert('"')
        graph_mandarin_minute = pynutil.insert('minutes: "') + minutes_only + pynutil.insert('"')
        graph_mandarin_second = pynutil.insert('seconds: "') + seconds_only + pynutil.insert('"')
        graph_mandarin_quarter = pynutil.insert('minutes: "') + quarter_mandarin + pynutil.insert('"')

        # A single unit, or hour + quarter (e.g. 两点一刻).
        graph_mandarins = (
            graph_mandarin_hour
            | graph_mandarin_minute
            | graph_mandarin_second
            | graph_mandarin_quarter
            | (graph_mandarin_hour + pynutil.insert(" ") + graph_mandarin_quarter)
        )

        final_graph = graph_time_standard | graph_mandarins
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/time.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
        e.g. 二分之一 -> tokens { fraction { denominator: "2" numerator: "1" } }
        e.g. 五又二分之一 -> tokens { fraction { integer_part: "5" denominator: "2" numerator: "1" } }

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="fraction", kind="classify")

        graph_cardinal = cardinal.just_cardinals
        integer_component = pynutil.insert('integer_part: "') + graph_cardinal + pynutil.insert('"')
        # X分之Y reads denominator first; 分之 is deleted.
        denominator_component = (
            pynutil.insert('denominator: "') + graph_cardinal + pynutil.delete("分之") + pynutil.insert('"')
        )
        numerator_component = pynutil.insert('numerator: "') + graph_cardinal + pynutil.insert('"')

        graph_only_fraction = denominator_component + pynutil.insert(" ") + numerator_component
        # 又 ("and") joins a whole number with the fraction, e.g. 五又二分之一.
        graph_fraction_with_int = integer_component + pynutil.delete("又") + pynutil.insert(" ") + graph_only_fraction

        graph_fraction = graph_only_fraction | graph_fraction_with_int

        final_graph = self.add_tokens(graph_fraction)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/fraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinals: the ordinal prefix 第 is
    kept as-is and the following cardinal is converted to digits,
    tagged as `ordinal { integer: "第..." }`.

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="ordinal", kind="classify")

        cardinal_graph = cardinal.for_ordinals
        ordinal_prefix = pynini.accep("第")
        prefixed_cardinal = ordinal_prefix + cardinal_graph

        tagged = pynutil.insert('integer: "') + prefixed_cardinal + pynutil.insert('"')
        tagged = self.add_tokens(tagged)
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/ordinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import INPUT_LOWER_CASED, GraphFst
from nemo_text_processing.inverse_text_normalization.zh.utils import get_abs_path
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens
        e.g. 贵宾 -> tokens { name: "VIP" }
        美国研究生入学考试 -> { name: "GRE" }
        人力资源 -> { name: "HR" }
        工商管理学硕士 -> { name: "MBA" }
    This class has highest priority among all classifier grammars. Whitelisted
    tokens are loaded from `input_file` if given, otherwise from the default
    "data/whitelist.tsv".

    Args:
        input_case: accepted for interface compatibility with other languages'
            WhiteListFst implementations (unused here)
        input_file: optional path to a custom whitelist TSV file
    """

    def __init__(self, input_case: str = INPUT_LOWER_CASED, input_file: str = None):
        super().__init__(name="whitelist", kind="classify")

        # Fix: input_file was previously accepted but ignored; honor it when provided.
        whitelist_path = input_file if input_file else get_abs_path("data/whitelist.tsv")
        whitelist = pynini.string_file(whitelist_path)

        graph = pynutil.insert('name: "') + whitelist + pynutil.insert('"')
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/whitelist.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.inverse_text_normalization.zh.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.zh.taggers.word import WordFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can
    process an entire sentence, that is lower cased. For deployment, this grammar
    will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        input_case: casing of the input text, forwarded to WhiteListFst
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        whitelist: path to a custom whitelist file, forwarded to WhiteListFst as input_file
        overwrite_cache: set to True to overwrite .far files
    """

    def __init__(
        self, input_case: str, cache_dir: str = None, whitelist: str = None, overwrite_cache: bool = False,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")

        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, "_zh_itn.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Restore the compiled grammar from cache instead of rebuilding.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            cardinal = CardinalFst()
            cardinal_graph = cardinal.fst

            ordinal = OrdinalFst(cardinal)
            ordinal_graph = ordinal.fst

            decimal = DecimalFst(cardinal)
            decimal_graph = decimal.fst

            date_graph = DateFst().fst
            word_graph = WordFst().fst
            time_graph = TimeFst().fst
            money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst
            fraction = FractionFst(cardinal)
            fraction_graph = fraction.fst
            punct_graph = PunctuationFst().fst
            whitelist_graph = WhiteListFst(input_file=whitelist, input_case=input_case).fst

            # Lower weight = higher priority; the word graph (weight 100) is the fallback.
            classify = (
                pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.2)
                | pynutil.add_weight(cardinal_graph, 1.09)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
                | pynutil.add_weight(whitelist_graph, 1.01)
            )

            # Wrap each classified span in `tokens { ... }`, allowing punctuation
            # tokens before and after.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )

            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space

            self.fst = graph.optimize()

            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/tokenize_and_classify.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
        e.g. a, -> tokens { name: "a" } tokens { name: "," }
    """

    def __init__(self):
        super().__init__(name="punctuation", kind="classify")

        # ASCII punctuation plus common CJK (full-width) punctuation marks.
        punct_chars = "!#$%&'()*+,-./:;<=>?@^_`{|}~。,;:《》“”·~【】!?、‘’.<>-——_"
        punct_union = pynini.union(*punct_chars)

        tagged = pynutil.insert('name: "') + punct_union + pynutil.insert('"')
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/punctuation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from nemo_text_processing.inverse_text_normalization.zh.utils import get_abs_path
from pynini.lib import pynutil
def get_quantity(decimal, cardinal):
    """
    Returns a graph tagging a large-magnitude suffix (万/亿 and their traditional
    and financial variants) that follows an integer or decimal number, emitting
    it as `quantity: "..."`.

    Args:
        decimal: decimal graph that already emits integer_part/fractional_part tags
        cardinal: cardinal graph producing plain digit strings

    Returns:
        pynini graph emitting either `integer_part: "..." quantity: "..."` or
        `<decimal tags> quantity: "..."`
    """
    suffix = pynini.union(
        "万",
        "十万",
        "百万",
        "千万",
        "亿",
        "十亿",
        "百亿",
        "千亿",
        "萬",
        "十萬",
        "百萬",
        "千萬",
        "億",
        "十億",
        "百億",
        "千億",
        "拾萬",
        "佰萬",
        "仟萬",
        "拾億",
        "佰億",
        "仟億",
        "拾万",
        "佰万",
        "仟万",
        "拾亿",  # fix: was a duplicated "仟亿"; "拾亿" was missing from the series
        "佰亿",
        "仟亿",
    )
    numbers = cardinal
    res = (
        pynutil.insert('integer_part: "')
        + numbers
        + pynutil.insert('"')
        + pynutil.insert(' quantity: "')
        + suffix
        + pynutil.insert('"')
    )
    res = res | decimal + pynutil.insert(' quantity: "') + suffix + pynutil.insert('"')
    return res
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal numbers. Produces tokens of
    the form `integer_part: "..." fractional_part: "..."`, with an optional
    leading 负/負 emitted as `negative: "-"` and an optional magnitude suffix
    (万/亿 etc.) emitted via get_quantity().

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="decimal", kind="classify")

        # Digits after the decimal point are read out one by one; 零 -> 0.
        cardinal_after_decimal = pynini.string_file(get_abs_path("data/numbers/digit-nano.tsv")) | pynini.closure(
            pynini.cross("零", "0")
        )
        cardinal_before_decimal = cardinal.just_cardinals | pynini.cross("零", "0")

        # 点/點 ("point") separates the integer and fractional parts.
        delete_decimal = pynutil.delete("点") | pynutil.delete("點")

        graph_integer = pynutil.insert('integer_part: "') + cardinal_before_decimal + pynutil.insert('" ')
        graph_string_of_cardinals = pynini.closure(cardinal_after_decimal, 1)
        graph_fractional = pynutil.insert('fractional_part: "') + graph_string_of_cardinals + pynutil.insert('"')

        graph_decimal_no_sign = pynini.closure((graph_integer + delete_decimal + graph_fractional), 1)

        self.final_graph_wo_negative = graph_decimal_no_sign | get_quantity(
            graph_decimal_no_sign, cardinal.just_cardinals
        )

        graph_negative = pynini.cross("负", 'negative: "-" ') | pynini.cross("負", 'negative: "-" ')
        graph_negative = pynini.closure(graph_negative, 0, 1)  # captures only one "负"

        graph_decimal = graph_negative + graph_decimal_no_sign
        graph_decimal = graph_decimal | (graph_negative + get_quantity(graph_decimal_no_sign, cardinal_before_decimal))
        # NOTE(review): this overwrites the assignment above and, despite the name,
        # the retained graph accepts an optional negative sign. MoneyFst consumes
        # final_graph_wo_negative, so confirm the intended graph before changing
        # either assignment.
        self.final_graph_wo_negative = graph_decimal

        final_graph = self.add_tokens(graph_decimal)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/decimal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import NEMO_DIGIT, GraphFst
from nemo_text_processing.inverse_text_normalization.zh.utils import get_abs_path
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money amounts, covering both
    symbol/word currencies (loaded from TSV files) and colloquial Mandarin yuan
    units: 块 (major unit), 毛/角 (tenths), 分 (hundredths).

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="money", kind="classify")
        # currency words/symbols and single spoken digits
        major_currency = pynini.string_file(get_abs_path("data/money/currency_major.tsv"))
        minor_currency = pynini.string_file(get_abs_path("data/money/currency_minor.tsv"))
        digits = pynini.string_file(get_abs_path("data/numbers/digit-nano.tsv"))

        graph_cardinal = cardinal.for_ordinals
        # NOTE(review): despite the name, this graph includes the negative sign
        # (see DecimalFst, where the attribute is reassigned).
        graph_decimal = decimal.final_graph_wo_negative

        # add leading zero to a single-digit number: 1 -> 01
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        graph_fractional_values = graph_cardinal @ add_leading_zero_to_double_digit

        # regular number and yuan part
        graph_integer_component = pynutil.insert('integer_part: "') + graph_cardinal + pynutil.insert('"')
        graph_fractional_component = (
            pynutil.insert('fractional_part: "') + graph_fractional_values + pynutil.insert('"')
        )

        # regular symbol part
        graph_major_currency = pynutil.insert('currency: "') + major_currency + pynutil.insert('"')
        graph_minor_currency = pynutil.insert('currency: "') + minor_currency + pynutil.insert('"')

        # regular combine number and symbol part
        graph_only_major = graph_integer_component + pynutil.insert(" ") + graph_major_currency
        graph_only_minor = graph_fractional_component + pynutil.insert(" ") + graph_minor_currency
        graph_money = graph_only_major + pynutil.insert(" ") + graph_fractional_component

        # regular large money with decimals
        graph_large_money = graph_decimal + pynutil.insert(" ") + graph_major_currency

        # final graph for regular currency
        graph_regular_money = graph_only_major | graph_only_minor | graph_money | graph_large_money

        # yuan major plus minor; traditional 塊 is normalized to simplified 块
        major_symbol = pynini.accep("块") | pynini.cross("塊", "块")
        tencent = pynini.accep("毛") | pynini.accep("角",)
        cent = pynini.accep("分")

        # X块 — integer amount with kept major-unit character
        graph_kuai = (
            graph_integer_component
            + pynutil.insert(" ")
            + pynutil.insert('currency_major: "')
            + pynini.closure(major_symbol, 1, 1)
            + pynutil.insert('"')
        )
        # X毛/X角 — tenths with kept minor-unit character
        graph_mao = (
            graph_integer_component
            + pynutil.insert(" ")
            + pynutil.insert('currency_minor: "')
            + pynini.closure(tencent, 1, 1)
            + pynutil.insert('"')
        )
        # X分 — hundredths with kept minor-unit character
        graph_fen = (
            graph_integer_component
            + pynutil.insert(" ")
            + pynutil.insert('currency_minor: "')
            + pynini.closure(cent, 1, 1)
            + pynutil.insert('"')
        )

        graph_digits = pynutil.insert('fractional_part: "') + digits + pynutil.insert('"')
        # X块Y毛
        graph_kuaimao = (
            graph_kuai
            + pynutil.insert(" ")
            + graph_digits
            + pynutil.insert(" ")
            + pynutil.insert('currency_minor: "')
            + pynini.closure(tencent, 1, 1)
            + pynutil.insert('"')
        )
        # X块Y分
        graph_kuaifen = (
            graph_kuai
            + pynutil.insert(" ")
            + graph_digits
            + pynutil.insert(" ")
            + pynutil.insert('currency_minor: "')
            + pynini.closure(cent, 1, 1)
            + pynutil.insert('"')
        )
        # X毛Y分
        # NOTE(review): 'fraction_part' and 'currency_min' below differ from the
        # 'fractional_part'/'currency_minor' keys used above — likely typos, but
        # the money verbalizer may match these exact keys; confirm before renaming.
        graph_maofen = (
            pynutil.insert('fractional_part: "')
            + digits
            + pynutil.insert('"')
            + pynutil.insert(" ")
            + pynutil.insert('currency_minor: "')
            + pynini.closure(tencent, 1, 1)
            + pynutil.insert('"')
            + pynutil.insert(" ")
            + pynutil.insert('fraction_part: "')
            + digits
            + pynutil.insert('"')
            + pynutil.insert(" ")
            + pynutil.insert('currency_min: "')
            + pynini.closure(cent, 1, 1)
            + pynutil.insert('"')
        )
        # X块Y毛Z分
        graph_kuaimaofen = (
            graph_kuai
            + pynutil.insert(" ")
            + pynutil.insert('fractional_part: "')
            + digits
            + pynutil.insert('"')
            + pynutil.insert(" ")
            + pynutil.insert('currency_minor: "')
            + pynini.closure(tencent, 1, 1)
            + pynutil.insert('"')
            + pynutil.insert(" ")
            + pynutil.insert('fraction_part: "')
            + digits
            + pynutil.insert('"')
            + pynutil.insert(" ")
            + pynutil.insert('currency_min: "')
            + pynini.closure(cent, 1, 1)
            + pynutil.insert('"')
        )
        graph_mandarin = (
            graph_kuai | graph_mao | graph_fen | graph_kuaimao | graph_kuaifen | graph_maofen | graph_kuaimaofen
        )

        # combining both
        graph_final = graph_regular_money | graph_mandarin
        final = self.add_tokens(graph_final)
        self.fst = final.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/money.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import NEMO_DIGIT, NEMO_SIGMA, GraphFst
from nemo_text_processing.inverse_text_normalization.zh.utils import get_abs_path
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    def __init__(self):
        """
        Finite state transducer for classifying cardinals (e.g., 负五十 -> cardinal { negative: "-" integer: "50" })
        This class converts cardinals up to hundred billions (i.e., up to (10**10) magnitudes).
        Single unit digits are not converted (e.g., 五 -> 五)
        Numbers less than 20 are not converted.
        二十 (2 characters/logograms) is kept as it is but 二十一 (3 characters/logograms) would become 21
        """
        super().__init__(name="cardinal", kind="classify")
        # deleters/acceptors for the magnitude logograms (simplified and traditional variants);
        # "delete" variants consume the logogram, "closure" variants keep it in the output
        delete_hundreds = pynutil.delete("百") | pynutil.delete("佰")
        delete_thousands = pynutil.delete("千") | pynutil.delete("仟")
        closure_ten_thousands = pynini.accep("萬") | pynini.accep("万")
        delete_ten_thousands = pynutil.delete("萬") | pynutil.delete("万")
        closure_hundred_millions = pynini.accep("亿") | pynini.accep("億")
        delete_hundred_millions = pynutil.delete("亿") | pynutil.delete("億")
        # digit inventories imported from TSV data files
        zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        digits = pynini.string_file(get_abs_path("data/numbers/digit-nano.tsv"))
        ties = pynini.string_file(get_abs_path("data/numbers/ties-nano.tsv"))
        # grammar for digits; the insert branch pads an empty digit slot with "0"
        graph_digits = digits | pynutil.insert("0")
        # grammar for teens (10-19); 十/拾 spellings all map to a leading "1"
        ten = pynini.string_map([("十", "1"), ("拾", "1"), ("壹拾", "1"), ("一十", "1")])
        graph_teens = ten + graph_digits
        # grammar for tens, not the output for Cardinal grammar but for pure Arabic digits (used in other grammars)
        graph_tens = (ties + graph_digits) | (pynini.cross(pynini.accep("零"), "0") + graph_digits)
        graph_all = graph_tens | graph_teens | pynutil.insert("00")
        # grammar for hundreds 百
        graph_hundreds_complex = (
            (graph_digits + delete_hundreds + graph_all)
            | (graph_digits + delete_hundreds + pynini.cross(pynini.closure("零"), "0") + graph_digits)
            | (graph_digits + delete_hundreds + graph_teens)
        )
        graph_hundreds = graph_hundreds_complex
        graph_hundreds = graph_hundreds | pynutil.insert("000")
        # grammar for thousands 千
        graph_thousands_complex = (
            (graph_digits + delete_thousands + graph_hundreds_complex)
            | (graph_digits + delete_thousands + pynini.cross(pynini.closure("零"), "0") + graph_all)
            | (graph_digits + delete_thousands + pynini.cross(pynini.closure("零"), "00") + graph_digits)
        )
        # NOTE(review): other magnitudes pad the empty branch with slot-width zeros,
        # which would be "0000" here; this empty-input branch is filtered out by
        # clean_cardinal below (all-zero strings don't survive), so the discrepancy
        # appears inert — confirm before changing.
        graph_thousands = graph_thousands_complex | pynutil.insert("000")
        # grammar for ten thousands 万; the "simple" form (digit + 万 kept verbatim)
        # is preferred over full expansion via the negative weight below
        graph_ten_thousands_simple = graph_digits + closure_ten_thousands
        graph_ten_thousands_complex = (
            (graph_digits + delete_ten_thousands + graph_thousands_complex)
            | (graph_digits + delete_ten_thousands + pynini.cross(pynini.closure("零"), "0") + graph_hundreds_complex)
            | (graph_digits + delete_ten_thousands + pynini.cross(pynini.closure("零"), "00") + graph_all)
            | (graph_digits + delete_ten_thousands + pynini.cross(pynini.closure("零"), "000") + graph_digits)
        )
        graph_ten_thousands = (
            pynutil.add_weight(graph_ten_thousands_simple, -1.0)
            | graph_ten_thousands_complex
            | pynutil.insert("00000")
        )
        # grammar for hundred thousands 十万
        graph_hundred_thousands_simple = graph_all + closure_ten_thousands
        graph_hundred_thousands_complex = (
            (graph_all + delete_ten_thousands + graph_thousands_complex)
            | (graph_all + delete_ten_thousands + pynini.cross(pynini.closure("零"), "0") + graph_hundreds_complex)
            | (graph_all + delete_ten_thousands + pynini.cross(pynini.closure("零"), "00") + graph_all)
            | (graph_all + delete_ten_thousands + pynini.cross(pynini.closure("零"), "000") + graph_digits)
        )
        graph_hundred_thousands = (
            pynutil.add_weight(graph_hundred_thousands_simple, -1.0)
            | graph_hundred_thousands_complex
            | pynutil.insert("000000")
        )
        # grammar for millions 百万
        graph_millions_simple = graph_hundreds_complex + closure_ten_thousands
        graph_millions_complex = (
            (graph_hundreds_complex + delete_ten_thousands + graph_thousands_complex)
            | (
                graph_hundreds_complex
                + delete_ten_thousands
                + pynini.cross(pynini.closure("零"), "0")
                + graph_hundreds_complex
            )
            | (graph_hundreds_complex + delete_ten_thousands + pynini.cross(pynini.closure("零"), "00") + graph_all)
            | (graph_hundreds_complex + delete_ten_thousands + pynini.cross(pynini.closure("零"), "000") + graph_digits)
        )
        graph_millions = (
            pynutil.add_weight(graph_millions_simple, -1.0) | graph_millions_complex | pynutil.insert("0000000")
        )
        # grammar for ten millions 千万
        graph_ten_millions_simple = graph_thousands_complex + closure_ten_thousands
        graph_ten_millions_complex = (
            (graph_thousands_complex + delete_ten_thousands + graph_thousands_complex)
            | (
                graph_thousands_complex
                + delete_ten_thousands
                + pynini.cross(pynini.closure("零"), "0")
                + graph_hundreds_complex
            )
            | (graph_thousands_complex + delete_ten_thousands + pynini.cross(pynini.closure("零"), "00") + graph_all)
            | (
                graph_thousands_complex
                + delete_ten_thousands
                + pynini.cross(pynini.closure("零"), "000")
                + graph_digits
            )
        )
        graph_ten_millions = pynutil.add_weight(graph_ten_millions_simple, -1.0) | graph_ten_millions_complex
        graph_ten_millions = graph_ten_millions | pynutil.insert("00000000")
        # grammar for hundred millions 亿; the cross() branches pad the zeros elided
        # by a spoken 零, one branch per possible remainder width
        graph_hundred_millions_simple = graph_digits + closure_hundred_millions
        graph_hundred_millions_complex = (
            (graph_digits + delete_hundred_millions + graph_ten_millions_complex)
            | (
                graph_digits
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0")
                + graph_millions_complex
            )
            | (
                graph_digits
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00")
                + graph_hundred_thousands_complex
            )
            | (
                graph_digits
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "000")
                + graph_ten_thousands_complex
            )
            | (
                graph_digits
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0000")
                + graph_thousands_complex
            )
            | (
                graph_digits
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00000")
                + graph_hundreds_complex
            )
            | (graph_digits + delete_hundred_millions + pynini.cross(pynini.closure("零"), "000000") + graph_all)
            | (graph_digits + delete_hundred_millions + pynini.cross(pynini.closure("零"), "0000000") + graph_digits)
        )
        graph_hundred_millions = (
            pynutil.add_weight(graph_hundred_millions_simple, -1.0)
            | graph_hundred_millions_complex
            | pynutil.insert("000000000")
        )
        # grammar for billions 十亿
        graph_billions_simple = graph_all + closure_hundred_millions
        graph_billions_complex = (
            (graph_all + delete_hundred_millions + graph_ten_millions_complex)
            | (graph_all + delete_hundred_millions + pynini.cross(pynini.closure("零"), "0") + graph_millions_complex)
            | (
                graph_all
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00")
                + graph_hundred_thousands_complex
            )
            | (
                graph_all
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "000")
                + graph_ten_thousands_complex
            )
            | (
                graph_all
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0000")
                + graph_thousands_complex
            )
            | (
                graph_all
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00000")
                + graph_hundreds_complex
            )
            | (graph_all + delete_hundred_millions + pynini.cross(pynini.closure("零"), "000000") + graph_all)
            | (graph_all + delete_hundred_millions + pynini.cross(pynini.closure("零"), "0000000") + graph_digits)
        )
        graph_billions = (
            pynutil.add_weight(graph_billions_simple, -1.0) | graph_billions_complex | pynutil.insert("0000000000")
        )
        # grammar for ten billions 百亿
        graph_ten_billions_simple = graph_hundreds_complex + closure_hundred_millions
        graph_ten_billions_complex = (
            (graph_hundreds_complex + delete_hundred_millions + graph_ten_millions_complex)
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0")
                + graph_millions_complex
            )
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00")
                + graph_hundred_thousands_complex
            )
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "000")
                + graph_ten_thousands_complex
            )
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0000")
                + graph_thousands_complex
            )
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00000")
                + graph_hundreds_complex
            )
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "000000")
                + graph_all
            )
            | (
                graph_hundreds_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0000000")
                + graph_digits
            )
        )
        graph_ten_billions = (
            pynutil.add_weight(graph_ten_billions_simple, -1.0)
            | graph_ten_billions_complex
            | pynutil.insert("00000000000")
        )
        # grammar for hundred billions 千亿
        graph_hundred_billions_simple = graph_thousands_complex + closure_hundred_millions
        graph_hundred_billions_complex = (
            (graph_thousands_complex + delete_hundred_millions + graph_ten_millions_complex)
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0")
                + graph_millions_complex
            )
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00")
                + graph_hundred_thousands_complex
            )
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "000")
                + graph_ten_thousands_complex
            )
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0000")
                + graph_thousands_complex
            )
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "00000")
                + graph_hundreds_complex
            )
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "000000")
                + graph_all
            )
            | (
                graph_thousands_complex
                + delete_hundred_millions
                + pynini.cross(pynini.closure("零"), "0000000")
                + graph_digits
            )
        )
        graph_hundred_billions = (
            pynutil.add_weight(graph_hundred_billions_simple, -1.0) | graph_hundred_billions_complex
        )
        # combining grammar; output for cardinal grammar
        graph = pynini.union(
            graph_hundred_billions,
            graph_ten_billions,
            graph_billions,
            graph_hundred_millions,
            graph_ten_millions,
            graph_millions,
            graph_hundred_thousands,
            graph_ten_thousands,
            graph_thousands,
            graph_hundreds,
            graph_all,
            graph_teens,
            graph_digits,
            zero,
        )
        # combining grammar; output consists only of Arabic numbers (no 万/亿 kept)
        graph_just_cardinals = pynini.union(
            graph_hundred_billions_complex,
            graph_ten_billions_complex,
            graph_billions_complex,
            graph_hundred_millions_complex,
            graph_ten_millions_complex,
            graph_millions_complex,
            graph_hundred_thousands_complex,
            graph_ten_thousands_complex,
            graph_thousands_complex,
            graph_hundreds_complex,
            graph_all,
            graph_teens,
            graph_digits,
            zero,
        )
        # delete unnecessary leading zeros; strings of only zeros do not survive
        # (except the literal "0" branch below)
        delete_leading_zeros = pynutil.delete(pynini.closure("0"))
        stop_at_non_zero = pynini.difference(NEMO_DIGIT, "0")
        rest_of_cardinal = pynini.closure(NEMO_DIGIT) | pynini.closure(NEMO_SIGMA)
        # output for cardinal grammar without leading zero
        clean_cardinal = delete_leading_zeros + stop_at_non_zero + rest_of_cardinal
        clean_cardinal = clean_cardinal | "0"
        graph = graph @ clean_cardinal  # output for regular cardinals
        self.for_ordinals = graph  # used for ordinal grammars
        # output for pure arabic number without leading zero
        clean_just_cardinal = delete_leading_zeros + stop_at_non_zero + rest_of_cardinal
        clean_just_cardinal = clean_just_cardinal | "0"
        graph_just_cardinals = graph_just_cardinals @ clean_just_cardinal  # output for other grammars
        self.just_cardinals = graph_just_cardinals  # used for other grammars
        # final grammar for cardinal output; tokenization
        # 负/負 (simplified/traditional "negative") both become the "-" sign
        optional_minus_graph = (pynini.closure(pynutil.insert("negative: ") + pynini.cross("负", '"-"'))) | (
            pynini.closure(pynutil.insert("negative: ") + pynini.cross("負", '"-"'))
        )
        final_graph = optional_minus_graph + pynutil.insert('integer: "') + graph + pynutil.insert('"')
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/cardinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from nemo_text_processing.inverse_text_normalization.zh.utils import get_abs_path
from pynini.lib import pynutil
class DateFst(GraphFst):
    def __init__(self):
        """
        Finite state transducer for classifying date
        1798年五月三十日 -> date { year: "1798" month: "5" day: "30" }
        五月三十日 -> date { month: "5" day: "30" }
        一六七二年 -> date { year: "1672" }
        """
        super().__init__(name="date", kind="classify")
        digits = pynini.string_file(get_abs_path("data/numbers/digit-nano.tsv"))  # imported for year-component
        months = pynini.string_file(get_abs_path("data/date/months.tsv"))  # imported for month-component
        days = pynini.string_file(get_abs_path("data/date/day.tsv"))  # imported for day-component
        # grammar for year: digit-by-digit transcription (零 -> 0), then delete the 年 suffix
        graph_year = (
            pynini.closure(digits)
            + pynini.closure(pynini.cross("零", "0"))
            + pynini.closure(digits)
            + pynini.closure(pynini.cross("零", "0"))
            + pynutil.delete("年")
        )
        graph_year = pynutil.insert('year: "') + graph_year + pynutil.insert('"')
        # grammar for month
        graph_month = pynutil.insert('month: "') + months + pynutil.delete("月") + pynutil.insert('"')
        # grammar for day; 日/号/號 are interchangeable day suffixes
        graph_day_suffix = pynini.accep("日") | pynini.accep("号") | pynini.accep("號")
        graph_delete_day_suffix = pynutil.delete(graph_day_suffix)
        graph_day = pynutil.insert('day: "') + days + graph_delete_day_suffix + pynutil.insert('"')
        # grammar for combinations of year+month, month+day, and year+month+day
        graph_ymd = graph_year + pynutil.insert(" ") + graph_month + pynutil.insert(" ") + graph_day
        graph_ym = graph_year + pynutil.insert(" ") + graph_month
        graph_md = graph_month + pynutil.insert(" ") + graph_day
        # final grammar for standard date
        graph_date = graph_ymd | graph_ym | graph_md | graph_year | graph_month | graph_day
        # graph_date = graph_year | graph_month | graph_day
        # grammar for optional prefix ad or bc
        graph_bc_prefix = pynini.closure("紀元前", 0, 1) | pynini.closure("公元前", 0, 1) | pynini.closure("纪元前", 0, 1)
        graph_bc = pynutil.delete(graph_bc_prefix)
        # NOTE(review): "+" binds tighter than "|", so the second alternative is the
        # concatenation closure("公元后") + closure("紀元"); presumably all prefixes were
        # meant as independent alternates — confirm the intended precedence.
        graph_ad_prefix = (
            pynini.closure("公元", 0, 1)
            | pynini.closure("公元后", 0, 1) + pynini.closure("紀元", 0, 1)
            | pynini.closure("纪元", 0, 1)
            | pynini.closure("西元", 0, 1)
        )
        graph_ad = pynutil.delete(graph_ad_prefix)
        # era markers: append B.C./A.D. after the date fields; prefixes accept the
        # empty string (closure 0,1), so graph_era also matches unprefixed dates
        graph_suffix_bc = (
            graph_bc + graph_date + pynutil.insert(' era: "') + pynutil.insert("B.C.") + pynutil.insert('"')
        )
        graph_suffix_ad = (
            graph_ad + graph_date + pynutil.insert(' era: "') + pynutil.insert("A.D.") + pynutil.insert('"')
        )
        graph_era = graph_suffix_bc | graph_suffix_ad
        # grammar for standard date and with era
        graph_date_final = graph_era | graph_date
        # graph_date_final = graph_date
        final_graph = self.add_tokens(graph_date_final)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/date.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for the default token class: any whitespace-free
    string that no special classifier matched is wrapped as a plain token,
    e.g. sleep -> tokens { name: "sleep" }
    """

    def __init__(self):
        super().__init__(name="word", kind="classify")
        token_body = pynini.closure(NEMO_NOT_SPACE, 1)
        graph = pynutil.insert('name: "') + token_body + pynutil.insert('"')
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/taggers/word.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import NEMO_DIGIT, GraphFst, delete_space
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time, e.g.,
    time { hours: "12" minutes: "30" } -> 12:30
    time { hours: "01" minutes: "30" } -> 01:30
    time { hours: "1" minutes: "30" seconds: "05" } -> 01:30:05
    time { hours: "1" minutes: "1刻" } -> 1点1刻
    time { hours: "一点" } -> 1点
    time { hours: "一小时" } -> 1小时
    time { hours: "一个钟头" } -> 1个钟头
    time { minutes: "一分" } -> 1分
    time { minutes: "一分钟" } -> 1分钟
    time { seconds: "一秒" } -> 1秒
    time { seconds: "一秒钟" } -> 1秒钟
    time { hours: "五点" minutes: "一刻" } -> 5点1刻
    """

    def __init__(self):
        super().__init__(name="time", kind="verbalize")
        # add_leading_zero = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # plain 1-2 digit fields, used for colon-separated (regular) output
        token_hour = (
            pynutil.delete("hours:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1, 2)
            + pynutil.delete('"')
        )
        token_minute = (
            pynutil.delete("minutes:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1, 2)
            + pynutil.delete('"')
        )
        token_second = (
            pynutil.delete("seconds:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1, 2)
            + pynutil.delete('"')
        )
        add_colon = pynutil.insert(":")
        # HH:MM or HH:MM:SS
        graph_regular_time = (token_hour + delete_space + add_colon + token_minute) | (
            token_hour + delete_space + add_colon + token_minute + delete_space + add_colon + token_second
        )
        # Mandarin-style time keeps the unit logograms (点/小时/…) in the output
        hours = (
            pynini.accep("点")
            | pynini.accep("小时")
            | pynini.accep("时")
            | pynini.accep("个钟头")
            | pynini.accep("个点")
            | pynini.accep("个小时")
        )
        hour_mandarin = (
            pynutil.delete("hours:")
            + delete_space
            + pynutil.delete('"')
            + (pynini.closure(NEMO_DIGIT) + pynini.closure(hours, 1))
            + pynutil.delete('"')
        )
        minutes = pynini.accep("分") | pynini.accep("分钟") | pynini.accep("半")
        # 半 ("half") may appear without a preceding digit, hence the bare-unit branch
        minute_mandarin = (
            pynutil.delete("minutes:")
            + delete_space
            + pynutil.delete('"')
            + (((pynini.closure(NEMO_DIGIT) + pynini.closure(minutes, 1))) | pynini.closure(minutes, 1))
            + pynutil.delete('"')
        )
        seconds = pynini.accep("秒") | pynini.accep("秒钟")
        second_mandarin = (
            pynutil.delete("seconds:")
            + delete_space
            + pynutil.delete('"')
            + (pynini.closure(NEMO_DIGIT) + pynini.closure(seconds, 1))
            + pynutil.delete('"')
        )
        quarters = pynini.accep("刻") | pynini.accep("刻钟")
        # quarter-hours are tagged under "minutes:" by the tagger
        quarter_mandarin = (
            pynutil.delete("minutes:")
            + delete_space
            + pynutil.delete('"')
            + (pynini.closure(NEMO_DIGIT) + pynini.closure(quarters, 1))
            + pynutil.delete('"')
        )
        # any single unit, or the supported unit combinations
        graph_mandarin_time = (
            hour_mandarin
            | minute_mandarin
            | second_mandarin
            | quarter_mandarin
            | (hour_mandarin + delete_space + quarter_mandarin)
            | (hour_mandarin + delete_space + minute_mandarin)
            | (hour_mandarin + delete_space + minute_mandarin + delete_space + second_mandarin)
            | (minute_mandarin + delete_space + second_mandarin)
        )
        final_graph = graph_regular_time | graph_mandarin_time
        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/time.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import NEMO_DIGIT, GraphFst, delete_space
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fraction tokens, e.g.
    tokens { fraction { denominator: "2" numerator: "1"} } -> 1/2
    tokens { fraction { integer_part: "1" denominator: "2" numerator: "1" } } -> 1又1/2
    """

    def __init__(self):
        super().__init__(name="fraction", kind="verbalize")
        strip_quote = pynutil.delete('"')
        number = pynini.closure(NEMO_DIGIT)
        # whole-number part; a trailing 又 ("and") joins it to the fraction
        whole = (
            pynutil.delete("integer_part:")
            + delete_space
            + strip_quote
            + number
            + pynutil.insert("又")
            + strip_quote
        )
        # the numerator comes first in the output, followed by the "/" separator
        top = (
            pynutil.delete("numerator:")
            + delete_space
            + strip_quote
            + number
            + pynutil.insert("/")
            + strip_quote
        )
        bottom = pynutil.delete("denominator:") + delete_space + strip_quote + number + strip_quote
        with_whole = whole + delete_space + top + delete_space + bottom
        plain = top + delete_space + bottom
        graph = with_whole | plain
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/fraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinal tokens: the quoted integer
    field (which already carries the 第 prefix) is emitted verbatim.
    """

    def __init__(self):
        super().__init__(name="ordinal", kind="verbalize")
        strip_quote = pynutil.delete('"')
        # 第 followed by digits and any trailing material inside the quotes
        ordinal_body = pynini.accep("第") + pynini.closure(NEMO_DIGIT) + pynini.closure(NEMO_SIGMA)
        graph = pynutil.delete("integer:") + delete_space + strip_quote + ordinal_body + strip_quote
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/ordinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.whitelist import WhiteListFst
class VerbalizeFst(GraphFst):
    """
    Composes the individual verbalizer grammars into one transducer.
    For deployment, this grammar is compiled and exported to an OpenFst
    Finite State Archive (FAR) file; see NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")
        # union order mirrors the original grammar precedence
        self.fst = (
            TimeFst().fst
            | DateFst().fst
            | MoneyFst().fst
            | FractionFst().fst
            | OrdinalFst().fst
            | DecimalFst().fst
            | CardinalFst().fst
            | WhiteListFst().fst
        )
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/verbalize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for verbalizing whitelist entries,
    e.g. tokens { name: "USB" } -> USB
    """

    def __init__(self):
        super().__init__(name="whitelist", kind="verbalize")
        strip_quote = pynutil.delete('"')
        body = pynini.closure(NEMO_CHAR - " ", 1)
        unquoted = pynutil.delete("name:") + delete_space + strip_quote + body + strip_quote
        # rewrite any non-breaking space (U+00A0) into a regular space
        normalized = unquoted @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = normalized.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/whitelist.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.zh.verbalizers.word import WordFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # a token is either a classified grammar or a plain word
        token_types = VerbalizeFst().fst | WordFst().fst
        # strip the "tokens { ... }" wrapper around each token
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_types
            + delete_space
            + pynutil.delete("}")
        )
        # a sentence is one or more tokens separated by single spaces
        sentence = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/verbalize_final.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal tokens, e.g.
    decimal { integer_part: "1234" fractional_part: "5" } -> 1,234.5
    decimal { negative: "-" integer_part: "2" fractional_part: "5" } -> -2.5
    The verbalized number graph (before token-wrapper deletion) is exposed as
    ``self.numbers`` for reuse by other verbalizers; the misspelled
    ``self.numebrs`` alias is kept for backward compatibility.
    """

    def __init__(self):
        super().__init__(name="decimal", kind="verbalize")
        # group integer digits by three, inserting "," separators (e.g. 1234567 -> 1,234,567)
        exactly_three_digits = NEMO_DIGIT ** 3
        at_most_three_digits = pynini.closure(NEMO_DIGIT, 1, 3)
        space_every_three_integer = at_most_three_digits + (pynutil.insert(",") + exactly_three_digits).closure()
        # strip the 'negative:' tokenization, keeping the "-" sign itself
        optional_sign = pynini.closure(
            pynutil.delete("negative: ")
            + delete_space
            + pynutil.delete('"')
            + pynini.accep("-")
            + pynutil.delete('"')
            + delete_space
        )
        # strip the 'integer_part:' tokenization
        integer = (
            pynutil.delete("integer_part:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete('"')
        )
        integer = integer @ space_every_three_integer
        optional_integer = pynini.closure(integer + delete_space, 0, 1)
        # strip the 'fractional_part:' tokenization, prefixing the digits with "."
        fractional = (
            pynutil.insert(".")
            + pynutil.delete("fractional_part:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
        # strip the 'quantity:' tokenization; the quoted content is emitted verbatim
        quantity = (
            pynutil.delete("quantity:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )
        optional_quantity = pynini.closure(delete_space + quantity)
        # combine: [sign] [integer] [.fraction] [quantity]
        graph = (optional_integer + optional_fractional + optional_quantity).optimize()
        graph = optional_sign + graph  # add optional sign for negative number
        self.numbers = graph  # correctly-spelled public attribute
        self.numebrs = graph  # deprecated misspelled alias kept for backward compatibility
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/decimal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money tokens into written form.

    Two families of outputs are produced:
      - "regular": a currency symbol followed by digits, with integer and
        fractional parts joined by "." (optionally followed by a quantity
        suffix for large amounts)
      - "mandarin": spoken-style unit characters kept after the digits,
        using 块 (major), 毛/角 (minor) and 分 (lesser) units
    """

    def __init__(self):
        super().__init__(name="money", kind="verbalize")

        # strip the tokenization wrappers, keeping only the field contents
        currency_unit = pynutil.delete('currency: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        number_unit = pynutil.delete('integer_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        fraction_unit = pynutil.delete('fractional_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        # fractional part followed by a quantity (scale word) for large amounts
        decimal_unit = (
            pynutil.insert(".")
            + pynutil.delete('fractional_part: "')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
            + delete_space
            + pynutil.delete('quantity: "')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete('"')
        )

        # regular money: currency symbol first, "." between integer and fraction
        graph_money_regular = (
            currency_unit + delete_space + number_unit + delete_space + pynutil.insert(".") + fraction_unit
        )
        graph_only_major_regular = currency_unit + delete_space + number_unit
        # fraction only: prepend "0." so the output is a valid decimal
        graph_only_minor_regular = currency_unit + delete_space + pynutil.insert("0.") + fraction_unit
        graph_large_money = currency_unit + delete_space + number_unit + delete_space + decimal_unit
        graph_regular = graph_money_regular | graph_only_major_regular | graph_only_minor_regular | graph_large_money

        major_symbol = pynini.accep("块")
        minor_symbol = pynini.accep("毛") | pynini.accep("角")
        lesser_symbol = pynini.accep("分")

        major_currency = pynutil.delete('currency_major: "') + major_symbol + pynutil.delete('"')
        minor_currency = pynutil.delete('currency_minor: "') + minor_symbol + pynutil.delete('"')
        # FIX: original deleted 'currency_min:"' (no space after the colon) here
        # while graph_maofen below matched 'currency_min: "'; both cannot match
        # the same tagger output, so one path was dead. Normalized to the
        # standard 'field: "value"' serialization. TODO(review): confirm the
        # zh money tagger emits the spaced form.
        lesser_currency = pynutil.delete('currency_min: "') + lesser_symbol + pynutil.delete('"')

        graph_kuai = number_unit + delete_space + major_currency
        graph_mao = number_unit + delete_space + minor_currency
        graph_fen = number_unit + delete_space + lesser_currency

        graph_kuaimao = graph_kuai + delete_space + fraction_unit + delete_space + minor_currency
        graph_kuaifen = graph_kuai + delete_space + fraction_unit + delete_space + lesser_currency
        graph_maofen = (
            pynutil.delete('fractional_part: "')
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete('"')
            + delete_space
            + pynutil.delete('currency_minor: "')
            + minor_symbol
            + pynutil.delete('"')
            + delete_space
            # FIX: original deleted 'fraction_part: "' here, a typo for the
            # 'fractional_part: "' field name used everywhere else in this
            # verbalizer. TODO(review): confirm against the zh money tagger.
            + pynutil.delete('fractional_part: "')
            + pynini.closure(NEMO_DIGIT, 1)
            + pynutil.delete('"')
            + delete_space
            + pynutil.delete('currency_min: "')
            + lesser_symbol
            + pynutil.delete('"')
        )
        graph_all = graph_kuai + delete_space + graph_maofen

        graph_mandarin = (
            (graph_kuai | graph_mao | graph_fen) | graph_kuaimao | graph_kuaifen | graph_maofen | graph_all
        )

        # prefer the mandarin-style reading when both families could apply
        graph_verbalizer = graph_regular | pynutil.add_weight(graph_mandarin, -2.0)

        delete_tokens = self.delete_tokens(graph_verbalizer)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/money.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal tokens, e.g.
        cardinal { integer: "23" } -> 23
        cardinal { negative: "-" integer: "23" } -> -23

    The digit string is regrouped with a "," every three digits (counting
    from the right), and an optional scale suffix (千/万/亿 and their
    traditional or financial character variants) is accepted after the digits.
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="verbalize")

        # building blocks for grouping digits by threes
        exactly_three_digits = NEMO_DIGIT ** 3
        at_most_three_digits = pynini.closure(NEMO_DIGIT, 1, 3)

        # scale suffixes that may trail the digit string; covers simplified,
        # traditional and financial character variants.
        # FIX: removed a duplicate "仟亿" entry present in the original union
        # (harmless to pynini.union, but redundant).
        suffix = pynini.union(
            "千",
            "仟",
            "万",
            "十万",
            "百万",
            "千万",
            "亿",
            "十亿",
            "百亿",
            "千亿",
            "萬",
            "十萬",
            "百萬",
            "千萬",
            "億",
            "十億",
            "百億",
            "千億",
            "拾萬",
            "佰萬",
            "仟萬",
            "拾億",
            "佰億",
            "仟億",
            "拾万",
            "佰万",
            "仟万",
            "仟亿",
            "佰亿",
        )

        # insert a "," between every 3 digits, keeping any scale suffix
        group_by_threes = (
            at_most_three_digits + (pynutil.insert(",") + exactly_three_digits).closure()
        ) + pynini.closure(suffix)

        # remove the "negative:" wrapper, keeping the "-" sign when present
        optional_sign = pynini.closure(
            pynutil.delete("negative: ")
            + delete_space
            + pynutil.delete('"')
            + pynini.accep("-")
            + pynutil.delete('"')
            + delete_space
        )

        # remove the "integer:" wrapper, keeping the digit string
        graph = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_DIGIT, 0, 1)
            + pynini.closure(NEMO_SIGMA)
            + pynutil.delete('"')
        )
        graph = graph @ group_by_threes
        graph = optional_sign + graph

        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/cardinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing date, e.g.
    date { year: "1798" month: "5" day: "30" } -> 1798年5月30日
    date { year: "1798" month: "5" } -> 1798年5月
    date { month: "5" day: "30" } -> 5月30日

    An optional era marker ("A.D."/"B.C.") may precede any of the variants
    and is rendered as 公元 / 公元前.
    """

    def __init__(self):
        super().__init__(name="date", kind="verbalize")

        # strip the 'year: "..."' wrapper, keeping only the contents
        year = (
            pynutil.delete("year:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + delete_space
            + pynutil.delete('"')
        )
        # strip the 'month: "..."' wrapper
        month = (
            pynutil.delete("month:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + delete_space
            + pynutil.delete('"')
        )
        # strip the 'day: "..."' wrapper
        day = (
            pynutil.delete("day:")
            + delete_space
            + pynutil.delete('"')
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + delete_space
            + pynutil.delete('"')
        )

        # era markers. FIX: variable names were swapped in the original
        # (`bc` held the "A.D." mapping and vice versa); behavior is unchanged
        # since both feed the same union below, but the names now match:
        # "A.D." -> 公元, "B.C." -> 公元前.
        era = pynutil.delete("era:")
        ad = era + delete_space + pynutil.delete('"') + pynini.cross("A.D.", "公元") + pynutil.delete('"')
        bc = era + delete_space + pynutil.delete('"') + pynini.cross("B.C.", "公元前") + pynutil.delete('"')

        # combine year/month/day for all supported variations,
        # inserting the 年/月/日 markers
        graph_ymd = (
            year
            + pynutil.insert("年")
            + delete_space
            + month
            + pynutil.insert("月")
            + delete_space
            + day
            + pynutil.insert("日")
        )
        graph_ym = year + pynutil.insert("年") + delete_space + month + pynutil.insert("月")
        graph_md = month + pynutil.insert("月") + delete_space + day + pynutil.insert("日")
        graph_year = year + pynutil.insert("年")
        graph_month = month + pynutil.insert("月")
        graph_day = day + pynutil.insert("日")

        graph_era = bc | ad
        optional_era = pynini.closure(graph_era)

        final_graph = (
            optional_era + delete_space + (graph_ymd | graph_ym | graph_md | graph_year | graph_month | graph_day)
        )

        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/date.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.zh.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for verbalizing plain tokens
    e.g. tokens { name: "sleep" } -> sleep
    """

    def __init__(self):
        super().__init__(name="word", kind="verbalize")
        # one or more non-space characters form the token payload
        payload = pynini.closure(NEMO_CHAR - " ", 1)
        # strip the 'name: "..."' wrapper around the payload
        unwrapped = (
            pynutil.delete("name:") + delete_space + pynutil.delete('"') + payload + pynutil.delete('"')
        )
        # rewrite any non-breaking space in the payload back to a plain space
        nbsp_to_space = pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = (unwrapped @ nbsp_to_space).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/verbalizers/word.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/data/numbers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/data/date/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/data/time/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/zh/data/money/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
from pynini.lib import pynutil
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Union
import inflect
_inflect = inflect.engine()
def num_to_word(x: Union[str, int]):
    """
    Converts an integer (or digit string) to its spoken representation.

    Args:
        x: integer or string of digits

    Returns:
        spoken representation with hyphens and commas removed,
        e.g. 21 -> "twenty one"
    """
    # FIX: the original first converted int -> str and then called str(x)
    # again; a single str() handles both accepted input types.
    return _inflect.number_to_words(str(x)).replace("-", " ").replace(",", "")
def get_abs_path(rel_path):
    """
    Get absolute path relative to this module's directory.

    Args:
        rel_path: path relative to this file's directory

    Returns:
        absolute path as a string
    """
    # os.path.join handles the separator portably instead of
    # hard-coding '/' as the original did
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import GraphFst, convert_space, delete_extra_space
from nemo_text_processing.text_normalization.ar.taggers.measure import unit_singular
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure. Allows for plural form for unit.
        e.g. "عشرون في المائة" -> measure { cardinal { integer: "20" } units: "%" }

    Args:
        itn_cardinal_tagger: ITN Cardinal tagger
        itn_decimal_tagger: ITN Decimal tagger
        itn_fraction_tagger: ITN Fraction tagger
        deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
    """

    def __init__(
        self,
        itn_cardinal_tagger: GraphFst,
        itn_decimal_tagger: GraphFst,
        itn_fraction_tagger: GraphFst,
        deterministic: bool = True,
    ):
        super().__init__(name="measure", kind="classify", deterministic=deterministic)

        cardinal_graph = itn_cardinal_tagger.graph

        # invert the TN (written -> spoken) unit graph so spoken Arabic unit
        # words map back to their written abbreviations
        graph_unit_singular = pynini.invert(unit_singular)
        unit = convert_space(graph_unit_singular)

        # optional leading "سالب" (negative) marker, tagged as negative: "true"
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("سالب", "\"true\"") + delete_extra_space, 0, 1
        )

        unit = pynutil.insert("units: \"") + (unit) + pynutil.insert("\"")

        # decimal value followed by a unit, e.g. decimal { ... } units: "..."
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + itn_decimal_tagger.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )

        # fraction followed by a unit; the fraction string is stored whole in
        # integer_part inside a decimal token
        subgraph_fraction = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + pynutil.insert("integer_part: \"")
            + itn_fraction_tagger.graph
            + pynutil.insert("\" }")
            + delete_extra_space
            + unit
        )

        # plain integer followed by a unit
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )

        final_graph = subgraph_cardinal | subgraph_decimal | subgraph_fraction

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
delete_zero_or_one_space,
insert_space,
)
from nemo_text_processing.text_normalization.ar.utils import get_abs_path
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
    e.g. واحد و نصف -> tokens { integer_part: "1" numerator: "1" denominator: "2" }

    Handles three special spoken forms (singular/dual/plural denominator
    words) via TSV lookup tables, plus a generic "numerator على denominator"
    form for everything else.

    Args:
        tn_cardinal: TN cardinal tagger
    """

    def __init__(self, tn_cardinal: GraphFst):
        super().__init__(name="fraction", kind="classify")

        # optional leading "سالب" (negative) marker
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("سالب", "\"true\" "), 0, 1
        )

        # spoken cardinal -> digits (inverted from the TN direction)
        cardinal_graph = pynini.invert(tn_cardinal.cardinal_numbers).optimize()

        # create unions for special cases
        denominator_singular = pynini.union("نصف", "ثلث", "ربع", "خمس", "سدس", "سبع", "ثمن", "تسع", "عشر")
        denominator_dual = pynini.union(
            "نصفين", "ثلثين", "ربعين", "خمسين", "سدسين", "سبعين", "ثمنين", "تسعين", "عشرين"
        )
        denominator_plural = pynini.union("أخماس", "أرباع", "أثلاث", "أسداس", "أسباع", "أثمان", "أتساع", "أعشار")
        numerator_three_to_ten = pynini.union("خمسة", "سبعة", "عشرة", "ثلاثة", "أربعة", "ستة", "ثمانية", "تسعة")

        # data files mapping the special denominator words to digit strings
        graph_ones = pynini.string_file(get_abs_path("data/number/fraction_singular.tsv")).invert().optimize()
        graph_dual = pynini.string_file(get_abs_path("data/number/fraction_dual.tsv")).invert().optimize()
        graph_plural = pynini.string_file(get_abs_path("data/number/fraction_plural.tsv")).invert().optimize()

        # cases when denominator_singular
        graph_denominator_singular = (
            pynutil.insert("denominator: \"") + denominator_singular @ graph_ones + pynutil.insert("\"")
        )
        # cases when denominator_dual
        graph_denominator_dual = (
            pynutil.insert("denominator: \"") + denominator_dual @ graph_dual + pynutil.insert("\"")
        )
        # cases when denominator_plural
        graph_denominator_plural = (
            pynutil.insert("denominator: \"")
            + delete_zero_or_one_space
            + denominator_plural @ graph_plural
            + pynutil.insert("\"")
        )

        # generic numerator/denominator via the cardinal graph
        denominator_rest = pynutil.insert("denominator: \"") + cardinal_graph + pynutil.insert("\"")
        numerator_rest = pynutil.insert("numerator: \"") + cardinal_graph + pynutil.insert("\" ")

        # integer part
        integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"") + insert_space

        # singular denominator word implies numerator 1, e.g. نصف (a half)
        numerator_one = pynutil.insert("numerator: \"1\"") + insert_space + graph_denominator_singular
        # dual denominator word implies numerator 2, e.g. ثلثين (two thirds)
        numerator_two = pynutil.insert("numerator: \"2\"") + insert_space + graph_denominator_dual
        # spoken numeral 3..10 + plural denominator, e.g. ثلاثة أرباع (three quarters)
        # NOTE: this rebinds the `numerator_three_to_ten` union defined above
        # into the full tagged graph
        numerator_three_to_ten = (
            pynutil.insert("numerator: \"")
            + numerator_three_to_ten @ cardinal_graph
            + pynutil.insert("\"")
            + insert_space
            + graph_denominator_plural
        )

        # generic "numerator على denominator" form, e.g. اثنا عشر على أربعة وعشرون
        numerators = (
            numerator_rest
            + delete_zero_or_one_space
            + pynutil.delete("على")
            + delete_zero_or_one_space
            + denominator_rest
        )

        fraction = (
            numerator_one | numerator_three_to_ten | numerator_two | pynutil.add_weight(numerators, 0.001)
        )  # apply exceptions first then the rest

        # optional integer part joined by the conjunction "و" (and)
        conjunction = pynutil.delete("و")
        integer = pynini.closure(integer + delete_zero_or_one_space + conjunction + delete_zero_or_one_space, 0, 1)

        graph = optional_graph_negative + integer + fraction
        self.graph = graph
        final_graph = self.add_tokens(self.graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/fraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.ar.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.ar.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.ar.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.ar.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.ar.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.ar.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.ar.taggers.word import WordFst
from nemo_text_processing.text_normalization.ar.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.ar.taggers.tokenize_and_classify import ClassifyFst as TNClassifyFst
from nemo_text_processing.text_normalization.en.graph_utils import INPUT_LOWER_CASED
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
            (NOTE(review): currently unused in this constructor — confirm whether it should be wired in)
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")

        # resolve the cache location for the compiled grammar, if any
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"ar_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # fast path: restore the precompiled FST from the FAR cache
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            # the ITN taggers below are built by inverting TN grammars
            tn_classify = TNClassifyFst(
                input_case='cased', deterministic=True, cache_dir=cache_dir, overwrite_cache=True
            )

            cardinal = CardinalFst(tn_cardinal=tn_classify.cardinal)
            cardinal_graph = cardinal.fst

            decimal = DecimalFst(tn_decimal=tn_classify.decimal)
            decimal_graph = decimal.fst

            fraction = FractionFst(tn_cardinal=tn_classify.cardinal)
            fraction_graph = fraction.fst

            money = MoneyFst(itn_cardinal_tagger=cardinal)
            money_graph = money.fst

            measure = MeasureFst(
                itn_cardinal_tagger=cardinal,
                itn_decimal_tagger=decimal,
                itn_fraction_tagger=fraction,
                deterministic=True,
            )
            measure_graph = measure.fst

            word_graph = WordFst().fst
            punct_graph = PunctuationFst().fst

            # union of all token classifiers; semiotic classes share weight 1.1,
            # plain words get a much higher weight (100) so they only match
            # when no semiotic class applies
            classify = (
                pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.1)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )

            # wrap every classification in a "tokens { ... }" envelope,
            # allowing punctuation tokens before and/or after each token
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )

            # a sentence is one or more tokens separated by whitespace
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space

            self.fst = graph.optimize()

            if far_file:
                # persist the compiled grammar so subsequent runs hit the fast path
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/tokenize_and_classify.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
    e.g. a, -> tokens { name: "a" } tokens { name: "," }
    """

    def __init__(self):
        super().__init__(name="punctuation", kind="classify")

        # the set of punctuation marks recognized as standalone tokens
        marks = "!#$%&\'()*+,-./:;<=>?@^_`{|}~"
        accepted = pynini.union(*marks)

        # tag the accepted mark as a plain name: "..." token
        tagged = pynutil.insert("name: \"") + accepted + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/punctuation.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
from pynini.lib import pynutil
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_SPACE,
GraphFst,
delete_extra_space,
insert_space,
)
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
    e.g. "سبعة وتسعة وتسعون من مئة" -> decimal { negative: "false" integer_part: "7," fractional_part: "99" }
    (NOTE(review): the trailing "," in the integer_part of this example looks
    like a docstring typo — confirm against the TN decimal graph's output.)

    Args:
        tn_decimal: Text normalization Decimal graph
    """

    def __init__(self, tn_decimal):
        super().__init__(name="decimal", kind="classify")

        # optional leading "سالب" (negative) marker
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("سالب", '"true"') + delete_extra_space, 0, 1,
        )

        # invert the TN (written -> spoken) graphs to map spoken words back to digits
        graph_fractional_part = pynini.invert(tn_decimal.graph_fractional).optimize()
        graph_integer_part = pynini.invert(tn_decimal.integer_part).optimize()
        optional_graph_quantity = pynini.invert(tn_decimal.optional_quantity).optimize()

        # drop the Arabic conjunction "و" joining integer and fractional parts
        delete_seperator = pynini.string_map([("و", "")])
        graph_fractional = (
            pynutil.insert('fractional_part: "') + delete_seperator + graph_fractional_part + pynutil.insert('"')
        )
        graph_integer = pynutil.insert('integer_part: "') + graph_integer_part + pynutil.insert('"')

        # optional quantity (scale word) after the fractional part
        optional_graph_quantity = pynutil.insert('quantity: "') + optional_graph_quantity + pynutil.insert('"')
        optional_graph_quantity = pynini.closure(pynini.accep(NEMO_SPACE) + optional_graph_quantity, 0, 1)

        # exposed without sign/negative so other taggers (e.g. measure) can reuse them
        self.final_graph_wo_sign = (
            graph_integer + pynini.accep(NEMO_SPACE) + graph_fractional + optional_graph_quantity
        )
        self.final_graph_wo_negative = optional_graph_negative + self.final_graph_wo_sign

        final_graph = self.add_tokens(self.final_graph_wo_negative)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/decimal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.ar.taggers.money import ar_cur, maj_singular, min_plural, min_singular
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money
    e.g. "خمسة ريال سعودي" -> money { integer_part: "5" currency: "ر.س" }
    e.g. "سبعة دولار وتسعة وتسعون سنت" -> money { integer_part: "7" currency: "$" fractional_part: "99" }

    Args:
        itn_cardinal_tagger: ITN Cardinal Tagger
        deterministic: if True will provide a single transduction option,
            for False multiple transductions are generated (used for audio-based normalization)
    """

    def __init__(self, itn_cardinal_tagger: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="classify", deterministic=deterministic)
        cardinal_graph = itn_cardinal_tagger.graph

        # invert the TN (written -> spoken) currency graphs so spoken currency
        # words map back to written symbols/abbreviations
        graph_unit = pynini.invert(maj_singular)
        graph_unit = pynutil.insert("currency: \"") + convert_space(graph_unit) + pynutil.insert("\"")

        graph_ar_cur = pynini.invert(ar_cur)
        graph_ar_cur = pynutil.insert("currency: \"") + convert_space(graph_ar_cur) + pynutil.insert("\"")

        # pad single-digit cents to two digits (e.g. 5 -> 05)
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)

        # any written minor-unit word (singular or plural)
        min_unit = pynini.project(min_singular | min_plural, "output")

        # cents amount followed by its (deleted) minor-unit word
        cents_standalone = (
            pynutil.insert("fractional_part: \"")
            + cardinal_graph @ add_leading_zero_to_double_digit
            + delete_space
            + pynutil.delete(min_unit)
            + pynutil.insert("\"")
        )

        # optional "و" (and) + cents after the major amount
        optional_cents_standalone = pynini.closure(
            delete_space
            + pynutil.delete("و")
            + pynini.closure(pynutil.delete(" "), 0, 1)
            + insert_space
            + cents_standalone,
            0,
            1,
        )

        # major amount + currency, optionally followed by cents
        graph_integer = (
            pynutil.insert("integer_part: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_unit
            + optional_cents_standalone
        )

        # major amount with an Arabic-specific currency word
        graph_integer_with_ar_cur = (
            pynutil.insert("integer_part: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_ar_cur
        )

        # cents-only amounts; NOTE(review): the currency is hard-coded to "$"
        # here, so standalone cents are always tagged as dollars — confirm
        # this is intentional
        graph_decimal = pynutil.insert("currency: \"$\" integer_part: \"0\" ") + cents_standalone

        final_graph = graph_integer | graph_integer_with_ar_cur | graph_decimal
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/money.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import NEMO_SPACE, GraphFst
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals,
    e.g. سالب تسعة وتسعون -> cardinal { integer: "99" negative: "-" } }
    Numbers below thirteen are not converted.

    Args:
        tn_cardinal: cardinal FST for TN
    """

    def __init__(self, tn_cardinal):
        super().__init__(name="cardinal", kind="classify")
        # Reuse the TN cardinal graph in the inverse direction (spoken words -> digits).
        # Exposed as `self.graph` because sibling taggers (e.g. money) compose with it.
        self.graph = pynini.invert(tn_cardinal.cardinal_numbers).optimize()

        # "سالب" optionally prefixes the number and maps to a "-" sign tag.
        maybe_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("سالب", '"-"') + NEMO_SPACE,
            0,
            1,
        )

        tagged = maybe_negative + pynutil.insert('integer: "') + self.graph + pynutil.insert('"')
        self.fst = self.add_tokens(tagged).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/cardinal.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.