python_code
stringlengths 0
679k
| repo_name
stringlengths 9
41
| file_path
stringlengths 6
149
|
---|---|---|
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying plain tokens that do not belong to
    any special class; this acts as the default (fallback) classifier.
    e.g. sleep -> tokens { name: "sleep" }
    """

    def __init__(self):
        super().__init__(name="word", kind="classify")
        # Any non-empty run of non-space characters becomes the token name.
        token_chars = pynini.closure(NEMO_NOT_SPACE, 1)
        tagged = pynutil.insert("name: \"") + token_chars + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/taggers/word.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure, e.g.
        measure { cardinal { integer: "20" } units: "%" } -> 20%

    Args:
        decimal: ITN Decimal verbalizer (its .numbers graph renders the amount)
        cardinal: ITN Cardinal verbalizer (its .numbers graph renders the amount)
        deterministic: kept for interface parity with the other verbalizers.
    """

    def __init__(self, decimal: GraphFst, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="measure", kind="verbalize", deterministic=deterministic)
        # negative: "true" -> "-", optional.
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-"), 0, 1)
        # units: "<unit>" -> <unit>; the unit string may not contain spaces.
        unit = (
            pynutil.delete("units:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete("\"")
            + delete_space
        )
        # decimal { [negative] <numbers> } -> rendered decimal amount
        graph_decimal = (
            pynutil.delete("decimal {")
            + delete_space
            + optional_sign
            + delete_space
            + decimal.numbers
            + delete_space
            + pynutil.delete("}")
        )
        # cardinal { [negative] <numbers> } -> rendered integer amount
        graph_cardinal = (
            pynutil.delete("cardinal {")
            + delete_space
            + optional_sign
            + delete_space
            + cardinal.numbers
            + delete_space
            + pynutil.delete("}")
        )
        # Amount directly followed by the unit, e.g. "20%".
        # (The original also concatenated pynutil.insert(""), an epsilon no-op;
        # it has been removed.)
        graph = (graph_cardinal | graph_decimal) + delete_space + unit
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SPACE,
GraphFst,
delete_space,
delete_zero_or_one_space,
)
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fractions,
    e.g. fraction { numerator: "8" denominator: "3" } -> "8/3"
    """

    def __init__(self):
        super().__init__(name="fraction", kind="verbalize")
        close_quote = pynutil.delete("\"")
        any_value = pynini.closure(NEMO_NOT_QUOTE)
        # negative: "True" -> "-" (optional; note the capitalized "True" here
        # matches what the corresponding tagger emits).
        sign = pynutil.delete("negative: \"") + pynini.cross("True", "-") + close_quote
        optional_negative = pynini.closure(sign + delete_space, 0, 1)
        integer_part = pynutil.delete("integer_part: \"") + any_value + close_quote
        optional_integer_part = pynini.closure(integer_part + NEMO_SPACE, 0, 1)
        numerator = pynutil.delete("numerator: \"") + any_value + close_quote
        denominator = pynutil.delete("denominator: \"") + any_value + close_quote
        # [-][int ]num/denom
        graph = (
            optional_negative
            + optional_integer_part
            + numerator
            + delete_zero_or_one_space
            + pynutil.insert("/")
            + delete_zero_or_one_space
            + denominator
        )
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/fraction.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.money import MoneyFst
from nemo_text_processing.text_normalization.ar.graph_utils import GraphFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst
    Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")
        cardinal = CardinalFst()
        decimal = DecimalFst()
        fraction = FractionFst()
        # Money and measure reuse the decimal/cardinal sub-grammars.
        money = MoneyFst(decimal, deterministic=True)
        measure = MeasureFst(decimal=decimal, cardinal=cardinal, deterministic=True)
        # Union of all supported semiotic-class verbalizers.
        self.fst = cardinal.fst | decimal.fst | fraction.fst | money.fst | measure.fst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/verbalize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.ar.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.ar.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # A token is either a semiotic-class token or a plain word.
        token_types = VerbalizeFst().fst | WordFst().fst
        # Strip the "tokens { ... }" wrapper around a single token.
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_types
            + delete_space
            + pynutil.delete("}")
        )
        # One or more tokens, single space between them, outer spaces trimmed.
        sentence = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/verbalize_final.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
    decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" } -> -12.5006 billion
    """

    def __init__(self):
        super().__init__(name="decimal", kind="verbalize")

        def _field(field_name):
            # <field>: "<value>" -> <value>
            return (
                pynutil.delete(field_name + ":")
                + delete_space
                + pynutil.delete("\"")
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete("\"")
            )

        # negative: "true" -> "-", optional.
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
        optional_integer = pynini.closure(_field("integer_part") + delete_space, 0, 1)
        # fractional part is prefixed with the decimal point.
        optional_fractional = pynini.closure(pynutil.insert(".") + _field("fractional_part") + delete_space, 0, 1)
        # quantity (e.g. "billion") is appended after a space.
        optional_quantity = pynini.closure(pynutil.insert(" ") + _field("quantity") + delete_space, 0, 1)
        graph = optional_sign + optional_integer + optional_fractional + optional_quantity
        # Exposed for reuse by money/measure verbalizers.
        self.numbers = graph
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/decimal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.ar.graph_utils import NEMO_CHAR, NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
        money { integer_part: "12" fractional_part: "05" currency: "$" } -> $12.05

    Args:
        decimal: ITN Decimal verbalizer (its .numbers graph renders the amount)
        deterministic: kept for interface parity with the other verbalizers.
    """

    def __init__(self, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="verbalize", deterministic=deterministic)
        # currency: "<symbol>" -> <symbol>; the symbol may not contain spaces.
        unit = (
            pynutil.delete("currency:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete("\"")
        )
        # Currency symbol first, then the numeric amount (e.g. $12.05).
        # (A large block of commented-out experimental code was removed here.)
        graph = unit + delete_space + decimal.numbers
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
        # Exposed for reuse by other grammars.
        self.unit = unit
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/money.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal
    e.g. cardinal { integer: "23" negative: "-" } -> -23
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="verbalize")
        # negative: "<char>" -> <char> (the sign character itself, e.g. "-").
        sign = (
            pynutil.delete("negative:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
            + delete_space
        )
        optional_sign = pynini.closure(sign, 0, 1)
        # integer: "<digits>" -> <digits>
        number = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Exposed (without the sign) for reuse by other verbalizers.
        self.numbers = number
        self.fst = self.delete_tokens(optional_sign + number).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/cardinal.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for verbalizing plain tokens
    e.g. tokens { name: "sleep" } -> sleep
    """

    def __init__(self):
        super().__init__(name="word", kind="verbalize")
        token_body = pynini.closure(NEMO_CHAR - " ", 1)
        # name: "<word>" -> <word>
        strip_field = pynutil.delete("name:") + delete_space + pynutil.delete("\"") + token_body + pynutil.delete("\"")
        # Replace non-breaking spaces (U+00A0) with plain ASCII spaces.
        nbsp_to_space = pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = (strip_field @ nbsp_to_space).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/ar/verbalizers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
    e.g. acht uhr e s t-> time { hours: "8" zone: "e s t" }
    e.g. dreizehn uhr -> time { hours: "13" }
    e.g. dreizehn uhr zehn -> time { hours: "13" minutes: "10" }
    e.g. viertel vor zwölf -> time { minutes: "45" hours: "11" }
    e.g. viertel nach zwölf -> time { minutes: "15" hours: "12" }
    e.g. halb zwölf -> time { minutes: "30" hours: "11" }
    e.g. drei vor zwölf -> time { minutes: "57" hours: "11" }
    e.g. drei nach zwölf -> time { minutes: "3" hours: "12" }
    e.g. drei uhr zehn minuten zehn sekunden -> time { hours: "3" hours: "10" sekunden: "10"}

    Args:
        tn_time_verbalizer: TN time verbalizer
    """

    def __init__(self, tn_time_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="time", kind="classify", deterministic=deterministic)
        # Optionally drop spaces anywhere (tiny penalty per deletion) so that
        # compound spellings still match — a lazy way to make compounds work.
        drop_spaces = pynini.closure(NEMO_SIGMA | pynutil.delete(" ", weight=0.0001))
        # Invert the TN verbalizer to get the spoken -> tagged direction.
        tagger = pynini.invert(tn_time_verbalizer.graph @ drop_spaces).optimize()
        self.fst = self.add_tokens(tagger).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.taggers.measure import singular_to_plural, unit_singular
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure. Allows for plural form for unit.
    e.g. minus elf kilogramm -> measure { cardinal { negative: "true" integer: "11" } units: "kg" }
    e.g. drei stunden -> measure { cardinal { integer: "3" } units: "h" }
    e.g. ein halb kilogramm -> measure { decimal { integer_part: "1/2" } units: "kg" }
    e.g. eins komma zwei kilogramm -> measure { decimal { integer_part: "1" fractional_part: "2" } units: "kg" }

    Args:
        itn_cardinal_tagger: ITN Cardinal tagger
        itn_decimal_tagger: ITN Decimal tagger
        itn_fraction_tagger: ITN Fraction tagger
    """

    def __init__(
        self,
        itn_cardinal_tagger: GraphFst,
        itn_decimal_tagger: GraphFst,
        itn_fraction_tagger: GraphFst,
        deterministic: bool = True,
    ):
        super().__init__(name="measure", kind="classify", deterministic=deterministic)
        # Rewrite a whole-token "ein"/"eine" to "eins" before applying the
        # cardinal grammar, so bare "ein <unit>" inputs are accepted.
        cardinal_graph = (
            pynini.cdrewrite(pynini.cross(pynini.union("ein", "eine"), "eins"), "[BOS]", "[EOS]", NEMO_SIGMA)
            @ itn_cardinal_tagger.graph_no_exception
        )
        graph_unit_singular = pynini.invert(unit_singular)  # singular -> abbr
        # Also accept plural spellings by first mapping plural -> singular.
        unit = (pynini.invert(singular_to_plural()) @ graph_unit_singular) | graph_unit_singular  # plural -> abbr
        unit = convert_space(unit)
        graph_unit_singular = convert_space(graph_unit_singular)
        # "minus" -> negative: "true" (optional).
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("minus", "\"true\"") + delete_extra_space, 0, 1
        )
        # "pro <unit>" -> "/<unit-abbr>" (e.g. "kilometer pro stunde" -> km/h).
        unit_misc = pynutil.insert("/") + pynutil.delete("pro") + delete_space + graph_unit_singular
        # Compound units ("<unit> pro <unit>") carry a small weight penalty so
        # the simple alternatives are preferred when both match.
        unit = (
            pynutil.insert("units: \"")
            + (unit | unit_misc | pynutil.add_weight(unit + delete_space + unit_misc, 0.01))
            + pynutil.insert("\"")
        )
        # Decimal amount followed by a unit.
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + itn_decimal_tagger.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )
        # Fraction amount, emitted inside a decimal token as integer_part
        # (e.g. "1/2"), followed by a unit.
        subgraph_fraction = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + pynutil.insert("integer_part: \"")
            + itn_fraction_tagger.graph
            + pynutil.insert("\" }")
            + delete_extra_space
            + unit
        )
        # Plain integer amount followed by a unit.
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )
        final_graph = subgraph_cardinal | subgraph_decimal | subgraph_fraction
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
convert_space,
delete_space,
)
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
    e.g. ein halb -> tokens { name: "1/2" }
    e.g. ein ein halb -> tokens { name: "1 1/2" }
    e.g. drei zwei ein hundertstel -> tokens { name: "3 2/100" }

    Args:
        itn_cardinal_tagger: ITN cardinal tagger
        tn_fraction_verbalizer: TN fraction verbalizer
    """

    def __init__(self, itn_cardinal_tagger: GraphFst, tn_fraction_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="fraction", kind="classify", deterministic=deterministic)
        # Spoken -> tagged direction comes from inverting the TN verbalizer.
        spoken_to_tokens = tn_fraction_verbalizer.graph.invert().optimize()

        def _strip_field(field_name):
            # <field>: "<value>" -> <value>, converted to digits by the ITN
            # cardinal grammar.
            return (
                pynutil.delete(field_name + ": \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
            ) @ itn_cardinal_tagger.graph_no_exception

        # negative: "true"  -> "-" (optional).
        drop_sign = pynini.closure(pynutil.delete("negative: ") + pynini.cross("\"true\" ", "-"), 0, 1)
        denominator = pynutil.insert('/') + _strip_field("denominator")
        # [integer ]numerator/denominator
        body = (
            pynini.closure(_strip_field("integer_part") + pynini.accep(" "), 0, 1)
            + _strip_field("numerator")
            + delete_space
            + denominator
        ).optimize()
        self.graph = spoken_to_tokens @ (drop_sign + body)
        self.fst = (pynutil.insert("name: \"") + convert_space(self.graph) + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space, insert_space
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers, e.g.
    null vier eins eins eins zwei drei vier eins zwei drei vier -> tokens { name: "(0411) 1234-1234" }

    Args:
        tn_cardinal_tagger: TN Cardinal Tagger
    """

    def __init__(self, tn_cardinal_tagger: GraphFst, deterministic: bool = True):
        super().__init__(name="telephone", kind="classify", deterministic=deterministic)
        component_sep = pynini.accep(" ")  # between number groups
        # Digits 1-9 verbalized through the TN cardinal grammar; "0" -> "null".
        spoken_digit = pynini.union(*map(str, range(1, 10))) @ tn_cardinal_tagger.two_digit_non_zero
        spoken_zero = pynini.cross("0", "null")
        # Written form "(0xxx) xxxx-xxxx", spoken out digit by digit.
        written_to_spoken = (
            pynutil.delete("(")
            + spoken_zero
            + insert_space
            + pynini.closure(spoken_digit + insert_space, 2, 2)
            + spoken_digit
            + pynutil.delete(")")
            + component_sep
            + pynini.closure(spoken_digit + insert_space, 3, 3)
            + spoken_digit
            + pynutil.delete("-")
            + insert_space
            + pynini.closure(spoken_digit + insert_space, 3, 3)
            + spoken_digit
        )
        # Invert to get spoken -> written for ITN.
        spoken_to_written = convert_space(pynini.invert(written_to_spoken))
        self.fst = (pynutil.insert("name: \"") + spoken_to_written + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
    e.g. dreizehnter -> tokens { name: "13." }

    Args:
        itn_cardinal_tagger: ITN Cardinal Tagger
        tn_ordinal_verbalizer: TN Ordinal Verbalizer
    """

    def __init__(self, itn_cardinal_tagger: GraphFst, tn_ordinal_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="ordinal", kind="classify", deterministic=deterministic)
        # Spoken -> tagged direction comes from inverting the TN verbalizer.
        spoken_to_tokens = tn_ordinal_verbalizer.graph.invert().optimize()
        # integer: "<value>" -> <value>, converted to digits by the ITN
        # cardinal grammar.
        to_digits = (
            pynutil.delete("integer: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        ) @ itn_cardinal_tagger.graph
        # German ordinals are written as the number followed by a period.
        ordinal = spoken_to_tokens @ to_digits + pynutil.insert(".")
        self.fst = (pynutil.insert("name: \"") + ordinal + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens
    e.g. misses -> tokens { name: "Mrs." }

    Args:
        tn_whitelist_tagger: TN whitelist tagger
        input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/en/data/whitelist.tsv
    """

    def __init__(self, tn_whitelist_tagger: GraphFst, deterministic: bool = True, input_file: str = None):
        super().__init__(name="whitelist", kind="classify", deterministic=deterministic)
        # Prefer an explicit replacement file; otherwise invert the TN tagger.
        whitelist = (
            pynini.string_file(input_file).invert() if input_file else pynini.invert(tn_whitelist_tagger.graph)
        )
        self.fst = (pynutil.insert("name: \"") + convert_space(whitelist) + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/whitelist.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.de.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.de.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.de.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.de.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.de.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.de.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.de.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.de.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.de.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.de.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.de.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.en.taggers.word import WordFst
from nemo_text_processing.text_normalization.de.taggers.cardinal import CardinalFst as TNCardinalTagger
from nemo_text_processing.text_normalization.de.taggers.date import DateFst as TNDateTagger
from nemo_text_processing.text_normalization.de.taggers.decimal import DecimalFst as TNDecimalTagger
from nemo_text_processing.text_normalization.de.taggers.electronic import ElectronicFst as TNElectronicTagger
from nemo_text_processing.text_normalization.de.taggers.whitelist import WhiteListFst as TNWhitelistTagger
from nemo_text_processing.text_normalization.de.verbalizers.date import DateFst as TNDateVerbalizer
from nemo_text_processing.text_normalization.de.verbalizers.electronic import ElectronicFst as TNElectronicVerbalizer
from nemo_text_processing.text_normalization.de.verbalizers.fraction import FractionFst as TNFractionVerbalizer
from nemo_text_processing.text_normalization.de.verbalizers.ordinal import OrdinalFst as TNOrdinalVerbalizer
from nemo_text_processing.text_normalization.de.verbalizers.time import TimeFst as TNTimeVerbalizer
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        deterministic: if True, provides a single transduction option (kept for interface parity)
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        deterministic: bool = True,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)

        far_file = None
        if cache_dir is not None and cache_dir != 'None':
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"de_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Restore the previously compiled grammar instead of rebuilding it.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            # Fixed F541: the original used an f-string with no placeholders here.
            logging.info("Creating ClassifyFst grammars.")
            # Non-deterministic TN taggers/verbalizers are reused (mostly inverted) by
            # the ITN taggers constructed below.
            tn_cardinal_tagger = TNCardinalTagger(deterministic=False)
            tn_date_tagger = TNDateTagger(cardinal=tn_cardinal_tagger, deterministic=False)
            tn_decimal_tagger = TNDecimalTagger(cardinal=tn_cardinal_tagger, deterministic=False)
            tn_ordinal_verbalizer = TNOrdinalVerbalizer(deterministic=False)
            tn_fraction_verbalizer = TNFractionVerbalizer(ordinal=tn_ordinal_verbalizer, deterministic=False)
            tn_time_verbalizer = TNTimeVerbalizer(cardinal_tagger=tn_cardinal_tagger, deterministic=False)
            tn_date_verbalizer = TNDateVerbalizer(ordinal=tn_ordinal_verbalizer, deterministic=False)
            tn_electronic_tagger = TNElectronicTagger(deterministic=False)
            tn_electronic_verbalizer = TNElectronicVerbalizer(deterministic=False)
            tn_whitelist_tagger = TNWhitelistTagger(input_case="cased", deterministic=False, input_file=whitelist)

            cardinal = CardinalFst(tn_cardinal_tagger=tn_cardinal_tagger)
            cardinal_graph = cardinal.fst
            ordinal = OrdinalFst(itn_cardinal_tagger=cardinal, tn_ordinal_verbalizer=tn_ordinal_verbalizer)
            ordinal_graph = ordinal.fst
            decimal = DecimalFst(itn_cardinal_tagger=cardinal, tn_decimal_tagger=tn_decimal_tagger)
            decimal_graph = decimal.fst
            fraction = FractionFst(itn_cardinal_tagger=cardinal, tn_fraction_verbalizer=tn_fraction_verbalizer)
            fraction_graph = fraction.fst
            measure_graph = MeasureFst(
                itn_cardinal_tagger=cardinal, itn_decimal_tagger=decimal, itn_fraction_tagger=fraction
            ).fst
            date_graph = DateFst(
                itn_cardinal_tagger=cardinal, tn_date_verbalizer=tn_date_verbalizer, tn_date_tagger=tn_date_tagger
            ).fst
            word_graph = WordFst().fst
            time_graph = TimeFst(tn_time_verbalizer=tn_time_verbalizer).fst
            money_graph = MoneyFst(itn_cardinal_tagger=cardinal, itn_decimal_tagger=decimal).fst
            whitelist_graph = WhiteListFst(tn_whitelist_tagger=tn_whitelist_tagger).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst(
                tn_electronic_tagger=tn_electronic_tagger, tn_electronic_verbalizer=tn_electronic_verbalizer
            ).fst
            telephone_graph = TelephoneFst(tn_cardinal_tagger=tn_cardinal_tagger).fst

            # Lower weight = preferred path: the whitelist wins over semiotic classes,
            # and plain words (weight 100) are the fallback when nothing else matches.
            classify = (
                pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(whitelist_graph, 1.0)
                | pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.1)
                | pynutil.add_weight(decimal_graph, 1.1)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.1)
                | pynutil.add_weight(money_graph, 1.1)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )

            # Wrap each classification in "tokens { ... }", allowing punctuation
            # tokens before and after each content token.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space
            self.fst = graph.optimize()

            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.taggers.decimal import get_quantity, quantities
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
    e.g. minus elf komma zwei null null sechs billionen -> decimal { negative: "true" integer_part: "11" fractional_part: "2006" quantity: "billionen" }
    e.g. eine billion -> decimal { integer_part: "1" quantity: "billion" }

    Args:
        itn_cardinal_tagger: ITN Cardinal tagger
        tn_decimal_tagger: TN decimal tagger
        deterministic: if True, provides a single transduction option
    """
    def __init__(self, itn_cardinal_tagger: GraphFst, tn_decimal_tagger: GraphFst, deterministic: bool = True):
        super().__init__(name="decimal", kind="classify", deterministic=deterministic)
        # Invert the TN decimal graph: spoken German number words -> written digits.
        self.graph = tn_decimal_tagger.graph.invert().optimize()
        # The German decimal separator is spoken as "komma"; drop it from the output.
        delete_point = pynutil.delete(" komma")
        # Rewrite "eine <quantity>" (matched as the whole input, BOS..EOS) to
        # "eins <quantity>" so spelled quantities like "eine billion" also match
        # the cardinal graph for the integer part.
        allow_spelling = pynini.cdrewrite(pynini.cross("eine ", "eins ") + quantities, "[BOS]", "[EOS]", NEMO_SIGMA)
        graph_fractional = pynutil.insert("fractional_part: \"") + self.graph + pynutil.insert("\"")
        graph_integer = (
            pynutil.insert("integer_part: \"") + itn_cardinal_tagger.graph_no_exception + pynutil.insert("\"")
        )
        # "<integer> komma <fraction>", keeping the single separating space.
        final_graph_wo_sign = graph_integer + delete_point + pynini.accep(" ") + graph_fractional
        # Also accept a trailing quantity word (e.g. "billionen"); see get_quantity.
        self.final_graph_wo_negative = (
            allow_spelling
            @ (
                final_graph_wo_sign
                | get_quantity(
                    final_graph_wo_sign, itn_cardinal_tagger.graph_hundred_component_at_least_one_none_zero_digit
                )
            ).optimize()
        )
        final_graph = itn_cardinal_tagger.optional_minus_graph + self.final_graph_wo_negative
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.de.taggers.money import maj_singular, min_plural, min_singular
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money
    e.g. elf euro und vier cent -> money { integer_part: "11" fractional_part: "04" currency: "€" }

    Args:
        itn_cardinal_tagger: ITN Cardinal Tagger
        itn_decimal_tagger: ITN Decimal Tagger
        deterministic: if True, provides a single transduction option
    """
    def __init__(self, itn_cardinal_tagger: GraphFst, itn_decimal_tagger: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="classify", deterministic=deterministic)
        # Map a whole-input "ein"/"eine" to "eins" first so e.g. "ein euro" yields "1".
        cardinal_graph = (
            pynini.cdrewrite(pynini.cross(pynini.union("ein", "eine"), "eins"), "[BOS]", "[EOS]", NEMO_SIGMA)
            @ itn_cardinal_tagger.graph_no_exception
        )
        graph_decimal_final = itn_decimal_tagger.final_graph_wo_negative
        # Spoken major-currency names -> symbols, inverted from the TN money data.
        graph_unit = pynini.invert(maj_singular)
        graph_unit = pynutil.insert("currency: \"") + convert_space(graph_unit) + pynutil.insert("\"")
        # Cents are always written with two digits, e.g. "4" -> "04".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # Minor-currency unit names ("cent", singular or plural) to be deleted below.
        min_unit = pynini.project(min_singular | min_plural, "output")
        # elf euro (und) vier cent, vier cent
        cents_standalone = (
            pynutil.insert("fractional_part: \"")
            + cardinal_graph @ add_leading_zero_to_double_digit
            + delete_space
            + pynutil.delete(min_unit)
            + pynutil.insert("\"")
        )
        optional_cents_standalone = pynini.closure(
            delete_space
            + pynini.closure(pynutil.delete("und") + delete_space, 0, 1)
            + insert_space
            + cents_standalone,
            0,
            1,
        )
        # elf euro vierzig, only after integer
        optional_cents_suffix = pynini.closure(
            delete_extra_space
            + pynutil.insert("fractional_part: \"")
            # Negative weight prefers this parse over treating the suffix as a new token.
            + pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
            + pynutil.insert("\""),
            0,
            1,
        )
        graph_integer = (
            pynutil.insert("integer_part: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_unit
            + (optional_cents_standalone | optional_cents_suffix)
        )
        graph_decimal = graph_decimal_final + delete_extra_space + graph_unit
        # Cents-only amounts default to euro with a zero integer part, e.g. "vier cent".
        graph_decimal |= pynutil.insert("currency: \"€\" integer_part: \"0\" ") + cents_standalone
        final_graph = graph_integer | graph_decimal
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals. Numbers below ten are not converted.
    Allows both compound numeral strings or separated by whitespace.
    "und" (en: "and") can be inserted between "hundert" and following number or "tausend" and following single or double digit number.
    e.g. minus drei und zwanzig -> cardinal { negative: "-" integer: "23" } }
    e.g. minus dreiundzwanzig -> cardinal { integer: "23" } }
    e.g. dreizehn -> cardinal { integer: "13" } }
    e.g. ein hundert -> cardinal { integer: "100" } }
    e.g. einhundert -> cardinal { integer: "100" } }
    e.g. ein tausend -> cardinal { integer: "1000" } }
    e.g. eintausend -> cardinal { integer: "1000" } }
    e.g. ein tausend zwanzig -> cardinal { integer: "1020" } }

    Args:
        tn_cardinal_tagger: TN cardinal tagger
        deterministic: if True, provides a single transduction option
    """
    def __init__(self, tn_cardinal_tagger: GraphFst, deterministic: bool = True):
        super().__init__(name="cardinal", kind="classify", deterministic=deterministic)
        # add_space_between_chars = pynini.cdrewrite(pynini.closure(insert_space, 0, 1), NEMO_CHAR, NEMO_CHAR, NEMO_SIGMA)
        # Accept the spoken form with or without spaces between number words.
        optional_delete_space = pynini.closure(NEMO_SIGMA | pynutil.delete(" "))
        # Invert the TN cardinal graph: spoken German number words -> digits.
        graph = (tn_cardinal_tagger.graph @ optional_delete_space).invert().optimize()
        self.graph_hundred_component_at_least_one_none_zero_digit = (
            (tn_cardinal_tagger.graph_hundred_component_at_least_one_none_zero_digit @ optional_delete_space)
            .invert()
            .optimize()
        )
        self.graph_ties = (tn_cardinal_tagger.two_digit_non_zero @ optional_delete_space).invert().optimize()
        # this is to make sure if there is an ambiguity with decimal, decimal is chosen, e.g. 1000000 vs. 1 million
        graph = pynutil.add_weight(graph, weight=0.001)
        self.graph_no_exception = graph
        # Single-digit number words are excluded from self.graph below so that
        # numbers below ten stay verbatim (see class docstring).
        self.digit = pynini.arcmap(tn_cardinal_tagger.digit, map_type="rmweight").invert().optimize()
        graph_exception = pynini.project(self.digit, 'input')
        self.graph = (pynini.project(graph, "input") - graph_exception.arcsort()) @ graph
        # Optional leading "minus " becomes the negative field.
        self.optional_minus_graph = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("minus ", "\"-\" "), 0, 1
        )
        final_graph = self.optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying electronic items such as email addresses,
    e.g. c d f eins at a b c punkt e d u -> tokens { name: "cdf1.abc.edu" }

    Args:
        tn_electronic_tagger: TN electronic tagger
        tn_electronic_verbalizer: TN electronic verbalizer
    """

    def __init__(self, tn_electronic_tagger: GraphFst, tn_electronic_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="electronic", kind="classify", deterministic=deterministic)

        # Invert the TN direction: spoken form -> serialization -> written form.
        itn_tagger = pynini.invert(tn_electronic_verbalizer.graph).optimize()
        itn_verbalizer = pynini.invert(tn_electronic_tagger.graph).optimize()
        spoken_to_written = itn_tagger @ itn_verbalizer

        tagged = pynutil.insert("name: \"") + spoken_to_written + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
NEMO_SIGMA,
GraphFst,
convert_space,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date, in the form of (day) month (year) or year
    e.g. vierundzwanzigster juli zwei tausend dreizehn -> tokens { name: "24. Jul. 2013" }
    e.g. neunzehnachtzig -> tokens { name: "1980" }
    e.g. vierzehnter januar -> tokens { name: "14. Jan." }
    e.g. zweiter dritter -> tokens { name: "02.03." }
    e.g. januar neunzehnachtzig -> tokens { name: "Jan. 1980" }
    e.g. zwanzigzwanzig -> tokens { name: "2020" }

    Args:
        itn_cardinal_tagger: ITN cardinal tagger
        tn_date_tagger: TN date tagger
        tn_date_verbalizer: TN date verbalizer
        deterministic: if True, provides a single transduction option
    """
    def __init__(
        self,
        itn_cardinal_tagger: GraphFst,
        tn_date_tagger: GraphFst,
        tn_date_verbalizer: GraphFst,
        deterministic: bool = True,
    ):
        super().__init__(name="date", kind="classify", deterministic=deterministic)
        # Day/month numbers are written with two digits, e.g. "2" -> "02".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # Optionally drop spaces (lightly penalized) so year words may be run together.
        optional_delete_space = pynini.closure(NEMO_SIGMA | pynutil.delete(" ", weight=0.0001))
        # Inverting the TN verbalizer maps spoken dates into serialized day/month/year fields.
        tagger = tn_date_verbalizer.graph.invert().optimize()
        # Unwrap `day: "..."` and convert the spoken number to digits.
        delete_day_marker = (
            pynutil.delete("day: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        ) @ itn_cardinal_tagger.graph_no_exception
        month_as_number = pynutil.delete("month: \"") + itn_cardinal_tagger.graph_no_exception + pynutil.delete("\"")
        # Month names become abbreviations, e.g. "januar" -> "Jan.".
        month_as_string = pynutil.delete("month: \"") + tn_date_tagger.month_abbr.invert() + pynutil.delete("\"")
        convert_year = (tn_date_tagger.year @ optional_delete_space).invert().optimize()
        delete_year_marker = (
            pynutil.delete("year: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
        ) @ convert_year
        # day. month as string (year)
        verbalizer = (
            pynini.closure(delete_day_marker + pynutil.insert(".") + pynini.accep(" "), 0, 1)
            + month_as_string
            + pynini.closure(pynini.accep(" ") + delete_year_marker, 0, 1)
        )
        # day. month as number (year)
        verbalizer |= (
            delete_day_marker @ add_leading_zero_to_double_digit
            + pynutil.insert(".")
            + pynutil.delete(" ")
            + month_as_number @ add_leading_zero_to_double_digit
            + pynutil.insert(".")
            + pynini.closure(pynutil.delete(" ") + delete_year_marker, 0, 1)
        )
        # year
        verbalizer |= delete_year_marker
        final_graph = tagger @ verbalizer
        graph = pynutil.insert("name: \"") + convert_space(final_graph) + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, NEMO_DIGIT, GraphFst, delete_space
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time, e.g.
    time { hours: "8" minutes: "30" zone: "e s t" } -> 08:30 Uhr est
    time { hours: "8" } -> 8 Uhr
    time { hours: "8" minutes: "30" seconds: "10" } -> 08:30:10 Uhr
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="time", kind="verbalize", deterministic=deterministic)
        # Pad single digits with a leading zero, e.g. "8" -> "08".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # Extract the bare digit strings from the serialized fields.
        hour = pynutil.delete("hours: \"") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        minute = pynutil.delete("minutes: \"") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        second = pynutil.delete("seconds: \"") + pynini.closure(NEMO_DIGIT, 1) + pynutil.delete("\"")
        # Time-zone letters are serialized space-separated ("e s t") and joined here.
        zone = (
            pynutil.delete("zone: \"") + pynini.closure(NEMO_ALPHA + delete_space) + NEMO_ALPHA + pynutil.delete("\"")
        )
        optional_zone = pynini.closure(pynini.accep(" ") + zone, 0, 1)
        # ":MM(:SS) Uhr (zone)" suffix; the seconds component is optional.
        graph = (
            delete_space
            + pynutil.insert(":")
            + (minute @ add_leading_zero_to_double_digit)
            + pynini.closure(delete_space + pynutil.insert(":") + (second @ add_leading_zero_to_double_digit), 0, 1)
            + pynutil.insert(" Uhr")
            + optional_zone
        )
        # Hour-only time keeps the hour unpadded, e.g. "8 Uhr".
        graph_h = hour + pynutil.insert(" Uhr") + optional_zone
        # Hour + minutes (+ optional seconds, already covered by `graph` above).
        # NOTE: the original also built an identical `graph_hms` and unioned it in;
        # since `graph` makes seconds optional, that verbatim duplicate is removed.
        graph_hm = hour @ add_leading_zero_to_double_digit + graph
        final_graph = graph_hm | graph_h
        self.fst = self.delete_tokens(final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure, e.g.
    measure { cardinal { negative: "true" integer: "12" } units: "kg" } -> -12 kg
    measure { decimal { integer_part: "1/2" } units: "kg" } -> 1/2 kg
    measure { decimal { integer_part: "1" fractional_part: "2" quantity: "million" } units: "kg" } -> 1,2 million kg

    Args:
        decimal: ITN Decimal verbalizer
        cardinal: ITN Cardinal verbalizer
    """

    def __init__(self, decimal: GraphFst, cardinal: GraphFst, deterministic: bool = True):
        super().__init__(name="measure", kind="verbalize", deterministic=deterministic)

        # `negative: "true"` becomes a leading minus sign; absent otherwise.
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-"), 0, 1)

        # Strip the units field down to its bare (space-free) value.
        unit = (
            pynutil.delete("units:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete("\"")
            + delete_space
        )

        def unwrap(label, numbers):
            # Unwrap `<label> { ... }` around a numbers sub-verbalizer, keeping the sign.
            return (
                pynutil.delete(label + " {")
                + delete_space
                + optional_sign
                + delete_space
                + numbers
                + delete_space
                + pynutil.delete("}")
            )

        number_part = unwrap("cardinal", cardinal.numbers) | unwrap("decimal", decimal.numbers)
        graph = number_part + delete_space + pynutil.insert(" ") + unit
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.de.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.de.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.de.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.de.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.de.verbalizers.time import TimeFst
from nemo_text_processing.text_normalization.de.verbalizers.cardinal import CardinalFst as TNCardinalVerbalizer
from nemo_text_processing.text_normalization.de.verbalizers.decimal import DecimalFst as TNDecimalVerbalizer
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)

        # Non-deterministic TN verbalizers are the building blocks for the ITN ones.
        tn_cardinal = TNCardinalVerbalizer(deterministic=False)
        tn_decimal = TNDecimalVerbalizer(deterministic=False)

        cardinal = CardinalFst(tn_cardinal_verbalizer=tn_cardinal)
        decimal = DecimalFst(tn_decimal_verbalizer=tn_decimal)

        # Union of all supported semiotic-class verbalizers.
        self.fst = (
            TimeFst().fst
            | MoneyFst(decimal=decimal).fst
            | MeasureFst(decimal=decimal, cardinal=cardinal).fst
            | decimal.fst
            | cardinal.fst
        )
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.de.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "jetzt" } tokens { name: "ist" } tokens { time { hours: "12" minutes: "30" } } -> jetzt ist 12:30 Uhr
    """

    def __init__(self, deterministic: bool = True):
        super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)

        # A token is either a semiotic-class verbalization or a plain word.
        token_content = VerbalizeFst().fst | WordFst().fst

        # Unwrap one `tokens { ... }` group.
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_content
            + delete_space
            + pynutil.delete("}")
        )

        # A sentence is one or more tokens joined by single spaces.
        sentence = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_preserve_order
from pynini.lib import pynutil
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
    decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" } -> -12.5006 billion

    Args:
        tn_decimal_verbalizer: TN decimal verbalizer
    """

    def __init__(self, tn_decimal_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="decimal", kind="verbalize", deterministic=deterministic)

        del_space = pynutil.delete(" ")
        # An optional `negative: "..."` field is emitted verbatim (e.g. "-").
        optional_sign = pynini.closure(
            pynutil.delete("negative: \"") + NEMO_NOT_QUOTE + pynutil.delete("\"") + del_space, 0, 1
        )
        # Integer part, decimal comma + fractional part, and quantity are each optional.
        optional_integer = pynini.closure(tn_decimal_verbalizer.integer, 0, 1)
        optional_fraction = pynini.closure(
            del_space + pynutil.insert(",") + tn_decimal_verbalizer.fractional_default, 0, 1
        )

        number = (optional_integer + optional_fraction + tn_decimal_verbalizer.optional_quantity).optimize()
        self.numbers = optional_sign + number
        self.fst = self.delete_tokens(self.numbers + delete_preserve_order).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
        money { integer_part: "12" fractional_part: "05" currency: "$" } -> $12.05

    Args:
        decimal: ITN decimal verbalizer whose `numbers` graph renders the amount
    """

    def __init__(self, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="verbalize", deterministic=deterministic)
        # The currency symbol itself: one or more non-space characters.
        currency_symbol = pynini.closure(NEMO_CHAR - " ", 1)
        # Strip the field wrapper around the symbol while keeping the symbol.
        unit = (
            pynutil.delete("currency:")
            + delete_space
            + pynutil.delete("\"")
            + currency_symbol
            + pynutil.delete("\"")
        )
        # Currency symbol is prefixed to the numeric amount, e.g. "$" + "12.05".
        graph = unit + delete_space + decimal.numbers
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal,
    e.g. cardinal { integer: "23" negative: "-" } -> -23

    Args:
        tn_cardinal_verbalizer: TN cardinal verbalizer whose `numbers` graph is reused
    """

    def __init__(self, tn_cardinal_verbalizer: GraphFst, deterministic: bool = True):
        super().__init__(name="cardinal", kind="verbalize", deterministic=deterministic)
        # Exposed for reuse by other verbalizers.
        self.numbers = tn_cardinal_verbalizer.numbers
        # Keep the sign character while deleting its field wrapper.
        sign = pynutil.delete("negative: \"") + NEMO_NOT_QUOTE + pynutil.delete("\" ")
        graph = pynini.closure(sign, 0, 1) + self.numbers
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/de/verbalizers/cardinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
def int_to_roman(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Alters given fst to convert Arabic numerals into Roman integers (lower cased).
    Valid for values up to 3999.
    e.g.
        "5" -> "v"
        "treinta y uno" -> "xxxi"

    Args:
        fst: Any fst. Composes fst onto Roman conversion outputs.
    """

    def _roman_map(file: str):
        # The TSV files map roman -> arabic; invert so the graph reads arabic digits.
        return pynini.invert(pynini.string_file(get_abs_path(file)))

    digit = _roman_map("data/roman/digit.tsv")
    ties = _roman_map("data/roman/ties.tsv")
    hundreds = _roman_map("data/roman/hundreds.tsv")
    thousands = _roman_map("data/roman/thousands.tsv")

    # A lightly weighted deletion lets a "0" placeholder vanish at any position.
    skip_zero = pynutil.add_weight(pynutil.delete("0"), 0.01)

    one_digit = digit
    two_digits = ties + (digit | skip_zero)
    three_digits = hundreds + (ties | skip_zero) + (digit | skip_zero)
    four_digits = thousands + (hundreds | skip_zero) + (ties | skip_zero) + (digit | skip_zero)

    graph = (one_digit | two_digits | three_digits | four_digits).optimize()
    return fst @ graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/graph_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.es.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_abs_path(rel_path):
    """
    Get absolute path relative to the directory containing this module.

    Args:
        rel_path: path relative to this file's directory

    Returns:
        Absolute path string.
    """
    # os.path.join handles path separators portably instead of hard-coding '/'.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst as EnCardinalFst
from nemo_text_processing.inverse_text_normalization.en.taggers.date import DateFst as EnDateFst
from nemo_text_processing.inverse_text_normalization.en.taggers.decimal import DecimalFst as EnDecimalFst
from nemo_text_processing.inverse_text_normalization.en.taggers.electronic import ElectronicFst as EnElectronicFst
from nemo_text_processing.inverse_text_normalization.en.taggers.measure import MeasureFst as EnMeasureFst
from nemo_text_processing.inverse_text_normalization.en.taggers.money import MoneyFst as EnMoneyFst
from nemo_text_processing.inverse_text_normalization.en.taggers.ordinal import OrdinalFst as EnOrdinalFst
from nemo_text_processing.inverse_text_normalization.en.taggers.punctuation import PunctuationFst as EnPunctuationFst
from nemo_text_processing.inverse_text_normalization.en.taggers.telephone import TelephoneFst as EnTelephoneFst
from nemo_text_processing.inverse_text_normalization.en.taggers.time import TimeFst as EnTimeFst
from nemo_text_processing.inverse_text_normalization.en.taggers.whitelist import WhiteListFst as EnWhiteListFst
from nemo_text_processing.inverse_text_normalization.en.taggers.word import WordFst as EnWordFst
from nemo_text_processing.inverse_text_normalization.es.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.es.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.es.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.es.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.es.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.es.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.es.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.es.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.es.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.es.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.es.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.es.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.es.taggers.word import WordFst
from nemo_text_processing.inverse_text_normalization.es_en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with Spanish whitelist replacements
        en_whitelist: path to a file with English whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        en_whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")

        far_file = None
        # Fall back to the whitelist files bundled with this package.
        if whitelist is None:
            whitelist = get_abs_path("data/es_whitelist.tsv")
        if en_whitelist is None:
            en_whitelist = get_abs_path("data/en_whitelist.tsv")
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"es_en_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: restore the previously compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            # Spanish grammars. Cardinal/ordinal/decimal/fraction are built first
            # because the remaining semiotic classes are composed on top of them.
            cardinal = CardinalFst()
            cardinal_graph = cardinal.fst

            ordinal = OrdinalFst(cardinal)
            ordinal_graph = ordinal.fst

            decimal = DecimalFst(cardinal)
            decimal_graph = decimal.fst

            fraction = FractionFst(cardinal, ordinal)
            fraction_graph = fraction.fst

            measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction).fst
            date_graph = DateFst(cardinal).fst
            word_graph = WordFst().fst
            time_graph = TimeFst().fst
            money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst
            whitelist_graph = WhiteListFst(input_file=whitelist).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst().fst
            telephone_graph = TelephoneFst().fst

            # English grammars, mirroring the Spanish set above.
            en_cardinal = EnCardinalFst(input_case=input_case)
            en_cardinal_graph = en_cardinal.fst

            en_ordinal = EnOrdinalFst(cardinal=en_cardinal, input_case=input_case)
            en_ordinal_graph = en_ordinal.fst

            en_decimal = EnDecimalFst(cardinal=en_cardinal, input_case=input_case)
            en_decimal_graph = en_decimal.fst

            en_measure_graph = EnMeasureFst(cardinal=en_cardinal, decimal=en_decimal, input_case=input_case).fst
            en_date_graph = EnDateFst(ordinal=en_ordinal, input_case=input_case).fst
            en_word_graph = EnWordFst().fst
            en_time_graph = EnTimeFst(input_case=input_case).fst
            en_money_graph = EnMoneyFst(cardinal=en_cardinal, decimal=en_decimal, input_case=input_case).fst
            en_whitelist_graph = EnWhiteListFst(input_file=en_whitelist, input_case=input_case).fst
            en_punct_graph = EnPunctuationFst().fst
            en_electronic_graph = EnElectronicFst(input_case=input_case).fst
            en_telephone_graph = EnTelephoneFst(cardinal=en_cardinal, input_case=input_case).fst

            # Union of all semiotic classes. The add_weight costs bias which
            # grammar is chosen when several accept the same token (lower cost is
            # preferred under shortest-path decoding); word graphs carry very
            # large weights so they act as last-resort fallbacks.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(en_whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(en_time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(en_date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.09)
                | pynutil.add_weight(en_decimal_graph, 1.09)
                | pynutil.add_weight(fraction_graph, 1.09)
                | pynutil.add_weight(measure_graph, 1.6)
                | pynutil.add_weight(en_measure_graph, 1.1)
                | pynutil.add_weight(cardinal_graph, 1.6)
                | pynutil.add_weight(en_cardinal_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.6)
                | pynutil.add_weight(en_ordinal_graph, 1.09)
                | pynutil.add_weight(money_graph, 1.6)
                | pynutil.add_weight(en_money_graph, 1.1)
                | pynutil.add_weight(telephone_graph, 1.6)
                | pynutil.add_weight(en_telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.6)
                | pynutil.add_weight(en_electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
                | pynutil.add_weight(en_word_graph, 120)
            )

            # Punctuation is wrapped in its own token.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            en_punct = (
                pynutil.insert("tokens { ") + pynutil.add_weight(en_punct_graph, weight=1.3) + pynutil.insert(" }")
            )
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            # NOTE(review): '+' binds tighter than '|' on the trailing closure, so
            # this parses as (insert(" ") + punct) | en_punct — en_punct is NOT
            # preceded by an inserted space, unlike punct. Confirm this asymmetry
            # is intended.
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" "))
                + token
                + pynini.closure(pynutil.insert(" ") + punct | en_punct)
            )
            # A sentence is one or more tokens separated by single spaces, with
            # surrounding whitespace removed.
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space
            self.fst = graph.optimize()
            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/taggers/tokenize_and_classify.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/taggers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.en.verbalizers.cardinal import CardinalFst as EnCardinalFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.date import DateFst as EnDateFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.decimal import DecimalFst as EnDecimalFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.electronic import ElectronicFst as EnElectronicFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.measure import MeasureFst as EnMeasureFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.money import MoneyFst as EnMoneyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.ordinal import OrdinalFst as EnOrdinalFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.telephone import TelephoneFst as EnTelephoneFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.time import TimeFst as EnTimeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.whitelist import WhiteListFst as EnWhiteListFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")

        # Spanish verbalizers; decimal/cardinal/fraction instances are shared
        # with the measure and money grammars built on top of them.
        cardinal = CardinalFst()
        decimal = DecimalFst()
        fraction = FractionFst()
        es_graphs = [
            TimeFst().fst,
            DateFst().fst,
            MoneyFst(decimal=decimal).fst,
            fraction.fst,
            MeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction).fst,
            OrdinalFst().fst,
            decimal.fst,
            cardinal.fst,
            WhiteListFst().fst,
            TelephoneFst().fst,
            ElectronicFst().fst,
        ]

        # English verbalizers; each is given an extra weight of 1.1 relative to
        # the unweighted Spanish grammars.
        en_cardinal = EnCardinalFst()
        en_decimal = EnDecimalFst()
        en_graphs = [
            EnDateFst().fst,
            EnMoneyFst(decimal=en_decimal).fst,
            EnMeasureFst(decimal=en_decimal, cardinal=en_cardinal).fst,
            EnOrdinalFst().fst,
            en_decimal.fst,
            en_cardinal.fst,
            EnWhiteListFst().fst,
            EnTelephoneFst().fst,
            EnElectronicFst().fst,
        ]

        # Union all grammars (union is order-independent, so grouping by
        # language does not change the accepted relation).
        graph = es_graphs[0]
        for component in es_graphs[1:]:
            graph |= component
        for component in en_graphs:
            graph |= pynutil.add_weight(component, 1.1)
        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/verbalizers/verbalize.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.verbalizers.word import WordFst
from nemo_text_processing.inverse_text_normalization.es_en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # A token body is either a semiotic-class verbalizer or a plain word.
        token_body = VerbalizeFst().fst | WordFst().fst
        # Strip the "tokens { ... }" wrapper around each token body.
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_body
            + delete_space
            + pynutil.delete("}")
        )
        # Join consecutive tokens with a single space and trim outer whitespace.
        sentence = pynini.closure(one_token + delete_extra_space) + one_token
        self.fst = delete_space + sentence + delete_space
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/verbalizers/verbalize_final.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/verbalizers/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es_en/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import string
from pathlib import Path
from typing import Dict
import pynini
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini import Far
from pynini.examples import plurals
from pynini.export import export
from pynini.lib import byte, pynutil, utf8
# --- Basic single-character acceptors used throughout the French grammars ----
NEMO_CHAR = utf8.VALID_UTF8_CHAR  # any single valid UTF-8 character
NEMO_DIGIT = byte.DIGIT
NEMO_LOWER = pynini.union(*string.ascii_lowercase).optimize()
NEMO_UPPER = pynini.union(*string.ascii_uppercase).optimize()
NEMO_ALPHA = pynini.union(NEMO_LOWER, NEMO_UPPER).optimize()
NEMO_ALNUM = pynini.union(NEMO_DIGIT, NEMO_ALPHA).optimize()
NEMO_HEX = pynini.union(*string.hexdigits).optimize()

# Whitespace variants; the non-breaking space is used to protect spaces inside
# quoted token values during tagging.
NEMO_NON_BREAKING_SPACE = u"\u00A0"
NEMO_SPACE = " "
NEMO_WHITE_SPACE = pynini.union(" ", "\t", "\n", "\r", u"\u00A0").optimize()
NEMO_NOT_SPACE = pynini.difference(NEMO_CHAR, NEMO_WHITE_SPACE).optimize()
NEMO_NOT_QUOTE = pynini.difference(NEMO_CHAR, r'"').optimize()

NEMO_PUNCT = pynini.union(*map(pynini.escape, string.punctuation)).optimize()
NEMO_GRAPH = pynini.union(NEMO_ALNUM, NEMO_PUNCT).optimize()

# Sigma-star: accepts any string of valid characters.
NEMO_SIGMA = pynini.closure(NEMO_CHAR)

# Common whitespace edits shared by taggers and verbalizers.
delete_space = pynutil.delete(pynini.closure(NEMO_WHITE_SPACE))
insert_space = pynutil.insert(" ")
delete_extra_space = pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 1), " ")

# French frequently compounds numbers with hyphen.
delete_hyphen = pynutil.delete(pynini.closure("-", 0, 1))
insert_hyphen = pynutil.insert("-")

# French pluralization: irregular forms come from the suppletive TSV table;
# the regular rules append -s, append -x after eau/eu/ou, or rewrite
# -al/-ail to -aux, combined below via priority union (suppletive wins).
suppletive = pynini.string_file(get_abs_path("data/suppletive.tsv"))
_s = NEMO_SIGMA + pynutil.insert("s")
_x = NEMO_SIGMA + pynini.string_map([("eau"), ("eu"), ("ou")]) + pynutil.insert("x")
_aux = NEMO_SIGMA + pynini.string_map([("al", "aux"), ("ail", "aux")])

graph_plural = plurals._priority_union(
    suppletive, plurals._priority_union(_s, pynini.union(_x, _aux), NEMO_SIGMA), NEMO_SIGMA
).optimize()
SINGULAR_TO_PLURAL = graph_plural
PLURAL_TO_SINGULAR = pynini.invert(graph_plural)

# Case-folding transducers over ASCII letters only.
TO_LOWER = pynini.union(*[pynini.cross(x, y) for x, y in zip(string.ascii_uppercase, string.ascii_lowercase)])
TO_UPPER = pynini.invert(TO_LOWER)
def generator_main(file_name: str, graphs: Dict[str, pynini.FstLike]):
    """
    Exports graph as OpenFst finite state archive (FAR) file with given file name and rule name.

    Args:
        file_name: exported file name
        graphs: Mapping of a rule name and Pynini WFST graph to be exported
    """
    far_exporter = export.Exporter(file_name)
    for rule_name, rule_fst in graphs.items():
        # Optimize each grammar before writing it under its rule name.
        far_exporter[rule_name] = rule_fst.optimize()
    far_exporter.close()
    logging.info(f'Created {file_name}')
def get_plurals(fst):
    """
    Given singular returns plurals

    Args:
        fst: Fst

    Returns plurals to given singular forms
    """
    # Equivalent to SINGULAR_TO_PLURAL @ fst.
    return pynini.compose(SINGULAR_TO_PLURAL, fst)
def get_singulars(fst):
    """
    Given plural returns singulars

    Args:
        fst: Fst

    Returns singulars to given plural forms
    """
    # Equivalent to PLURAL_TO_SINGULAR @ fst.
    return pynini.compose(PLURAL_TO_SINGULAR, fst)
def convert_space(fst) -> 'pynini.FstLike':
    """
    Converts space to nonbreaking space.
    Used only in tagger grammars for transducing token values within quotes, e.g. name: "hello kitty"
    This is making transducer significantly slower, so only use when there could be potential spaces within quotes, otherwise leave it.

    Args:
        fst: input fst

    Returns output fst where breaking spaces are converted to non breaking spaces
    """
    space_to_nbsp = pynini.cross(NEMO_SPACE, NEMO_NON_BREAKING_SPACE)
    return fst @ pynini.cdrewrite(space_to_nbsp, "", "", NEMO_SIGMA)
class GraphFst:
    """
    Base class for all grammar fsts.

    Args:
        name: name of grammar class
        kind: either 'classify' or 'verbalize'
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, name: str, kind: str, deterministic: bool = True):
        self.name = name
        self.kind = kind
        self._fst = None
        self.deterministic = deterministic

        # Pre-compiled grammars live under grammars/<kind>/<name>.far next to
        # this module; when such a FAR exists it is loaded instead of the
        # subclass rebuilding the grammar from scratch.
        self.far_path = Path(os.path.dirname(__file__) + '/grammars/' + kind + '/' + name + '.far')
        if self.far_exist():
            self._fst = Far(self.far_path, mode="r", arc_type="standard", far_type="default").get_fst()

    def far_exist(self) -> bool:
        """
        Returns true if FAR can be loaded
        """
        return self.far_path.exists()

    @property
    def fst(self) -> 'pynini.FstLike':
        # The compiled grammar; assigned by subclasses at the end of __init__
        # (or loaded from the FAR cache above).
        return self._fst

    @fst.setter
    def fst(self, fst):
        self._fst = fst

    def add_tokens(self, fst) -> 'pynini.FstLike':
        """
        Wraps class name around to given fst, producing "<name> { ... }".

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        return pynutil.insert(f"{self.name} {{ ") + fst + pynutil.insert(" }")

    def delete_tokens(self, fst) -> 'pynini.FstLike':
        """
        Deletes class name wrap around output of given fst

        Args:
            fst: input fst

        Returns:
            Fst: fst
        """
        res = (
            pynutil.delete(f"{self.name}")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + fst
            + delete_space
            + pynutil.delete("}")
        )
        # Rewrite non-breaking spaces (introduced by convert_space during
        # tagging) back to regular spaces in the final output.
        return res @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/graph_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.fr.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_abs_path(rel_path):
    """
    Resolve a path relative to this module's directory into an absolute path.

    Args:
        rel_path: path relative to the directory containing this file

    Returns:
        Absolute path (str) to the requested resource.
    """
    # os.path.join is portable and avoids doubled separators, unlike
    # manual string concatenation with '/'.
    return os.path.join(os.path.dirname(os.path.abspath(__file__)), rel_path)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import GraphFst, delete_space
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
    e.g. huit heures -> time { hours: "8" minutes: "00" }
    e.g. treize heures -> time { hours: "13" minutes: "00" }
    e.g. treize heures dix -> time { hours: "13" minutes: "10" }
    e.g. huit heures du matin -> time { hours: "8" minutes: "00" suffix: "am" }
    e.g. huit heures de l'après-midi -> time { hours: "8" minutes: "00" suffix: "pm" }
    e.g. douze heures moins quart -> time { hours: "11" minutes: "45" }
    e.g. douze heures et quart -> time { hours: "12" minutes: "15" }
    e.g. midi et quart -> time { hours: "12" minutes: "15" }
    e.g. minuit et demi -> time { hours: "0" minutes: "30" }
    e.g. douze heures moins demi -> time { hours: "11" minutes: "30" }
    e.g. douze heures moins trois -> time { hours: "11" minutes: "57" }
    """

    def __init__(self):
        super().__init__(name="time", kind="classify")
        # hours, minutes, seconds, suffix, zone, style, speak_period
        # time_zone = pynini.invert(pynini.string_file(get_abs_path("data/time/time_zone.tsv")))
        # The *_to tables support the 'moins' (to-the-hour) reading: presumably
        # they map the spoken hour/minutes onto the previous hour / the minute
        # complement -- TODO confirm against the tsv contents.
        graph_hours_to = pynini.string_file(get_abs_path("data/time/hours_to.tsv"))
        graph_minutes_to = pynini.string_file(get_abs_path("data/time/minutes_to.tsv"))
        graph_hours = pynini.string_file(get_abs_path("data/time/hours.tsv"))
        graph_minutes = pynini.string_file(get_abs_path("data/time/minutes.tsv"))
        graph_suffix_am = pynini.string_file(get_abs_path("data/time/time_suffix_am.tsv"))
        graph_suffix_pm = pynini.string_file(get_abs_path("data/time/time_suffix_pm.tsv"))
        # Any am/pm phrase is normalized to the literal tags "am" / "pm".
        graph_suffix = pynini.cross(graph_suffix_am, "am") | pynini.cross(graph_suffix_pm, "pm")
        # Mapping 'heures': delete the word (optional plural 's') after the hour value.
        graph_heures = pynini.accep("heure") + pynini.accep("s").ques
        graph_heures = pynutil.delete(graph_heures)
        graph_hours += delete_space + graph_heures
        # Midi and minuit
        graph_midi = pynini.cross("midi", "12")
        graph_minuit = pynini.cross("minuit", "0")
        # Mapping 'et demi' and 'et quart' to their minute values.
        graph_et = pynutil.delete("et") + delete_space
        graph_demi = pynini.accep("demi")
        graph_demi += pynini.accep("e").ques  # people vary on feminine or masculine form
        graph_demi = pynini.cross(graph_demi, "30")
        graph_quart = pynini.accep('quart')
        graph_quart = pynini.accep("le ").ques + graph_quart  # sometimes used
        graph_quart = pynini.cross(graph_quart, '15')
        graph_trois_quart = pynini.cross("trois quarts", "45")
        graph_fractions = pynini.union(graph_demi, graph_quart, graph_trois_quart)
        graph_et_fractions = graph_et + graph_fractions
        # Hours component is usually just a cardinal + 'heures' (ignored in case of 'midi/minuit').
        graph_hours_component = pynini.union(graph_hours, graph_midi, graph_minuit)
        graph_hours_component = pynutil.insert("hours: \"") + graph_hours_component + pynutil.insert("\"")
        graph_hours_component += delete_space
        # Minutes component: a plain number or an 'et <fraction>' phrase.
        graph_minutes_component = (
            pynutil.insert(" minutes: \"") + pynini.union(graph_minutes, graph_et_fractions) + pynutil.insert("\"")
        )
        # Hour and minutes together. For 'demi' and 'quart', 'et' is used as a conjunction.
        graph_time_standard = graph_hours_component + graph_minutes_component.ques
        # For time until hour. "quatre heures moins quart" -> 4 h 00 - 0 h 15 = 3 h 45
        graph_moins = pynutil.delete("moins")
        graph_moins += delete_space
        graph_hours_to_component = graph_hours | graph_midi | graph_minuit
        graph_hours_to_component @= graph_hours_to
        graph_hours_to_component = pynutil.insert("hours: \"") + graph_hours_to_component + pynutil.insert("\"")
        graph_hours_to_component += delete_space
        graph_minutes_to_component = pynini.union(graph_minutes, graph_fractions)
        graph_minutes_to_component @= graph_minutes_to
        graph_minutes_to_component = pynutil.insert(" minutes: \"") + graph_minutes_to_component + pynutil.insert("\"")
        graph_time_to = graph_hours_to_component + graph_moins + graph_minutes_to_component
        graph_time_no_suffix = graph_time_standard | graph_time_to
        # Optional am/pm suffix may follow either time pattern.
        graph_suffix_component = pynutil.insert(" suffix: \"") + graph_suffix + pynutil.insert("\"")
        graph_suffix_component = delete_space + graph_suffix_component
        graph_suffix_component = graph_suffix_component.ques
        final_graph = graph_time_no_suffix + graph_suffix_component
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
get_singulars,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure. Allows for plural form for unit.
    e.g. moins onze kilogramme -> measure { negative: "true" cardinal { integer: "11" } units: "kg" }
    e.g. trois heures -> measure { cardinal { integer: "3" } units: "h" }
    e.g. demi gramme -> measure { fraction { numerator: "1" denominator: "2" } units: "g" }

    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        fraction: FractionFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst, fraction: GraphFst):
        super().__init__(name="measure", kind="classify")
        cardinal_graph = cardinal.graph_no_exception
        # Unit vocabulary: measurement names plus optional magnitude prefixes,
        # both loaded from the measurements data files.
        graph_prefix = pynini.string_file(get_abs_path("data/measurements/magnitudes.tsv"))
        graph_unit_singular = pynini.string_file(get_abs_path("data/measurements/measurements.tsv"))
        # Accept both plural and singular spoken forms of each unit.
        unit = get_singulars(graph_unit_singular) | graph_unit_singular
        unit = graph_prefix.ques + unit
        # Optional leading 'moins' marks a negative quantity.
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("moins", "\"true\"") + delete_extra_space, 0, 1
        )
        # Compound units: 'par' or 'à' between two units becomes '/'
        # (e.g. "kilomètres par heure" -> "km/h"). The weighted alternative
        # slightly penalizes the two-unit reading so single units win ties.
        unit_misc = pynutil.insert("/") + (pynutil.delete("par") | pynutil.delete("à")) + delete_space + unit
        unit = (
            pynutil.insert("units: \"")
            + (unit | unit_misc | pynutil.add_weight(unit + delete_space + unit_misc, 0.01))
            + pynutil.insert("\"")
        )
        # decimal quantity + unit
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + decimal.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )
        # fractional quantity + unit
        subgraph_fraction = (
            pynutil.insert("fraction { ")
            + optional_graph_negative
            + fraction.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )
        # integer quantity + unit
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit
        )
        final_graph = subgraph_decimal | subgraph_cardinal | subgraph_fraction
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_CHAR,
GraphFst,
delete_extra_space,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fraction
    e.g. demi -> tokens { fraction { numerator: "1" denominator: "2" } }
    e.g. un et demi -> tokens { fraction { integer_part: "1" numerator: "1" denominator: "2" } }
    e.g. trois et deux centième -> tokens { fraction { integer_part: "3" numerator: "2" denominator: "100" } }

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="fraction", kind="classify")
        # integer_part # numerator # denominator
        graph_cardinal = cardinal.graph_no_exception
        # Strip the ordinal-like denominator morpheme so the remaining word can
        # be read back through the cardinal graph; fractions.tsv additionally
        # undoes stem changes (root alterations) where they occur.
        graph_strip_undo_root_change = pynini.string_file(get_abs_path("data/fractions.tsv"))  # add in absolute path
        graph_strip_no_root_change = pynutil.delete("ième")  # For no change to root
        graph_strip_no_root_change += pynutil.delete("s").ques  # for plurals
        graph_strip = graph_strip_no_root_change | graph_strip_undo_root_change
        # Denominator transducer: spoken fraction word -> digits.
        self.fractional = ((pynini.closure(NEMO_CHAR) + graph_strip) @ graph_cardinal).optimize()
        integer = pynutil.insert("integer_part: \"") + graph_cardinal + pynutil.insert("\" ")
        integer += delete_space
        integer += pynutil.delete("et")  # used to demarcate integer and fractional parts
        numerator = pynutil.insert("numerator: \"") + graph_cardinal + pynutil.insert("\"")
        denominator = pynutil.insert(" denominator: \"") + self.fractional + pynutil.insert("\"")
        # Demi (half) can occur alone without explicit numerator.
        graph_demi_component = pynutil.delete("demi") + pynutil.delete("e").ques + pynutil.delete("s").ques
        graph_demi_component += pynutil.insert("numerator: \"1\" denominator: \"2\"")
        graph_fraction_component = numerator + delete_space + denominator
        graph_fraction_component |= graph_demi_component
        self.graph_fraction_component = graph_fraction_component
        # Optional integer part before the fraction ("un et demi").
        graph = pynini.closure(integer + delete_space, 0, 1) + graph_fraction_component
        graph = graph.optimize()
        self.final_graph_wo_negative = graph
        # Optional leading 'moins' -> negative: "true"
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("moins", "\"true\"") + delete_extra_space, 0, 1
        )
        graph = optional_graph_negative + graph
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
GraphFst,
delete_hyphen,
delete_space,
insert_space,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers. Assumes conventional grouping for Metropolitan France (and overseas departments)
    (two number sequences are grouped as individual cardinals) or digit by digit (chiffre-par-chiffre) e.g.
    "zero un quatre-vingt-deux zero deux vingt-deux cinquante" -> { number_part: "01 42 02 22 50" }
    "zero un quatre deux zero deux deux deux cinq zero" -> { number_part: "01 42 02 22 50" }
    In cases where only one digit of the first pairing is admitted, assumes that the 0 was skipped.
    "une vingt-trois quatre-vingt zero six dix-sept" -> { number_part: "01 23 40 06 17" }
    """

    def __init__(self):
        super().__init__(name="telephone", kind="classify")
        # create `single_digits` and `double_digits` graphs as these will be
        # the building blocks of possible telephone numbers
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_ties_unique = pynini.string_file((get_abs_path("data/numbers/ties_unique.tsv")))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        # Two-digit groups spoken as one cardinal (teens, unique ties, or
        # ties with or without a trailing digit, e.g. 'vingt-deux' -> 22).
        double_digits = pynini.union(
            graph_teen,
            graph_ties_unique,
            (graph_ties + pynutil.insert("0")),
            (graph_ties + delete_hyphen + graph_digit),
        )
        # accept `double zéro` -> `00`
        single_digits = graph_digit | graph_zero
        # Inverted lookup: digit character -> its spoken word; used below to
        # synthesize the 'double <digit>' reading for each of 0-9.
        digit_words = pynini.union(graph_digit.optimize(), graph_zero).invert()
        doubled_digit = pynini.union(
            *[
                pynini.cross(
                    pynini.project(str(i) @ digit_words, "output")
                    + pynini.accep(" ")
                    + pynini.project(str(i) @ digit_words, "output"),
                    pynutil.insert("double ") + pynini.project(str(i) @ digit_words, "output"),
                )
                for i in range(10)
            ]
        )
        doubled_digit.invert()
        # After inversion and composition: 'double cinq' -> '55'.
        digit_twice = single_digits + pynutil.delete(" ") + single_digits
        doubled_digit @= digit_twice
        # First pair: 'zéro' + digit, a bare digit (leading 0 assumed skipped),
        # or a doubled digit.
        graph_first_pair = graph_zero + delete_space + graph_digit
        graph_first_pair |= pynutil.insert("0") + graph_digit  # if zero is omitted
        graph_first_pair |= doubled_digit
        graph_first_pair += (
            delete_space + insert_space
        )  # delete_space since closure allows possible gaps to be removed
        # All digits: four more pairs, each spoken digit by digit.
        graph_pair_all_digits = single_digits + delete_space
        graph_pair_all_digits += single_digits
        graph_pair_all_digits |= doubled_digit
        graph_all_digits = pynini.closure(graph_pair_all_digits + delete_space + insert_space, 3, 3)
        graph_all_digits = graph_first_pair + graph_all_digits + graph_pair_all_digits
        # Paired digits: pairs may also be spoken as two-digit cardinals.
        graph_pair_digits_and_ties = double_digits | graph_pair_all_digits
        graph_digits_and_ties = pynini.closure(graph_pair_digits_and_ties + delete_space + insert_space, 3, 3)
        graph_digits_and_ties = graph_first_pair + graph_digits_and_ties + graph_pair_digits_and_ties
        number_part = pynini.union(graph_all_digits, graph_digits_and_ties)
        number_part = pynutil.insert("number_part: \"") + number_part + pynutil.insert("\"")
        graph = number_part
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_SIGMA, GraphFst, delete_space
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
        vingt-deuxième -> ordinal { integer: "22" morphosyntactic_features: "e" }
    Also notes specific nouns that have unique normalization conventions.
    For instance, 'siècles' are rendered in roman numerals when given an ordinal adjective.
    e.g. dix-neuvième siècle -> XIXe

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="ordinal", kind="classify")
        graph_cardinal = cardinal.graph_no_exception
        graph_undo_root_change = pynini.string_file(
            get_abs_path("data/ordinals/digits_root_change.tsv")
        )  # Returns base number to normal after root change.
        graph_firsts = pynini.string_file(get_abs_path("data/ordinals/firsts.tsv"))
        graph_second = pynini.string_file(get_abs_path("data/ordinals/second.tsv"))
        graph_special_ordinals = pynini.string_file(get_abs_path("data/ordinals/key_nouns.tsv"))
        # Removes the ordinal morpheme so the stem can be read as a cardinal.
        graph_no_root_change = pynutil.delete("ième")  # For no change to root
        graph_strip_morpheme = pynini.union(graph_no_root_change, graph_undo_root_change)
        graph_strip_morpheme = NEMO_SIGMA + graph_strip_morpheme
        graph_integer_component = graph_strip_morpheme @ graph_cardinal
        graph_morpheme_component = pynutil.insert("e")  # Put the superscript in.
        graph_morpheme_component += pynini.accep("s").ques  # In case of plurals.
        # Concatenate with cardinal graph.
        graph_ordinal = pynutil.insert("integer: \"") + graph_integer_component + pynutil.insert("\"")
        graph_ordinal += (
            pynutil.insert(" morphosyntactic_features: \"") + graph_morpheme_component
        )  # Leave open in case further morphemes occur
        # 'Premier' has a different superscript depending on gender, need to take
        # note if 'premier' or 'première'.
        graph_firsts = pynutil.insert("integer: \"1\" morphosyntactic_features: \"") + graph_firsts
        # Second uses 'd' as a superscript.
        graph_second = pynutil.insert("integer: \"2\" morphosyntactic_features: \"") + graph_second
        graph = graph_firsts | graph_second | graph_ordinal
        # For roman numerals. Carries over designation to verbalizer.
        graph_special_ordinals = pynutil.insert("/") + delete_space + graph_special_ordinals
        graph += graph_special_ordinals.ques + pynutil.insert("\"")
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import GraphFst, convert_space
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens,
    e.g. misses -> tokens { name: "mrs." }
    This class has the highest priority among all classifier grammars.
    Whitelisted tokens are defined and loaded from "data/whitelist.tsv"
    unless input_file is specified.

    Args:
        input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/fr/data/whitelist.tsv
    """

    def __init__(self, input_file: str = None):
        super().__init__(name="whitelist", kind="classify")
        # A user-supplied file is stored written->spoken, so it is inverted to
        # match the spoken->written direction of the bundled default file.
        if input_file:
            replacements = pynini.string_file(input_file).invert()
        else:
            replacements = pynini.string_file(get_abs_path("data/whitelist.tsv"))
        name_field = convert_space(replacements)
        self.fst = (pynutil.insert("name: \"") + name_field + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.inverse_text_normalization.fr.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.fr.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import INPUT_LOWER_CASED
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.

    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """

    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")
        # Location of the compiled-grammar cache, if caching is enabled.
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"fr_itn_{input_case}.far")
        if not overwrite_cache and far_file and os.path.exists(far_file):
            # Fast path: restore the previously compiled grammar from the FAR cache.
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            # Build the per-class taggers; several depend on the cardinal grammar.
            cardinal = CardinalFst()
            cardinal_graph = cardinal.fst
            fraction = FractionFst(cardinal)
            fraction_graph = fraction.fst
            ordinal = OrdinalFst(cardinal)
            ordinal_graph = ordinal.fst
            decimal = DecimalFst(cardinal)
            decimal_graph = decimal.fst
            measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction).fst
            date_graph = DateFst(cardinal).fst
            word_graph = WordFst().fst
            time_graph = TimeFst().fst
            money_graph = MoneyFst(cardinal, decimal).fst
            whitelist_graph = WhiteListFst(input_file=whitelist).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst().fst
            telephone_graph = TelephoneFst().fst
            # Union of all taggers. Lower weight wins, so whitelist has the
            # highest priority and the catch-all word graph (weight 100) is
            # the fallback of last resort.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.05)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.08)
                | pynutil.add_weight(measure_graph, 1.1)
                | pynutil.add_weight(cardinal_graph, 1.1)
                | pynutil.add_weight(ordinal_graph, 1.1)
                | pynutil.add_weight(fraction_graph, 1.09)
                | pynutil.add_weight(money_graph, 1.07)
                | pynutil.add_weight(telephone_graph, 1.1)
                | pynutil.add_weight(electronic_graph, 1.1)
                | pynutil.add_weight(word_graph, 100)
            )
            # Wrap each classified span in a tokens { ... } structure, allowing
            # punctuation tokens before and after it.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space
            self.fst = graph.optimize()
            if far_file:
                # Persist the compiled grammar so later instantiations can reuse it.
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
    e.g. a, -> tokens { name: "a" } tokens { name: "," }
    """

    def __init__(self):
        super().__init__(name="punctuation", kind="classify")
        # ASCII punctuation marks plus the French guillemets « ».
        marks = "!#$%&'()*+,-./:;<=>?@^_`{|}~" + "\u00AB" + "\u00BB"
        punct = pynini.union(*marks)
        self.fst = (pynutil.insert("name: \"") + punct + pynutil.insert("\"")).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/punctuation.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_hyphen,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
def get_quantity(decimal: 'pynini.FstLike', cardinal_up_to_thousand: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
    e.g. one million -> integer_part: "1" quantity: "million"
    e.g. one point five million -> integer_part: "1" fractional_part: "5" quantity: "million"
    Will tag cases up to denominations of tens of hundreds of thousand. 'douze cent mille millions' -> 1 200 000 millions

    Args:
        decimal: decimal FST
        cardinal_up_to_thousand: cardinal FST
    """
    # Strip any leading zeros so the written integer carries no zero padding.
    numbers = cardinal_up_to_thousand @ (
        pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
    )
    # Quantity words (singular and plural, long-scale French) kept spelled out.
    suffix = pynini.union(
        "million",
        "millions",
        "milliard",
        "milliards",
        "billion",
        "billions",
        "billiard",
        "billiards",
        "trillion",
        "trillions",
        "trilliard",
        "trilliards",
    )
    res = (
        pynutil.insert("integer_part: \"")
        + numbers
        + pynutil.insert("\"")
        + (
            pynini.union(delete_hyphen, delete_extra_space)
        )  # Can be written either as 'deux-millions' or 'deux millions' depending on whether it registers as a noun or part of cardinal.
        + pynutil.insert(" quantity: \"")
        + suffix
        + pynutil.insert("\"")
    )
    # Decimal variant, e.g. 'un virgule deux millions'.
    res |= decimal + delete_extra_space + pynutil.insert(" quantity: \"") + suffix + pynutil.insert("\"")
    return res
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
        Decimal point is "," (virgule).
    e.g. moins un virgule deux six -> decimal { negative: "true" integer_part: "1" fractional_part: "26" }

    This decimal rule assumes that decimals can be pronounced as:
    (a cardinal) + ('virgule') plus (any sequence of cardinals <1 million, including 'zero')

    Also writes large numbers in shortened form, e.g.
    e.g. un virgule deux-six-million -> decimal { negative: "false" integer_part: "1" fractional_part: "26" quantity: "million" }
    e.g. deux-million -> decimal { negative: "false" integer_part: "2" quantity: "millions" }
    e.g. moins cent-vingt-quatre-millions -> decimal { negative: "true" integer_part: "124" quantity: "millions" }

    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="decimal", kind="classify")
        # number after decimal point can be any series of cardinals <1 million, including 'zero'
        graph_decimal = cardinal.numbers_up_to_million
        graph_decimal = pynini.closure(graph_decimal + delete_space) + graph_decimal
        self.graph = graph_decimal
        # decimal point is denoted by 'virgule'
        graph_fractional_separator = pynutil.delete("virgule")
        # Possible negatives: optional leading 'moins'.
        optional_graph_negative = pynutil.insert("negative: ") + pynini.cross("moins", "\"true\"") + delete_extra_space
        optional_graph_negative = optional_graph_negative.ques
        # Fractional portion
        graph_fractional = pynutil.insert("fractional_part: \"") + graph_decimal + pynutil.insert("\"")
        # Integers (zero is not part of graph_no_exception, so it is added explicitly).
        cardinal_graph = cardinal.graph_no_exception | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
        # Final graphs: optional integer part, 'virgule', fractional part.
        final_graph_wo_sign = (
            pynini.closure(graph_integer + delete_extra_space, 0, 1)
            + graph_fractional_separator
            + delete_extra_space
            + graph_fractional
        )
        final_graph = optional_graph_negative + final_graph_wo_sign
        # Expose the unsigned graph (with quantity support) for reuse, e.g. by MoneyFst/MeasureFst.
        self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
            final_graph_wo_sign, cardinal.graph_hundreds_component_at_least_one_none_zero_digit
        )
        final_graph |= optional_graph_negative + get_quantity(
            final_graph_wo_sign, cardinal.graph_hundreds_component_at_least_one_none_zero_digit
        )
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money
    e.g. douze euro cinq -> money { integer_part: "12" currency: "€" fractional_part: "05" }
    e.g. zéro euro cinq -> money { integer_part: "0" currency: "€" fractional_part: "05" }
    e.g. cinq centimes -> money { integer_part: "0" currency: "€" fractional_part: "05" }
    Note, the currency symbol seems more common for exact amounts and quantities less than 'un million'
    For 'round' quantities of >=million (milliard, billion), the symbol is dropped. This allows
    use of the 'de' preposition.
    e.g. cinq millions d'euros -> money { integer_part: "5" currency: "d'euros" }
    e.g. un milliard d'euros -> money { integer_part: "1" currency: "d'euros" }
    e.g. trois virgule trois millions d'euros -> money { integer_part: "3" currency: "d'euros" fractional_part: "3" }
    Currency is included for uniform tagging.
    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """
    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="money", kind="classify")
        # Emitted fields: quantity, integer_part, fractional_part, currency
        # quantities
        cardinal_graph = cardinal.graph_no_exception
        graph_decimal = decimal.final_graph_wo_negative
        # Converts currency names to symbols
        convert_currency_major = pynini.string_file(
            get_abs_path("data/money/currency_major.tsv")
        )  # major denominations
        convert_currency_minor = pynini.string_file(
            get_abs_path("data/money/currency_minor.tsv")
        )  # minor denominations to major symbol. (e.g. 5 cents -> 0.05 $ )
        # Input-side projection: accepts any currency word without converting it.
        accept_all_currency = (convert_currency_major | convert_currency_minor).project(
            "input"
        )  # recognizes all currencies
        # Graphs for large round amounts ('deux billiards d'euros', 'un milliard de dollars')
        graph_de = pynini.union("de ", "des ", "d'")  # the use of de/d'only occurs with round amounts
        graph_currency_component_large_round_amounts = graph_de + accept_all_currency
        graph_currency_component_large_round_amounts = (
            pynutil.insert(" currency: \"") + graph_currency_component_large_round_amounts + pynutil.insert("\"")
        )
        graph_money_large_round_amounts = (
            graph_decimal + delete_space
        )  # graph_decimal includes tags and quantities already
        graph_money_large_round_amounts += graph_currency_component_large_round_amounts
        # For standard currency
        # Pads a single digit with a leading zero so cents always render as two digits.
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
        # Graphs integer denomination for large denominations (e.g. $)
        graph_integer_component_major = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
        graph_integer_component_major += delete_space
        graph_currency_component_major = (
            pynutil.insert(" currency: \"") + convert_currency_major + pynutil.insert("\"")
        )
        graph_decimal_component_major = (
            delete_space
            + pynutil.insert(" fractional_part: \"")
            + (cardinal_graph @ add_leading_zero_to_double_digit)
            + pynutil.insert("\"")
        )
        # Rare cases where 'et' will separate major and minor denominations.
        delete_minor_currency = pynini.project(convert_currency_minor, "input")
        delete_minor_currency = delete_extra_space + pynutil.delete(delete_minor_currency)
        delete_et = delete_extra_space + pynutil.delete("et")
        # Major amount with optional 'et', optional fractional part, optional trailing minor-currency word.
        graph_money_major = (
            graph_integer_component_major
            + graph_currency_component_major
            + delete_et.ques
            + graph_decimal_component_major.ques
            + delete_minor_currency.ques
        )
        # For cases when only small denominations are used.
        graph_integer_component_minor = pynutil.insert("integer_part: \"0\"")
        graph_decimal_component_minor = (
            pynutil.insert(" fractional_part: \"")
            + (cardinal_graph @ add_leading_zero_to_double_digit)
            + pynutil.insert("\"")
        )
        graph_decimal_component_minor += delete_extra_space
        graph_currency_component_minor = (
            pynutil.insert(" currency: \"") + convert_currency_minor + pynutil.insert("\"")
        )
        graph_money_minor = (
            graph_integer_component_minor + graph_decimal_component_minor + graph_currency_component_minor
        )
        graph_money_standard_amounts = graph_money_major | graph_money_minor
        final_graph = graph_money_large_round_amounts | graph_money_standard_amounts
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_hyphen,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
def rewrite(cardinal: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Rewrite cardinals written in traditional orthography (no '-' for numbers > 100)
    to current orthography ('-' between all words in a number string), then compose
    with the cardinal FST.
    e.g. deux mille cent vingt-trois -> deux-mille-cent-vingt-trois

    In cases where the original orthography is already current, or the string mixes
    the two orthographies, this renders an invalid form that will not pass through
    CardinalFst.
    e.g. deux-mille cent-vingt-trois -> "deux##vingt-trois" ('#' is not accepted in
    the cardinal FST and will fail to convert.)

    Args:
        cardinal: cardinal FST

    Returns:
        FST accepting both orthographies, mapped through `cardinal`.
    """
    # Traditional orthography does not hyphenate numbers > 100; this inserts hyphens
    # in those contexts.
    targets = pynini.string_map(
        [
            "et",  # for 'et un/onze'
            "cent",
            "mille",
            "million",
            "milliard",
            "billion",
            "billiard",
            "trillion",
            "trilliard",
        ]
    )
    targets += pynini.accep("s").ques  # allow plural forms, e.g. 'millions'
    no_spaces = pynini.closure(NEMO_NOT_SPACE)
    # Valid numbers in reformed orthography will have no spaces.
    new_orthography_sigma = no_spaces
    # Old orthography will not have these strings. Replacing with '#' to mark them.
    targets_for_filtering = ("-" + targets) | ("-" + targets + "-") | (targets + "-")
    # Renamed from `filter` to avoid shadowing the builtin.
    marker_filter = pynini.cdrewrite(
        pynini.cross(targets_for_filtering, "#"), "", "", NEMO_SIGMA
    )  # Invalid for cardinal
    old_orthography_sigma = pynini.difference(NEMO_CHAR, "#")  # Marked character removed from sigma_star.
    old_orthography_sigma.closure()  # in-place closure on the mutable FST
    # Only accept strings that occur in old orthography. (This avoids tying two non-related numbers together.)
    # e.g. mille cent-une -> mille-cent-une
    marker_filter @= old_orthography_sigma
    # Now know replacements will only work around targets
    replace_left = pynini.cdrewrite(pynini.cross(" ", "-"), "", targets, NEMO_SIGMA)
    replace_right = pynini.cdrewrite(pynini.cross(" ", "-"), targets, "", NEMO_SIGMA)
    replace = replace_left @ replace_right
    graph = new_orthography_sigma | (marker_filter @ replace)
    return graph @ cardinal
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals
    e.g. moins vingt-trois -> cardinal { negative: "-" integer: "23"}
    This class converts cardinals up to (but not including) "un-quatrillion",
    i.e up to "one septillion" in English (10^{24}).
    Cardinals below nine are not converted (in order to avoid
    "j'ai une pomme." --> "j'ai 1 pomme" and any other odd conversions.)
    This transducer accommodates both traditional hyphenation of numbers ('-' for most numbers <100)
    and current hyphenation (all elements of number are hyphenated), prioritizing the latter.
    e.g cent cinquante et un -> cardinal { integer: "151"}
        cent-cinquante-et-un -> cardinal { integer: "151"}
    This is done through a context dependent rewrite that attempts to map old spelling to new.
    e.g. cent cinquante et un -> cent-cinquante-et-un
    """
    def __init__(self):
        super().__init__(name="cardinal", kind="classify")
        # TSV tables mapping spoken French number words to digit strings.
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_teens = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_ties_unique = pynini.string_file(get_abs_path("data/numbers/ties_unique.tsv"))
        # Tens components
        graph_tens_component = graph_ties + ((delete_hyphen + graph_digit) | pynutil.insert("0"))
        graph_tens_component = pynini.union(graph_tens_component, graph_teens, graph_ties_unique)
        # Pads single digits (or nothing) with leading zeros so the component is always two digits.
        graph_tens_component_with_leading_zeros = pynini.union(
            graph_tens_component, (pynutil.insert("0") + (graph_digit | pynutil.insert("0", weight=0.01)))
        )
        # Hundreds components
        graph_cent_singular = pynutil.delete("cent")  # Used in hundreds place
        graph_cent_plural = pynini.cross(
            "cents", "00"
        )  # Only used as terminus of hundred sequence. deux cents -> 200, deux cent un -> 201
        # Remove 'un'/'une' from the digits: French says 'cent', not 'un cent'.
        graph_digit_no_one = pynini.project(pynini.union("un", "une"), 'input')
        graph_digit_no_one = (pynini.project(graph_digit, "input") - graph_digit_no_one.arcsort()) @ graph_digit
        graph_hundreds_component_singular = (
            graph_digit_no_one + delete_hyphen + graph_cent_singular
        )  # Regular way: [1-9] * 100
        graph_hundreds_component_singular = pynini.union(graph_hundreds_component_singular, pynini.cross("cent", "1"))
        graph_hundreds_component_singular += delete_hyphen
        graph_hundreds_component_singular += graph_tens_component_with_leading_zeros
        graph_hundreds_component_plural = graph_digit_no_one + delete_hyphen + graph_cent_plural
        graph_hundreds_component = pynini.union(
            graph_hundreds_component_singular,
            graph_hundreds_component_plural,
            pynutil.insert("0") + graph_tens_component_with_leading_zeros,
        )
        # Restrict to three-digit strings containing at least one non-zero digit.
        graph_hundreds_component_at_least_one_none_zero_digit = graph_hundreds_component @ (
            pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
        )
        self.graph_hundreds_component_at_least_one_none_zero_digit = rewrite(
            graph_hundreds_component_at_least_one_none_zero_digit
        ).optimize()
        # Graph thousands (we'll need this for cases of mille millions, mille milliards...)
        graph_tens_of_hundreds_component_singular = (
            graph_tens_component + delete_hyphen + graph_cent_singular
        )  # Tens of hundreds. e.g. 1900 = nineteen hundred/ 'dix neuf cents"
        graph_tens_of_hundreds_component_singular += delete_hyphen + graph_tens_component_with_leading_zeros
        graph_tens_of_hundreds_component_plural = graph_tens_component + delete_hyphen + graph_cent_plural
        graph_tens_of_hundred_component = (
            graph_tens_of_hundreds_component_plural | graph_tens_of_hundreds_component_singular
        )
        graph_thousands = pynini.union(
            graph_hundreds_component_at_least_one_none_zero_digit + delete_hyphen + pynutil.delete("mille"),
            pynutil.insert("001") + pynutil.delete("mille"),  # because 'mille', not 'un mille'
            pynutil.insert("000", weight=0.1),
        )
        # All other large amounts: each power-of-a-thousand component yields three digits,
        # with "000" as the weighted-default when the component is absent.
        graph_millions = pynini.union(
            graph_hundreds_component_at_least_one_none_zero_digit
            + delete_hyphen
            + (pynutil.delete("million") | pynutil.delete("millions")),
            pynutil.insert("000", weight=0.1),
        )
        graph_milliards = pynini.union(  # French for English 'billion'
            graph_hundreds_component_at_least_one_none_zero_digit
            + delete_hyphen
            + (pynutil.delete("milliard") | pynutil.delete("milliards")),
            pynutil.insert("000", weight=0.1),
        )
        graph_billions = pynini.union(  # NOTE: this is English 'trillion.'
            graph_hundreds_component_at_least_one_none_zero_digit
            + delete_hyphen
            + (pynutil.delete("billions") | pynutil.delete("billion")),
            pynutil.insert("000", weight=0.1),
        )
        graph_mille_billion = pynini.union(
            graph_hundreds_component_at_least_one_none_zero_digit + delete_hyphen + pynutil.delete("mille"),
            pynutil.insert("001") + pynutil.delete("mille"),  # because we say 'mille', not 'un mille'
        )
        graph_mille_billion += delete_hyphen + (
            graph_millions | pynutil.insert("000") + pynutil.delete("billions")
        )  # allow for 'mil millones'
        graph_mille_billion |= pynutil.insert("000000", weight=0.1)
        # NOTE(review): graph_mille_billion is built but not referenced in the final union below — confirm intent.
        graph_billiards = pynini.union(
            graph_hundreds_component_at_least_one_none_zero_digit
            + delete_hyphen
            + (pynutil.delete("billiards") | pynutil.delete("billiard")),
            pynutil.insert("000", weight=0.1),
        )
        graph_trillions = pynini.union(  # One thousand English trillions.
            graph_hundreds_component_at_least_one_none_zero_digit
            + delete_hyphen
            + (pynutil.delete("trillions") | pynutil.delete("trillion")),
            pynutil.insert("000", weight=0.1),
        )
        graph_trilliards = pynini.union(
            graph_hundreds_component_at_least_one_none_zero_digit
            + delete_hyphen
            + (pynutil.delete("trilliards") | pynutil.delete("trilliard")),
            pynutil.insert("000", weight=0.1),
        )
        # Full number: concatenation of all components from trilliards down to hundreds,
        # plus the special tens-of-hundreds form ('dix-neuf-cents') and zero.
        graph = pynini.union(
            graph_trilliards
            + delete_hyphen
            + graph_trillions
            + delete_hyphen
            + graph_billiards
            + delete_hyphen
            + graph_billions
            + delete_hyphen
            + graph_milliards
            + delete_hyphen
            + graph_millions
            + delete_hyphen
            + graph_thousands
            + delete_hyphen
            + graph_hundreds_component,
            graph_tens_of_hundred_component,
            graph_zero,
        )
        # Strip leading zeros produced by the "000" fillers (but keep a lone "0").
        graph = graph @ pynini.union(
            pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT), "0"
        )
        # Normalize traditional orthography to hyphenated form before acceptance.
        graph = rewrite(graph)
        self.graph_no_exception = graph.optimize()
        # save self.numbers_up_to_thousand for use in DecimalFst
        digits_up_to_thousand = NEMO_DIGIT | (NEMO_DIGIT ** 2) | (NEMO_DIGIT ** 3)
        numbers_up_to_thousand = pynini.compose(graph, digits_up_to_thousand).optimize()
        self.numbers_up_to_thousand = numbers_up_to_thousand
        # save self.numbers_up_to_million for use in DecimalFst
        digits_up_to_million = (
            NEMO_DIGIT
            | (NEMO_DIGIT ** 2)
            | (NEMO_DIGIT ** 3)
            | (NEMO_DIGIT ** 4)
            | (NEMO_DIGIT ** 5)
            | (NEMO_DIGIT ** 6)
        )
        numbers_up_to_million = pynini.compose(graph, digits_up_to_million).optimize()
        self.numbers_up_to_million = numbers_up_to_million
        # don't convert cardinals from zero to nine inclusive
        graph_exception = pynini.project(pynini.union(graph_digit, graph_zero), 'input')
        self.graph = (pynini.project(graph, "input") - graph_exception.arcsort()) @ graph
        optional_minus_graph = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("moins", "\"-\"") + NEMO_SPACE, 0, 1
        )
        final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_ALPHA, GraphFst, insert_space
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying 'electronic' semiotic classes, i.e.
    email address (which get converted to "username" and "domain" fields),
    and URLS (which get converted to a "protocol" field).
    e.g. c d f une arobase a b c point e d u -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
    e.g. double vé double vé double vé a b c point e d u -> tokens { electronic { protocol: "www.abc.edu" } }
    """
    def __init__(self):
        super().__init__(name="electronic", kind="classify")
        # Deletes exactly one space between spelled-out characters.
        delete_extra_space = pynutil.delete(" ")
        # Letters plus spoken digit/zero words.
        alpha_num = (
            NEMO_ALPHA
            | pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
            | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        )
        symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv"))
        # Spoken forms of '@' in French.
        ampersand = pynini.string_map([("arobase"), ("chez"), ("at"), ("à")])
        accepted_username = alpha_num | symbols
        process_dot = pynini.cross("point", ".")
        # Username: sequence of spelled-out characters, must start and end with alphanumeric.
        username = (
            pynutil.insert("username: \"")
            + alpha_num
            + delete_extra_space
            + pynini.closure(accepted_username + delete_extra_space)
            + alpha_num
            + pynutil.insert("\"")
        )
        single_alphanum = pynini.closure(alpha_num + delete_extra_space) + alpha_num
        # Server/domain: either spelled out character-by-character or a whitelisted name.
        server = single_alphanum | pynini.string_file(get_abs_path("data/electronic/server_name.tsv"))
        domain = single_alphanum | pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
        domain_graph = (
            pynutil.insert("domain: \"")
            + server
            + delete_extra_space
            + process_dot
            + delete_extra_space
            + domain
            + pynutil.insert("\"")
        )
        # Email: username @ domain.
        graph = (
            username
            + delete_extra_space
            + pynutil.delete(ampersand)
            + insert_space
            + delete_extra_space
            + domain_graph
        )
        ############# url ###
        # Spoken forms of 'www' / 'http(s)://'.
        protocol_end = pynini.cross(pynini.union("www", "w w w", "double vé double vé double vé"), "www")
        protocol_start = pynini.cross(pynini.union("http", "h t t p", "ache té té pé"), "http")
        protocol_start |= pynini.cross(pynini.union("https", "h t t p s", "ache té té pé esse"), "https")
        protocol_start += pynini.cross(
            pynini.union(
                " deux-points barre oblique barre oblique ",
                " deux-points barre barre ",
                " deux-points double barre ",
                " deux-points slash slash ",
            ),
            "://",
        )
        # e.g. .com, .es
        ending = (
            delete_extra_space
            + symbols
            + delete_extra_space
            + (domain | pynini.closure(accepted_username + delete_extra_space) + accepted_username)
        )
        # URL: optional 'http(s)://' + 'www' + dotted host + one or more endings.
        protocol = (
            pynini.closure(protocol_start, 0, 1)
            + protocol_end
            + delete_extra_space
            + process_dot
            + delete_extra_space
            + (pynini.closure(delete_extra_space + accepted_username, 1) | server)
            + pynini.closure(ending, 1)
        )
        protocol = pynutil.insert("protocol: \"") + protocol + pynutil.insert("\"")
        graph |= protocol
        ########
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import GraphFst, delete_extra_space
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date, in the form of (day) month (year) or year
    e.g. le vingt-quatre juillet deux-mille-treize -> date { day: "24" month: "juillet" year: "2013" preserve_order: true }
    e.g. le premier janvier -> date { day: "1" month: "janvier" preserve_order: true }
    Also will convert colloquialism of spelling in which tens of hundreds are used to express date. (e.g. nineteen hundred and four)
    e.g. le vingt mai dix-neuf-cent-quatre -> date { day: "20" month: "mai" year: "1904" preserve_order: true }
    Args:
        cardinal: CardinalFst
    """
    def __init__(self, cardinal: GraphFst):
        super().__init__(name="date", kind="classify")
        # Reuse the cardinal grammar for day and year values.
        self.cardinal = cardinal.graph_no_exception
        year_graph = self.cardinal
        month_graph = pynini.string_file(get_abs_path("data/months.tsv"))
        month_graph = pynutil.insert("month: \"") + month_graph + pynutil.insert("\"")
        day_graph = self.cardinal | pynini.cross("premier", "1")  # Premier is only ordinal used for dates
        day_graph = pynutil.insert("day: \"") + day_graph + pynutil.insert("\"")
        # Year is optional: 'le premier janvier' is valid without one.
        optional_graph_year = pynini.closure(
            delete_extra_space + pynutil.insert("year: \"") + year_graph + pynutil.insert("\""), 0, 1,
        )
        graph_dmy = day_graph + delete_extra_space + month_graph + optional_graph_year
        final_graph = graph_dmy
        # Keep spoken day-month-year order in the output tags.
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for classifying plain tokens that do not belong to any
    special semiotic class; this acts as the default fallback class.
    e.g. sleep -> tokens { name: "sleep" }
    """
    def __init__(self):
        super().__init__(name="word", kind="classify")
        # Any non-empty run of non-space characters, wrapped in a name field.
        token_body = pynini.closure(NEMO_NOT_SPACE, 1)
        graph = pynutil.insert("name: \"") + token_body + pynutil.insert("\"")
        self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/taggers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for verbalizing time, e.g.
    time { hours: "8" minutes: "30" suffix: "du matin"} -> 8 h 30
    time { hours: "8" minutes: "30" } -> 8 h 30
    time { hours: "8" minutes: "30" suffix: "du soir"} -> 20 h 30
    """
    def __init__(self):
        super().__init__(name="time", kind="verbalize")
        # Maps a morning hour to its 24-hour ('night') equivalent.
        hour_to_night = pynini.string_file(get_abs_path("data/time/hour_to_night.tsv"))
        one_or_two_digits = pynini.closure(NEMO_DIGIT, 1, 2)
        delete_am = pynutil.delete("suffix: \"am\"")
        delete_pm = pynutil.delete("suffix: \"pm\"")
        hour = (
            pynutil.delete("hours:")
            + delete_space
            + pynutil.delete("\"")
            + one_or_two_digits
            + pynutil.delete("\"")
        )
        minute = (
            pynutil.delete("minutes:")
            + delete_extra_space
            + pynutil.delete("\"")
            + one_or_two_digits
            + pynutil.delete("\"")
        )
        # Daytime: hour kept as-is, 'am' suffix (if any) dropped.
        day_graph = hour + delete_extra_space + pynutil.insert("h") + minute.ques + delete_space + delete_am.ques
        # Evening: hour shifted to 24-hour form; 'pm' suffix is mandatory.
        night_graph = (
            (hour @ hour_to_night)
            + delete_extra_space
            + pynutil.insert("h")
            + minute.ques
            + delete_space
            + delete_pm
        )
        self.fst = self.delete_tokens(day_graph | night_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for verbalizing measure, e.g.
    measure { negative: "true" cardinal { integer: "12" } units: "kg" } -> -12 kg
    Args:
        decimal: DecimalFst
        cardinal: CardinalFst
        fraction: FractionFst
    """
    def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst):
        super().__init__(name="measure", kind="verbalize")
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-"), 0, 1)

        def _unwrap(tag, numbers):
            # Strips the '<tag> { ... }' wrapper around a number verbalization,
            # handling an optional leading negative field.
            return (
                pynutil.delete(tag + " {")
                + delete_space
                + optional_sign
                + delete_space
                + numbers
                + delete_space
                + pynutil.delete("}")
            )

        # Unit string: everything between the quotes of the units field.
        unit = (
            pynutil.delete("units:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_CHAR - " ", 1)
            + pynutil.delete("\"")
            + delete_space
        )
        graph_decimal = _unwrap("decimal", decimal.numbers)
        # measurements most obey three by three spacing
        graph_cardinal = _unwrap("cardinal", cardinal.numbers @ decimal.group_by_threes)
        graph_fraction = _unwrap("fraction", fraction.numbers)
        number_part = graph_cardinal | graph_decimal | graph_fraction
        graph = number_part + delete_space + pynutil.insert(" ") + unit
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for verbalizing fraction
    e.g. fraction { integer_part: "1" numerator: "2" denominator: "3" } } -> 1 2/3
    """
    def __init__(self):
        super().__init__(name="fraction", kind="verbalize")
        quoted_value = pynini.closure(NEMO_NOT_QUOTE, 1)
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
        # '1 ' from integer_part: "1" (with trailing space before the fraction).
        integer_part = (
            pynutil.delete("integer_part: \"") + quoted_value + pynutil.delete("\"") + insert_space
        )
        numerator_part = pynutil.delete("numerator: \"") + quoted_value + pynutil.delete("\"")
        # '/3' from denominator: "3".
        denominator_part = (
            pynutil.insert('/') + pynutil.delete("denominator: \"") + quoted_value + pynutil.delete("\"")
        )
        graph = (
            pynini.closure(integer_part + delete_space, 0, 1)
            + numerator_part
            + delete_space
            + denominator_part
        ).optimize()
        # Exposed for reuse by MeasureFst.
        self.numbers = graph
        self.fst = self.delete_tokens(optional_sign + graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for verbalizing telephone, e.g.
    telephone { number_part: "02 33 43 53 22" }
    -> 02 33 43 53 22
    """
    def __init__(self):
        super().__init__(name="telephone", kind="verbalize")
        # Strip the field wrapper and emit the quoted number string verbatim.
        open_tag = pynutil.delete("number_part: \"")
        digits = pynini.closure(NEMO_NOT_QUOTE, 1)
        close_tag = pynutil.delete("\"")
        graph = open_tag + digits + close_tag
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_DIGIT,
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
)
from nemo_text_processing.inverse_text_normalization.fr.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for verbalizing ordinal, e.g.
    ordinal { integer: "13" morphosyntactic_features: "e" } -> 13ᵉ
    Given 'special' terms for ordinals (e.g. siècle), renders
    amount in conventional format. e.g.
    ordinal { integer: "13" morphosyntactic_features: "e/siècle" } -> XIIIᵉ
    """
    def __init__(self):
        super().__init__(name="ordinal", kind="verbalize")
        # Integer value between quotes; trailing quote handled after suffix processing.
        graph_integer = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Maps ordinal-suffix letters to their superscript forms (e.g. 'e' -> 'ᵉ').
        replace_suffix = pynini.union(
            pynini.cross("e", "ᵉ"),  # only delete first quote since there may be more features
            pynini.cross("d", "ᵈ"),
            pynini.cross("r", "ʳ"),
            pynini.cross("s", "ˢ"),
        )
        replace_suffix = pynutil.delete(" morphosyntactic_features: \"") + replace_suffix.plus
        graph_arabic = graph_integer + replace_suffix.plus
        # For roman.
        # TSV tables are spoken->written; invert() gives digit->roman mappings.
        graph_roman_digits = pynini.string_file(get_abs_path("data/roman/digits_large.tsv")).invert()
        graph_roman_ties = pynini.string_file(get_abs_path("data/roman/ties_large.tsv")).invert()
        graph_roman_hundreds = pynini.string_file(get_abs_path("data/roman/hundreds_large.tsv")).invert()
        graph_roman_zero_digit = pynutil.delete("0")
        # Compose digit-length restrictions with positionally-correct roman components.
        graph_roman_hundreds = NEMO_DIGIT ** 3 @ (
            graph_roman_hundreds
            + pynini.union(graph_roman_ties, graph_roman_zero_digit)
            + pynini.union(graph_roman_digits, graph_roman_zero_digit)
        )
        graph_roman_ties = NEMO_DIGIT ** 2 @ (
            graph_roman_ties + pynini.union(graph_roman_digits, graph_roman_zero_digit)
        )
        graph_roman_digits = NEMO_DIGIT @ graph_roman_digits
        graph_roman_integers = graph_roman_hundreds | graph_roman_ties | graph_roman_digits
        # Roman path only applies to the 'siècle' feature, e.g. "e/siècle" -> XIIIᵉ siècle.
        graph_roman = (graph_integer @ graph_roman_integers) + replace_suffix
        graph_roman += pynini.cross("/", " ") + "siècle"
        # Delete the closing quote of morphosyntactic_features.
        graph = (graph_roman | graph_arabic) + pynutil.delete("\"")
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import GraphFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.whitelist import WhiteListFst
class VerbalizeFst(GraphFst):
    """
    Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    """

    def __init__(self):
        super().__init__(name="verbalize", kind="verbalize")
        # Build the numeric grammars first; measure and money reuse them.
        cardinal = CardinalFst()
        decimal = DecimalFst()
        fraction = FractionFst()
        # Collect every component verbalizer FST in priority order.
        component_fsts = [
            TimeFst().fst,
            DateFst().fst,
            MoneyFst(decimal=decimal).fst,
            MeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction).fst,
            fraction.fst,
            OrdinalFst().fst,
            decimal.fst,
            cardinal.fst,
            WhiteListFst().fst,
            TelephoneFst().fst,
            ElectronicFst().fst,
        ]
        # The full grammar is the union of all component verbalizers.
        graph = component_fsts[0]
        for component in component_fsts[1:]:
            graph |= component
        self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for verbalizing whitelist
    e.g. tokens { name: "mrs." } -> mrs.
    """

    def __init__(self):
        super().__init__(name="whitelist", kind="verbalize")
        # Drop the serialized `name:` label and surrounding quotes, keeping
        # only the quoted value (one or more non-space characters).
        strip_field = pynutil.delete("name:") + delete_space
        quoted_value = (
            pynutil.delete("\"") + pynini.closure(NEMO_CHAR - " ", 1) + pynutil.delete("\"")
        )
        whitelist = strip_field + quoted_value
        # Convert protected non-breaking spaces (U+00A0) back to plain spaces.
        restore_spaces = pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = (whitelist @ restore_spaces).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.fr.verbalizers.word import WordFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
    """
    Finite state transducer that verbalizes an entire sentence, e.g.
    tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
    """

    def __init__(self):
        super().__init__(name="verbalize_final", kind="verbalize")
        # A token body is either a semiotic-class verbalization or a plain word.
        token_body = VerbalizeFst().fst | WordFst().fst
        # Unwrap one serialized token of the form: tokens { ... }
        one_token = (
            pynutil.delete("tokens")
            + delete_space
            + pynutil.delete("{")
            + delete_space
            + token_body
            + delete_space
            + pynutil.delete("}")
        )
        # A sentence is one or more tokens joined by single spaces, with
        # leading/trailing whitespace removed.
        sentence = delete_space + pynini.closure(one_token + delete_extra_space) + one_token + delete_space
        self.fst = sentence
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_DIGIT,
NEMO_NON_BREAKING_SPACE,
NEMO_NOT_QUOTE,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class NumberParser(GraphFst):
    """
    Finite state transducer for parsing strings of digits. Breaks up digit strings into groups of three for
    strings of digits of four or more (inclusive). Groupings are separated by non-breaking space.
    e.g. '1000' -> '1 000'
    e.g. '1000,33333' -> '1 000,333 33'

    NOTE(review): this class is a stub — no grammar is constructed here. The
    grouping logic described above is actually built inside DecimalFst below
    (see ``group_by_threes``); confirm whether this class is still needed.
    """

    def __init__(self):
        # Only registers the grammar name/kind; no fst is assigned.
        super().__init__(name="parser", kind="verbalize")
class DecimalFst(GraphFst):
    """
    Finite state transducer for verbalizing decimal, e.g.
    decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" } -> -12.5006 billion
    """

    def __init__(self):
        super().__init__(name="decimal", kind="verbalize")
        # Need parser to group digits by threes
        exactly_three_digits = NEMO_DIGIT ** 3
        at_most_three_digits = pynini.closure(NEMO_DIGIT, 1, 3)
        # Integer side: a leading group of 1-3 digits, then any number of
        # exactly-three-digit groups each preceded by a non-breaking space,
        # e.g. '1000' -> '1 000'.
        space_every_three_integer = (
            at_most_three_digits + (pynutil.insert(NEMO_NON_BREAKING_SPACE) + exactly_three_digits).closure()
        )
        # Fractional side: after the comma, groups of three run left-to-right
        # with a trailing partial group, e.g. ',33333' -> ',333 33'.
        space_every_three_decimal = (
            pynini.accep(",")
            + (exactly_three_digits + pynutil.insert(NEMO_NON_BREAKING_SPACE)).closure()
            + at_most_three_digits
        )
        group_by_threes = space_every_three_integer | space_every_three_decimal
        self.group_by_threes = group_by_threes
        # Optional leading minus sign, emitted when negative: "true" is present.
        optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
        # Strip the serialized integer_part field, keeping the quoted digits.
        integer = (
            pynutil.delete("integer_part:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Compose with the grouping parser to insert thousands separators.
        integer = integer @ group_by_threes
        optional_integer = pynini.closure(integer + delete_space, 0, 1)
        # Fractional part is re-prefixed with a comma (French decimal mark)
        # so the decimal branch of group_by_threes applies.
        fractional = (
            pynutil.insert(",")
            + pynutil.delete("fractional_part:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        fractional = fractional @ group_by_threes
        optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
        # Optional quantity word (e.g. "billion"), emitted after a space.
        quantity = (
            pynutil.delete("quantity:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        optional_quantity = pynini.closure(pynutil.insert(" ") + quantity + delete_space, 0, 1)
        graph = (optional_integer + optional_fractional + optional_quantity).optimize()
        # Expose the unsigned number graph for reuse (e.g. by MoneyFst).
        self.numbers = graph
        graph = optional_sign + graph
        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
    money { integer_part: "12" fractional_part: "05" currency: "$" } -> 12.05 $

    Args:
        decimal: DecimalFst
    """

    def __init__(self, decimal: GraphFst):
        super().__init__(name="money", kind="verbalize")
        # Currency field: drop the label and quotes, keep the symbol itself,
        # separated from the amount by a single space.
        currency_symbol = (
            pynutil.delete("currency:")
            + delete_extra_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Amount (reused from the decimal verbalizer) followed by the unit.
        amount_with_unit = decimal.numbers + delete_space + currency_symbol
        self.fst = self.delete_tokens(amount_with_unit).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for verbalizing cardinal
    e.g. cardinal { negative: "-" integer: "23" } -> -23
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="verbalize")
        # Optional sign: strip the `negative:` label and quotes, keep the
        # single quoted character (the "-" sign).
        sign_value = (
            pynutil.delete("negative:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
            + delete_space
        )
        optional_sign = pynini.closure(sign_value, 0, 1)
        # Integer field: strip the label and quotes, keep the digits.
        integer_value = (
            pynutil.delete("integer:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # Expose the unsigned number graph for reuse by other verbalizers.
        self.numbers = integer_value
        signed = optional_sign + integer_value
        self.fst = self.delete_tokens(signed).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for verbalizing electronic
    e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> [email protected]
    """

    def __init__(self):
        super().__init__(name="electronic", kind="verbalize")

        def _quoted_field(label):
            # Delete `label "` ... `"` and keep only the quoted content.
            return (
                pynutil.delete(label)
                + delete_space
                + pynutil.delete("\"")
                + pynini.closure(NEMO_NOT_QUOTE, 1)
                + pynutil.delete("\"")
            )

        # Re-insert the '@' between the username and domain fields.
        graph = (
            _quoted_field("username:") + delete_space + pynutil.insert("@") + _quoted_field("domain:")
        )
        self.fst = self.delete_tokens(graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for verbalizing date, e.g.
    date { day: "1" month: "janvier" preserve_order: true } -> 1ᵉʳ janvier
    """

    def __init__(self):
        super().__init__(name="date", kind="verbalize")
        # The first of the month is written as the ordinal "1ᵉʳ" (premier).
        convert_primer = pynini.cross('1', '1ᵉʳ')
        day = (
            pynutil.delete("day:")
            + delete_space
            + pynutil.delete("\"")
            + (
                # Negative weight makes the ordinal rewrite win over the
                # plain copy when the day is "1".
                pynini.closure(NEMO_NOT_QUOTE, 1) | pynutil.add_weight(convert_primer, -1)
            )  # first of the month is ordinal
            + pynutil.delete("\"")
        )
        month = (
            pynutil.delete("month:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        year = (
            pynutil.delete("year:")
            + delete_space
            + pynutil.delete("\"")
            + pynini.closure(NEMO_NOT_QUOTE, 1)
            + pynutil.delete("\"")
        )
        # day month
        graph_dm = day + delete_extra_space + month
        # day month year
        graph_dmy = graph_dm + delete_extra_space + year
        # Consume serialized ordering metadata (preserve_order / field_order)
        # without emitting anything.
        optional_preserve_order = pynini.closure(
            pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
            | pynutil.delete("field_order:")
            + delete_space
            + pynutil.delete("\"")
            + NEMO_NOT_QUOTE
            + pynutil.delete("\"")
            + delete_space
        )
        final_graph = (graph_dm | graph_dmy) + delete_space + optional_preserve_order
        delete_tokens = self.delete_tokens(final_graph)
        self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.fr.graph_utils import (
NEMO_CHAR,
NEMO_SIGMA,
GraphFst,
delete_space,
)
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for verbalizing plain tokens
    e.g. tokens { name: "sleep" } -> sleep
    """

    def __init__(self):
        super().__init__(name="word", kind="verbalize")
        # The token value is any run of one or more non-space characters.
        token_value = pynini.closure(NEMO_CHAR - " ", 1)
        # Strip the serialized `name:` label and its surrounding quotes.
        strip_serialization = (
            pynutil.delete("name:") + delete_space + pynutil.delete("\"") + token_value + pynutil.delete("\"")
        )
        # Convert protected non-breaking spaces (U+00A0) back to plain spaces.
        restore_spaces = pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
        self.fst = (strip_serialization @ restore_spaces).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/verbalizers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/numbers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/ordinals/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/electronic/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/time/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/money/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/roman/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/fr/data/measurements/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.es.utils import get_abs_path
from pynini.lib import pynutil
def int_to_roman(fst: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Alters given fst to convert Arabic numerals into Roman integers (lower cased). Valid for values up to 3999.
    e.g.
        "5" -> "v"
        "treinta y uno" -> "xxxi"

    Args:
        fst: Any fst. Composes fst onto Roman conversion outputs.
    """

    def _load_roman(file: str):
        # The TSVs are inverted so the mapping runs Arabic -> Roman.
        return pynini.invert(pynini.string_file(get_abs_path(file)))

    digit = _load_roman("data/roman/digit.tsv")
    ties = _load_roman("data/roman/ties.tsv")
    hundreds = _load_roman("data/roman/hundreds.tsv")
    thousands = _load_roman("data/roman/thousands.tsv")

    # A zero in any position produces no output; the small positive weight
    # dispreferes the deletion path whenever a real mapping applies.
    skip_zero = pynutil.add_weight(pynutil.delete("0"), 0.01)
    opt_digit = digit | skip_zero
    opt_ties = ties | skip_zero
    opt_hundreds = hundreds | skip_zero

    # Accept 1- to 4-digit numbers, one positional slot per digit.
    one_digit = digit
    two_digits = ties + opt_digit
    three_digits = hundreds + opt_ties + opt_digit
    four_digits = thousands + opt_hundreds + opt_ties + opt_digit

    converter = (one_digit | two_digits | three_digits | four_digits).optimize()
    return fst @ converter
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/graph_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.es.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def get_abs_path(rel_path):
    """
    Get absolute path

    Args:
        rel_path: relative path to this file

    Returns absolute path
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    return f"{base_dir}/{rel_path}"
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
    """
    Finite state transducer for classifying time
    Time formats that it converts:
    - <hour> + <minutes>
        e.g. la una diez -> time { hours: "la 1" minutes: "10" }
    - <hour> + " y " + <minutes>
        e.g. la una y diez -> time { hours: "la 1" minutes: "10" }
    - <hour> + " con " + <minutes>
        e.g. la una con diez -> time { hours: "la 1" minutes: "10" }
    - <hour> + " menos " + <minutes>
        e.g. las dos menos cuarto -> time { hours: "la 1" minutes: "45" }
    - "(un) cuarto para " + <hour>
        e.g. cuarto para las dos -> time { minutes: "45" hours: "la 1" }
    Note that times on the hour (e.g. "las dos" i.e. "two o'clock") do not get
    converted into a time format. This is to avoid converting phrases that are
    not part of a time phrase (e.g. "las dos personas" i.e. "the two people")
    e.g. las dos -> tokens { name: "las" } tokens { name: "dos" }
    However, if a time on the hour is followed by a suffix (indicating 'a.m.'
    or 'p.m.'), it will be converted.
    e.g. las dos pe eme -> time { hours: "las 2" minutes: "00" suffix: "p.m." }
    In the same way, times without a preceding article are not converted. This is
    to avoid converting ranges or complex fractions
    e.g. dos y media -> tokens { name: "dos" } tokens { name: "y" } tokens { name: "media" }
    However, if a time without an article is followed by a suffix (indicating 'a.m.'
    or 'p.m.'), it will be converted.
    e.g. dos y media p m -> time { hours: "2" minutes: "30" suffix: "p.m." }
    Note that although the TimeFst verbalizer can accept 'zone' (timezone) fields,
    so far the rules have not been added to the TimeFst tagger to process
    timezones (to keep the rules simple, and because timezones are not very
    often specified in Spanish.)
    """

    def __init__(self):
        super().__init__(name="time", kind="classify")
        # Lookup tables: a.m./p.m. suffixes, "time to" hour forms, "minutes to" values.
        suffix_graph = pynini.string_file(get_abs_path("data/time/time_suffix.tsv"))
        time_to_graph = pynini.string_file(get_abs_path("data/time/time_to.tsv"))
        minutes_to_graph = pynini.string_file(get_abs_path("data/time/minutes_to.tsv"))
        # TSV rows map written -> spoken; invert so we transduce spoken -> written.
        time_zones = pynini.string_file(get_abs_path("data/time/time_zone.tsv"))
        time_zones = pynini.invert(time_zones)
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
        # Spoken numbers 1-99: digits, twenties, teens, round tens, and tens + "y" + digit.
        graph_1_to_100 = pynini.union(
            graph_digit,
            graph_twenties,
            graph_teen,
            (graph_ties + pynutil.insert("0")),
            (graph_ties + pynutil.delete(" y ") + graph_digit),
        )
        # note that graph_hour will start from 2 hours
        # "1 o'clock" will be treated differently because it
        # is singular
        digits_2_to_23 = [str(digits) for digits in range(2, 24)]
        digits_1_to_59 = [str(digits) for digits in range(1, 60)]
        graph_1oclock = pynini.cross("la una", "la 1")
        graph_hour = pynini.cross("las ", "las ") + graph_1_to_100 @ pynini.union(*digits_2_to_23)
        graph_minute = graph_1_to_100 @ pynini.union(*digits_1_to_59)
        graph_minute_verbose = pynini.cross("media", "30") | pynini.cross("cuarto", "15")
        final_graph_hour = pynutil.insert("hours: \"") + (graph_1oclock | graph_hour) + pynutil.insert("\"")
        # Optional connective "y"/"con" is deleted before the minute value.
        final_graph_minute = (
            pynutil.insert("minutes: \"")
            + pynini.closure((pynutil.delete("y") | pynutil.delete("con")) + delete_space, 0, 1)
            + (graph_minute | graph_minute_verbose)
            + pynutil.insert("\"")
        )
        # g m t más tres -> las 2:00 p.m. gmt+3
        digits_1_to_23 = [str(digits) for digits in range(1, 24)]
        offset = graph_1_to_100 @ pynini.union(*digits_1_to_23)
        sign = pynini.cross("más", "+") | pynini.cross("menos", "-")
        full_offset = pynutil.delete(" ") + sign + pynutil.delete(" ") + offset
        graph_offset = pynini.closure(full_offset, 0, 1)
        graph_time_zones = pynini.accep(" ") + time_zones + graph_offset
        time_zones_optional = pynini.closure(graph_time_zones, 0, 1)
        final_suffix = pynutil.insert("suffix: \"") + convert_space(suffix_graph) + pynutil.insert("\"")
        final_suffix_optional = pynini.closure(delete_space + insert_space + final_suffix, 0, 1)
        final_time_zone_optional = pynini.closure(
            delete_space
            + insert_space
            + pynutil.insert("zone: \"")
            + convert_space(time_zones_optional)
            + pynutil.insert("\""),
            0,
            1,
        )
        # las nueve a eme (only convert on-the-hour times if they are followed by a suffix)
        graph_1oclock_with_suffix = pynini.closure(pynini.accep("la "), 0, 1) + pynini.cross("una", "1")
        graph_hour_with_suffix = pynini.closure(pynini.accep("las "), 0, 1) + graph_1_to_100 @ pynini.union(
            *digits_2_to_23
        )
        final_graph_hour_with_suffix = (
            pynutil.insert("hours: \"") + (graph_1oclock_with_suffix | graph_hour_with_suffix) + pynutil.insert("\"")
        )
        graph_hsuffix = (
            final_graph_hour_with_suffix
            + delete_extra_space
            + pynutil.insert("minutes: \"00\"")
            + insert_space
            + final_suffix
            + final_time_zone_optional
        )
        # las nueve y veinticinco
        graph_hm = final_graph_hour + delete_extra_space + final_graph_minute
        # nueve y veinticinco a m
        graph_hm_suffix = (
            final_graph_hour_with_suffix + delete_extra_space + final_graph_minute + delete_extra_space + final_suffix
        )
        # un cuarto para las cinco
        graph_mh = (
            pynutil.insert("minutes: \"")
            + minutes_to_graph
            + pynutil.insert("\"")
            + delete_extra_space
            + pynutil.insert("hours: \"")
            + time_to_graph
            + pynutil.insert("\"")
        )
        # las diez menos diez
        # "menos <m>" means m minutes before the hour, so the written minute is 60 - m.
        graph_time_to = (
            pynutil.insert("hours: \"")
            + time_to_graph
            + pynutil.insert("\"")
            + delete_extra_space
            + pynutil.insert("minutes: \"")
            + delete_space
            + pynutil.delete("menos")
            + delete_space
            + pynini.union(
                pynini.cross("cinco", "55"),
                pynini.cross("diez", "50"),
                pynini.cross("cuarto", "45"),
                pynini.cross("veinte", "40"),
                # bug fix: 60 - 25 = 35 (was incorrectly mapped to "30")
                pynini.cross("veinticinco", "35"),
            )
            + pynutil.insert("\"")
        )
        final_graph = pynini.union(
            (graph_hm | graph_mh | graph_time_to) + final_suffix_optional, graph_hsuffix, graph_hm_suffix
        ).optimize()
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
    """
    Finite state transducer for classifying measure
    e.g. menos doce kilogramos -> measure { cardinal { negative: "true" integer: "12" } units: "kg" }
    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
        fraction: FractionFst
    """
    def __init__(self, cardinal: GraphFst, decimal: GraphFst, fraction: GraphFst):
        super().__init__(name="measure", kind="classify")
        cardinal_graph = cardinal.graph_no_exception
        # Proper fractions including the bare "medio" -> 1/2 reading.
        fraction_graph = fraction.proper_fractions_with_medio
        # Spoken math operators and equality phrases for arithmetic readings.
        math_symbols = pynini.string_file(get_abs_path("data/measures/math_symbols.tsv"))
        equal_symbol = pynini.string_map([("es igual a", "="), ("igual a", "=")])
        # TSVs store abbr -> spoken form; invert to map spoken -> abbr.
        graph_unit_singular = pynini.string_file(get_abs_path("data/measures/measurements_singular.tsv"))
        graph_unit_singular = pynini.invert(graph_unit_singular)  # singular -> abbr
        graph_unit_plural = pynini.string_file(get_abs_path("data/measures/measurements_plural.tsv"))
        graph_unit_plural = pynini.invert(graph_unit_plural)  # plural -> abbr
        # Optional "menos" prefix marks a negative quantity.
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"true\"") + delete_extra_space, 0, 1
        )
        unit_singular = convert_space(graph_unit_singular)
        unit_plural = convert_space(graph_unit_plural)
        # Rate units spoken with "por", e.g. "... por hora" -> ".../h".
        unit_misc = pynutil.insert("/") + pynutil.delete("por") + delete_space + convert_space(graph_unit_singular)
        # Compound unit + rate gets a small penalty so simple units win ties.
        unit_singular = (
            pynutil.insert("units: \"")
            + (unit_singular | unit_misc | pynutil.add_weight(unit_singular + delete_space + unit_misc, 0.01))
            + pynutil.insert("\"")
        )
        unit_plural = (
            pynutil.insert("units: \"")
            + (unit_plural | unit_misc | pynutil.add_weight(unit_plural + delete_space + unit_misc, 0.01))
            + pynutil.insert("\"")
        )
        # Decimal quantity followed by a plural unit.
        subgraph_decimal = (
            pynutil.insert("decimal { ")
            + optional_graph_negative
            + decimal.final_graph_wo_negative
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_plural
        )
        # Cardinal quantity other than "un"/"una"/"uno" takes a plural unit.
        subgraph_cardinal = (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + ((NEMO_SIGMA - "un" - "una" - "uno") @ cardinal_graph)
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_plural
        )
        # "un"/"una"/"uno" (= 1) takes the singular unit form.
        subgraph_cardinal |= (
            pynutil.insert("cardinal { ")
            + optional_graph_negative
            + pynutil.insert("integer: \"")
            + (pynini.cross("un", "1") | pynini.cross("una", "1") | pynini.cross("uno", "1"))
            + pynutil.insert("\"")
            + pynutil.insert(" }")
            + delete_extra_space
            + unit_singular
        )
        # Fraction (not starting with "un") + optional "de" + unit.
        subgraph_fraction = (
            ((NEMO_SIGMA - "un") @ fraction_graph)
            + delete_extra_space
            + ((pynini.closure(pynutil.delete("de") + delete_space, 0, 1) + unit_singular) | unit_plural)
        )
        # Fractions starting with "un" require "de" + singular unit.
        subgraph_fraction |= (
            ((pynini.accep("un") + NEMO_SIGMA) @ fraction_graph)
            + delete_extra_space
            + pynutil.delete("de")
            + delete_space
            + unit_singular
        )
        # Mixed numbers: cardinal + unit + "y"/"con" + fraction.
        subgraph_fraction |= (
            subgraph_cardinal + pynutil.delete(pynini.union(" y", " con")) + delete_extra_space + fraction_graph
        )
        # Low-priority catch-all: full fraction grammar ("y medio" normalized
        # to "y un medio") followed by a plural unit.
        subgraph_fraction |= pynutil.add_weight(
            (fraction.fst | ((NEMO_SIGMA + pynini.cross("y medio", "y un medio")) @ fraction.fst))
            + delete_extra_space
            + unit_plural,
            0.001,
        )
        # Arithmetic: at least one operator between cardinals on the long side.
        math_long_side = (
            cardinal_graph
            + delete_extra_space
            + pynini.closure(
                math_symbols + delete_extra_space + cardinal_graph + pynini.closure(delete_extra_space, 0, 1), 1
            )
        )
        # Short side of the equation may be a cardinal or a single variable letter.
        math_short_side = cardinal_graph | NEMO_ALPHA
        math_operation = math_long_side + equal_symbol + delete_extra_space + math_short_side
        math_operation |= math_short_side + delete_extra_space + equal_symbol + delete_extra_space + math_long_side
        subgraph_math_operation = (
            pynutil.insert("units: \"math\" cardinal { integer: \"") + math_operation + pynutil.insert("\" }")
        )
        final_graph = subgraph_decimal | subgraph_cardinal | subgraph_fraction | subgraph_math_operation
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/measure.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, NEMO_SPACE, GraphFst
from pynini.lib import pynutil
class FractionFst(GraphFst):
    """
    Finite state transducer for classifying fractions
    e.g. dos quintos -> fraction { numerator: "2" denominator: "5" }
    This class converts fractions with a denominator up to (and including)
    "1/999".
    Fractions with 4 as their denominator, read as "cuarto(s)", are not
    converted because "room" is also "cuarto", which could cause issues like
    "quiero reservar un cuarto" -> quiero reservar 1/4".
    Fractions without a numerator are not converted either to prevent issues
    like:
    "estaba medio dormido" -> "estaba 1/2 dormido"
    Args:
        cardinal: CardinalFst
        ordinal: OrdinalFst
    """
    def __init__(self, cardinal: GraphFst, ordinal: GraphFst):
        super().__init__(name="fraction", kind="classify")
        cardinal_graph = cardinal.graph_no_exception
        ordinal_graph = ordinal.graph_masc_num_no_exception
        # Maps ordinal-style numbers to their fraction-denominator digits.
        numbers_read_as_ordinals = pynini.string_file(get_abs_path("data/fractions/numbers_read_as_ordinals.tsv"))
        ordinal_replacements = pynini.string_file(get_abs_path("data/fractions/ordinal_exceptions.tsv"))
        ordinal_replacements = pynini.invert(ordinal_replacements)
        # Optionally strip a plural "s" ("quintos" -> "quinto").
        make_singular = pynini.closure(pynutil.delete("s"), 0, 1)
        # process denominators read like ordinals
        # e.g. "un quinto" -> fraction { numerator: "1" denominator: "5" }
        # exclude cases that are ambiguous or read differently
        ordinal_exceptions = pynini.project(pynini.union("segundo", "tercero", "cuarto"), 'input')
        ordinal_graph |= ordinal_replacements @ ordinal_graph
        ordinal_graph = (pynini.project(ordinal_graph, "input") - ordinal_exceptions.arcsort()) @ ordinal_graph
        ordinal_numbers = (NEMO_SIGMA + make_singular) @ ordinal_graph
        # process other denominators
        # e.g. "dos dieciochoavo" -> fraction { numerator: "2" denominator: "18" }
        # restore the accent that the "-avo" ending absorbs (un -> ún, etc.)
        # so the result matches the cardinal grammar's spellings
        restore_accents = pynini.string_map([('un', 'ún'), ('dos', 'dós'), ('tres', 'trés')])
        restore_accents = NEMO_SIGMA + pynini.closure(pynutil.add_weight(restore_accents, -0.001), 0, 1)
        # rewrite fused forms ('i' -> "a y ") so the cardinal grammar can
        # parse the remaining stem — presumably for tens+units denominators;
        # NOTE(review): confirm exact coverage against the test suite
        extend_numbers = NEMO_SIGMA + pynini.closure(pynini.cross('i', "a y "), 0, 1) + NEMO_SIGMA
        # delete the "-avo(s)" / "-vo(s)" denominator suffix
        delete_endings = NEMO_SIGMA + (pynutil.delete("avo") | pynutil.delete("vo")) + make_singular
        other_denominators = extend_numbers @ delete_endings @ restore_accents @ cardinal_graph
        denominators = ordinal_numbers @ numbers_read_as_ordinals
        denominators |= other_denominators
        # process negative fractions
        # e.g. "menos dos tercios" -> "fractions { negative: True numerator: "2" denominator: "3" }"
        optional_negative_graph = pynini.closure(pynini.cross("menos", "negative: \"True\"") + NEMO_SPACE, 0, 1)
        # process mixed fractions
        # e.g. "dos y dos tercios" -> "fractions { integer_part: "2" numerator: "2" denominator: "3" }"
        integer_part_graph = (
            pynutil.insert("integer_part: \"")
            + cardinal_graph
            + pynutil.insert("\"")
            + pynutil.delete(pynini.union(" y", " con"))
        )
        optional_integer_part_graph = pynini.closure(integer_part_graph + NEMO_SPACE, 0, 1)
        numerators_graph = pynutil.insert("numerator: \"") + cardinal_graph + pynutil.insert("\"")
        denominators_graph = pynutil.insert("denominator: \"") + denominators + pynutil.insert("\"")
        proper_fractions = numerators_graph + NEMO_SPACE + denominators_graph
        # Also accept bare "medio" as 1/2 (used by MeasureFst).
        proper_fractions_with_medio = proper_fractions | (
            pynutil.insert("numerator: \"1\" ") + pynini.cross("medio", "denominator: \"2\"")
        )
        proper_fractions_with_medio = optional_negative_graph + proper_fractions_with_medio
        self.proper_fractions_with_medio = self.add_tokens(proper_fractions_with_medio)
        graph = (
            optional_negative_graph + optional_integer_part_graph + numerators_graph + NEMO_SPACE + denominators_graph
        )
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_space
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
    """
    Finite state transducer for classifying telephone numbers, e.g.
    uno dos tres uno dos tres cinco seis siete ocho -> { number_part: "123-123-5678" }.
    If 10 digits are spoken, they are grouped as 3+3+4 (eg. 123-456-7890).
    If 9 digits are spoken, they are grouped as 3+3+3 (eg. 123-456-789).
    If 8 digits are spoken, they are grouped as 4+4 (eg. 1234-5678).
    In Spanish, digits are generally spoken individually, or as 2-digit numbers,
    eg. "one twenty three" = "123",
    "twelve thirty four" = "1234".
    (we ignore more complicated cases such as "three hundred and two" or "three nines").
    Repeated digits spoken as "doble"/"triple" (e.g. "doble cero" -> "00") are
    supported, as are optional country codes ("más ...") and extensions
    ("extensión ...").
    """
    def __init__(self):
        super().__init__(name="telephone", kind="classify")
        # create `single_digits` and `double_digits` graphs as these will be
        # the building blocks of possible telephone numbers
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
        single_digits = graph_digit.optimize() | pynini.cross("cero", "0")
        # 2-digit readings: twenties, teens, round tens, and tens + "y" + digit.
        double_digits = pynini.union(
            graph_twenties,
            graph_teen,
            (graph_ties + pynutil.insert("0")),
            (graph_ties + delete_space + pynutil.delete("y") + delete_space + graph_digit),
        )
        # self.single_digits = single_digits
        # self.double_digits = double_digits
        digit_twice = single_digits + pynutil.delete(" ") + single_digits
        digit_thrice = digit_twice + pynutil.delete(" ") + single_digits
        # accept `doble cero` -> `00` and `triple ocho` -> `888`
        # digit_words maps a written digit to its spoken form.
        digit_words = pynini.union(graph_digit.optimize(), pynini.cross("cero", "0")).invert()
        # Build "<word> <word>" -> "doble <word>" for each digit, then invert so
        # "doble <word>" is accepted, and compose with digit_twice to emit "NN".
        doubled_digit = pynini.union(
            *[
                pynini.cross(
                    pynini.project(str(i) @ digit_words, "output")
                    + pynini.accep(" ")
                    + pynini.project(str(i) @ digit_words, "output"),
                    pynutil.insert("doble ") + pynini.project(str(i) @ digit_words, "output"),
                )
                for i in range(10)
            ]
        )
        doubled_digit.invert()
        doubled_digit @= digit_twice
        # Same construction for "triple <word>" -> "NNN".
        tripled_digit = pynini.union(
            *[
                pynini.cross(
                    pynini.project(str(i) @ digit_words, "output")
                    + pynini.accep(" ")
                    + pynini.project(str(i) @ digit_words, "output")
                    + pynini.accep(" ")
                    + pynini.project(str(i) @ digit_words, "output"),
                    pynutil.insert("triple ") + pynini.project(str(i) @ digit_words, "output"),
                )
                for i in range(10)
            ]
        )
        tripled_digit.invert()
        tripled_digit @= digit_thrice
        # Denormalized phone numbers are grouped in sets of 3 or 4 digits
        group_of_two = pynini.union(doubled_digit, digit_twice, double_digits)
        group_of_three = pynini.union(tripled_digit, single_digits + pynutil.delete(" ") + group_of_two,)
        group_of_four = pynini.union(
            group_of_two + pynutil.delete(" ") + group_of_two,
            tripled_digit + pynutil.delete(" ") + single_digits,
            single_digits + pynutil.delete(" ") + tripled_digit,
        )
        insert_separator = pynini.cross(" ", "-")
        # 10-digit option
        ten_digit_graph = group_of_three + insert_separator + group_of_three + insert_separator + group_of_four
        # 9-digit option
        nine_digit_graph = group_of_three + insert_separator + group_of_three + insert_separator + group_of_three
        # 8-digit option
        eight_digit_graph = group_of_four + insert_separator + group_of_four
        # optionally denormalize country codes
        optional_country_code = pynini.closure(
            pynini.cross("más ", "+") + (single_digits | group_of_two | group_of_three) + insert_separator, 0, 1
        )
        # optionally denormalize extensions
        optional_extension = pynini.closure(
            pynini.cross(" extensión ", " ext. ") + (single_digits | group_of_two | group_of_three), 0, 1
        )
        # Negative weight prefers the 10-digit grouping when several match.
        number_part = (
            optional_country_code
            + pynini.union(pynutil.add_weight(ten_digit_graph, -0.01), nine_digit_graph, eight_digit_graph)
            + optional_extension
        )
        number_part = pynutil.insert("number_part: \"") + number_part + pynutil.insert("\"")
        graph = number_part
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
    """
    Finite state transducer for classifying ordinal
    vigésimo primero -> ordinal { integer: "21" morphosyntactic_features: "o" }
    This class converts ordinal up to "milésimo" (one thousandth) exclusive.
    Ordinals below ten are not converted (in order to avoid
    e.g. "primero hice ..." -> "1.º hice...", "segunda guerra mundial" -> "2.ª guerra mundial"
    and any other odd conversions.)
    This FST also records the ending of the ordinal (called "morphosyntactic_features"):
    either "o", "a", or "er".
    Args:
        cardinal: CardinalFst
    """
    def __init__(self, cardinal: GraphFst):
        super().__init__(name="ordinal", kind="classify")
        cardinal_graph = cardinal.graph_no_exception
        graph_digit = pynini.string_file(get_abs_path("data/ordinals/digit.tsv"))
        graph_teens = pynini.string_file(get_abs_path("data/ordinals/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/ordinals/twenties.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/ordinals/ties.tsv"))
        graph_hundreds = pynini.string_file(get_abs_path("data/ordinals/hundreds.tsv"))
        # Tens alone, or tens followed by a space (rewritten to "y") and a digit.
        full_graph_ties = graph_ties | (graph_ties + pynini.cross(" ", "y") + graph_digit)
        ordinal_graph_union = pynini.union(graph_digit, graph_teens, graph_twenties, full_graph_ties, graph_hundreds,)
        # Split by final ending so the verbalizer knows which suffix letter(s)
        # ("o", "a", "er") to attach.
        accept_o_endings = NEMO_SIGMA + pynini.accep("o")
        accept_a_endings = NEMO_SIGMA + pynini.accep("a")
        accept_er_endings = NEMO_SIGMA.closure() + pynini.accep("er")
        ordinal_graph_o = accept_o_endings @ ordinal_graph_union
        ordinal_graph_a = accept_a_endings @ ordinal_graph_union
        ordinal_graph_er = accept_er_endings @ ordinal_graph_union
        # 'optional_numbers_in_front' have negative weight so we always
        # include them if they're there
        optional_numbers_in_front = (pynutil.add_weight(ordinal_graph_union, -0.1) + delete_space.closure()).closure()
        graph_o_suffix = (optional_numbers_in_front + ordinal_graph_o) @ cardinal_graph
        graph_a_suffix = (optional_numbers_in_front + ordinal_graph_a) @ cardinal_graph
        graph_er_suffix = (optional_numbers_in_front + ordinal_graph_er) @ cardinal_graph
        # Exposed for FractionFst: masculine ordinals without the 1-9 exclusion.
        self.graph_masc_num_no_exception = graph_o_suffix
        # don't convert ordinals from one to nine inclusive
        graph_exception = pynini.project(pynini.union(graph_digit), 'input')
        graph_o_suffix = (pynini.project(graph_o_suffix, "input") - graph_exception.arcsort()) @ graph_o_suffix
        graph_a_suffix = (pynini.project(graph_a_suffix, "input") - graph_exception.arcsort()) @ graph_a_suffix
        graph_er_suffix = (pynini.project(graph_er_suffix, "input") - graph_exception.arcsort()) @ graph_er_suffix
        graph = (
            pynutil.insert("integer: \"")
            + graph_o_suffix
            + pynutil.insert("\"")
            + pynutil.insert(" morphosyntactic_features: \"o\"")
        )
        graph |= (
            pynutil.insert("integer: \"")
            + graph_a_suffix
            + pynutil.insert("\"")
            + pynutil.insert(" morphosyntactic_features: \"a\"")
        )
        graph |= (
            pynutil.insert("integer: \"")
            + graph_er_suffix
            + pynutil.insert("\"")
            + pynutil.insert(" morphosyntactic_features: \"er\"")
        )
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
    """
    Finite state transducer for classifying whitelisted tokens
    e.g. usted -> tokens { name: "ud." }
    This class has highest priority among all classifier grammars.
    Whitelisted tokens are defined and loaded from "data/whitelist.tsv" (unless input_file specified).
    Args:
        input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
            e.g. nemo_text_processing/inverse_text_normalization/es/data/whitelist.tsv
    """
    def __init__(self, input_file: str = None):
        super().__init__(name="whitelist", kind="classify")
        # Fall back to the bundled whitelist when no custom file is provided.
        tsv_path = input_file if input_file else get_abs_path("data/whitelist.tsv")
        # The TSV maps written -> spoken; invert to transduce spoken -> written.
        spoken_to_written = pynini.string_file(tsv_path).invert()
        self.fst = (
            pynutil.insert("name: \"") + convert_space(spoken_to_written) + pynutil.insert("\"")
        ).optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.es.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.es.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.es.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.es.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.es.taggers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.es.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.es.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.es.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.es.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.es.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.es.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.es.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.es.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
    """
    Final class that composes all other classification grammars. This class can process an entire sentence, that is lower cased.
    For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
    More details to deployment at NeMo/tools/text_processing_deployment.
    Args:
        cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
        overwrite_cache: set to True to overwrite .far files
        whitelist: path to a file with whitelist replacements
        input_case: accepting either "lower_cased" or "cased" input.
    """
    def __init__(
        self,
        cache_dir: str = None,
        overwrite_cache: bool = False,
        whitelist: str = None,
        input_case: str = INPUT_LOWER_CASED,
    ):
        super().__init__(name="tokenize_and_classify", kind="classify")
        far_file = None
        if cache_dir is not None and cache_dir != "None":
            os.makedirs(cache_dir, exist_ok=True)
            far_file = os.path.join(cache_dir, f"es_itn_{input_case}.far")
        # Restore the compiled grammar from the FAR cache when possible;
        # otherwise build all component grammars from scratch.
        if not overwrite_cache and far_file and os.path.exists(far_file):
            self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
            logging.info(f"ClassifyFst.fst was restored from {far_file}.")
        else:
            logging.info(f"Creating ClassifyFst grammars.")
            cardinal = CardinalFst()
            cardinal_graph = cardinal.fst
            ordinal = OrdinalFst(cardinal)
            ordinal_graph = ordinal.fst
            decimal = DecimalFst(cardinal)
            decimal_graph = decimal.fst
            fraction = FractionFst(cardinal, ordinal)
            fraction_graph = fraction.fst
            measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction).fst
            date_graph = DateFst(cardinal).fst
            word_graph = WordFst().fst
            time_graph = TimeFst().fst
            money_graph = MoneyFst(cardinal=cardinal, decimal=decimal).fst
            whitelist_graph = WhiteListFst(input_file=whitelist).fst
            punct_graph = PunctuationFst().fst
            electronic_graph = ElectronicFst().fst
            telephone_graph = TelephoneFst().fst
            # Union of all semiotic classes; lower weight = higher priority.
            # The plain-word grammar has the highest weight so it only fires
            # when no special class matches.
            classify = (
                pynutil.add_weight(whitelist_graph, 1.01)
                | pynutil.add_weight(time_graph, 1.1)
                | pynutil.add_weight(date_graph, 1.09)
                | pynutil.add_weight(decimal_graph, 1.09)
                | pynutil.add_weight(fraction_graph, 1.09)
                | pynutil.add_weight(measure_graph, 1.6)
                | pynutil.add_weight(cardinal_graph, 1.6)
                | pynutil.add_weight(ordinal_graph, 1.6)
                | pynutil.add_weight(money_graph, 1.6)
                | pynutil.add_weight(telephone_graph, 1.6)
                | pynutil.add_weight(electronic_graph, 1.6)
                | pynutil.add_weight(word_graph, 100)
            )
            # Wrap each classification (and any adjacent punctuation) in a tokens { ... } block.
            punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
            token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
            token_plus_punct = (
                pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
            )
            graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
            graph = delete_space + graph + delete_space
            self.fst = graph.optimize()
            if far_file:
                generator_main(far_file, {"tokenize_and_classify": self.fst})
                logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
    """
    Finite state transducer for classifying punctuation
    e.g. a, -> tokens { name: "a" } tokens { name: "," }
    """
    def __init__(self):
        super().__init__(name="punctuation", kind="classify")
        # Accept any single punctuation symbol and wrap it in a name field.
        punct_symbols = "!#$%&\'()*+,-./:;<=>?@^_`{|}~"
        punct_acceptor = pynini.union(*punct_symbols)
        tagged = pynutil.insert("name: \"") + punct_acceptor + pynutil.insert("\"")
        self.fst = tagged.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/punctuation.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
def get_quantity(decimal: 'pynini.FstLike', cardinal_up_to_million: 'pynini.FstLike') -> 'pynini.FstLike':
    """
    Returns an FST that transforms either a cardinal or a decimal followed by a
    large-quantity word into a shortened numeral form,
    e.g. one million -> integer_part: "1" quantity: "million"
    e.g. one point five million -> integer_part: "1" fractional_part: "5" quantity: "million"
    Args:
        decimal: decimal FST
        cardinal_up_to_million: cardinal FST
    """
    # Strip leading zeros from the cardinal output so the integer part is canonical.
    strip_leading_zeros = (
        pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
    )
    integer_numbers = cardinal_up_to_million @ strip_leading_zeros

    # Spanish large-quantity words (singular and plural forms).
    quantity_words = [
        "millón",
        "millones",
        "millardo",
        "millardos",
        "billón",
        "billones",
        "trillón",
        "trillones",
        "cuatrillón",
        "cuatrillones",
    ]
    quantity_field = (
        pynutil.insert("quantity: \"") + pynini.union(*quantity_words) + pynutil.insert("\"")
    )

    # Case 1: a plain cardinal followed by the quantity word.
    cardinal_case = (
        pynutil.insert("integer_part: \"")
        + integer_numbers
        + pynutil.insert("\"")
        + delete_extra_space
        + quantity_field
    )
    # Case 2: a decimal followed by the quantity word.
    decimal_case = decimal + delete_extra_space + quantity_field
    return cardinal_case | decimal_case
class DecimalFst(GraphFst):
    """
    Finite state transducer for classifying decimal
    Decimal point is either "." or ",", determined by whether "punto" or "coma" is spoken.
    e.g. menos uno coma dos seis -> decimal { negative: "true" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" }
    e.g. menos uno punto dos seis -> decimal { negative: "true" integer_part: "1" morphosyntactic_features: "." fractional_part: "26" }
    This decimal rule assumes that decimals can be pronounced as:
    (a cardinal) + ('coma' or 'punto') plus (any sequence of cardinals <1000, including 'zero')
    Also writes large numbers in shortened form, e.g.
    e.g. uno coma dos seis millón -> decimal { negative: "false" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" quantity: "millón" }
    e.g. dos millones -> decimal { negative: "false" integer_part: "2" quantity: "millones" }
    e.g. mil ochocientos veinticuatro millones -> decimal { negative: "false" integer_part: "1824" quantity: "millones" }
    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="decimal", kind="classify")

        # number after decimal point can be any series of cardinals <1000, including 'zero'
        graph_decimal = cardinal.numbers_up_to_thousand
        graph_decimal = pynini.closure(graph_decimal + delete_space) + graph_decimal
        # Exposed for reuse by other semiotic-class taggers (e.g. fractional parts).
        self.graph = graph_decimal

        # decimal point can be denoted by 'coma' or 'punto'
        decimal_point = pynini.cross("coma", "morphosyntactic_features: \",\"")
        decimal_point |= pynini.cross("punto", "morphosyntactic_features: \".\"")

        # Optional leading "menos" marks the number as negative.
        optional_graph_negative = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"true\"") + delete_extra_space, 0, 1
        )

        graph_fractional = pynutil.insert("fractional_part: \"") + graph_decimal + pynutil.insert("\"")

        # Integer part: any cardinal, plus standalone "cero" (which CardinalFst excludes).
        cardinal_graph = cardinal.graph_no_exception | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")

        # The integer part is optional, so ", cinco"-style decimals are also accepted.
        final_graph_wo_sign = (
            pynini.closure(graph_integer + delete_extra_space, 0, 1)
            + decimal_point
            + delete_extra_space
            + graph_fractional
        )
        final_graph = optional_graph_negative + final_graph_wo_sign

        # Also allow quantity suffixes ("millones", "billones", ...); exposed without the
        # negative marker for reuse by MoneyFst.
        self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
            final_graph_wo_sign, cardinal.numbers_up_to_million
        )
        final_graph |= optional_graph_negative + get_quantity(final_graph_wo_sign, cardinal.numbers_up_to_million)

        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
    """
    Finite state transducer for classifying money
    e.g. doce dólares y cinco céntimos -> money { integer_part: "12" fractional_part: "05" currency: "$" }
    Args:
        cardinal: CardinalFst
        decimal: DecimalFst
    """

    def __init__(self, cardinal: GraphFst, decimal: GraphFst):
        super().__init__(name="money", kind="classify")
        # quantity, integer_part, fractional_part, currency
        cardinal_graph = cardinal.graph_no_exception
        graph_decimal_final = decimal.final_graph_wo_negative

        # Currency tables are inverted so the spoken currency name is on the input side.
        unit_singular = pynini.string_file(get_abs_path("data/money/currency_major_singular.tsv"))
        unit_singular = pynini.invert(unit_singular)
        unit_plural = pynini.string_file(get_abs_path("data/money/currency_major_plural.tsv"))
        unit_plural = pynini.invert(unit_plural)
        unit_minor_singular = pynini.string_file(get_abs_path("data/money/currency_minor_singular.tsv"))
        unit_minor_singular = pynini.invert(unit_minor_singular)
        unit_minor_plural = pynini.string_file(get_abs_path("data/money/currency_minor_plural.tsv"))
        unit_minor_plural = pynini.invert(unit_minor_plural)

        graph_unit_singular = pynutil.insert("currency: \"") + convert_space(unit_singular) + pynutil.insert("\"")
        graph_unit_plural = pynutil.insert("currency: \"") + convert_space(unit_plural) + pynutil.insert("\"")
        graph_unit_minor_singular = (
            pynutil.insert("currency: \"") + convert_space(unit_minor_singular) + pynutil.insert("\"")
        )
        graph_unit_minor_plural = (
            pynutil.insert("currency: \"") + convert_space(unit_minor_plural) + pynutil.insert("\"")
        )

        # Pad single-digit cent amounts to two digits, e.g. "5" -> "05".
        add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)

        # twelve dollars (and) fifty cents, zero cents
        # Negative weight favors the multi-word cardinal reading over "un" -> "01".
        cents_standalone = (
            pynutil.insert("morphosyntactic_features: \",\"")  # always use a comma in the decimal
            + insert_space
            + pynutil.insert("fractional_part: \"")
            + pynini.union(
                pynutil.add_weight(((NEMO_SIGMA - "un") @ cardinal_graph), -0.7) @ add_leading_zero_to_double_digit
                + delete_space,
                pynini.cross("un", "01") + delete_space,
            )
            + pynutil.insert("\"")
        )

        # Optional "con"/"y" connector before the cents; the minor-unit word is dropped.
        optional_cents_standalone = pynini.closure(
            delete_space
            + pynini.closure((pynutil.delete("con") | pynutil.delete('y')) + delete_space, 0, 1)
            + insert_space
            + cents_standalone
            + pynutil.delete(pynini.union(unit_minor_singular, unit_minor_plural)),
            0,
            1,
        )
        # twelve dollars fifty, only after integer
        # setenta y cinco dólares con sesenta y tres~$75,63
        optional_cents_suffix = pynini.closure(
            delete_extra_space
            + pynutil.insert("morphosyntactic_features: \",\"")  # always use a comma in the decimal
            + insert_space
            + pynutil.insert("fractional_part: \"")
            + pynini.closure(pynutil.delete("con") + delete_space, 0, 1)
            + pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
            + pynutil.insert("\""),
            0,
            1,
        )

        # Plural amounts: any cardinal except "un"/"una", followed by a plural currency word.
        graph_integer = (
            pynutil.insert("integer_part: \"")
            + ((NEMO_SIGMA - "un" - "una") @ cardinal_graph)
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_unit_plural
            + (optional_cents_standalone | optional_cents_suffix)
        )
        # Singular amounts: "un"/"una" followed by a singular currency word.
        graph_integer |= (
            pynutil.insert("integer_part: \"")
            + (pynini.cross("un", "1") | pynini.cross("una", "1"))
            + pynutil.insert("\"")
            + delete_extra_space
            + graph_unit_singular
            + (optional_cents_standalone | optional_cents_suffix)
        )

        # Cents-only amounts get a zero integer part, e.g. "cinco céntimos" -> 0,05.
        cents_only_int = pynutil.insert("integer_part: \"0\" ")
        cents_only_units = graph_unit_minor_singular | graph_unit_minor_plural
        cents_only_graph = cents_only_int + cents_standalone + pynini.accep(" ") + cents_only_units

        graph_decimal = (graph_decimal_final + delete_extra_space + graph_unit_plural) | cents_only_graph
        # Allow "de" between a quantity decimal and the currency, e.g. "dos millones de dólares".
        graph_decimal |= graph_decimal_final + pynutil.delete(" de") + delete_extra_space + graph_unit_plural

        final_graph = graph_integer | graph_decimal
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_DIGIT, NEMO_SPACE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
    """
    Finite state transducer for classifying cardinals
    e.g. menos veintitrés -> cardinal { negative: "-" integer: "23"}
    This class converts cardinals up to (but not including) "un cuatrillón",
    i.e up to "one septillion" in English (10^{24}).
    Cardinals below ten are not converted (in order to avoid
    "vivo en una casa" --> "vivo en 1 casa" and any other odd conversions.)
    Although technically Spanish grammar requires that "y" only comes after
    "10s" numbers (ie. "treinta", ..., "noventa"), these rules will convert
    numbers even with "y" in an ungrammatical place (because "y" is ignored
    inside cardinal numbers).
    e.g. "mil y una" -> cardinal { integer: "1001"}
    e.g. "ciento y una" -> cardinal { integer: "101"}
    """

    def __init__(self):
        super().__init__(name="cardinal", kind="classify")

        # Word -> digit tables for the basic number vocabulary.
        graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
        graph_hundreds = pynini.string_file(get_abs_path("data/numbers/hundreds.tsv"))

        # Tens with an optional "y <digit>" (e.g. "treinta y dos" -> "32"),
        # padding missing positions with "0".
        full_graph_ties = (graph_ties | pynutil.insert("0")) + (
            (delete_space + pynutil.delete("y") + delete_space + graph_digit) | pynutil.insert("0")
        )

        # Three-digit component: hundreds place, then tens/units, each zero-padded.
        graph_hundred_component = graph_hundreds | pynutil.insert("0")
        graph_hundred_component += delete_space
        graph_hundred_component += (
            graph_twenties | full_graph_ties | graph_teen | (pynutil.insert("0") + graph_digit) | pynutil.insert("00")
        )

        # Same component restricted to values with at least one non-zero digit.
        graph_hundred_component_at_least_one_none_zero_digit = graph_hundred_component @ (
            pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
        )
        self.graph_hundred_component_at_least_one_none_zero_digit = (
            graph_hundred_component_at_least_one_none_zero_digit
        )

        graph_thousands = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("mil"),
            pynutil.insert("001") + pynutil.delete("mil"),  # because we say 'mil', not 'un mil'
            pynutil.insert("000", weight=0.1),
        )

        graph_millones = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + (pynutil.delete("millones") | pynutil.delete("millón")),
            pynutil.insert("000") + pynutil.delete("millones"),  # to allow for 'mil millones'
        )

        graph_mil_millones = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("mil"),
            pynutil.insert("001") + pynutil.delete("mil"),  # because we say 'mil', not 'un mil'
        )
        graph_mil_millones += delete_space + (
            graph_millones | pynutil.insert("000") + pynutil.delete("millones")
        )  # allow for 'mil millones'
        graph_mil_millones |= pynutil.insert("000000", weight=0.1)

        # also allow 'millardo' instead of 'mil millones'
        graph_millardo = (
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + (pynutil.delete("millardo") | pynutil.delete("millardos"))
        )

        graph_billones = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + (pynutil.delete("billones") | pynutil.delete("billón")),
        )

        graph_mil_billones = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("mil"),
            pynutil.insert("001") + pynutil.delete("mil"),  # because we say 'mil', not 'un mil'
        )
        graph_mil_billones += delete_space + (
            graph_billones | pynutil.insert("000") + pynutil.delete("billones")
        )  # allow for 'mil billones'
        graph_mil_billones |= pynutil.insert("000000", weight=0.1)

        graph_trillones = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit
            + delete_space
            + (pynutil.delete("trillones") | pynutil.delete("trillón")),
        )

        graph_mil_trillones = pynini.union(
            graph_hundred_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("mil"),
            pynutil.insert("001") + pynutil.delete("mil"),  # because we say 'mil', not 'un mil'
        )
        graph_mil_trillones += delete_space + (
            graph_trillones | pynutil.insert("000") + pynutil.delete("trillones")
        )  # allow for 'mil trillones'
        graph_mil_trillones |= pynutil.insert("000000", weight=0.1)

        # Full cardinal: concatenate all magnitude groups (each zero-padded when absent),
        # plus standalone zero.
        graph = pynini.union(
            (graph_mil_trillones | pynutil.insert("000", weight=0.1) + graph_trillones)
            + delete_space
            + (graph_mil_billones | pynutil.insert("000", weight=0.1) + graph_billones)
            + delete_space
            + pynini.union(
                graph_mil_millones,
                pynutil.insert("000", weight=0.1) + graph_millones,
                graph_millardo + graph_millones,
                graph_millardo + pynutil.insert("000", weight=0.1),
            )
            + delete_space
            + graph_thousands
            + delete_space
            + graph_hundred_component,
            graph_zero,
        )

        # Strip leading zeros from the padded digit string (keeping a lone "0").
        graph = graph @ pynini.union(
            pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT), "0"
        )

        self.graph_no_exception = graph

        # save self.numbers_up_to_thousand for use in DecimalFst
        digits_up_to_thousand = NEMO_DIGIT | (NEMO_DIGIT ** 2) | (NEMO_DIGIT ** 3)
        numbers_up_to_thousand = pynini.compose(graph, digits_up_to_thousand).optimize()
        self.numbers_up_to_thousand = numbers_up_to_thousand

        # save self.numbers_up_to_million for use in DecimalFst
        digits_up_to_million = (
            NEMO_DIGIT
            | (NEMO_DIGIT ** 2)
            | (NEMO_DIGIT ** 3)
            | (NEMO_DIGIT ** 4)
            | (NEMO_DIGIT ** 5)
            | (NEMO_DIGIT ** 6)
        )
        numbers_up_to_million = pynini.compose(graph, digits_up_to_million).optimize()
        self.numbers_up_to_million = numbers_up_to_million

        # don't convert cardinals from zero to nine inclusive
        graph_exception = pynini.project(pynini.closure(NEMO_SPACE, 0, 1) + (graph_digit | graph_zero), 'input')
        self.graph = (pynini.project(graph, "input") - graph_exception.arcsort()) @ graph

        # Optional leading "menos" -> negative: "-"
        optional_minus_graph = pynini.closure(
            pynutil.insert("negative: ") + pynini.cross("menos", "\"-\"") + NEMO_SPACE, 0, 1
        )
        final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, GraphFst, insert_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
    """
    Finite state transducer for classifying 'electronic' semiotic classes, i.e.
    email address (which get converted to "username" and "domain" fields),
    and URLS (which get converted to a "protocol" field).
        e.g. c d f uno arroba a b c punto e d u -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
        e.g. doble ve doble ve doble ve a b c punto e d u -> tokens { electronic { protocol: "www.abc.edu" } }
    """

    def __init__(self):
        super().__init__(name="electronic", kind="classify")

        # NOTE: this local deletes exactly one space (spoken characters are
        # space-separated); it intentionally shadows the imported graph-utils name.
        delete_extra_space = pynutil.delete(" ")

        # Letters plus spoken digits ("uno" -> "1") and zero.
        alpha_num = (
            NEMO_ALPHA
            | pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
            | pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
        )

        # Spoken symbol names -> symbols (table is inverted so names are input).
        symbols = pynini.string_file(get_abs_path("data/electronic/symbols.tsv")).invert()

        accepted_username = alpha_num | symbols
        process_dot = pynini.cross("punto", ".")
        # Username must start and end with an alphanumeric character.
        username = (
            pynutil.insert("username: \"")
            + alpha_num
            + delete_extra_space
            + pynini.closure(accepted_username + delete_extra_space)
            + alpha_num
            + pynutil.insert("\"")
        )
        single_alphanum = pynini.closure(alpha_num + delete_extra_space) + alpha_num
        # Server/domain: either spelled-out characters or a known name from the TSVs.
        server = single_alphanum | pynini.string_file(get_abs_path("data/electronic/server_name.tsv")).invert()
        domain = single_alphanum | pynini.string_file(get_abs_path("data/electronic/domain.tsv")).invert()
        domain_graph = (
            pynutil.insert("domain: \"")
            + server
            + delete_extra_space
            + process_dot
            + delete_extra_space
            + domain
            + pynutil.insert("\"")
        )
        # Email: <username> arroba <domain>
        graph = (
            username + delete_extra_space + pynutil.delete("arroba") + insert_space + delete_extra_space + domain_graph
        )

        ############# url ###
        protocol_end = pynini.cross(pynini.union("www", "w w w", "doble ve doble ve doble ve"), "www")
        protocol_start = pynini.cross(pynini.union("http", "h t t p", "hache te te pe"), "http")
        protocol_start |= pynini.cross(pynini.union("https", "h t t p s", "hache te te pe ese"), "https")
        protocol_start += pynini.cross(" dos puntos barra barra ", "://")
        # e.g. .com, .es
        ending = (
            delete_extra_space
            + symbols
            + delete_extra_space
            + (domain | pynini.closure(accepted_username + delete_extra_space,) + accepted_username)
        )
        # Optional "http(s)://", then "www.", then the host and one or more endings.
        protocol = (
            pynini.closure(protocol_start, 0, 1)
            + protocol_end
            + delete_extra_space
            + process_dot
            + delete_extra_space
            + (pynini.closure(delete_extra_space + accepted_username, 1) | server)
            + pynini.closure(ending, 1)
        )
        protocol = pynutil.insert("protocol: \"") + protocol + pynutil.insert("\"")
        graph |= protocol
        ########
        final_graph = self.add_tokens(graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.graph_utils import int_to_roman
from nemo_text_processing.inverse_text_normalization.es.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class DateFst(GraphFst):
    """
    Finite state transducer for classifying date,
    e.g. primero de enero -> date { day: "1" month: "enero" }
    e.g. uno de enero -> date { day: "1" month: "enero" }
    Args:
        cardinal: CardinalFst
    """

    def __init__(self, cardinal: GraphFst):
        super().__init__(name="date", kind="classify")

        graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
        graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
        graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
        graph_twenties = pynini.string_file(get_abs_path("data/numbers/twenties.tsv"))
        graph_month = pynini.string_file(get_abs_path("data/dates/months.tsv"))
        # Era suffixes (e.g. for years "antes de cristo"); inverted so spoken form is input.
        graph_suffix = pynini.string_file(get_abs_path("data/dates/year_suffix.tsv")).invert()

        # Spoken numbers up to two digits, including "treinta y uno"-style compounds.
        graph_1_to_100 = pynini.union(
            graph_digit,
            graph_twenties,
            graph_teen,
            (graph_ties + pynutil.insert("0")),
            (graph_ties + pynutil.delete(" y ") + graph_digit),
        )

        # Restrict to valid day-of-month values 1-31.
        digits_1_to_31 = [str(digits) for digits in range(1, 32)]
        graph_1_to_31 = graph_1_to_100 @ pynini.union(*digits_1_to_31)
        # can use "primero" for 1st day of the month
        graph_1_to_31 = pynini.union(graph_1_to_31, pynini.cross("primero", "1"))

        day_graph = pynutil.insert("day: \"") + graph_1_to_31 + pynutil.insert("\"")
        month_graph = pynutil.insert("month: \"") + graph_month + pynutil.insert("\"")
        # Day-month pattern: "<day> de <month>"
        graph_dm = day_graph + delete_space + pynutil.delete("de") + delete_extra_space + month_graph

        # transform "siglo diez" -> "siglo x" and "año mil novecientos noventa y ocho" -> "año mcmxcviii"
        roman_numerals = int_to_roman(cardinal.graph)
        roman_centuries = pynini.union("siglo ", "año ") + roman_numerals
        roman_centuries_graph = pynutil.insert("year: \"") + roman_centuries + pynutil.insert("\"")

        # transform "doscientos antes de cristo" -> "200 a. c."
        year_with_suffix = cardinal.graph + pynini.accep(" ") + graph_suffix
        year_with_suffix_graph = pynutil.insert("year: \"") + year_with_suffix + pynutil.insert("\"")

        final_graph = graph_dm | roman_centuries_graph | year_with_suffix_graph
        # Keep spoken field order when verbalizing.
        final_graph += pynutil.insert(" preserve_order: true")
        final_graph = self.add_tokens(final_graph)
        self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
    """
    Finite state transducer for the default token class: any plain token that
    does not belong to a specialized semiotic class.
    e.g. sleep -> tokens { name: "sleep" }
    """

    def __init__(self):
        super().__init__(name="word", kind="classify")
        # One or more non-space characters, emitted as: name: "<token>"
        token_chars = pynini.closure(NEMO_NOT_SPACE, 1)
        tagged_word = pynutil.insert("name: \"") + token_chars + pynutil.insert("\"")
        self.fst = tagged_word.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/taggers/word.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.