python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for verbalizing time,
e.g. time { hours: "la 1" minutes: "10" } -> la 1:10
e.g. time { hours: "la 1" minutes: "45" } -> la 1:45
"""
def __init__(self):
super().__init__(name="time", kind="verbalize")
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
# hour may or may not include preposition ("la" or "las")
hour = (
pynutil.delete("hours:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(pynini.union("la ", "las "), 0, 1)
+ pynini.closure(NEMO_DIGIT, 1)
+ pynutil.delete("\"")
)
minute = (
pynutil.delete("minutes:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_DIGIT, 1)
+ pynutil.delete("\"")
)
suffix = (
delete_space
+ insert_space
+ pynutil.delete("suffix:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
optional_suffix = pynini.closure(suffix, 0, 1)
zone = (
delete_space
+ insert_space
+ pynutil.delete("zone:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
optional_zone = pynini.closure(zone, 0, 1)
graph = (
hour
+ delete_space
+ pynutil.insert(":")
+ (minute @ add_leading_zero_to_double_digit)
+ optional_suffix
+ optional_zone
)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
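
# Minimal usage sketch, assuming the tagged input format shown in the docstring above:
# compose the tagged string with the verbalizer FST and take the single best path.
if __name__ == "__main__":
    tagged = 'time { hours: "la 1" minutes: "10" }'
    print(pynini.shortestpath(tagged @ TimeFst().fst).string())  # expected: la 1:10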
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
"""
Finite state transducer for verbalizing measure, e.g.
measure { cardinal { negative: "true" integer: "12" } units: "kg" } -> -12 kg
Args:
decimal: DecimalFst
cardinal: CardinalFst
"""
def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst):
super().__init__(name="measure", kind="verbalize")
optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-"), 0, 1)
unit = (
pynutil.delete("units:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
+ delete_space
)
graph_decimal = (
pynutil.delete("decimal {")
+ delete_space
+ optional_sign
+ delete_space
+ decimal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph_cardinal = (
pynutil.delete("cardinal {")
+ delete_space
+ optional_sign
+ delete_space
+ cardinal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph_fraction = fraction.fst + delete_space + pynutil.insert(" ") + unit
graph_fraction |= graph_cardinal + delete_extra_space + fraction.fst + delete_extra_space + unit
graph_math = pynutil.delete("units: \"math\"") + delete_space + cardinal.fst
graph = (graph_cardinal | graph_decimal) + delete_space + pynutil.insert(" ") + unit
graph |= graph_fraction
graph |= graph_math
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
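
# Minimal usage sketch, assuming the tagged input format shown in the docstring above.
# The component verbalizers are instantiated here only for illustration.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.es.verbalizers.cardinal import CardinalFst
    from nemo_text_processing.inverse_text_normalization.es.verbalizers.decimal import DecimalFst
    from nemo_text_processing.inverse_text_normalization.es.verbalizers.fraction import FractionFst

    fst = MeasureFst(decimal=DecimalFst(), cardinal=CardinalFst(), fraction=FractionFst()).fst
    tagged = 'measure { cardinal { negative: "true" integer: "12" } units: "kg" }'
    print(pynini.shortestpath(tagged @ fst).string())  # expected: -12 kg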
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/measure.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SPACE, GraphFst, delete_space
from pynini.lib import pynutil
class FractionFst(GraphFst):
"""
Finite state transducer for verbalizing fractions
e.g. fraction { numerator: "8" denominator: "3" } -> 8/3
"""
def __init__(self):
super().__init__(name="fraction", kind="verbalize")
optional_negative = pynutil.delete("negative: \"") + pynini.cross("True", "-") + pynutil.delete("\"")
optional_negative = pynini.closure(optional_negative + delete_space, 0, 1)
integer_part = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
optional_integer_part = pynini.closure(integer_part + NEMO_SPACE, 0, 1)
numerator = pynutil.delete("numerator: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
denominator = pynutil.delete("denominator: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
graph = (
optional_negative + optional_integer_part + numerator + delete_space + pynutil.insert("/") + denominator
)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
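
# Minimal usage sketch, assuming the tagged input format shown in the docstring above:
if __name__ == "__main__":
    tagged = 'fraction { numerator: "8" denominator: "3" }'
    print(pynini.shortestpath(tagged @ FractionFst().fst).string())  # expected: 8/3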
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
"""
Finite state transducer for verbalizing telephone, e.g.
telephone { number_part: "123-123-5678" }
-> 123-123-5678
"""
def __init__(self):
super().__init__(name="telephone", kind="verbalize")
number_part = pynutil.delete("number_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
delete_tokens = self.delete_tokens(number_part)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
"""
Finite state transducer for verbalizing ordinal, e.g.
ordinal { integer: "13" morphosyntactic_features: "o" } -> 13.º
"""
def __init__(self):
super().__init__(name="ordinal", kind="verbalize")
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
replace_suffix = pynini.union(
pynini.cross(" morphosyntactic_features: \"o\"", ".º"),
pynini.cross(" morphosyntactic_features: \"a\"", ".ª"),
pynini.cross(" morphosyntactic_features: \"er\"", ".ᵉʳ"),
)
graph = graph + replace_suffix
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
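
# Minimal usage sketch, assuming the tagged input format shown in the docstring above:
if __name__ == "__main__":
    tagged = 'ordinal { integer: "13" morphosyntactic_features: "o" }'
    print(pynini.shortestpath(tagged @ OrdinalFst().fst).string())  # expected: 13.º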
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.es.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.fraction import FractionFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details to deployment at NeMo/tools/text_processing_deployment.
"""
def __init__(self):
super().__init__(name="verbalize", kind="verbalize")
cardinal = CardinalFst()
cardinal_graph = cardinal.fst
decimal = DecimalFst()
decimal_graph = decimal.fst
fraction = FractionFst()
fraction_graph = fraction.fst
ordinal_graph = OrdinalFst().fst
measure_graph = MeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction).fst
money_graph = MoneyFst(decimal=decimal).fst
time_graph = TimeFst().fst
date_graph = DateFst().fst
whitelist_graph = WhiteListFst().fst
telephone_graph = TelephoneFst().fst
electronic_graph = ElectronicFst().fst
graph = (
time_graph
| date_graph
| money_graph
| fraction_graph
| measure_graph
| ordinal_graph
| decimal_graph
| cardinal_graph
| whitelist_graph
| telephone_graph
| electronic_graph
)
self.fst = graph
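
# Minimal usage sketch, assuming a single classified token as input; the composed
# grammar routes it to the matching verbalizer (here, the cardinal branch):
if __name__ == "__main__":
    import pynini

    tagged = 'cardinal { negative: "-" integer: "23" }'
    print(pynini.shortestpath(tagged @ VerbalizeFst().fst).string())  # expected: -23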
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
"""
Finite state transducer for verbalizing whitelist
e.g. tokens { name: "uds." } -> uds.
"""
def __init__(self):
super().__init__(name="whitelist", kind="verbalize")
graph = (
pynutil.delete("name:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.es.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.es.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
"""
Finite state transducer that verbalizes an entire sentence, e.g.
tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
"""
def __init__(self):
super().__init__(name="verbalize_final", kind="verbalize")
verbalize = VerbalizeFst().fst
word = WordFst().fst
types = verbalize | word
graph = (
pynutil.delete("tokens")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ types
+ delete_space
+ pynutil.delete("}")
)
graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space
self.fst = graph
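
# Minimal end-to-end usage sketch, assuming the tagged sentence format from the docstring:
if __name__ == "__main__":
    tagged = 'tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" }'
    print(pynini.shortestpath(tagged @ VerbalizeFinalFst().fst).string())  # expected: its 12:30 now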
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
"""
Finite state transducer for verbalizing decimal,
e.g. decimal { negative: "true" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" } -> -1,26
e.g. decimal { negative: "true" integer_part: "1" morphosyntactic_features: "." fractional_part: "26" } -> -1.26
e.g. decimal { negative: "false" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" quantity: "millón" } -> 1,26 millón
e.g. decimal { negative: "false" integer_part: "2" quantity: "millones" } -> 2 millones
"""
def __init__(self):
super().__init__(name="decimal", kind="verbalize")
optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
integer = (
pynutil.delete("integer_part:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_integer = pynini.closure(integer + delete_space, 0, 1)
decimal_point = pynini.cross("morphosyntactic_features: \",\"", ",")
decimal_point |= pynini.cross("morphosyntactic_features: \".\"", ".")
fractional = (
decimal_point
+ delete_space
+ pynutil.delete("fractional_part:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
quantity = (
pynutil.delete("quantity:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_quantity = pynini.closure(pynutil.insert(" ") + quantity + delete_space, 0, 1)
graph = optional_integer + optional_fractional + optional_quantity
self.numbers = graph
graph = optional_sign + graph
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
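
# Minimal usage sketch, assuming the tagged input format shown in the docstring above:
if __name__ == "__main__":
    tagged = 'decimal { negative: "true" integer_part: "1" morphosyntactic_features: "," fractional_part: "26" }'
    print(pynini.shortestpath(tagged @ DecimalFst().fst).string())  # expected: -1,26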
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { integer_part: "12" morphosyntactic_features: "," fractional_part: "05" currency: "$" } -> $12,05
Args:
decimal: DecimalFst
"""
def __init__(self, decimal: GraphFst):
super().__init__(name="money", kind="verbalize")
unit = (
pynutil.delete("currency:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
graph = unit + delete_space + decimal.numbers
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
"""
Finite state transducer for verbalizing cardinal
e.g. cardinal { negative: "-" integer: "23" } -> -23
"""
def __init__(self):
super().__init__(name="cardinal", kind="verbalize")
optional_sign = pynini.closure(
pynutil.delete("negative:")
+ delete_space
+ pynutil.delete("\"")
+ NEMO_NOT_QUOTE
+ pynutil.delete("\"")
+ delete_space,
0,
1,
)
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
self.numbers = graph
graph = optional_sign + graph
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for verbalizing electronic
e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> cdf1@abc.edu
e.g. tokens { electronic { protocol: "www.abc.edu" } } -> www.abc.edu
"""
def __init__(self):
super().__init__(name="electronic", kind="verbalize")
user_name = (
pynutil.delete("username:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
domain = (
pynutil.delete("domain:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
protocol = (
pynutil.delete("protocol:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
graph = user_name + delete_space + pynutil.insert("@") + domain
graph |= protocol
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
"""
Finite state transducer for verbalizing date, e.g.
date { day: "1" month: "enero" preserve_order: true } -> 1 de enero
"""
def __init__(self):
super().__init__(name="date", kind="verbalize")
year = (
pynutil.delete("year:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
month = (
pynutil.delete("month:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
day = (
pynutil.delete("day:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
# day month
graph_dm = day + delete_extra_space + pynutil.insert("de") + insert_space + month
optional_preserve_order = pynini.closure(
pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
| pynutil.delete("field_order:")
+ delete_space
+ pynutil.delete("\"")
+ NEMO_NOT_QUOTE
+ pynutil.delete("\"")
+ delete_space
)
final_graph = graph_dm | year
final_graph += delete_space + optional_preserve_order
delete_tokens = self.delete_tokens(final_graph)
self.fst = delete_tokens.optimize()
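
# Minimal usage sketch, assuming the tagged input format shown in the docstring above:
if __name__ == "__main__":
    tagged = 'date { day: "1" month: "enero" preserve_order: true }'
    print(pynini.shortestpath(tagged @ DateFst().fst).string())  # expected: 1 de enero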
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WordFst(GraphFst):
"""
Finite state transducer for verbalizing plain tokens
e.g. tokens { name: "sleep" } -> sleep
"""
def __init__(self):
super().__init__(name="word", kind="verbalize")
chars = pynini.closure(NEMO_CHAR - " ", 1)
char = pynutil.delete("name:") + delete_space + pynutil.delete("\"") + chars + pynutil.delete("\"")
graph = char @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/verbalizers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/dates/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/numbers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/ordinals/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/electronic/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/time/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/money/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/measures/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/roman/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/es/data/fractions/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import List, Union
import inflect
_inflect = inflect.engine()
def num_to_word(x: Union[str, int]):
"""
Converts an integer to its spoken representation.
Args:
x: integer
Returns: spoken representation
"""
if isinstance(x, int):
x = str(x)
x = _inflect.number_to_words(str(x)).replace("-", " ").replace(",", "")
return x
def get_abs_path(rel_path):
"""
Get absolute path
Args:
rel_path: relative path to this file
Returns absolute path
"""
return os.path.dirname(os.path.abspath(__file__)) + '/' + rel_path
def get_various_formats(text: str) -> List[str]:
"""
Return various formats for text, e.g., all caps, the first letter upper cased, space separated, etc.
"""
result = []
if len(text) == 0:
return []
for t in [text, ' '.join(list(text))]:
result.append(t)
result.append(t.upper())
result.append(t.capitalize())
return result
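
# Minimal usage sketch for the helpers above (printed outputs are illustrative):
if __name__ == "__main__":
    print(num_to_word(23))            # "twenty three"
    print(get_various_formats("ab"))  # ["ab", "AB", "Ab", "a b", "A B", "A b"]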
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from typing import List
import regex as re
from nemo_text_processing.text_normalization.data_loader_utils import (
EOS_TYPE,
Instance,
load_files,
training_data_to_sentences,
)
"""
This file is for evaluation purposes.
filter_loaded_data() cleans data (a list of instances) for inverse text normalization. Filters and cleaners can be specified for each semiotic class individually.
For example, normalized text should only include letters and whitespace characters but no punctuation.
Cardinal unnormalized instances should contain at least one digit; all non-digit characters are removed.
"""
class Filter:
"""
Filter class
Args:
class_type: semiotic class used in dataset
process_func: function to transform text
filter_func: function to filter text
"""
def __init__(self, class_type: str, process_func: object, filter_func: object):
self.class_type = class_type
self.process_func = process_func
self.filter_func = filter_func
def filter(self, instance: Instance) -> bool:
"""
Filters the given instance with the filter function.
Args:
instance: instance to filter
Returns: True if the instance fulfills the criteria or does not belong to this filter's class type
"""
if instance.token_type != self.class_type:
return True
return self.filter_func(instance)
def process(self, instance: Instance) -> Instance:
"""
Processes the given instance with the process function.
Args:
instance: instance to process
Returns: the processed instance if it belongs to the expected class type, otherwise the original instance
"""
if instance.token_type != self.class_type:
return instance
return self.process_func(instance)
def filter_cardinal_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_cardinal_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
un_normalized = re.sub(r"[^0-9]", "", un_normalized)
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_ordinal_1(instance: Instance) -> bool:
ok = re.search(r"(st|nd|rd|th)\s*$", instance.un_normalized)
return ok
def process_ordinal_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
un_normalized = re.sub(r"[,\s]", "", un_normalized)
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_decimal_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_decimal_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
un_normalized = re.sub(r",", "", un_normalized)
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_measure_1(instance: Instance) -> bool:
ok = True
return ok
def process_measure_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
un_normalized = re.sub(r",", "", un_normalized)
un_normalized = re.sub(r"m2", "m²", un_normalized)
un_normalized = re.sub(r"(\d)([^\d.\s])", r"\1 \2", un_normalized)
normalized = re.sub(r"[^a-z\s]", "", normalized)
normalized = re.sub(r"per ([a-z\s]*)s$", r"per \1", normalized)
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_money_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_money_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
un_normalized = re.sub(r",", "", un_normalized)
un_normalized = re.sub(r"a\$", r"$", un_normalized)
un_normalized = re.sub(r"us\$", r"$", un_normalized)
un_normalized = re.sub(r"(\d)m\s*$", r"\1 million", un_normalized)
un_normalized = re.sub(r"(\d)bn?\s*$", r"\1 billion", un_normalized)
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_time_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_time_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
un_normalized = re.sub(r": ", ":", un_normalized)
un_normalized = re.sub(r"(\d)\s?a\s?m\s?", r"\1 a.m.", un_normalized)
un_normalized = re.sub(r"(\d)\s?p\s?m\s?", r"\1 p.m.", un_normalized)
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_plain_1(instance: Instance) -> bool:
ok = True
return ok
def process_plain_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_punct_1(instance: Instance) -> bool:
ok = True
return ok
def process_punct_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_date_1(instance: Instance) -> bool:
ok = True
return ok
def process_date_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
un_normalized = re.sub(r",", "", un_normalized)
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_letters_1(instance: Instance) -> bool:
ok = True
return ok
def process_letters_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_verbatim_1(instance: Instance) -> bool:
ok = True
return ok
def process_verbatim_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_digit_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_digit_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_telephone_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_telephone_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_electronic_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_electronic_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_fraction_1(instance: Instance) -> bool:
ok = re.search(r"[0-9]", instance.un_normalized)
return ok
def process_fraction_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
def filter_address_1(instance: Instance) -> bool:
ok = True
return ok
def process_address_1(instance: Instance) -> Instance:
un_normalized = instance.un_normalized
normalized = instance.normalized
normalized = re.sub(r"[^a-z ]", "", normalized)
return Instance(token_type=instance.token_type, un_normalized=un_normalized, normalized=normalized)
filters = []
filters.append(Filter(class_type="CARDINAL", process_func=process_cardinal_1, filter_func=filter_cardinal_1))
filters.append(Filter(class_type="ORDINAL", process_func=process_ordinal_1, filter_func=filter_ordinal_1))
filters.append(Filter(class_type="DECIMAL", process_func=process_decimal_1, filter_func=filter_decimal_1))
filters.append(Filter(class_type="MEASURE", process_func=process_measure_1, filter_func=filter_measure_1))
filters.append(Filter(class_type="MONEY", process_func=process_money_1, filter_func=filter_money_1))
filters.append(Filter(class_type="TIME", process_func=process_time_1, filter_func=filter_time_1))
filters.append(Filter(class_type="DATE", process_func=process_date_1, filter_func=filter_date_1))
filters.append(Filter(class_type="PLAIN", process_func=process_plain_1, filter_func=filter_plain_1))
filters.append(Filter(class_type="PUNCT", process_func=process_punct_1, filter_func=filter_punct_1))
filters.append(Filter(class_type="LETTERS", process_func=process_letters_1, filter_func=filter_letters_1))
filters.append(Filter(class_type="VERBATIM", process_func=process_verbatim_1, filter_func=filter_verbatim_1))
filters.append(Filter(class_type="DIGIT", process_func=process_digit_1, filter_func=filter_digit_1))
filters.append(Filter(class_type="TELEPHONE", process_func=process_telephone_1, filter_func=filter_telephone_1))
filters.append(Filter(class_type="ELECTRONIC", process_func=process_electronic_1, filter_func=filter_electronic_1))
filters.append(Filter(class_type="FRACTION", process_func=process_fraction_1, filter_func=filter_fraction_1))
filters.append(Filter(class_type="ADDRESS", process_func=process_address_1, filter_func=filter_address_1))
filters.append(Filter(class_type=EOS_TYPE, process_func=lambda x: x, filter_func=lambda x: True))
def filter_loaded_data(data: List[Instance], verbose: bool = False) -> List[Instance]:
"""
Filters list of instances
Args:
data: list of instances
Returns: filtered and transformed list of instances
"""
updates_instances = []
for instance in data:
updated_instance = False
for fil in filters:
if fil.class_type == instance.token_type and fil.filter(instance):
instance = fil.process(instance)
updated_instance = True
if updated_instance:
if verbose:
print(instance)
updates_instances.append(instance)
return updates_instances
def parse_args():
parser = ArgumentParser()
parser.add_argument("--input", help="input file path", type=str, default='./en_with_types/output-00001-of-00100')
parser.add_argument("--verbose", help="print filtered instances", action='store_true')
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
file_path = args.input
print("Loading training data: " + file_path)
instance_list = load_files([file_path]) # List of instances
filtered_instance_list = filter_loaded_data(instance_list, args.verbose)
training_data_to_sentences(filtered_instance_list)
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/clean_eval_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path, num_to_word
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
GraphFst,
capitalized_input_graph,
convert_space,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for classifying time
e.g. twelve thirty -> time { hours: "12" minutes: "30" }
e.g. twelve past one -> time { minutes: "12" hours: "1" }
e.g. two o clock a m -> time { hours: "2" suffix: "a.m." }
e.g. quarter to two -> time { hours: "1" minutes: "45" }
e.g. quarter past two -> time { hours: "2" minutes: "15" }
e.g. half past two -> time { hours: "2" minutes: "30" }
"""
def __init__(self, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="time", kind="classify")
# hours, minutes, seconds, suffix, zone, style, speak_period
suffix_graph = pynini.string_file(get_abs_path("data/time/time_suffix.tsv"))
time_zone_graph = pynini.invert(pynini.string_file(get_abs_path("data/time/time_zone.tsv")))
if input_case == INPUT_CASED:
suffix_graph |= pynini.string_file(get_abs_path("data/time/time_suffix_cased.tsv"))
time_zone_graph |= pynini.invert(pynini.string_file(get_abs_path("data/time/time_zone_cased.tsv")))
to_hour_graph = pynini.string_file(get_abs_path("data/time/to_hour.tsv"))
minute_to_graph = pynini.string_file(get_abs_path("data/time/minute_to.tsv"))
# only used for < 1000 thousand -> 0 weight
cardinal = pynutil.add_weight(CardinalFst(input_case=input_case).graph_no_exception, weight=-0.7)
labels_hour = [num_to_word(x) for x in range(0, 24)]
labels_minute_single = [num_to_word(x) for x in range(1, 10)]
labels_minute_double = [num_to_word(x) for x in range(10, 60)]
graph_hour = pynini.union(*labels_hour) @ cardinal
if input_case == INPUT_CASED:
graph_hour = capitalized_input_graph(graph_hour)
graph_minute_single = pynini.union(*labels_minute_single) @ cardinal
graph_minute_double = pynini.union(*labels_minute_double) @ cardinal
graph_minute_verbose = pynini.cross("half", "30") | pynini.cross("quarter", "15")
oclock = pynini.cross(pynini.union("o' clock", "o clock", "o'clock", "oclock", "hundred hours",), "",)
if input_case == INPUT_CASED:
minute_to_graph = capitalized_input_graph(minute_to_graph)
graph_minute_single = capitalized_input_graph(graph_minute_single)
graph_minute_double = capitalized_input_graph(graph_minute_double)
graph_minute_verbose |= pynini.cross("Half", "30") | pynini.cross("Quarter", "15")
oclock |= pynini.cross(pynini.union("O' clock", "O clock", "O'clock", "Oclock", "Hundred hours",), "",)
final_graph_hour = pynutil.insert("hours: \"") + graph_hour + pynutil.insert("\"")
graph_minute = (
oclock + pynutil.insert("00")
| pynutil.delete("o") + delete_space + graph_minute_single
| graph_minute_double
)
final_suffix = pynutil.insert("suffix: \"") + convert_space(suffix_graph) + pynutil.insert("\"")
final_suffix = delete_space + insert_space + final_suffix
final_suffix_optional = pynini.closure(final_suffix, 0, 1)
final_time_zone_optional = pynini.closure(
delete_space
+ insert_space
+ pynutil.insert("zone: \"")
+ convert_space(time_zone_graph)
+ pynutil.insert("\""),
0,
1,
)
# five o' clock
# two o eight, two thirty five (am/pm)
# two pm/am
graph_hm = (
final_graph_hour + delete_extra_space + pynutil.insert("minutes: \"") + graph_minute + pynutil.insert("\"")
)
# 10 past four, quarter past four, half past four
graph_m_past_h = (
pynutil.insert("minutes: \"")
+ pynini.union(graph_minute_single, graph_minute_double, graph_minute_verbose)
+ pynutil.insert("\"")
+ delete_space
+ pynutil.delete("past")
+ delete_extra_space
+ final_graph_hour
)
quarter_graph = pynini.accep("quarter")
if input_case == INPUT_CASED:
quarter_graph |= pynini.accep("Quarter")
graph_quarter_time = (
pynutil.insert("minutes: \"")
+ (pynini.cross(quarter_graph, "45"))
+ pynutil.insert("\"")
+ delete_space
+ pynutil.delete(pynini.union("to", "till"))
+ delete_extra_space
+ pynutil.insert("hours: \"")
+ to_hour_graph
+ pynutil.insert("\"")
)
graph_m_to_h_suffix_time = (
pynutil.insert("minutes: \"")
+ ((graph_minute_single | graph_minute_double).optimize() @ minute_to_graph)
+ pynutil.insert("\"")
+ pynini.closure(delete_space + pynutil.delete(pynini.union("min", "mins", "minute", "minutes")), 0, 1)
+ delete_space
+ pynutil.delete(pynini.union("to", "till"))
+ delete_extra_space
+ pynutil.insert("hours: \"")
+ to_hour_graph
+ pynutil.insert("\"")
+ final_suffix
)
graph_h = (
final_graph_hour
+ delete_extra_space
+ pynutil.insert("minutes: \"")
+ (pynutil.insert("00") | graph_minute)
+ pynutil.insert("\"")
+ final_suffix
+ final_time_zone_optional
)
final_graph = (
(graph_hm | graph_m_past_h | graph_quarter_time) + final_suffix_optional + final_time_zone_optional
)
final_graph |= graph_h
final_graph |= graph_m_to_h_suffix_time
final_graph = self.add_tokens(final_graph.optimize())
self.fst = final_graph.optimize()
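
# Minimal usage sketch, assuming lower-cased spoken input as in the class docstring:
if __name__ == "__main__":
    tagger = TimeFst().fst
    print(pynini.shortestpath("twelve thirty" @ tagger).string())
    # expected: time { hours: "12" minutes: "30" }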
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
MINUS,
NEMO_SIGMA,
TO_LOWER,
GraphFst,
convert_space,
delete_extra_space,
delete_space,
get_singulars,
)
from pynini.lib import pynutil
class MeasureFst(GraphFst):
"""
Finite state transducer for classifying measure
e.g. minus twelve kilograms -> measure { negative: "true" cardinal { integer: "12" } units: "kg" }
Args:
cardinal: CardinalFst
decimal: DecimalFst
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="measure", kind="classify")
cardinal_graph = cardinal.graph_no_exception
# accept capital letters in units
casing_graph = pynini.closure(TO_LOWER | NEMO_SIGMA).optimize()
graph_unit = pynini.string_file(get_abs_path("data/measurements.tsv"))
graph_unit_singular = pynini.invert(graph_unit) # singular -> abbr
graph_unit_singular = pynini.compose(casing_graph, graph_unit_singular).optimize()
graph_unit_plural = get_singulars(graph_unit_singular).optimize() # plural -> abbr
graph_unit_plural = pynini.compose(casing_graph, graph_unit_plural).optimize()
optional_graph_negative = pynini.closure(
pynutil.insert("negative: ") + pynini.cross(MINUS, "\"true\"") + delete_extra_space, 0, 1,
)
unit_singular = convert_space(graph_unit_singular)
unit_plural = convert_space(graph_unit_plural)
unit_misc = pynutil.insert("/") + pynutil.delete("per") + delete_space + convert_space(graph_unit_singular)
one_graph = pynini.accep("one").optimize()
if input_case == INPUT_CASED:
one_graph |= pynini.accep("One").optimize()
unit_singular = (
pynutil.insert("units: \"")
+ (unit_singular | unit_misc | pynutil.add_weight(unit_singular + delete_space + unit_misc, 0.01))
+ pynutil.insert("\"")
)
unit_plural = (
pynutil.insert("units: \"")
+ (unit_plural | unit_misc | pynutil.add_weight(unit_plural + delete_space + unit_misc, 0.01))
+ pynutil.insert("\"")
)
# Let singular apply to values > 1 as they could be part of an adjective phrase (e.g. 14 foot tall building)
subgraph_decimal = (
pynutil.insert("decimal { ")
+ optional_graph_negative
+ decimal.final_graph_wo_negative
+ pynutil.insert(" }")
+ delete_extra_space
+ (unit_plural | unit_singular)
)
subgraph_cardinal = (
pynutil.insert("cardinal { ")
+ optional_graph_negative
+ pynutil.insert("integer: \"")
+ ((NEMO_SIGMA - one_graph) @ cardinal_graph)
+ pynutil.insert("\"")
+ pynutil.insert(" }")
+ delete_extra_space
+ (unit_plural | unit_singular)
)
subgraph_cardinal |= (
pynutil.insert("cardinal { ")
+ optional_graph_negative
+ pynutil.insert("integer: \"")
+ pynini.cross(one_graph, "1")
+ pynutil.insert("\"")
+ pynutil.insert(" }")
+ delete_extra_space
+ unit_singular
)
final_graph = subgraph_decimal | subgraph_cardinal
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
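# Illustrative usage sketch (not a definitive API): assuming the packaged data/*.tsv
# grammar files are available, the classifier is applied by composing a spoken-form
# string with its FST and taking the lowest-weight path. The example and the expected
# output follow the class docstring above.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst
    from nemo_text_processing.inverse_text_normalization.en.taggers.decimal import DecimalFst

    cardinal = CardinalFst()
    measure = MeasureFst(cardinal=cardinal, decimal=DecimalFst(cardinal))
    spoken = "minus twelve kilograms"
    # lowest-weight path through the composition gives the tagged token string
    print(pynini.shortestpath(spoken @ measure.fst).string())
    # expected (per docstring): measure { cardinal { negative: "true" integer: "12" } units: "kg" }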
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import INPUT_LOWER_CASED, GraphFst
class FractionFst(GraphFst):
"""
Finite state transducer for classifying fraction
Args:
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="fraction", kind="classify")
# integer_part # numerator # denominator
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
MIN_NEG_WEIGHT,
NEMO_ALNUM,
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_LOWER_NOT_A,
GraphFst,
capitalized_input_graph,
delete_space,
insert_space,
)
from pynini.lib import pynutil
def get_serial_number(cardinal):
"""
    Matches any alphanumeric character sequence of length greater than or equal to 3 that contains
    at least one digit, excluding numeric sequences with double digits (ties/teens) preceded by 'a'.
    This avoids cases like "a thirty six" being converted to "a36" in "a thirty six times increase".
"""
digit = pynini.compose(cardinal.graph_no_exception, NEMO_DIGIT)
two_digit = pynutil.add_weight(pynini.compose(cardinal.graph_two_digit, NEMO_DIGIT ** 2), 0.002)
character = digit | two_digit | NEMO_ALPHA
sequence = (NEMO_LOWER_NOT_A | digit) + pynini.closure(pynutil.delete(" ") + character, 2)
sequence |= character + pynini.closure(pynutil.delete(" ") + (digit | NEMO_ALPHA), 2)
sequence2 = (
NEMO_ALPHA
+ pynini.closure(pynutil.delete(" ") + NEMO_ALPHA, 1)
+ pynini.closure(pynutil.delete(" ") + two_digit, 1)
)
sequence2 |= NEMO_LOWER_NOT_A + pynini.closure(pynutil.delete(" ") + two_digit, 1)
sequence2 |= (
two_digit
+ pynini.closure(pynutil.delete(" ") + two_digit, 1)
+ pynini.closure(pynutil.delete(" ") + NEMO_ALPHA, 1)
)
sequence = (sequence | sequence2) @ (pynini.closure(NEMO_ALNUM) + NEMO_DIGIT + pynini.closure(NEMO_ALNUM))
return sequence.optimize()
class TelephoneFst(GraphFst):
"""
Finite state transducer for classifying telephone numbers, e.g.
one two three one two three five six seven eight -> { number_part: "123-123-5678" }
    This class also supports card number and IP formats, e.g.
    "one two three dot one double three dot o dot four o" -> { number_part: "123.133.0.40" }
    "three two double seven three two one four three two one four three double zero five" ->
        { number_part: "3277 3214 3214 3005" }
Args:
cardinal: CardinalFst
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, cardinal: GraphFst, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="telephone", kind="classify")
# country code, number_part, extension
digit_to_str = (
pynini.invert(pynini.string_file(get_abs_path("data/numbers/digit.tsv")).optimize())
| pynini.cross("0", pynini.union("o", "oh", "zero")).optimize()
)
str_to_digit = pynini.invert(digit_to_str)
if input_case == INPUT_CASED:
str_to_digit = capitalized_input_graph(str_to_digit)
double_digit = pynini.union(
*[
pynini.cross(
pynini.project(str(i) @ digit_to_str, "output")
+ pynini.accep(" ")
+ pynini.project(str(i) @ digit_to_str, "output"),
pynutil.insert("double ") + pynini.project(str(i) @ digit_to_str, "output"),
)
for i in range(10)
]
)
double_digit.invert()
triple_digit = pynini.union(
*[
pynini.cross(
pynini.project(str(i) @ digit_to_str, "output")
+ pynini.accep(" ")
+ pynini.project(str(i) @ digit_to_str, "output")
+ pynini.accep(" ")
+ pynini.project(str(i) @ digit_to_str, "output"),
pynutil.insert("triple ") + pynini.project(str(i) @ digit_to_str, "output"),
)
for i in range(10)
]
)
triple_digit.invert()
# to handle cases like "one twenty three"
two_digit_cardinal = pynini.compose(cardinal.graph_no_exception, NEMO_DIGIT ** 2)
double_digit_to_digit = (
pynini.compose(double_digit, str_to_digit + pynutil.delete(" ") + str_to_digit) | two_digit_cardinal
)
triple_digit_to_digit = pynini.compose(
triple_digit, str_to_digit + delete_space + str_to_digit + delete_space + str_to_digit
)
single_or_double_digit = (pynutil.add_weight(double_digit_to_digit, -0.0001) | str_to_digit).optimize()
single_double_or_triple_digit = (
pynutil.add_weight(triple_digit_to_digit, -0.0001) | single_or_double_digit | delete_space
).optimize()
single_or_double_digit |= (
single_or_double_digit
+ pynini.closure(pynutil.add_weight(pynutil.delete(" ") + single_or_double_digit, 0.0001))
).optimize()
single_double_or_triple_digit |= (
single_double_or_triple_digit
+ pynini.closure(pynutil.add_weight(pynutil.delete(" ") + single_double_or_triple_digit, 0.0001))
).optimize()
number_part = pynini.compose(
single_double_or_triple_digit,
NEMO_DIGIT ** 3 + pynutil.insert("-") + NEMO_DIGIT ** 3 + pynutil.insert("-") + NEMO_DIGIT ** 4,
).optimize()
number_part = pynutil.insert("number_part: \"") + number_part.optimize() + pynutil.insert("\"")
cardinal_option = pynini.compose(single_double_or_triple_digit, NEMO_DIGIT ** (2, 3))
country_code = (
pynutil.insert("country_code: \"")
+ pynini.closure(pynini.cross("plus ", "+"), 0, 1)
+ ((pynini.closure(str_to_digit + pynutil.delete(" "), 0, 2) + str_to_digit) | cardinal_option)
+ pynutil.insert("\"")
)
optional_country_code = pynini.closure(country_code + pynutil.delete(" ") + insert_space, 0, 1).optimize()
graph = optional_country_code + number_part
# credit card number
space_four_digits = insert_space + NEMO_DIGIT ** 4
space_five_digits = space_four_digits + NEMO_DIGIT
space_six_digits = space_five_digits + NEMO_DIGIT
credit_card_graph = pynini.compose(
single_double_or_triple_digit,
NEMO_DIGIT ** 4 + (space_six_digits | (space_four_digits ** 2)) + space_four_digits,
).optimize()
credit_card_graph |= pynini.compose(
single_double_or_triple_digit, NEMO_DIGIT ** 4 + space_six_digits + space_five_digits
).optimize()
graph |= pynutil.insert("number_part: \"") + credit_card_graph.optimize() + pynutil.insert("\"")
# SSN
ssn_graph = pynini.compose(
single_double_or_triple_digit,
NEMO_DIGIT ** 3 + pynutil.insert("-") + NEMO_DIGIT ** 2 + pynutil.insert("-") + NEMO_DIGIT ** 4,
).optimize()
graph |= pynutil.insert("number_part: \"") + ssn_graph.optimize() + pynutil.insert("\"")
# ip
digit_or_double = pynini.closure(str_to_digit + pynutil.delete(" "), 0, 1) + double_digit_to_digit
digit_or_double |= double_digit_to_digit + pynini.closure(pynutil.delete(" ") + str_to_digit, 0, 1)
digit_or_double |= str_to_digit + (pynutil.delete(" ") + str_to_digit) ** (0, 2)
digit_or_double |= cardinal_option
digit_or_double = digit_or_double.optimize()
ip_graph = digit_or_double + (pynini.cross(" dot ", ".") + digit_or_double) ** 3
graph |= (
pynutil.insert("number_part: \"")
+ pynutil.add_weight(ip_graph.optimize(), MIN_NEG_WEIGHT)
+ pynutil.insert("\"")
)
if input_case == INPUT_CASED:
graph = capitalized_input_graph(graph)
# serial graph shouldn't apply TO_LOWER
graph |= (
pynutil.insert("number_part: \"")
+ pynutil.add_weight(get_serial_number(cardinal=cardinal), weight=0.0001)
+ pynutil.insert("\"")
)
final_graph = self.add_tokens(graph)
self.fst = final_graph.optimize()
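# Minimal usage sketch (illustrative only), assuming the data/*.tsv files are on disk:
# compose a spoken digit sequence with the FST and read off the shortest (lowest-weight)
# path; the expected string follows the class docstring.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst

    telephone = TelephoneFst(cardinal=CardinalFst())
    spoken = "one two three one two three five six seven eight"
    print(pynini.shortestpath(spoken @ telephone.fst).string())
    # expected roughly: telephone { number_part: "123-123-5678" }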
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
NEMO_CHAR,
GraphFst,
capitalized_input_graph,
)
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
"""
Finite state transducer for classifying ordinal
e.g. thirteenth -> ordinal { integer: "13" }
Args:
cardinal: CardinalFst
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, cardinal: GraphFst, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="ordinal", kind="classify")
cardinal_graph = cardinal.graph_no_exception
graph_digit = pynini.string_file(get_abs_path("data/ordinals/digit.tsv"))
graph_teens = pynini.string_file(get_abs_path("data/ordinals/teen.tsv"))
graph = pynini.closure(NEMO_CHAR) + pynini.union(
graph_digit, graph_teens, pynini.cross("tieth", "ty"), pynini.cross("th", "")
)
self.graph = pynini.compose(graph, cardinal_graph)
if input_case == INPUT_CASED:
self.graph = capitalized_input_graph(self.graph)
final_graph = pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
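# Usage sketch (illustrative, assuming the ordinal/cardinal .tsv data files are
# available): the tagger is applied by composition with a spoken-form string.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst

    ordinal = OrdinalFst(cardinal=CardinalFst())
    print(pynini.shortestpath("thirteenth" @ ordinal.fst).string())
    # expected (per docstring): ordinal { integer: "13" }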
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
GraphFst,
convert_space,
string_map_cased,
)
from nemo_text_processing.text_normalization.en.utils import load_labels
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
"""
Finite state transducer for classifying whitelisted tokens
e.g. misses -> tokens { name: "mrs." }
This class has highest priority among all classifier grammars.
Whitelisted tokens are defined and loaded from "data/whitelist.tsv" (unless input_file specified).
Args:
input_file: path to a file with whitelist replacements (each line of the file: written_form\tspoken_form\n),
e.g. nemo_text_processing/inverse_text_normalization/en/data/whitelist.tsv
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, input_case: str = INPUT_LOWER_CASED, input_file: str = None):
super().__init__(name="whitelist", kind="classify")
if input_file is None:
input_file = get_abs_path("data/whitelist.tsv")
if not os.path.exists(input_file):
raise ValueError(f"Whitelist file {input_file} not found")
whitelist = string_map_cased(input_file, input_case)
graph = pynutil.insert("name: \"") + convert_space(whitelist) + pynutil.insert("\"")
self.fst = graph.optimize()
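# Illustrative sketch (not part of a public API): with the default data/whitelist.tsv
# in place, a whitelisted spoken form maps directly to its written form; the expected
# output is taken from the class docstring.
if __name__ == "__main__":
    whitelist_fst = WhiteListFst().fst
    print(pynini.shortestpath("misses" @ whitelist_fst).string())
    # expected (per docstring): name: "mrs."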
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.en.taggers.date import DateFst
from nemo_text_processing.inverse_text_normalization.en.taggers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.en.taggers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.en.taggers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.en.taggers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.en.taggers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.inverse_text_normalization.en.taggers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.en.taggers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.en.taggers.whitelist import WhiteListFst
from nemo_text_processing.inverse_text_normalization.en.taggers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_LOWER_CASED,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
"""
    Final class that composes all other classification grammars. This class can process an entire sentence that is lower cased.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details to deployment at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str = INPUT_LOWER_CASED,
cache_dir: str = None,
overwrite_cache: bool = False,
whitelist: str = None,
):
super().__init__(name="tokenize_and_classify", kind="classify")
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
far_file = os.path.join(cache_dir, f"en_itn_{input_case}.far")
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
logging.info(f"ClassifyFst.fst was restored from {far_file}.")
else:
logging.info(f"Creating ClassifyFst grammars.")
cardinal = CardinalFst(input_case=input_case)
cardinal_graph = cardinal.fst
ordinal = OrdinalFst(cardinal, input_case=input_case)
ordinal_graph = ordinal.fst
decimal = DecimalFst(cardinal, input_case=input_case)
decimal_graph = decimal.fst
measure_graph = MeasureFst(cardinal=cardinal, decimal=decimal, input_case=input_case).fst
date_graph = DateFst(ordinal=ordinal, input_case=input_case).fst
word_graph = WordFst().fst
time_graph = TimeFst(input_case=input_case).fst
money_graph = MoneyFst(cardinal=cardinal, decimal=decimal, input_case=input_case).fst
whitelist_graph = WhiteListFst(input_file=whitelist, input_case=input_case).fst
punct_graph = PunctuationFst().fst
electronic_graph = ElectronicFst(input_case=input_case).fst
telephone_graph = TelephoneFst(cardinal, input_case=input_case).fst
classify = (
pynutil.add_weight(whitelist_graph, 1.01)
| pynutil.add_weight(time_graph, 1.1)
| pynutil.add_weight(date_graph, 1.09)
| pynutil.add_weight(decimal_graph, 1.1)
| pynutil.add_weight(measure_graph, 1.1)
| pynutil.add_weight(cardinal_graph, 1.1)
| pynutil.add_weight(ordinal_graph, 1.09)
| pynutil.add_weight(money_graph, 1.1)
| pynutil.add_weight(telephone_graph, 1.1)
| pynutil.add_weight(electronic_graph, 1.1)
| pynutil.add_weight(word_graph, 100)
)
punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=1.1) + pynutil.insert(" }")
token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
)
graph = token_plus_punct + pynini.closure(delete_extra_space + token_plus_punct)
graph = delete_space + graph + delete_space
self.fst = graph.optimize()
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from pynini.lib import pynutil
class PunctuationFst(GraphFst):
"""
Finite state transducer for classifying punctuation
e.g. a, -> tokens { name: "a" } tokens { name: "," }
"""
def __init__(self):
super().__init__(name="punctuation", kind="classify")
s = "!#$%&\'()*+,-./:;<=>?@^_`{|}~"
punct = pynini.union(*s)
graph = pynutil.insert("name: \"") + punct + pynutil.insert("\"")
self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/punctuation.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
MIN_NEG_WEIGHT,
MINUS,
NEMO_DIGIT,
NEMO_SIGMA,
TO_LOWER,
GraphFst,
capitalized_input_graph,
delete_extra_space,
delete_space,
)
from nemo_text_processing.text_normalization.en.utils import load_labels
from pynini.lib import pynutil
def get_quantity(
decimal: 'pynini.FstLike', cardinal_up_to_hundred: 'pynini.FstLike', input_case: str = INPUT_LOWER_CASED
) -> 'pynini.FstLike':
"""
Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
e.g. one million -> integer_part: "1" quantity: "million"
e.g. one point five million -> integer_part: "1" fractional_part: "5" quantity: "million"
Args:
decimal: decimal FST
cardinal_up_to_hundred: cardinal FST
input_case: accepting either "lower_cased" or "cased" input.
"""
numbers = cardinal_up_to_hundred @ (
pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT)
)
suffix_labels = load_labels(get_abs_path("data/numbers/thousands.tsv"))
suffix_labels = [x[0] for x in suffix_labels if x[0] != "thousand"]
suffix = pynini.union(*suffix_labels).optimize()
if input_case == INPUT_CASED:
suffix |= pynini.union(*[x[0].upper() + x[1:] for x in suffix_labels]).optimize()
res = (
pynutil.insert("integer_part: \"")
+ numbers
+ pynutil.insert("\"")
+ delete_extra_space
+ pynutil.insert("quantity: \"")
+ suffix
+ pynutil.insert("\"")
)
res |= decimal + delete_extra_space + pynutil.insert("quantity: \"") + (suffix | "thousand") + pynutil.insert("\"")
if input_case == INPUT_CASED:
res |= decimal + delete_extra_space + pynutil.insert("quantity: \"") + "Thousand" + pynutil.insert("\"")
return res
class DecimalFst(GraphFst):
"""
Finite state transducer for classifying decimal
e.g. minus twelve point five o o six billion -> decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" }
e.g. one billion -> decimal { integer_part: "1" quantity: "billion" }
Args:
cardinal: CardinalFst
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, cardinal: GraphFst, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="decimal", kind="classify")
cardinal_graph = cardinal.graph_no_exception
graph_decimal = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
graph_decimal |= pynini.string_file(get_abs_path("data/numbers/zero.tsv")) | pynini.cross("o", "0")
graph_decimal = pynini.closure(graph_decimal + delete_space) + graph_decimal
self.graph = graph_decimal
point = pynutil.delete("point")
optional_graph_negative = pynini.closure(
pynutil.insert("negative: ") + pynini.cross(MINUS, "\"true\"") + delete_extra_space, 0, 1,
)
graph_fractional = pynutil.insert("fractional_part: \"") + graph_decimal + pynutil.insert("\"")
graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
final_graph_wo_sign = (
pynini.closure(graph_integer + delete_extra_space, 0, 1) + point + delete_extra_space + graph_fractional
)
final_graph = optional_graph_negative + final_graph_wo_sign
self.final_graph_wo_negative = final_graph_wo_sign | get_quantity(
final_graph_wo_sign, cardinal.graph_hundred_component_at_least_one_none_zero_digit, input_case=input_case
)
# accept semiotic spans that start with a capital letter
self.final_graph_wo_negative |= pynutil.add_weight(
pynini.compose(TO_LOWER + NEMO_SIGMA, self.final_graph_wo_negative).optimize(), MIN_NEG_WEIGHT
)
quantity_graph = get_quantity(
final_graph_wo_sign, cardinal.graph_hundred_component_at_least_one_none_zero_digit, input_case=input_case
)
final_graph |= optional_graph_negative + quantity_graph
if input_case == INPUT_CASED:
final_graph = capitalized_input_graph(final_graph)
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
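# Usage sketch (illustrative only, assuming the numbers/*.tsv data files exist):
# the decimal tagger is driven by a CardinalFst instance and applied by composition.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst

    decimal = DecimalFst(cardinal=CardinalFst())
    print(pynini.shortestpath("one point five million" @ decimal.fst).string())
    # expected (per get_quantity docstring): decimal { integer_part: "1" fractional_part: "5" quantity: "million" }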
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_SIGMA,
GraphFst,
capitalized_input_graph,
convert_space,
delete_extra_space,
delete_space,
get_singulars,
insert_space,
)
from pynini.lib import pynutil
class MoneyFst(GraphFst):
"""
Finite state transducer for classifying money
    e.g. twelve dollars and five cents -> money { integer_part: "12" currency: "$" fractional_part: "05" }
Args:
cardinal: CardinalFst
decimal: DecimalFst
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="money", kind="classify")
# quantity, integer_part, fractional_part, currency
cardinal_graph = cardinal.graph_no_exception
# add support for missing hundred (only for 3 digit numbers)
# "one fifty" -> "one hundred fifty"
with_hundred = pynini.compose(
pynini.closure(NEMO_NOT_SPACE) + pynini.accep(" ") + pynutil.insert("hundred ") + NEMO_SIGMA,
pynini.compose(cardinal_graph, NEMO_DIGIT ** 3),
)
cardinal_graph |= with_hundred
graph_decimal_final = decimal.final_graph_wo_negative
unit = pynini.string_file(get_abs_path("data/currency.tsv"))
unit_singular = pynini.invert(unit)
if input_case == INPUT_CASED:
unit_singular = capitalized_input_graph(unit_singular)
unit_plural = get_singulars(unit_singular)
graph_unit_singular = pynutil.insert("currency: \"") + convert_space(unit_singular) + pynutil.insert("\"")
graph_unit_plural = (
pynutil.insert("currency: \"") + convert_space(unit_plural | unit_singular) + pynutil.insert("\"")
)
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
one_graph = pynini.accep("one").optimize()
if input_case == INPUT_CASED:
one_graph |= pynini.accep("One").optimize()
cent_graph = pynutil.delete("cent")
cents_graph = pynutil.delete("cents")
if input_case == INPUT_CASED:
cent_graph |= pynutil.delete("Cent")
cents_graph |= pynutil.delete("Cents")
# twelve dollars (and) fifty cents, zero cents
cents_standalone = (
pynutil.insert("fractional_part: \"")
+ pynini.union(
pynutil.add_weight(((NEMO_SIGMA - one_graph) @ cardinal_graph), -0.7)
@ add_leading_zero_to_double_digit
+ delete_space
+ cents_graph,
pynini.cross(one_graph, "01") + delete_space + cent_graph,
)
+ pynutil.insert("\"")
)
optional_cents_standalone = pynini.closure(
delete_space
+ pynini.closure(pynutil.delete("and") + delete_space, 0, 1)
+ insert_space
+ cents_standalone,
0,
1,
)
# twelve dollars fifty, only after integer
optional_cents_suffix = pynini.closure(
delete_extra_space
+ pynutil.insert("fractional_part: \"")
+ pynutil.add_weight(cardinal_graph @ add_leading_zero_to_double_digit, -0.7)
+ pynutil.insert("\""),
0,
1,
)
graph_integer = (
pynutil.insert("integer_part: \"")
+ ((NEMO_SIGMA - one_graph) @ cardinal_graph)
+ pynutil.insert("\"")
+ delete_extra_space
+ graph_unit_plural
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_integer |= (
pynutil.insert("integer_part: \"")
+ pynini.cross(one_graph, "1")
+ pynutil.insert("\"")
+ delete_extra_space
+ graph_unit_singular
+ (optional_cents_standalone | optional_cents_suffix)
)
graph_decimal = graph_decimal_final + delete_extra_space + graph_unit_plural
graph_decimal |= pynutil.insert("currency: \"$\" integer_part: \"0\" ") + cents_standalone
final_graph = graph_integer | graph_decimal
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
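# Minimal sketch of applying the money tagger (illustrative; assumes the currency and
# number .tsv data files are present). The expected output follows the class docstring
# and may vary in field order with grammar weights.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst
    from nemo_text_processing.inverse_text_normalization.en.taggers.decimal import DecimalFst

    cardinal = CardinalFst()
    money = MoneyFst(cardinal=cardinal, decimal=DecimalFst(cardinal))
    print(pynini.shortestpath("twelve dollars and five cents" @ money.fst).string())
    # expected roughly: money { integer_part: "12" currency: "$" fractional_part: "05" }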
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path, num_to_word
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
MINUS,
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
capitalized_input_graph,
delete_space,
)
from pynini.lib import pynutil
class CardinalFst(GraphFst):
"""
Finite state transducer for classifying cardinals
    e.g. minus twenty three -> cardinal { negative: "-" integer: "23" }
Numbers below thirteen are not converted.
Args:
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="cardinal", kind="classify")
self.input_case = input_case
graph_zero = pynini.string_file(get_abs_path("data/numbers/zero.tsv"))
graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv"))
graph_ties = pynini.string_file(get_abs_path("data/numbers/ties.tsv"))
graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv"))
self.graph_two_digit = graph_teen | ((graph_ties) + delete_space + (graph_digit | pynutil.insert("0")))
graph_hundred = pynini.cross("hundred", "")
graph_hundred_component = pynini.union(graph_digit + delete_space + graph_hundred, pynutil.insert("0"))
graph_hundred_component += delete_space
graph_hundred_component += pynini.union(
graph_teen | pynutil.insert("00"),
(graph_ties | pynutil.insert("0")) + delete_space + (graph_digit | pynutil.insert("0")),
)
graph_hundred_component_at_least_one_none_zero_digit = graph_hundred_component @ (
pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
)
self.graph_hundred_component_at_least_one_none_zero_digit = (
graph_hundred_component_at_least_one_none_zero_digit
)
# Transducer for eleven hundred -> 1100 or twenty one hundred eleven -> 2111
graph_hundred_as_thousand = pynini.union(graph_teen, graph_ties + delete_space + graph_digit)
graph_hundred_as_thousand += delete_space + graph_hundred
graph_hundred_as_thousand += delete_space + pynini.union(
graph_teen | pynutil.insert("00"),
(graph_ties | pynutil.insert("0")) + delete_space + (graph_digit | pynutil.insert("0")),
)
graph_hundreds = graph_hundred_component | graph_hundred_as_thousand
graph_ties_component = pynini.union(
graph_teen | pynutil.insert("00"),
(graph_ties | pynutil.insert("0")) + delete_space + (graph_digit | pynutil.insert("0")),
)
graph_ties_component_at_least_one_none_zero_digit = graph_ties_component @ (
pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT)
)
self.graph_ties_component_at_least_one_none_zero_digit = graph_ties_component_at_least_one_none_zero_digit
# %%% International numeric format
graph_thousands = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("thousand"),
pynutil.insert("000", weight=0.1),
)
graph_million = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("million"),
pynutil.insert("000", weight=0.1),
)
graph_billion = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("billion"),
pynutil.insert("000", weight=0.1),
)
graph_trillion = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("trillion"),
pynutil.insert("000", weight=0.1),
)
graph_quadrillion = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("quadrillion"),
pynutil.insert("000", weight=0.1),
)
graph_quintillion = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("quintillion"),
pynutil.insert("000", weight=0.1),
)
graph_sextillion = pynini.union(
graph_hundred_component_at_least_one_none_zero_digit + delete_space + self.delete_word("sextillion"),
pynutil.insert("000", weight=0.1),
)
# %%%
graph_int = (
graph_sextillion
+ delete_space
+ graph_quintillion
+ delete_space
+ graph_quadrillion
+ delete_space
+ graph_trillion
+ delete_space
+ graph_billion
+ delete_space
+ graph_million
+ delete_space
+ graph_thousands
)
# %% Indian numeric format simple https://en.wikipedia.org/wiki/Indian_numbering_system
# This only covers "standard format".
# Conventional format like thousand crores/lakh crores is yet to be implemented
graph_in_thousands = pynini.union(
graph_ties_component_at_least_one_none_zero_digit + delete_space + pynutil.delete("thousand"),
pynutil.insert("00", weight=0.1),
)
graph_in_lakhs = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("lakh") | pynutil.delete("lakhs")),
pynutil.insert("00", weight=0.1),
)
graph_in_crores = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("crore") | pynutil.delete("crores")),
pynutil.insert("00", weight=0.1),
)
graph_in_arabs = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("arab") | pynutil.delete("arabs")),
pynutil.insert("00", weight=0.1),
)
graph_in_kharabs = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("kharab") | pynutil.delete("kharabs")),
pynutil.insert("00", weight=0.1),
)
graph_in_nils = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("nil") | pynutil.delete("nils")),
pynutil.insert("00", weight=0.1),
)
graph_in_padmas = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("padma") | pynutil.delete("padmas")),
pynutil.insert("00", weight=0.1),
)
graph_in_shankhs = pynini.union(
graph_ties_component_at_least_one_none_zero_digit
+ delete_space
+ (pynutil.delete("shankh") | pynutil.delete("shankhs")),
pynutil.insert("00", weight=0.1),
)
graph_ind = (
graph_in_shankhs
+ delete_space
+ graph_in_padmas
+ delete_space
+ graph_in_nils
+ delete_space
+ graph_in_kharabs
+ delete_space
+ graph_in_arabs
+ delete_space
+ graph_in_crores
+ delete_space
+ graph_in_lakhs
+ delete_space
+ graph_in_thousands
)
graph = pynini.union((graph_int | graph_ind) + delete_space + graph_hundreds, graph_zero,)
graph = graph @ pynini.union(
pynutil.delete(pynini.closure("0")) + pynini.difference(NEMO_DIGIT, "0") + pynini.closure(NEMO_DIGIT), "0"
)
labels_exception = [num_to_word(x) for x in range(0, 13)]
if input_case == INPUT_CASED:
labels_exception += [x.capitalize() for x in labels_exception]
graph_exception = pynini.union(*labels_exception).optimize()
graph = (
pynini.cdrewrite(pynutil.delete("and"), NEMO_SPACE, NEMO_SPACE, NEMO_SIGMA)
@ (NEMO_ALPHA + NEMO_SIGMA)
@ graph
).optimize()
if input_case == INPUT_CASED:
graph = capitalized_input_graph(graph)
self.graph_no_exception = graph
self.graph = (pynini.project(graph, "input") - graph_exception.arcsort()) @ graph
optional_minus_graph = pynini.closure(
pynutil.insert("negative: ") + pynini.cross(MINUS, "\"-\"") + NEMO_SPACE, 0, 1
)
final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
def delete_word(self, word: str):
""" Capitalizes word for `cased` input"""
delete_graph = pynutil.delete(word).optimize()
if self.input_case == INPUT_CASED:
if len(word) > 0:
delete_graph |= pynutil.delete(word[0].upper() + word[1:])
return delete_graph.optimize()
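# Usage sketch (illustrative only, assuming the numbers/*.tsv data files are on disk):
# composing a spoken string with the classifier FST and taking the lowest-weight path
# yields the tagged written form.
if __name__ == "__main__":
    cardinal_fst = CardinalFst().fst
    print(pynini.shortestpath("minus twenty three" @ cardinal_fst).string())
    # expected (per docstring): cardinal { negative: "-" integer: "23" }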
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path, get_various_formats
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
MIN_POS_WEIGHT,
NEMO_ALPHA,
GraphFst,
capitalized_input_graph,
insert_space,
)
from nemo_text_processing.text_normalization.en.utils import load_labels
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for classifying electronic: as URLs, email addresses, etc.
e.g. c d f one at a b c dot e d u -> tokens { electronic { username: "cdf1" domain: "abc.edu" } }
Args:
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, input_case: str = INPUT_LOWER_CASED):
super().__init__(name="electronic", kind="classify")
delete_extra_space = pynutil.delete(" ")
num = pynini.string_file(get_abs_path("data/numbers/digit.tsv")) | pynini.string_file(
get_abs_path("data/numbers/zero.tsv")
)
if input_case == INPUT_CASED:
num = capitalized_input_graph(num)
alpha_num = (NEMO_ALPHA | num).optimize()
url_symbols = pynini.string_file(get_abs_path("data/electronic/url_symbols.tsv")).invert()
accepted_username = alpha_num | url_symbols
process_dot = pynini.cross("dot", ".")
alternative_dot = (
pynini.closure(delete_extra_space, 0, 1) + pynini.accep(".") + pynini.closure(delete_extra_space, 0, 1)
)
username = (alpha_num + pynini.closure(delete_extra_space + accepted_username)) | pynutil.add_weight(
pynini.closure(NEMO_ALPHA, 1), weight=0.0001
)
username = pynutil.insert("username: \"") + username + pynutil.insert("\"")
single_alphanum = pynini.closure(alpha_num + delete_extra_space) + alpha_num
server = (
single_alphanum
| pynini.string_file(get_abs_path("data/electronic/server_name.tsv"))
| pynini.closure(NEMO_ALPHA, 2)
)
if input_case == INPUT_CASED:
domain = []
# get domain formats
for d in load_labels(get_abs_path("data/electronic/domain.tsv")):
domain.extend(get_various_formats(d[0]))
domain = pynini.string_map(domain).optimize()
else:
domain = pynini.string_file(get_abs_path("data/electronic/domain.tsv"))
domain = single_alphanum | domain | pynini.closure(NEMO_ALPHA, 2)
domain_graph = (
pynutil.insert("domain: \"")
+ server
+ ((delete_extra_space + process_dot + delete_extra_space) | alternative_dot)
+ domain
+ pynutil.insert("\"")
)
graph = username + delete_extra_space + pynutil.delete("at") + insert_space + delete_extra_space + domain_graph
############# url ###
if input_case == INPUT_CASED:
protocol_end = pynini.cross(pynini.union(*get_various_formats("www")), "www")
protocol_start = pynini.cross(pynini.union(*get_various_formats("http")), "http") | pynini.cross(
pynini.union(*get_various_formats("https")), "https"
)
else:
protocol_end = pynini.cross(pynini.union("w w w", "www"), "www")
protocol_start = pynini.cross("h t t p", "http") | pynini.cross("h t t p s", "https")
protocol_start += pynini.cross(" colon slash slash ", "://")
# .com,
ending = (
delete_extra_space
+ url_symbols
+ delete_extra_space
+ (domain | pynini.closure(accepted_username + delete_extra_space,) + accepted_username)
)
protocol_default = (
(
(pynini.closure(delete_extra_space + accepted_username, 1) | server)
| pynutil.add_weight(pynini.closure(NEMO_ALPHA, 1), weight=0.0001)
)
+ pynini.closure(ending, 1)
).optimize()
protocol = (
pynini.closure(protocol_start, 0, 1) + protocol_end + delete_extra_space + process_dot + protocol_default
).optimize()
if input_case == INPUT_CASED:
protocol |= (
pynini.closure(protocol_start, 0, 1) + protocol_end + alternative_dot + protocol_default
).optimize()
protocol |= pynini.closure(protocol_end + delete_extra_space + process_dot, 0, 1) + protocol_default
protocol = pynutil.insert("protocol: \"") + protocol.optimize() + pynutil.insert("\"")
graph |= protocol
if input_case == INPUT_CASED:
graph = capitalized_input_graph(graph, capitalized_graph_weight=MIN_POS_WEIGHT)
final_graph = self.add_tokens(graph)
self.fst = final_graph.optimize()
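# Illustrative usage sketch (assumes the electronic/*.tsv data files exist); the spoken
# example and the expected fields follow the class docstring.
if __name__ == "__main__":
    electronic_fst = ElectronicFst().fst
    print(pynini.shortestpath("c d f one at a b c dot e d u" @ electronic_fst).string())
    # expected roughly: electronic { username: "cdf1" domain: "abc.edu" }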
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.utils import get_abs_path
from nemo_text_processing.text_normalization.en.graph_utils import (
INPUT_CASED,
INPUT_LOWER_CASED,
NEMO_ALPHA,
NEMO_DIGIT,
GraphFst,
capitalized_input_graph,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
graph_teen = pynini.string_file(get_abs_path("data/numbers/teen.tsv")).optimize()
graph_digit = pynini.string_file(get_abs_path("data/numbers/digit.tsv")).optimize()
ties_graph = pynini.string_file(get_abs_path("data/numbers/ties.tsv")).optimize()
def _get_month_graph(input_case: str = INPUT_LOWER_CASED):
"""
Transducer for month, e.g. march -> march
"""
month_graph = pynini.string_file(get_abs_path("data/months.tsv"))
if input_case == INPUT_CASED:
month_graph |= pynini.string_file(get_abs_path("data/months_cased.tsv"))
return month_graph
def _get_ties_graph(input_case: str):
"""
    Transducer for 20-99, e.g.
    twenty three -> 23
"""
graph = ties_graph + (delete_space + graph_digit | pynutil.insert("0"))
if input_case == INPUT_CASED:
graph = capitalized_input_graph(graph)
return graph
def _get_range_graph(input_case: str):
"""
Transducer for decades (1**0s, 2**0s), centuries (2*00s, 1*00s), millennia (2000s)
"""
graph_ties = _get_ties_graph(input_case=input_case)
graph = (graph_ties | graph_teen) + delete_space + pynini.cross("hundreds", "00s")
graph |= pynini.cross("two", "2") + delete_space + pynini.cross("thousands", "000s")
graph |= (
(graph_ties | graph_teen)
+ delete_space
+ (pynini.closure(NEMO_ALPHA, 1) + (pynini.cross("ies", "y") | pynutil.delete("s")))
@ (graph_ties | pynini.cross("ten", "10"))
+ pynutil.insert("s")
)
graph @= pynini.union("1", "2") + NEMO_DIGIT + NEMO_DIGIT + NEMO_DIGIT + "s"
graph = capitalized_input_graph(graph)
return graph
def _get_year_graph(input_case: str):
"""
Transducer for year, e.g. twenty twenty -> 2020
"""
def _get_digits_graph():
zero = pynini.cross((pynini.accep("oh") | pynini.accep("o")), "0")
graph = zero + delete_space + graph_digit
graph.optimize()
if input_case == INPUT_CASED:
graph = capitalized_input_graph(graph)
return graph
def _get_thousands_graph():
graph_ties = _get_ties_graph(input_case)
graph_hundred_component = (graph_digit + delete_space + pynutil.delete("hundred")) | pynutil.insert("0")
optional_end = pynini.closure(pynutil.delete("and "), 0, 1)
graph = (
graph_digit
+ delete_space
+ pynutil.delete("thousand")
+ delete_space
+ graph_hundred_component
+ delete_space
+ (graph_teen | graph_ties | (optional_end + pynutil.insert("0") + graph_digit))
)
if input_case == INPUT_CASED:
graph = capitalized_input_graph(graph)
return graph
graph_ties = _get_ties_graph(input_case=input_case)
graph_digits = _get_digits_graph()
graph_thousands = _get_thousands_graph()
graph_ad_bc = delete_space + pynini.string_file(get_abs_path("data/year_suffix.tsv")).invert()
year_graph = (
# 20 19, 40 12, 2012 - assuming no limit on the year
(graph_teen + delete_space + (graph_ties | graph_digits | graph_teen))
| (graph_ties + delete_space + (graph_ties | graph_digits | graph_teen))
| graph_thousands
| ((graph_digit + delete_space + (graph_ties | graph_digits | graph_teen)) + graph_ad_bc)
| ((graph_digit | graph_teen | graph_digits | graph_ties | graph_thousands) + delete_space + graph_ad_bc)
| ((graph_ties + delete_space + (graph_ties | graph_digits | graph_teen)) + delete_space + graph_ad_bc)
| (
(
(graph_teen | graph_digit)
+ delete_space
+ pynutil.delete("hundred")
+ pynutil.insert("0")
+ (graph_digit | pynutil.insert("0"))
)
+ delete_space
+ graph_ad_bc
)
)
year_graph.optimize()
if input_case == INPUT_CASED:
year_graph = capitalized_input_graph(year_graph)
return year_graph
class DateFst(GraphFst):
"""
Finite state transducer for classifying date,
e.g. january fifth twenty twelve -> date { month: "january" day: "5" year: "2012" preserve_order: true }
e.g. the fifth of january twenty twelve -> date { day: "5" month: "january" year: "2012" preserve_order: true }
    e.g. twenty twenty -> date { year: "2020" preserve_order: true }
Args:
ordinal: OrdinalFst
input_case: accepting either "lower_cased" or "cased" input.
"""
def __init__(self, ordinal: GraphFst, input_case: str):
super().__init__(name="date", kind="classify")
ordinal_graph = ordinal.graph
year_graph = _get_year_graph(input_case=input_case)
YEAR_WEIGHT = 0.001
year_graph = pynutil.add_weight(year_graph, YEAR_WEIGHT)
month_graph = _get_month_graph(input_case=input_case)
month_graph = pynutil.insert("month: \"") + month_graph + pynutil.insert("\"")
day_graph = pynutil.insert("day: \"") + pynutil.add_weight(ordinal_graph, -0.7) + pynutil.insert("\"")
graph_year = (
delete_extra_space
+ pynutil.insert("year: \"")
+ pynutil.add_weight(year_graph, -YEAR_WEIGHT)
+ pynutil.insert("\"")
)
optional_graph_year = pynini.closure(graph_year, 0, 1,)
graph_mdy = month_graph + (
(delete_extra_space + day_graph) | graph_year | (delete_extra_space + day_graph + graph_year)
)
the_graph = pynutil.delete("the")
if input_case == INPUT_CASED:
the_graph |= pynutil.delete("The").optimize()
graph_dmy = (
the_graph
+ delete_space
+ day_graph
+ delete_space
+ pynutil.delete("of")
+ delete_extra_space
+ month_graph
+ optional_graph_year
)
financial_period_graph = pynini.string_file(get_abs_path("data/date_period.tsv")).invert()
period_fy = (
pynutil.insert("text: \"")
+ financial_period_graph
+ (pynini.cross(" ", "") | pynini.cross(" of ", ""))
+ pynutil.insert("\"")
)
graph_year = (
pynutil.insert("year: \"") + (year_graph | _get_range_graph(input_case=input_case)) + pynutil.insert("\"")
)
graph_fy = period_fy + pynutil.insert(" ") + graph_year
final_graph = graph_mdy | graph_dmy | graph_year | graph_fy
final_graph += pynutil.insert(" preserve_order: true")
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
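# Usage sketch (illustrative only, assuming the months/ordinal data files are available):
# DateFst is built on top of OrdinalFst, which in turn needs a CardinalFst.
if __name__ == "__main__":
    from nemo_text_processing.inverse_text_normalization.en.taggers.cardinal import CardinalFst
    from nemo_text_processing.inverse_text_normalization.en.taggers.ordinal import OrdinalFst

    date = DateFst(ordinal=OrdinalFst(cardinal=CardinalFst()), input_case=INPUT_LOWER_CASED)
    print(pynini.shortestpath("january fifth twenty twelve" @ date.fst).string())
    # expected (per docstring): date { month: "january" day: "5" year: "2012" preserve_order: true }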
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
"""
    Finite state transducer for classifying plain tokens that do not belong to any special class. This can be considered the default class.
e.g. sleep -> tokens { name: "sleep" }
"""
def __init__(self):
super().__init__(name="word", kind="classify")
word = pynutil.insert("name: \"") + pynini.closure(NEMO_NOT_SPACE, 1) + pynutil.insert("\"")
self.fst = word.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/taggers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for verbalizing time, e.g.
time { hours: "12" minutes: "30" } -> 12:30
time { hours: "1" minutes: "12" } -> 01:12
time { hours: "2" suffix: "a.m." } -> 02:00 a.m.
"""
def __init__(self):
super().__init__(name="time", kind="verbalize")
add_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (pynutil.insert("0") + NEMO_DIGIT)
hour = (
pynutil.delete("hours:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_DIGIT, 1)
+ pynutil.delete("\"")
)
minute = (
pynutil.delete("minutes:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_DIGIT, 1)
+ pynutil.delete("\"")
)
suffix = (
delete_space
+ insert_space
+ pynutil.delete("suffix:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
optional_suffix = pynini.closure(suffix, 0, 1)
zone = (
delete_space
+ insert_space
+ pynutil.delete("zone:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
optional_zone = pynini.closure(zone, 0, 1)
graph = (
hour @ add_leading_zero_to_double_digit
+ delete_space
+ pynutil.insert(":")
+ (minute @ add_leading_zero_to_double_digit)
+ optional_suffix
+ optional_zone
)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
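# Illustrative sketch of the verbalization direction (no data files needed): the
# serialized tagger output string is composed with the verbalizer FST.
if __name__ == "__main__":
    time_fst = TimeFst().fst
    tagged = 'time { hours: "12" minutes: "30" }'
    print(pynini.shortestpath(tagged @ time_fst).string())
    # expected (per docstring): 12:30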
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
"""
Finite state transducer for verbalizing measure, e.g.
measure { negative: "true" cardinal { integer: "12" } units: "kg" } -> -12 kg
Args:
decimal: DecimalFst
cardinal: CardinalFst
"""
def __init__(self, decimal: GraphFst, cardinal: GraphFst):
super().__init__(name="measure", kind="verbalize")
optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-"), 0, 1)
unit = (
pynutil.delete("units:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
+ delete_space
)
graph_decimal = (
pynutil.delete("decimal {")
+ delete_space
+ optional_sign
+ delete_space
+ decimal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph_cardinal = (
pynutil.delete("cardinal {")
+ delete_space
+ optional_sign
+ delete_space
+ cardinal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph = (graph_cardinal | graph_decimal) + delete_space + pynutil.insert(" ") + unit
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/measure.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
class FractionFst(GraphFst):
"""
Finite state transducer for verbalizing fraction,
"""
def __init__(self):
super().__init__(name="fraction", kind="verbalize")
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
"""
Finite state transducer for verbalizing telephone, e.g.
telephone { number_part: "123-123-5678" }
-> 123-123-5678
"""
def __init__(self):
super().__init__(name="telephone", kind="verbalize")
number_part = pynutil.delete("number_part: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
optional_country_code = pynini.closure(
pynutil.delete("country_code: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
+ pynini.accep(" "),
0,
1,
)
delete_tokens = self.delete_tokens(optional_country_code + number_part)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
"""
Finite state transducer for verbalizing ordinal, e.g.
ordinal { integer: "13" } -> 13th
"""
def __init__(self):
super().__init__(name="ordinal", kind="verbalize")
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
convert_eleven = pynini.cross("11", "11th")
convert_twelve = pynini.cross("12", "12th")
convert_thirteen = pynini.cross("13", "13th")
convert_one = pynini.cross("1", "1st")
convert_two = pynini.cross("2", "2nd")
convert_three = pynini.cross("3", "3rd")
convert_rest = pynutil.insert("th", weight=0.01)
suffix = pynini.cdrewrite(
convert_eleven
| convert_twelve
| convert_thirteen
| convert_one
| convert_two
| convert_three
| convert_rest,
"",
"[EOS]",
NEMO_SIGMA,
)
graph = graph @ suffix
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
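# Illustrative usage (editor's sketch, not part of the original module). The suffix
# rewrite selects "st"/"nd"/"rd"/"th" from the final digits; the serialized inputs below
# follow the docstring format and their exact spacing is an assumption.
#
#     from pynini.lib import rewrite
#     rewrite.top_rewrite('ordinal { integer: "13" }', OrdinalFst().fst)  # expected: '13th'
#     rewrite.top_rewrite('ordinal { integer: "22" }', OrdinalFst().fst)  # expected: '22nd'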
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.inverse_text_normalization.en.verbalizers.cardinal import CardinalFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.date import DateFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.decimal import DecimalFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.electronic import ElectronicFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.measure import MeasureFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.money import MoneyFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.telephone import TelephoneFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.time import TimeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details to deployment at NeMo/tools/text_processing_deployment.
"""
def __init__(self):
super().__init__(name="verbalize", kind="verbalize")
cardinal = CardinalFst()
cardinal_graph = cardinal.fst
ordinal_graph = OrdinalFst().fst
decimal = DecimalFst()
decimal_graph = decimal.fst
measure_graph = MeasureFst(decimal=decimal, cardinal=cardinal).fst
money_graph = MoneyFst(decimal=decimal).fst
time_graph = TimeFst().fst
date_graph = DateFst().fst
whitelist_graph = WhiteListFst().fst
telephone_graph = TelephoneFst().fst
electronic_graph = ElectronicFst().fst
graph = (
time_graph
| date_graph
| money_graph
| measure_graph
| ordinal_graph
| decimal_graph
| cardinal_graph
| whitelist_graph
| telephone_graph
| electronic_graph
)
self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
"""
Finite state transducer for verbalizing whitelist
e.g. tokens { name: "mrs." } -> mrs.
"""
def __init__(self):
super().__init__(name="whitelist", kind="verbalize")
graph = (
pynutil.delete("name:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
graph = graph @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
self.fst = graph.optimize()
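# Illustrative usage (editor's sketch, not part of the original module). Unlike most
# verbalizers, this grammar is not wrapped with delete_tokens, so it consumes the bare
# name field directly; the exact spacing below is an assumption.
#
#     from pynini.lib import rewrite
#     rewrite.top_rewrite('name: "mrs."', WhiteListFst().fst)  # expected: 'mrs.'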
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.inverse_text_normalization.en.verbalizers.verbalize import VerbalizeFst
from nemo_text_processing.inverse_text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
"""
Finite state transducer that verbalizes an entire sentence, e.g.
tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" } -> its 12:30 now
"""
def __init__(self):
super().__init__(name="verbalize_final", kind="verbalize")
verbalize = VerbalizeFst().fst
word = WordFst().fst
types = verbalize | word
graph = (
pynutil.delete("tokens")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ types
+ delete_space
+ pynutil.delete("}")
)
graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space
self.fst = graph
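# Illustrative usage (editor's sketch, not part of the original module). This grammar
# consumes the full tokenized string produced by the taggers; the example mirrors the
# class docstring and its exact spacing is an assumption.
#
#     from pynini.lib import rewrite
#     tagged = 'tokens { name: "its" } tokens { time { hours: "12" minutes: "30" } } tokens { name: "now" }'
#     rewrite.top_rewrite(tagged, VerbalizeFinalFst().fst)
#     # expected (per the docstring): 'its 12:30 now'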
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class DecimalFst(GraphFst):
"""
Finite state transducer for verbalizing decimal, e.g.
decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" } -> -12.5006 billion
"""
def __init__(self):
super().__init__(name="decimal", kind="verbalize")
optional_sign = pynini.closure(pynini.cross("negative: \"true\"", "-") + delete_space, 0, 1)
integer = (
pynutil.delete("integer_part:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_integer = pynini.closure(integer + delete_space, 0, 1)
fractional = (
pynutil.insert(".")
+ pynutil.delete("fractional_part:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_fractional = pynini.closure(fractional + delete_space, 0, 1)
quantity = (
pynutil.delete("quantity:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_quantity = pynini.closure(pynutil.insert(" ") + quantity + delete_space, 0, 1)
graph = optional_integer + optional_fractional + optional_quantity
self.numbers = graph
graph = optional_sign + graph
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
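# Illustrative usage (editor's sketch, not part of the original module). The serialized
# token mirrors the class docstring; its exact spacing/quoting is an assumption.
#
#     from pynini.lib import rewrite
#     tagged = 'decimal { negative: "true" integer_part: "12" fractional_part: "5006" quantity: "billion" }'
#     rewrite.top_rewrite(tagged, DecimalFst().fst)
#     # expected (per the docstring): '-12.5006 billion'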
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, GraphFst, delete_space
from pynini.lib import pynutil
class MoneyFst(GraphFst):
"""
Finite state transducer for verbalizing money, e.g.
money { integer_part: "12" fractional_part: "05" currency: "$" } -> $12.05
Args:
decimal: DecimalFst
"""
def __init__(self, decimal: GraphFst):
super().__init__(name="money", kind="verbalize")
unit = (
pynutil.delete("currency:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_CHAR - " ", 1)
+ pynutil.delete("\"")
)
graph = unit + delete_space + decimal.numbers
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
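# Illustrative usage (editor's sketch, not part of the original module). MoneyFst reuses
# decimal.numbers for the amount; the token string below puts the currency field first to
# match the grammar above, and its exact spacing/field order is an assumption.
#
#     from nemo_text_processing.inverse_text_normalization.en.verbalizers.decimal import DecimalFst
#     from pynini.lib import rewrite
#     fst = MoneyFst(decimal=DecimalFst()).fst
#     rewrite.top_rewrite('money { currency: "$" integer_part: "12" fractional_part: "05" }', fst)
#     # expected (per the docstring): '$12.05'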
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/money.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class CardinalFst(GraphFst):
"""
Finite state transducer for verbalizing cardinal
e.g. cardinal { integer: "23" negative: "-" } -> -23
"""
def __init__(self):
super().__init__(name="cardinal", kind="verbalize")
optional_sign = pynini.closure(
pynutil.delete("negative:")
+ delete_space
+ pynutil.delete("\"")
+ NEMO_NOT_QUOTE
+ pynutil.delete("\"")
+ delete_space,
0,
1,
)
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
self.numbers = graph
graph = optional_sign + graph
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
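# Illustrative usage (editor's sketch, not part of the original module). The token string
# mirrors the class docstring, with the sign field first to match the grammar above; its
# exact spacing is an assumption.
#
#     from pynini.lib import rewrite
#     rewrite.top_rewrite('cardinal { negative: "-" integer: "23" }', CardinalFst().fst)
#     # expected (per the docstring): '-23'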
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/cardinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for verbalizing electronic
e.g. tokens { electronic { username: "cdf1" domain: "abc.edu" } } -> cdf1@abc.edu
"""
def __init__(self):
super().__init__(name="electronic", kind="verbalize")
user_name = (
pynutil.delete("username:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
domain = (
pynutil.delete("domain:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
protocol = (
pynutil.delete("protocol:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
graph = user_name + delete_space + pynutil.insert("@") + domain
graph |= protocol
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
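# Illustrative usage (editor's sketch, not part of the original module). The token string
# mirrors the class docstring without the outer "tokens" wrapper (that wrapper is removed
# by VerbalizeFinalFst); exact spacing is an assumption.
#
#     from pynini.lib import rewrite
#     rewrite.top_rewrite('electronic { username: "cdf1" domain: "abc.edu" }', ElectronicFst().fst)
#     # expected (per the docstring): 'cdf1@abc.edu'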
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/electronic.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
GraphFst,
delete_extra_space,
delete_space,
)
from pynini.lib import pynutil
class DateFst(GraphFst):
"""
Finite state transducer for verbalizing date, e.g.
date { month: "january" day: "5" year: "2012" preserve_order: true } -> january 5 2012
date { day: "5" month: "january" year: "2012" preserve_order: true } -> 5 january 2012
"""
def __init__(self):
super().__init__(name="date", kind="verbalize")
month = (
pynutil.delete("month:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
day = (
pynutil.delete("day:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
year = (
pynutil.delete("year:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ delete_space
+ pynutil.delete("\"")
)
period = (
pynutil.delete("text:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
graph_fy = period + pynini.closure(delete_extra_space + year, 0, 1)
# month (day) year
graph_mdy = (
month + pynini.closure(delete_extra_space + day, 0, 1) + pynini.closure(delete_extra_space + year, 0, 1)
)
# (day) month year
graph_dmy = (
pynini.closure(day + delete_extra_space, 0, 1) + month + pynini.closure(delete_extra_space + year, 0, 1)
)
optional_preserve_order = pynini.closure(
pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
| pynutil.delete("field_order:")
+ delete_space
+ pynutil.delete("\"")
+ NEMO_NOT_QUOTE
+ pynutil.delete("\"")
+ delete_space
)
final_graph = (graph_mdy | year | graph_dmy | graph_fy) + delete_space + optional_preserve_order
delete_tokens = self.delete_tokens(final_graph)
self.fst = delete_tokens.optimize()
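# Illustrative usage (editor's sketch, not part of the original module). The token string
# mirrors the class docstring; its exact spacing and the preserve_order field format are
# assumptions.
#
#     from pynini.lib import rewrite
#     tagged = 'date { month: "january" day: "5" year: "2012" preserve_order: true }'
#     rewrite.top_rewrite(tagged, DateFst().fst)
#     # expected (per the docstring): 'january 5 2012'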
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/date.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_CHAR, NEMO_SIGMA, GraphFst, delete_space
from pynini.lib import pynutil
class WordFst(GraphFst):
"""
Finite state transducer for verbalizing plain tokens
e.g. tokens { name: "sleep" } -> sleep
"""
def __init__(self):
super().__init__(name="word", kind="verbalize")
chars = pynini.closure(NEMO_CHAR - " ", 1)
char = pynutil.delete("name:") + delete_space + pynutil.delete("\"") + chars + pynutil.delete("\"")
graph = char @ pynini.cdrewrite(pynini.cross(u"\u00A0", " "), "", "", NEMO_SIGMA)
self.fst = graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/verbalizers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/data/numbers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/data/ordinals/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/data/electronic/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/inverse_text_normalization/en/data/time/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/g2p/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/g2p/data/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from argparse import ArgumentParser
from time import perf_counter
from typing import List, Optional, Tuple
import editdistance
import pynini
from nemo_text_processing.text_normalization.data_loader_utils import post_process_punct, pre_process
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.utils_audio_based import get_alignment
from pynini.lib import rewrite
"""
The script provides multiple normalization options and chooses the best one that minimizes CER of the ASR output
(most of the semiotic classes use deterministic=False flag).
To run this script with a .json manifest file, the manifest file should contain the following fields:
"text" - raw text (could be changed using "--manifest_text_field")
"pred_text" - ASR model prediction, see https://github.com/NVIDIA/NeMo/blob/main/examples/asr/transcribe_speech.py
on how to transcribe manifest
Example for a manifest line:
{"text": "In December 2015, ...", "pred_txt": "on december two thousand fifteen"}
When the manifest is ready, run:
python normalize_with_audio.py \
--manifest PATH/TO/MANIFEST.JSON \
--language en \
--output_filename=<PATH TO OUTPUT .JSON MANIFEST> \
--n_jobs=-1 \
--batch_size=300 \
--manifest_text_field="text"
To see possible normalization options for a text input without an audio file (could be used for debugging), run:
python normalize_with_audio.py --text "RAW TEXT"
Specify `--cache_dir` to generate .far grammars once and re-use them for faster inference
"""
class NormalizerWithAudio(Normalizer):
"""
Normalizer class that converts text from written to spoken form.
Useful for TTS preprocessing.
Args:
input_case: expected input capitalization
lang: language
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
post_process: WFST-based post processing, e.g. to remove extra spaces added during TN.
Note: punct_post_process flag in normalize() supports all languages.
max_number_of_permutations_per_split: a maximum number
of permutations which can be generated from input sequence of tokens.
"""
def __init__(
self,
input_case: str,
lang: str = 'en',
cache_dir: str = None,
overwrite_cache: bool = False,
whitelist: str = None,
lm: bool = False,
post_process: bool = True,
max_number_of_permutations_per_split: int = 729,
):
# initialize non-deterministic normalizer
super().__init__(
input_case=input_case,
lang=lang,
deterministic=False,
cache_dir=cache_dir,
overwrite_cache=overwrite_cache,
whitelist=whitelist,
lm=lm,
post_process=post_process,
)
self.tagger_non_deterministic = self.tagger
self.verbalizer_non_deterministic = self.verbalizer
if lang != "ru":
# initialize deterministic normalizer
super().__init__(
input_case=input_case,
lang=lang,
deterministic=True,
cache_dir=cache_dir,
overwrite_cache=overwrite_cache,
whitelist=whitelist,
lm=lm,
post_process=post_process,
max_number_of_permutations_per_split=max_number_of_permutations_per_split,
)
else:
self.tagger, self.verbalizer = None, None
self.lm = lm
def normalize(
self,
text: str,
n_tagged: int,
punct_post_process: bool = True,
verbose: bool = False,
pred_text: Optional[str] = None,
cer_threshold: float = -1,
**kwargs,
) -> str:
"""
Main function. Normalizes tokens from written to spoken form
e.g. 12 kg -> twelve kilograms
Args:
text: string that may include semiotic classes
n_tagged: number of tagged options to consider, -1 - to get all possible tagged options
punct_post_process: whether to normalize punctuation
verbose: whether to print intermediate meta information
pred_text: ASR model transcript
cer_threshold: if CER for pred_text and the normalization option is above the cer_threshold,
default deterministic normalization will be used. Set to -1 to disable cer-based filtering.
Specify the value in %, e.g. 100 not 1.
Returns:
normalized text options (usually there are multiple ways of normalizing a given semiotic class)
"""
if pred_text is None or pred_text == "" or self.tagger is None:
return self.normalize_non_deterministic(
text=text, n_tagged=n_tagged, punct_post_process=punct_post_process, verbose=verbose
)
try:
det_norm = super().normalize(
text=text, verbose=verbose, punct_pre_process=False, punct_post_process=punct_post_process
)
except RecursionError:
raise RecursionError(f"RecursionError. Try decreasing --max_number_of_permutations_per_split")
semiotic_spans, pred_text_spans, norm_spans, text_with_span_tags_list, masked_idx_list = get_alignment(
text, det_norm, pred_text, verbose=False
)
sem_tag_idx = 0
for cur_semiotic_span, cur_pred_text, cur_deter_norm in zip(semiotic_spans, pred_text_spans, norm_spans):
if len(cur_semiotic_span) == 0:
text_with_span_tags_list[masked_idx_list[sem_tag_idx]] = ""
else:
non_deter_options = self.normalize_non_deterministic(
text=cur_semiotic_span, n_tagged=n_tagged, punct_post_process=punct_post_process, verbose=verbose,
)
try:
best_option, cer, _ = self.select_best_match(
normalized_texts=non_deter_options, pred_text=cur_pred_text, verbose=verbose,
)
if cer_threshold > 0 and cer > cer_threshold:
best_option = cur_deter_norm
if verbose:
print(
f"CER of the best normalization option is above cer_threshold, using the deterministic option. CER: {cer}"
)
except:
# fall back to the default normalization option
best_option = cur_deter_norm
text_with_span_tags_list[masked_idx_list[sem_tag_idx]] = best_option
sem_tag_idx += 1
normalized_text = " ".join(text_with_span_tags_list)
return normalized_text.replace("  ", " ")
def normalize_non_deterministic(
self, text: str, n_tagged: int, punct_post_process: bool = True, verbose: bool = False
):
# get deterministic option
if self.tagger:
deterministic_form = super().normalize(
text=text, verbose=verbose, punct_pre_process=False, punct_post_process=punct_post_process
)
else:
deterministic_form = None
original_text = text
text = pre_process(text) # to handle []
text = text.strip()
if not text:
if verbose:
print(text)
return text
text = pynini.escape(text)
if self.lm:
if self.lang not in ["en"]:
raise ValueError(f"{self.lang} is not supported in LM mode")
if self.lang == "en":
# this to keep arpabet phonemes in the list of options
if "[" in text and "]" in text:
lattice = rewrite.rewrite_lattice(text, self.tagger_non_deterministic.fst)
else:
try:
lattice = rewrite.rewrite_lattice(text, self.tagger_non_deterministic.fst_no_digits)
except pynini.lib.rewrite.Error:
lattice = rewrite.rewrite_lattice(text, self.tagger_non_deterministic.fst)
lattice = rewrite.lattice_to_nshortest(lattice, n_tagged)
tagged_texts = [(x[1], float(x[2])) for x in lattice.paths().items()]
tagged_texts.sort(key=lambda x: x[1])
tagged_texts, weights = list(zip(*tagged_texts))
else:
tagged_texts = self._get_tagged_text(text, n_tagged)
# non-deterministic Eng normalization uses tagger composed with verbalizer, no permutation in between
if self.lang == "en":
normalized_texts = tagged_texts
normalized_texts = [self.post_process(text) for text in normalized_texts]
else:
normalized_texts = []
for tagged_text in tagged_texts:
self._verbalize(tagged_text, normalized_texts, n_tagged, verbose=verbose)
if len(normalized_texts) == 0:
raise ValueError()
if punct_post_process:
# do post-processing based on Moses detokenizer
if self.moses_detokenizer:
normalized_texts = [self.moses_detokenizer.detokenize([t]) for t in normalized_texts]
normalized_texts = [
post_process_punct(input=original_text, normalized_text=t) for t in normalized_texts
]
if self.lm:
remove_dup = sorted(list(set(zip(normalized_texts, weights))), key=lambda x: x[1])
normalized_texts, weights = zip(*remove_dup)
return list(normalized_texts), weights
if deterministic_form is not None:
normalized_texts.append(deterministic_form)
normalized_texts = set(normalized_texts)
return normalized_texts
def normalize_line(
self,
n_tagged: int,
line: str,
verbose: bool = False,
punct_pre_process=False,
punct_post_process=True,
text_field: str = "text",
asr_pred_field: str = "pred_text",
output_field: str = "normalized",
cer_threshold: float = -1,
):
"""
Normalizes "text_field" in line from a .json manifest
Args:
n_tagged: number of normalization options to return
line: line of a .json manifest
verbose: set to True to see intermediate output of normalization
punct_pre_process: set to True to do punctuation pre-processing
punct_post_process: set to True to do punctuation post-processing
text_field: name of the field in the manifest to normalize
asr_pred_field: name of the field in the manifest with ASR predictions
output_field: name of the field in the manifest to save normalized text
cer_threshold: if CER for pred_text and the normalization option is above the cer_threshold,
default deterministic normalization will be used. Set to -1 to disable cer-based filtering.
Specify the value in %, e.g. 100 not 1.
"""
line = json.loads(line)
normalized_text = self.normalize(
text=line["text"],
verbose=verbose,
n_tagged=n_tagged,
punct_post_process=punct_post_process,
pred_text=line[asr_pred_field],
cer_threshold=cer_threshold,
)
line[output_field] = normalized_text
return line
def _get_tagged_text(self, text, n_tagged):
"""
Returns text after tokenize and classify
Args:
text: input text
n_tagged: number of tagged options to consider, -1 - return all possible tagged options
"""
if n_tagged == -1:
if self.lang == "en":
# this to keep arpabet phonemes in the list of options
if "[" in text and "]" in text:
tagged_texts = rewrite.rewrites(text, self.tagger_non_deterministic.fst)
else:
try:
tagged_texts = rewrite.rewrites(text, self.tagger_non_deterministic.fst_no_digits)
except pynini.lib.rewrite.Error:
tagged_texts = rewrite.rewrites(text, self.tagger_non_deterministic.fst)
else:
tagged_texts = rewrite.rewrites(text, self.tagger_non_deterministic.fst)
else:
if self.lang == "en":
# this to keep arpabet phonemes in the list of options
if "[" in text and "]" in text:
tagged_texts = rewrite.top_rewrites(text, self.tagger_non_deterministic.fst, nshortest=n_tagged)
else:
try:
# try self.tagger graph that produces output without digits
tagged_texts = rewrite.top_rewrites(
text, self.tagger_non_deterministic.fst_no_digits, nshortest=n_tagged
)
except pynini.lib.rewrite.Error:
tagged_texts = rewrite.top_rewrites(
text, self.tagger_non_deterministic.fst, nshortest=n_tagged
)
else:
tagged_texts = rewrite.top_rewrites(text, self.tagger_non_deterministic.fst, nshortest=n_tagged)
return tagged_texts
def _verbalize(self, tagged_text: str, normalized_texts: List[str], n_tagged: int, verbose: bool = False):
"""
Verbalizes tagged text
Args:
tagged_text: text with tags
normalized_texts: list of possible normalization options
n_tagged: maximum number of verbalized options to return per tagged permutation
verbose: if true prints intermediate classification results
"""
def get_verbalized_text(tagged_text):
return rewrite.top_rewrites(tagged_text, self.verbalizer_non_deterministic.fst, n_tagged)
self.parser(tagged_text)
tokens = self.parser.parse()
tags_reordered = self.generate_permutations(tokens)
for tagged_text_reordered in tags_reordered:
try:
tagged_text_reordered = pynini.escape(tagged_text_reordered)
normalized_texts.extend(get_verbalized_text(tagged_text_reordered))
if verbose:
print(tagged_text_reordered)
except pynini.lib.rewrite.Error:
continue
def select_best_match(
self, normalized_texts: List[str], pred_text: str, verbose: bool = False, remove_punct: bool = False,
):
"""
Selects the best normalization option based on the lowest CER
Args:
normalized_texts: normalized text options
pred_text: ASR model transcript of the audio file corresponding to the normalized text
verbose: whether to print intermediate meta information
remove_punct: whether to remove punctuation before calculating CER
Returns:
normalized text with the lowest CER and CER value
"""
normalized_texts_cer = calculate_cer(normalized_texts, pred_text, remove_punct)
normalized_texts_cer = sorted(normalized_texts_cer, key=lambda x: x[1])
normalized_text, cer, idx = normalized_texts_cer[0]
if verbose:
print('-' * 30)
for option in normalized_texts:
print(option)
print('-' * 30)
return normalized_text, cer, idx
def calculate_cer(normalized_texts: List[str], pred_text: str, remove_punct=False) -> List[Tuple[str, float]]:
"""
Calculates character error rate (CER)
Args:
normalized_texts: normalized text options
pred_text: ASR model output
remove_punct: whether to remove punctuation before calculating CER
Returns: normalized options with corresponding CER
"""
normalized_options = []
for i, text in enumerate(normalized_texts):
text_clean = text.replace('-', ' ').lower()
if remove_punct:
for punct in "!?:;,.-()*+-/<=>@^_":
text_clean = text_clean.replace(punct, " ").replace("  ", " ")
cer = editdistance.eval(pred_text, text_clean) * 100.0 / len(pred_text)
normalized_options.append((text, cer, i))
return normalized_options
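# Illustrative usage (editor's sketch, not part of the original module). Each option is
# scored against the ASR transcript; the option strings below are made up for demonstration.
#
#     options = ["the nineteen seventies", "nineteen seventy"]
#     calculate_cer(options, pred_text="the nineteen seventies")
#     # -> [("the nineteen seventies", 0.0, 0), ("nineteen seventy", <CER in %>, 1)]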
def parse_args():
parser = ArgumentParser()
parser.add_argument("--text", help="input string or path to a .txt file", default=None, type=str)
parser.add_argument(
"--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str
)
parser.add_argument(
"--language", help="Select target language", choices=["en", "ru", "de", "es", "sv"], default="en", type=str
)
parser.add_argument("--manifest", default=None, help="path to .json manifest")
parser.add_argument(
"--output_filename",
default=None,
help="Path of where to save .json manifest with normalization outputs."
" It will only be saved if --manifest is a .json manifest.",
type=str,
)
parser.add_argument(
'--manifest_text_field',
help="A field in .json manifest to normalize (applicable only --manifest is specified)",
type=str,
default="text",
)
parser.add_argument(
'--manifest_asr_pred_field',
help="A field in .json manifest with ASR predictions (applicable only --manifest is specified)",
type=str,
default="pred_text",
)
parser.add_argument(
"--n_tagged",
type=int,
default=30,
help="number of tagged options to consider, -1 - return all possible tagged options",
)
parser.add_argument("--verbose", help="print info for debugging", action="store_true")
parser.add_argument(
"--no_remove_punct_for_cer",
help="Set to True to NOT remove punctuation before calculating CER",
action="store_true",
)
parser.add_argument(
"--no_punct_post_process", help="set to True to disable punctuation post processing", action="store_true"
)
parser.add_argument("--overwrite_cache", help="set to True to re-create .far grammar files", action="store_true")
parser.add_argument("--whitelist", help="path to a file with with whitelist", default=None, type=str)
parser.add_argument(
"--cache_dir",
help="path to a dir with .far grammar file. Set to None to avoid using cache",
default=None,
type=str,
)
parser.add_argument("--n_jobs", default=-2, type=int, help="The maximum number of concurrently running jobs")
parser.add_argument(
"--lm", action="store_true", help="Set to True for WFST+LM. Only available for English right now."
)
parser.add_argument(
"--cer_threshold",
default=-1,
type=float,
help="if CER for pred_text and the normalization option is above the cer_threshold, default deterministic normalization will be used. Set to -1 to disable cer-based filtering. Specify the value in %, e.g. 100 not 1.",
)
parser.add_argument("--batch_size", default=200, type=int, help="Number of examples for each process")
parser.add_argument(
"--max_number_of_permutations_per_split",
default=729,
type=int,
help="a maximum number of permutations which can be generated from input sequence of tokens.",
)
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
args.whitelist = os.path.abspath(args.whitelist) if args.whitelist else None
if args.text is not None:
normalizer = NormalizerWithAudio(
input_case=args.input_case,
lang=args.language,
cache_dir=args.cache_dir,
overwrite_cache=args.overwrite_cache,
whitelist=args.whitelist,
lm=args.lm,
max_number_of_permutations_per_split=args.max_number_of_permutations_per_split,
)
start = perf_counter()
if os.path.exists(args.text):
with open(args.text, 'r') as f:
args.text = f.read().strip()
options = normalizer.normalize(
text=args.text,
n_tagged=args.n_tagged,
punct_post_process=not args.no_punct_post_process,
verbose=args.verbose,
)
for option in options:
print(option)
elif args.manifest.endswith('.json'):
normalizer = NormalizerWithAudio(
input_case=args.input_case,
lang=args.language,
cache_dir=args.cache_dir,
overwrite_cache=args.overwrite_cache,
whitelist=args.whitelist,
max_number_of_permutations_per_split=args.max_number_of_permutations_per_split,
)
start = perf_counter()
normalizer.normalize_manifest(
manifest=args.manifest,
n_jobs=args.n_jobs,
punct_pre_process=True,
punct_post_process=not args.no_punct_post_process,
batch_size=args.batch_size,
output_filename=args.output_filename,
n_tagged=args.n_tagged,
text_field=args.manifest_text_field,
asr_pred_field=args.manifest_asr_pred_field,
cer_threshold=args.cer_threshold,
)
else:
raise ValueError(
"Provide either path to .json manifest with '--manifest' OR "
+ "an input text with '--text' (for debugging without audio)"
)
print(f'Execution time: {round((perf_counter() - start)/60, 2)} min.')
| NeMo-text-processing-main | nemo_text_processing/text_normalization/normalize_with_audio.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import json
import os
import re
import shutil
import sys
from argparse import ArgumentParser
from collections import OrderedDict
from glob import glob
from math import factorial
from time import perf_counter
from typing import Dict, List, Optional, Union
import pynini
import regex
import tqdm
from joblib import Parallel, delayed
from nemo_text_processing.text_normalization.data_loader_utils import (
load_file,
post_process_punct,
pre_process,
write_file,
)
from nemo_text_processing.text_normalization.preprocessing_utils import additional_split
from nemo_text_processing.text_normalization.token_parser import PRESERVE_ORDER_KEY, TokenParser
from pynini.lib.rewrite import top_rewrite
from sacremoses import MosesDetokenizer
from tqdm import tqdm
# this is to handle long input
sys.setrecursionlimit(3000)
SPACE_DUP = re.compile(' {2,}')
"""
To normalize a single entry:
python normalize.py --text=<INPUT_TEXT>
To normalize text in .json manifest:
python normalize.py \
--input_file=<PATH TO INPUT .JSON MANIFEST> \
--output_file=<PATH TO OUTPUT .JSON MANIFEST> \
--n_jobs=-1 \
--batch_size=300 \
--manifest_text_field="text" \
--whitelist=<PATH TO YOUR WHITELIST>
For a complete list of optional arguments, run:
>>> python normalize.py --help
To integrate Normalizer in your script:
>>> from nemo_text_processing.text_normalization.normalize import Normalizer
# see the script for args details
>>> normalizer_en = Normalizer(input_case='cased', lang='en', cache_dir=CACHE_DIR, overwrite_cache=False, post_process=True)
>>> normalizer_en.normalize("<INPUT_TEXT>")
# normalize list of entries
>>> normalizer_en.normalize_list(["<INPUT_TEXT1>", "<INPUT_TEXT2>"])
# normalize .json manifest entries
>>> normalizer_en.normalize_manifest(manifest=<PATH TO INPUT .JSON MANIFEST>, n_jobs=-1, batch_size=300,
output_filename=<PATH TO OUTPUT .JSON MANIFEST>, text_field="text",
punct_pre_process=False, punct_post_process=False)
"""
class Normalizer:
"""
Normalizer class that converts text from written to spoken form.
Useful for TTS preprocessing.
Args:
input_case: Input text capitalization, set to 'cased' if text contains capital letters.
This flag affects normalization rules applied to the text. Note, `lower_cased` won't lower case input.
lang: language specifying the TN rules, by default: English
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
post_process: WFST-based post processing, e.g. to remove extra spaces added during TN.
Note: punct_post_process flag in normalize() supports all languages.
max_number_of_permutations_per_split: a maximum number
of permutations which can be generated from input sequence of tokens.
"""
def __init__(
self,
input_case: str,
lang: str = 'en',
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = False,
whitelist: str = None,
lm: bool = False,
post_process: bool = True,
max_number_of_permutations_per_split: int = 729,
):
assert input_case in ["lower_cased", "cased"]
self.post_processor = None
if lang == "en":
from nemo_text_processing.text_normalization.en.verbalizers.verbalize_final import VerbalizeFinalFst
from nemo_text_processing.text_normalization.en.verbalizers.post_processing import PostProcessingFst
if post_process:
self.post_processor = PostProcessingFst(cache_dir=cache_dir, overwrite_cache=overwrite_cache)
if deterministic:
from nemo_text_processing.text_normalization.en.taggers.tokenize_and_classify import ClassifyFst
else:
if lm:
from nemo_text_processing.text_normalization.en.taggers.tokenize_and_classify_lm import ClassifyFst
else:
from nemo_text_processing.text_normalization.en.taggers.tokenize_and_classify_with_audio import (
ClassifyFst,
)
elif lang == 'ru':
# Ru TN only support non-deterministic cases and produces multiple normalization options
# use normalize_with_audio.py
from nemo_text_processing.text_normalization.ru.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.ru.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'de':
from nemo_text_processing.text_normalization.de.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.de.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'es':
from nemo_text_processing.text_normalization.es.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.es.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'fr':
from nemo_text_processing.text_normalization.fr.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.fr.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'sv':
from nemo_text_processing.text_normalization.sv.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.sv.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'hu':
from nemo_text_processing.text_normalization.hu.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.hu.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'zh':
from nemo_text_processing.text_normalization.zh.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.zh.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'ar':
from nemo_text_processing.text_normalization.ar.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.ar.verbalizers.verbalize_final import VerbalizeFinalFst
elif lang == 'it':
from nemo_text_processing.text_normalization.it.taggers.tokenize_and_classify import ClassifyFst
from nemo_text_processing.text_normalization.it.verbalizers.verbalize_final import VerbalizeFinalFst
else:
raise NotImplementedError(f"Language {lang} has not been supported yet.")
self.input_case = input_case
self.tagger = ClassifyFst(
input_case=self.input_case,
deterministic=deterministic,
cache_dir=cache_dir,
overwrite_cache=overwrite_cache,
whitelist=whitelist,
)
self.verbalizer = VerbalizeFinalFst(
deterministic=deterministic, cache_dir=cache_dir, overwrite_cache=overwrite_cache
)
self.max_number_of_permutations_per_split = max_number_of_permutations_per_split
self.parser = TokenParser()
self.lang = lang
self.moses_detokenizer = MosesDetokenizer(lang=lang)
def normalize_list(
self,
texts: List[str],
verbose: bool = False,
punct_pre_process: bool = False,
punct_post_process: bool = False,
batch_size: int = 1,
n_jobs: int = 1,
**kwargs,
):
"""
NeMo text normalizer
Args:
texts: list of input strings
verbose: whether to print intermediate meta information
punct_pre_process: whether to do punctuation pre-processing
punct_post_process: whether to do punctuation post-processing
n_jobs: the maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used.
batch_size: Number of examples for each process
Returns converted list input strings
"""
def _process_batch(batch, verbose, punct_pre_process, punct_post_process, **kwargs):
"""
Normalizes batch of text sequences
Args:
batch: list of texts
verbose: whether to print intermediate meta information
punct_pre_process: whether to do punctuation pre-processing
punct_post_process: whether to do punctuation post-processing
"""
normalized_lines = [
self.normalize(
text,
verbose=verbose,
punct_pre_process=punct_pre_process,
punct_post_process=punct_post_process,
**kwargs,
)
for text in tqdm(batch)
]
return normalized_lines
# to save intermediate results to a file
batch = min(len(texts), batch_size)
try:
normalized_texts = Parallel(n_jobs=n_jobs)(
delayed(_process_batch)(texts[i : i + batch], verbose, punct_pre_process, punct_post_process, **kwargs)
for i in range(0, len(texts), batch)
)
except BaseException as e:
raise e
normalized_texts = list(itertools.chain(*normalized_texts))
return normalized_texts
def _estimate_number_of_permutations_in_nested_dict(
self, token_group: Dict[str, Union[OrderedDict, str, bool]]
) -> int:
num_perms = 1
for k, inner in token_group.items():
if isinstance(inner, dict):
num_perms *= self._estimate_number_of_permutations_in_nested_dict(inner)
num_perms *= factorial(len(token_group))
return num_perms
def _split_tokens_to_reduce_number_of_permutations(self, tokens: List[dict]) -> List[List[dict]]:
"""
Splits a sequence of tokens in a smaller sequences of tokens in a way that maximum number of composite
tokens permutations does not exceed ``max_number_of_permutations_per_split``.
For example,
.. code-block:: python
# setup normalizer with self.max_number_of_permutations_per_split=6
tokens = [
{"tokens": {"date": {"year": "twenty eighteen", "month": "december", "day": "thirty one"}}},
{"tokens": {"date": {"year": "twenty eighteen", "month": "january", "day": "eight"}}},
]
split = normalizer._split_tokens_to_reduce_number_of_permutations(tokens)
assert split == [
[{"tokens": {"date": {"year": "twenty eighteen", "month": "december", "day": "thirty one"}}}],
[{"tokens": {"date": {"year": "twenty eighteen", "month": "january", "day": "eight"}}}],
]
Date tokens contain 3 items each which gives 6 permutations for every date. Since there are 2 dates, total
number of permutations would be ``6 * 6 == 36``. Parameter ``self.max_number_of_permutations_per_split`` equals 6,
so input sequence of tokens is split into 2 smaller sequences.
Args:
tokens: a list of dictionaries, possibly nested.
Returns:
a list of smaller sequences of tokens resulting from ``tokens`` split.
"""
splits = []
prev_end_of_split = 0
current_number_of_permutations = 1
for i, token_group in enumerate(tokens):
n = self._estimate_number_of_permutations_in_nested_dict(token_group)
if n * current_number_of_permutations > self.max_number_of_permutations_per_split:
splits.append(tokens[prev_end_of_split:i])
prev_end_of_split = i
current_number_of_permutations = 1
if n > self.max_number_of_permutations_per_split:
raise ValueError(
f"Could not split token list with respect to condition that every split can generate number of "
f"permutations less or equal to "
f"`self.max_number_of_permutations_per_split={self.max_number_of_permutations_per_split}`. "
f"There is an unsplittable token group that generates more than "
f"{self.max_number_of_permutations_per_split} permutations. Try to increase "
f"`--max_number_of_permutations_per_split` parameter."
)
current_number_of_permutations *= n
splits.append(tokens[prev_end_of_split:])
assert sum([len(s) for s in splits]) == len(tokens)
return splits
def normalize(
self, text: str, verbose: bool = False, punct_pre_process: bool = False, punct_post_process: bool = False
) -> str:
"""
Main function. Normalizes tokens from written to spoken form
e.g. 12 kg -> twelve kilograms
Args:
text: string that may include semiotic classes
verbose: whether to print intermediate meta information
punct_pre_process: whether to perform punctuation pre-processing, for example, [25] -> [ 25 ]
punct_post_process: whether to normalize punctuation
Returns: spoken form
"""
if len(text.split()) > 500:
print(
"WARNING! Your input is too long and could take a long time to normalize."
"Use split_text_into_sentences() to make the input shorter and then call normalize_list()."
)
original_text = text
if punct_pre_process:
text = pre_process(text)
text = text.strip()
if not text:
if verbose:
print(text)
return text
text = pynini.escape(text)
tagged_lattice = self.find_tags(text)
tagged_text = Normalizer.select_tag(tagged_lattice)
if verbose:
print(tagged_text)
self.parser(tagged_text)
tokens = self.parser.parse()
split_tokens = self._split_tokens_to_reduce_number_of_permutations(tokens)
output = ""
for s in split_tokens:
tags_reordered = self.generate_permutations(s)
verbalizer_lattice = None
for tagged_text in tags_reordered:
tagged_text = pynini.escape(tagged_text)
verbalizer_lattice = self.find_verbalizer(tagged_text)
if verbalizer_lattice.num_states() != 0:
break
if verbalizer_lattice is None:
raise ValueError(f"No permutations were generated from tokens {s}")
output += ' ' + Normalizer.select_verbalizer(verbalizer_lattice)
output = SPACE_DUP.sub(' ', output[1:])
if self.lang == "en" and hasattr(self, 'post_processor'):
output = self.post_process(output)
if punct_post_process:
# do post-processing based on Moses detokenizer
output = self.moses_detokenizer.detokenize([output], unescape=False)
output = post_process_punct(input=original_text, normalized_text=output)
return output
def normalize_line(
self,
line: str,
verbose: bool = False,
punct_pre_process=False,
punct_post_process=True,
text_field: str = "text",
output_field: str = "normalized",
**kwargs,
):
"""
Normalizes "text_field" in line from a .json manifest
Args:
line: line of a .json manifest
verbose: set to True to see intermediate output of normalization
punct_pre_process: set to True to do punctuation pre-processing
punct_post_process: set to True to do punctuation post-processing
text_field: name of the field in the manifest to normalize
output_field: name of the field in the manifest to save normalized text
"""
line = json.loads(line)
normalized_text = self.normalize(
text=line[text_field],
verbose=verbose,
punct_pre_process=punct_pre_process,
punct_post_process=punct_post_process,
**kwargs,
)
line[output_field] = normalized_text
return line
def normalize_manifest(
self,
manifest: str,
n_jobs: int,
punct_pre_process: bool,
punct_post_process: bool,
batch_size: int,
output_filename: Optional[str] = None,
text_field: str = "text",
**kwargs,
):
"""
Normalizes "text_field" from .json manifest.
Args:
manifest: path to .json manifest file
n_jobs: the maximum number of concurrently running jobs. If -1 all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used.
punct_pre_process: set to True to do punctuation pre-processing
punct_post_process: set to True to do punctuation post-processing
batch_size: number of samples to process per iteration (int)
output_filename: path to .json file to save normalized text
text_field: name of the field in the manifest to normalize
**kwargs are need for audio-based normalization that requires extra args
"""
def _process_batch(
batch_idx: int,
batch: List[str],
dir_name: str,
punct_pre_process=False,
punct_post_process=True,
text_field: str = "text",
output_field: str = "normalized",
**kwargs,
):
"""
Normalizes batch of text sequences
Args:
batch: list of texts
batch_idx: batch index
dir_name: path to output directory to save results
"""
normalized_lines = [
self.normalize_line(
line=line,
verbose=False,
punct_post_process=punct_post_process,
punct_pre_process=punct_pre_process,
text_field=text_field,
output_field=output_field,
**kwargs,
)
for line in tqdm(batch)
]
with open(f"{dir_name}/{batch_idx:06}.json", "w") as f_out:
for line in normalized_lines:
f_out.write(json.dumps(line, ensure_ascii=False) + '\n')
print(f"Batch -- {batch_idx} -- is complete")
if output_filename is None:
output_filename = manifest.replace('.json', '_normalized.json')
with open(manifest, 'r') as f:
lines = f.readlines()
print(f'Normalizing {len(lines)} line(s) of {manifest}...')
# to save intermediate results to a file
batch = min(len(lines), batch_size)
tmp_dir = "/tmp/parts"
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
os.makedirs(tmp_dir)
Parallel(n_jobs=n_jobs)(
delayed(_process_batch)(
idx,
lines[i : i + batch],
tmp_dir,
text_field=text_field,
punct_pre_process=punct_pre_process,
punct_post_process=punct_post_process,
**kwargs,
)
for idx, i in enumerate(range(0, len(lines), batch))
)
# aggregate all intermediate files
with open(output_filename, "w") as f_out:
for batch_f in sorted(glob(f"{tmp_dir}/*.json")):
with open(batch_f, "r") as f_in:
lines = f_in.read()
f_out.write(lines)
print(f'Normalized version saved at {output_filename}')
def split_text_into_sentences(self, text: str, additional_split_symbols: str = "") -> List[str]:
"""
Split text into sentences.
Args:
text: text
additional_split_symbols: Symbols to split sentences if eos sentence split resulted in a long sequence.
Use '|' as a separator between symbols, for example: ';|:'. Use '\s' to split by space.
Returns list of sentences
"""
lower_case_unicode = ""
upper_case_unicode = ""
if self.lang == "ru":
lower_case_unicode = '\u0430-\u04FF'
upper_case_unicode = '\u0410-\u042F'
# end of quoted speech - to be able to split sentences by full stop
text = re.sub(r"([\.\?\!])([\"\'])", r"\g<2>\g<1> ", text)
# remove extra space
text = re.sub(r" +", " ", text)
# remove space in the middle of the lower case abbreviation to avoid splitting into separate sentences
matches = re.findall(rf"[a-z{lower_case_unicode}]\.\s[a-z{lower_case_unicode}]\.", text)
for match in matches:
text = text.replace(match, match.replace(". ", "."))
# Read and split transcript by utterance (roughly, sentences)
split_pattern = rf"(?<!\w\.\w.)(?<![A-Z{upper_case_unicode}][a-z{lower_case_unicode}]+\.)(?<![A-Z{upper_case_unicode}]\.)(?<=\.|\?|\!|\.”|\?”\!”)\s(?![0-9]+[a-z]*\.)"
sentences = regex.split(split_pattern, text)
sentences = additional_split(sentences, additional_split_symbols)
return sentences
def _permute(self, d: OrderedDict) -> List[str]:
"""
Creates reorderings of dictionary elements and serializes as strings
Args:
d: (nested) dictionary of key value pairs
Return permutations of different string serializations of key value pairs
"""
l = []
if PRESERVE_ORDER_KEY in d.keys():
d_permutations = [d.items()]
else:
d_permutations = itertools.permutations(d.items())
for perm in d_permutations:
subl = [""]
for k, v in perm:
if isinstance(v, str):
subl = ["".join(x) for x in itertools.product(subl, [f"{k}: \"{v}\" "])]
elif isinstance(v, OrderedDict):
rec = self._permute(v)
subl = ["".join(x) for x in itertools.product(subl, [f" {k} {{ "], rec, [f" }} "])]
elif isinstance(v, bool):
subl = ["".join(x) for x in itertools.product(subl, [f"{k}: true "])]
else:
raise ValueError()
l.extend(subl)
return l
def generate_permutations(self, tokens: List[dict]):
"""
Generates permutations of string serializations of list of dictionaries
Args:
tokens: list of dictionaries
Returns string serialization of list of dictionaries
"""
def _helper(prefix: str, token_list: List[dict], idx: int):
"""
Generates permutations of string serializations of given dictionary
Args:
token_list: list of dictionaries
prefix: prefix string
idx: index of next dictionary
Returns string serialization of dictionary
"""
if idx == len(token_list):
yield prefix
return
token_options = self._permute(token_list[idx])
for token_option in token_options:
yield from _helper(prefix + token_option, token_list, idx + 1)
return _helper("", tokens, 0)
def find_tags(self, text: str) -> 'pynini.FstLike':
"""
Given text use tagger Fst to tag text
Args:
text: sentence
Returns: tagged lattice
"""
lattice = text @ self.tagger.fst
return lattice
@staticmethod
def select_tag(lattice: 'pynini.FstLike') -> str:
"""
Given tagged lattice return shortest path
Args:
lattice: pynini.FstLike tag lattice
Returns: shortest path
"""
tagged_text = pynini.shortestpath(lattice, nshortest=1, unique=True).string()
return tagged_text
def find_verbalizer(self, tagged_text: str) -> 'pynini.FstLike':
"""
Given tagged text creates verbalization lattice
This is context-independent.
Args:
tagged_text: input text
Returns: verbalized lattice
"""
lattice = tagged_text @ self.verbalizer.fst
return lattice
@staticmethod
def select_verbalizer(lattice: 'pynini.FstLike') -> str:
"""
Given verbalized lattice return shortest path
Args:
lattice: verbalization lattice
Returns: shortest path
"""
output = pynini.shortestpath(lattice, nshortest=1, unique=True).string()
# lattice = output @ self.verbalizer.punct_graph
# output = pynini.shortestpath(lattice, nshortest=1, unique=True).string()
return output
def post_process(self, normalized_text: 'pynini.FstLike') -> str:
"""
Runs post-processing graph on normalized text
Args:
normalized_text: normalized text
Returns: shortest path
"""
normalized_text = normalized_text.strip()
if not normalized_text:
return normalized_text
normalized_text = pynini.escape(normalized_text)
if self.post_processor is not None:
normalized_text = top_rewrite(normalized_text, self.post_processor.fst)
return normalized_text
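# A minimal usage sketch (not part of the original module) of the long-input flow that the
# warning inside `normalize()` points to: split the text into sentences first, then batch
# normalize the pieces. Kept as comments because constructing a Normalizer compiles (or
# loads cached) grammars, which can take a while; `long_text` is a placeholder.
#   normalizer = Normalizer(input_case="cased", lang="en")
#   sentences = normalizer.split_text_into_sentences(long_text)
#   normalized = normalizer.normalize_list(sentences, punct_post_process=True)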
def parse_args():
parser = ArgumentParser()
input = parser.add_mutually_exclusive_group()
input.add_argument("--text", dest="input_string", help="input string", type=str)
input.add_argument(
"--input_file",
dest="input_file",
help="input file path. "
"The input file can be either a .txt file containing one example for normalization per line or "
"a .json manifest file. Field to normalize in .json manifest is specified with `--manifest_text_field` arg.",
type=str,
)
parser.add_argument(
'--manifest_text_field',
help="The field in a .json manifest to normalize (applicable only when input_file is a .json manifest)",
type=str,
default="text",
)
parser.add_argument(
"--output_field",
help="Name of the field in a .json manifest in which to save normalized text (applicable only when input_file is a .json manifest)",
type=str,
default="normalized",
)
parser.add_argument('--output_file', dest="output_file", help="Output file path", type=str)
parser.add_argument(
"--language",
help="language",
choices=["en", "de", "es", "fr", "hu", "sv", "zh", "ar", "it"],
default="en",
type=str,
)
parser.add_argument(
"--input_case",
help="Input text capitalization, set to 'cased' if text contains capital letters."
"This argument affects normalization rules applied to the text. Note, `lower_cased` won't lower case input.",
choices=["lower_cased", "cased"],
default="cased",
type=str,
)
parser.add_argument("--verbose", help="print info for debugging", action='store_true')
parser.add_argument(
"--punct_post_process",
help="Add this flag to enable punctuation post processing to match input.",
action="store_true",
)
parser.add_argument(
"--punct_pre_process",
help="Add this flag to add spaces around square brackets, otherwise text between square brackets won't be normalized",
action="store_true",
)
parser.add_argument("--overwrite_cache", help="Add this flag to re-create .far grammar files", action="store_true")
parser.add_argument(
"--whitelist",
help="Path to a file with with whitelist replacement,"
"e.g., for English, whitelist files are stored under text_normalization/en/data/whitelist",
default=None,
type=str,
)
parser.add_argument(
"--cache_dir",
help="path to a dir with .far grammar file. Set to None to avoid using cache",
default=None,
type=str,
)
parser.add_argument("--n_jobs", default=-2, type=int, help="The maximum number of concurrently running jobs")
parser.add_argument("--batch_size", default=200, type=int, help="Number of examples for each process")
parser.add_argument(
"--max_number_of_permutations_per_split",
default=729,
type=int,
help="a maximum number of permutations which can be generated from input sequence of tokens.",
)
return parser.parse_args()
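# Example command-line usage (illustrative file names, run from this directory):
#   python normalize.py --text="It costs $5." --language=en --input_case=cased --punct_post_process
#   python normalize.py --input_file=manifest.json --manifest_text_field=text --output_file=manifest_normalized.json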
if __name__ == "__main__":
args = parse_args()
whitelist = os.path.abspath(args.whitelist) if args.whitelist else None
if not args.input_string and not args.input_file:
raise ValueError("Either `--text` or `--input_file` required")
normalizer = Normalizer(
input_case=args.input_case,
cache_dir=args.cache_dir,
overwrite_cache=args.overwrite_cache,
whitelist=whitelist,
lang=args.language,
max_number_of_permutations_per_split=args.max_number_of_permutations_per_split,
)
start_time = perf_counter()
if args.input_string:
print(
normalizer.normalize(
args.input_string,
verbose=args.verbose,
punct_pre_process=args.punct_pre_process,
punct_post_process=args.punct_post_process,
)
)
elif args.input_file:
if args.input_file.endswith(".json"):
normalizer.normalize_manifest(
args.input_file,
n_jobs=args.n_jobs,
punct_pre_process=args.punct_pre_process,
punct_post_process=args.punct_post_process,
batch_size=args.batch_size,
text_field=args.manifest_text_field,
output_field=args.output_field,
output_filename=args.output_file,
)
else:
print("Loading data: " + args.input_file)
data = load_file(args.input_file)
print("- Data: " + str(len(data)) + " sentences")
normalizer_prediction = normalizer.normalize_list(
data,
verbose=args.verbose,
punct_pre_process=args.punct_pre_process,
punct_post_process=args.punct_post_process,
)
if args.output_file:
write_file(args.output_file, normalizer_prediction)
print(f"- Normalized. Writing out to {args.output_file}")
else:
print(normalizer_prediction)
print(f"Execution time: {perf_counter() - start_time:.02f} sec")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/normalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.normalize_with_audio import NormalizerWithAudio
| NeMo-text-processing-main | nemo_text_processing/text_normalization/__init__.py |
from typing import List
def _split(sentences: List[str], delimiter: str, max_len: int, min_len: int):
"""
    Splits sentences by the specified delimiter. Will attempt to split and combine sentences to get target
min/max length.
Args:
sentences: Sentences to split into segments.
delimiter: symbol to split by
max_len: the maximum number of symbols in the output sentences (the result will be the closest len match)
        min_len: the minimum number of words in the output sentences (the result will be the closest len match)
"""
result = []
for sent in sentences:
if len(sent) < max_len:
result.append(sent)
continue
split_sent = sent.split(delimiter)
# keep the delimiter
split_sent = [(s + delimiter).strip() for s in split_sent[:-1]] + [split_sent[-1]]
if "," in delimiter:
            # splitting on a comma usually produces utterances that are too short, so combine sentences
            # that would result in a single-word split. Doing this for other delimiters is usually not recommended.
comb = []
for s in split_sent:
# if the previous sentence is too short, combine it with the current sentence
if len(comb) > 0 and (len(comb[-1].split()) <= min_len or len(s.split()) <= min_len):
comb[-1] = comb[-1] + " " + s
else:
comb.append(s)
result.extend(comb)
else:
result.extend(split_sent)
return result
def additional_split(sentences: List[str], split_on_symbols: str, max_len: int = 1000, min_len: int = 2) -> List[str]:
"""
Splits sentences by split_on_symbols.
Args:
sentences: Sentences to split into segments.
split_on_symbols: Symbols to split sentences if eos sentence split resulted in a long sequence.
Use '|' as a separator between symbols, for example: ';|:| ', will attempt to split each sentence
by semi-colon ";", colon ":", and space " ".
max_len: the maximum number of symbols in the output sentences (the result will be the closest len match)
        min_len: the minimum number of words in the output sentences (the result will be the closest len match)
"""
if len(split_on_symbols) == 0:
return sentences
split_on_symbols = split_on_symbols.split("|")
another_sent_split = []
for sent in sentences:
split_sent = [sent]
for delimiter in split_on_symbols:
if len(delimiter) == 0:
continue
split_sent = _split(
split_sent, delimiter + " " if delimiter != " " else delimiter, max_len=max_len, min_len=min_len
)
another_sent_split.extend(split_sent)
sentences = [s.strip() for s in another_sent_split if s.strip()]
return sentences
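if __name__ == "__main__":
    # A minimal usage sketch (the sentence below is illustrative, not a project fixture):
    # split an over-long "sentence" first on ";" and then on ",", keeping the delimiters.
    example = ["first clause; second clause, third clause and a tail"]
    print(additional_split(example, ";|,", max_len=20, min_len=1))
    # expected: ['first clause;', 'second clause,', 'third clause and a tail']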
| NeMo-text-processing-main | nemo_text_processing/text_normalization/preprocessing_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import re
import string
import sys
from collections import defaultdict, namedtuple
from typing import Dict, List, Optional, Set, Tuple
from unicodedata import category
EOS_TYPE = "EOS"
PUNCT_TYPE = "PUNCT"
PLAIN_TYPE = "PLAIN"
Instance = namedtuple('Instance', 'token_type un_normalized normalized')
known_types = [
"PLAIN",
"DATE",
"CARDINAL",
"LETTERS",
"VERBATIM",
"MEASURE",
"DECIMAL",
"ORDINAL",
"DIGIT",
"MONEY",
"TELEPHONE",
"ELECTRONIC",
"FRACTION",
"TIME",
"ADDRESS",
]
def _load_kaggle_text_norm_file(file_path: str) -> List[Instance]:
"""
https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
Loads text file in the Kaggle Google text normalization file format: <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
E.g.
PLAIN Brillantaisia <self>
PLAIN is <self>
PLAIN a <self>
PLAIN genus <self>
PLAIN of <self>
PLAIN plant <self>
PLAIN in <self>
PLAIN family <self>
PLAIN Acanthaceae <self>
PUNCT . sil
<eos> <eos>
Args:
file_path: file path to text file
Returns: flat list of instances
"""
res = []
with open(file_path, 'r') as fp:
for line in fp:
parts = line.strip().split("\t")
if parts[0] == "<eos>":
res.append(Instance(token_type=EOS_TYPE, un_normalized="", normalized=""))
else:
l_type, l_token, l_normalized = parts
l_token = l_token.lower()
l_normalized = l_normalized.lower()
if l_type == PLAIN_TYPE:
res.append(Instance(token_type=l_type, un_normalized=l_token, normalized=l_token))
elif l_type != PUNCT_TYPE:
res.append(Instance(token_type=l_type, un_normalized=l_token, normalized=l_normalized))
return res
def load_files(file_paths: List[str], load_func=_load_kaggle_text_norm_file) -> List[Instance]:
"""
Load given list of text files using the `load_func` function.
Args:
file_paths: list of file paths
load_func: loading function
Returns: flat list of instances
"""
res = []
for file_path in file_paths:
res.extend(load_func(file_path=file_path))
return res
def clean_generic(text: str) -> str:
"""
Cleans text without affecting semiotic classes.
Args:
text: string
Returns: cleaned string
"""
text = text.strip()
text = text.lower()
return text
def evaluate(preds: List[str], labels: List[str], input: Optional[List[str]] = None, verbose: bool = True) -> float:
"""
Evaluates accuracy given predictions and labels.
Args:
preds: predictions
labels: labels
input: optional, only needed for verbosity
verbose: if true prints [input], golden labels and predictions
Returns accuracy
"""
acc = 0
nums = len(preds)
for i in range(nums):
pred_norm = clean_generic(preds[i])
label_norm = clean_generic(labels[i])
if pred_norm == label_norm:
acc = acc + 1
else:
if input:
print(f"inpu: {json.dumps(input[i])}")
print(f"gold: {json.dumps(label_norm)}")
print(f"pred: {json.dumps(pred_norm)}")
return acc / nums
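# A minimal usage sketch (illustrative values): comparison goes through `clean_generic`,
# so casing and surrounding whitespace do not count as errors.
#   evaluate(preds=["twenty", "ten"], labels=["Twenty ", "eleven"])  # -> 0.5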
def training_data_to_tokens(
data: List[Instance], category: Optional[str] = None
) -> Dict[str, Tuple[List[str], List[str]]]:
"""
Filters the instance list by category if provided and converts it into a map from token type to list of un_normalized and normalized strings
Args:
data: list of instances
category: optional semiotic class category name
Returns Dict: token type -> (list of un_normalized strings, list of normalized strings)
"""
result = defaultdict(lambda: ([], []))
for instance in data:
if instance.token_type != EOS_TYPE:
if category is None or instance.token_type == category:
result[instance.token_type][0].append(instance.un_normalized)
result[instance.token_type][1].append(instance.normalized)
return result
def training_data_to_sentences(data: List[Instance]) -> Tuple[List[str], List[str], List[Set[str]]]:
"""
    Takes an instance list and creates lists of sentences split at EOS tokens
Args:
data: list of instances
Returns (list of unnormalized sentences, list of normalized sentences, list of sets of categories in a sentence)
"""
# split data at EOS boundaries
sentences = []
sentence = []
categories = []
sentence_categories = set()
for instance in data:
if instance.token_type == EOS_TYPE:
sentences.append(sentence)
sentence = []
categories.append(sentence_categories)
sentence_categories = set()
else:
sentence.append(instance)
sentence_categories.update([instance.token_type])
un_normalized = [" ".join([instance.un_normalized for instance in sentence]) for sentence in sentences]
normalized = [" ".join([instance.normalized for instance in sentence]) for sentence in sentences]
return un_normalized, normalized, categories
def post_process_punctuation(text: str) -> str:
"""
    Normalizes quotes and spaces
Args:
text: text
Returns: text with normalized spaces and quotes
"""
text = (
text.replace('( ', '(')
.replace(' )', ')')
.replace('{ ', '{')
.replace(' }', '}')
.replace('[ ', '[')
.replace(' ]', ']')
.replace(' ', ' ')
.replace('”', '"')
.replace("’", "'")
.replace("»", '"')
.replace("«", '"')
.replace("\\", "")
.replace("„", '"')
.replace("´", "'")
.replace("’", "'")
.replace('“', '"')
.replace("‘", "'")
.replace('`', "'")
.replace('- -', "--")
)
for punct in "!,.:;?":
text = text.replace(f' {punct}', punct)
return text.strip()
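# A minimal usage sketch (illustrative string):
#   post_process_punctuation('( example ) , yes !')  # -> '(example), yes!'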
def pre_process(text: str) -> str:
"""
Optional text preprocessing before normalization (part of TTS TN pipeline)
Args:
text: string that may include semiotic classes
Returns: text with spaces around punctuation marks
"""
space_both = '[]'
for punct in space_both:
text = text.replace(punct, ' ' + punct + ' ')
# remove extra space
text = re.sub(r' +', ' ', text)
return text
def load_file(file_path: str) -> List[str]:
"""
Loads given text file with separate lines into list of string.
Args:
file_path: file path
Returns: flat list of string
"""
res = []
with open(file_path, 'r') as fp:
for line in fp:
res.append(line)
return res
def write_file(file_path: str, data: List[str]):
"""
Writes out list of string to file.
Args:
file_path: file path
data: list of string
"""
with open(file_path, 'w') as fp:
for line in data:
fp.write(line + '\n')
def post_process_punct(input: str, normalized_text: str, add_unicode_punct: bool = False):
"""
Post-processing of the normalized output to match input in terms of spaces around punctuation marks.
After NN normalization, Moses detokenization puts a space after
punctuation marks, and attaches an opening quote "'" to the word to the right.
E.g., input to the TN NN model is "12 test' example",
after normalization and detokenization -> "twelve test 'example" (the quote is considered to be an opening quote,
but it doesn't match the input and can cause issues during TTS voice generation.)
The current function will match the punctuation and spaces of the normalized text with the input sequence.
"12 test' example" -> "twelve test 'example" -> "twelve test' example" (the quote was shifted to match the input).
Args:
input: input text (original input to the NN, before normalization or tokenization)
normalized_text: output text (output of the TN NN model)
add_unicode_punct: set to True to handle unicode punctuation marks as well as default string.punctuation (increases post processing time)
"""
    # in the post-processing WFST graph "``" are replaced with '"' quotes (otherwise single quotes "`" won't be handled correctly)
# this function fixes spaces around them based on input sequence, so here we're making the same double quote replacement
# to make sure these new double quotes work with this function
if "``" in input and "``" not in normalized_text:
input = input.replace("``", '"')
input = [x for x in input]
normalized_text = [x for x in normalized_text]
punct_marks = [x for x in string.punctuation if x in input]
if add_unicode_punct:
punct_unicode = [
chr(i)
for i in range(sys.maxunicode)
if category(chr(i)).startswith("P") and chr(i) not in punct_marks and chr(i) in input
]
        punct_marks.extend(punct_unicode)
for punct in punct_marks:
try:
equal = True
if input.count(punct) != normalized_text.count(punct):
equal = False
idx_in, idx_out = 0, 0
while punct in input[idx_in:]:
idx_out = normalized_text.index(punct, idx_out)
idx_in = input.index(punct, idx_in)
def _is_valid(idx_out, idx_in, normalized_text, input):
"""Check if previous or next word match (for cases when punctuation marks are part of
semiotic token, i.e. some punctuation can be missing in the normalized text)"""
return (idx_out > 0 and idx_in > 0 and normalized_text[idx_out - 1] == input[idx_in - 1]) or (
idx_out < len(normalized_text) - 1
and idx_in < len(input) - 1
and normalized_text[idx_out + 1] == input[idx_in + 1]
)
if not equal and not _is_valid(idx_out, idx_in, normalized_text, input):
idx_in += 1
continue
if idx_in > 0 and idx_out > 0:
if normalized_text[idx_out - 1] == " " and input[idx_in - 1] != " ":
normalized_text[idx_out - 1] = ""
elif normalized_text[idx_out - 1] != " " and input[idx_in - 1] == " ":
normalized_text[idx_out - 1] += " "
if idx_in < len(input) - 1 and idx_out < len(normalized_text) - 1:
if normalized_text[idx_out + 1] == " " and input[idx_in + 1] != " ":
normalized_text[idx_out + 1] = ""
elif normalized_text[idx_out + 1] != " " and input[idx_in + 1] == " ":
normalized_text[idx_out] = normalized_text[idx_out] + " "
idx_out += 1
idx_in += 1
except:
logging.info(f"Skipping post-processing of {''.join(normalized_text)} for '{punct}'")
normalized_text = "".join(normalized_text)
return re.sub(r' +', ' ', normalized_text)
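if __name__ == "__main__":
    # A minimal usage sketch mirroring the docstring example of post_process_punct:
    # the quote that Moses detokenization attached to the following word is moved
    # back so that the spacing matches the original input.
    raw_input = "12 test' example"
    detokenized = "twelve test 'example"
    print(post_process_punct(input=raw_input, normalized_text=detokenized))
    # expected: twelve test' example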
| NeMo-text-processing-main | nemo_text_processing/text_normalization/data_loader_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict
from cdifflib import CSequenceMatcher
MATCH = "match"
NONMATCH = "non-match"
SEMIOTIC_TAG = "[SEMIOTIC_SPAN]"
def _get_alignment(a: str, b: str) -> Dict:
"""
Constructs alignment between a and b
Returns:
a dictionary, where keys are a's word index and values is a Tuple that contains span from b, and whether it
matches a or not, e.g.:
>>> a = "a b c"
>>> b = "a b d f"
>>> print(_get_alignment(a, b))
{0: (0, 1, 'match'), 1: (1, 2, 'match'), 2: (2, 4, 'non-match')}
"""
a = a.lower().split()
b = b.lower().split()
s = CSequenceMatcher(None, a, b, autojunk=False)
# s contains a list of triples. Each triple is of the form (i, j, n), and means that a[i:i+n] == b[j:j+n].
# The triples are monotonically increasing in i and in j.
s = s.get_matching_blocks()
diffs = {}
non_match_start_l = 0
non_match_start_r = 0
for match in s:
l_start, r_start, length = match
if non_match_start_l < l_start:
while non_match_start_l < l_start:
diffs[non_match_start_l] = (non_match_start_r, r_start, NONMATCH)
non_match_start_l += 1
for len_ in range(length):
diffs[l_start + len_] = (r_start + len_, r_start + 1 + len_, MATCH)
non_match_start_l = l_start + length
non_match_start_r = r_start + length
return diffs
def adjust_boundaries(norm_raw_diffs: Dict, norm_pred_diffs: Dict, raw: str, norm: str, pred_text: str, verbose=False):
"""
    Adjusts alignment boundaries by taking norm--raw and norm--pred_text alignments and creating a raw--pred_text
    alignment.
norm_raw_diffs: output of _get_alignment(norm, raw)
norm_pred_diffs: output of _get_alignment(norm, pred_text)
raw: input text
norm: output of default normalization (deterministic)
pred_text: ASR prediction
verbose: set to True to output intermediate output of adjustments (for debugging)
Return:
semiotic_spans: List[str] - List of semiotic spans from raw text
        pred_texts: List[str] - List of pred_texts corresponding to semiotic_spans
        norm_spans: List[str] - List of normalized texts corresponding to semiotic_spans
        raw_text_masked_list: List[str] - List of words from raw text where every semiotic span is replaced with SEMIOTIC_TAG
raw_text_mask_idx: List[int] - List of indexes of SEMIOTIC_TAG in raw_text_masked_list
e.g.:
>>> raw = 'This is #4 ranking on G.S.K.T.'
>>> pred_text = 'this iss for ranking on g k p'
>>> norm = 'This is nubmer four ranking on GSKT'
output:
semiotic_spans: ['is #4', 'G.S.K.T.']
pred_texts: ['iss for', 'g k p']
        norm_spans: ['is number four', 'GSKT']
raw_text_masked_list: ['This', '[SEMIOTIC_SPAN]', 'ranking', 'on', '[SEMIOTIC_SPAN]']
raw_text_mask_idx: [1, 4]
"""
raw_pred_spans = []
word_id = 0
while word_id < len(norm.split()):
norm_raw, norm_pred = norm_raw_diffs[word_id], norm_pred_diffs[word_id]
# if there is a mismatch in norm_raw and norm_pred, expand the boundaries of the shortest mismatch to align with the longest one
# e.g., norm_raw = (1, 2, 'match') norm_pred = (1, 5, 'non-match') => expand norm_raw until the next matching sequence or the end of string to align with norm_pred
if (norm_raw[2] == MATCH and norm_pred[2] == NONMATCH) or (norm_raw[2] == NONMATCH and norm_pred[2] == MATCH):
mismatched_id = word_id
non_match_raw_start = norm_raw[0]
non_match_pred_start = norm_pred[0]
done = False
word_id += 1
while word_id < len(norm.split()) and not done:
norm_raw, norm_pred = norm_raw_diffs[word_id], norm_pred_diffs[word_id]
if norm_raw[2] == MATCH and norm_pred[2] == MATCH:
non_match_raw_end = norm_raw_diffs[word_id - 1][1]
non_match_pred_end = norm_pred_diffs[word_id - 1][1]
word_id -= 1
done = True
else:
word_id += 1
if not done:
non_match_raw_end = len(raw.split())
non_match_pred_end = len(pred_text.split())
raw_pred_spans.append(
(
mismatched_id,
(non_match_raw_start, non_match_raw_end, NONMATCH),
(non_match_pred_start, non_match_pred_end, NONMATCH),
)
)
else:
raw_pred_spans.append((word_id, norm_raw, norm_pred))
word_id += 1
# aggregate neighboring spans with the same status
spans_merged_neighbors = []
last_status = None
for idx, item in enumerate(raw_pred_spans):
if last_status is None:
last_status = item[1][2]
raw_start = item[1][0]
pred_text_start = item[2][0]
norm_span_start = item[0]
raw_end = item[1][1]
pred_text_end = item[2][1]
elif last_status is not None and last_status == item[1][2]:
raw_end = item[1][1]
pred_text_end = item[2][1]
else:
spans_merged_neighbors.append(
[[norm_span_start, item[0]], [raw_start, raw_end], [pred_text_start, pred_text_end], last_status]
)
last_status = item[1][2]
raw_start = item[1][0]
pred_text_start = item[2][0]
norm_span_start = item[0]
raw_end = item[1][1]
pred_text_end = item[2][1]
if last_status == item[1][2]:
raw_end = item[1][1]
pred_text_end = item[2][1]
spans_merged_neighbors.append(
[[norm_span_start, item[0]], [raw_start, raw_end], [pred_text_start, pred_text_end], last_status]
)
else:
spans_merged_neighbors.append(
[
[raw_pred_spans[idx - 1][0], len(norm.split())],
[item[1][0], len(raw.split())],
[item[2][0], len(pred_text.split())],
item[1][2],
]
)
raw_list = raw.split()
pred_text_list = pred_text.split()
norm_list = norm.split()
# increase boundaries between raw and pred_text if some spans contain empty pred_text
extended_spans = []
raw_norm_spans_corrected_for_pred_text = []
idx = 0
while idx < len(spans_merged_neighbors):
item = spans_merged_neighbors[idx]
cur_semiotic = " ".join(raw_list[item[1][0] : item[1][1]])
cur_pred_text = " ".join(pred_text_list[item[2][0] : item[2][1]])
cur_norm_span = " ".join(norm_list[item[0][0] : item[0][1]])
logging.debug(f"cur_semiotic: {cur_semiotic}")
logging.debug(f"cur_pred_text: {cur_pred_text}")
logging.debug(f"cur_norm_span: {cur_norm_span}")
# if cur_pred_text is an empty string
if item[2][0] == item[2][1]:
# for the last item
if idx == len(spans_merged_neighbors) - 1 and len(raw_norm_spans_corrected_for_pred_text) > 0:
last_item = raw_norm_spans_corrected_for_pred_text[-1]
last_item[0][1] = item[0][1]
last_item[1][1] = item[1][1]
last_item[2][1] = item[2][1]
last_item[-1] = item[-1]
else:
raw_start, raw_end = item[0]
norm_start, norm_end = item[1]
pred_start, pred_end = item[2]
while idx < len(spans_merged_neighbors) - 1 and not (
(pred_end - pred_start) > 2 and spans_merged_neighbors[idx][-1] == MATCH
):
idx += 1
raw_end = spans_merged_neighbors[idx][0][1]
norm_end = spans_merged_neighbors[idx][1][1]
pred_end = spans_merged_neighbors[idx][2][1]
cur_item = [[raw_start, raw_end], [norm_start, norm_end], [pred_start, pred_end], NONMATCH]
raw_norm_spans_corrected_for_pred_text.append(cur_item)
extended_spans.append(len(raw_norm_spans_corrected_for_pred_text) - 1)
idx += 1
else:
raw_norm_spans_corrected_for_pred_text.append(item)
idx += 1
semiotic_spans = []
norm_spans = []
pred_texts = []
raw_text_masked = ""
for idx, item in enumerate(raw_norm_spans_corrected_for_pred_text):
cur_semiotic = " ".join(raw_list[item[1][0] : item[1][1]])
cur_pred_text = " ".join(pred_text_list[item[2][0] : item[2][1]])
cur_norm_span = " ".join(norm_list[item[0][0] : item[0][1]])
if idx == len(raw_norm_spans_corrected_for_pred_text) - 1:
cur_norm_span = " ".join(norm_list[item[0][0] : len(norm_list)])
if (item[-1] == NONMATCH and cur_semiotic != cur_norm_span) or (idx in extended_spans):
raw_text_masked += " " + SEMIOTIC_TAG
semiotic_spans.append(cur_semiotic)
pred_texts.append(cur_pred_text)
norm_spans.append(cur_norm_span)
else:
raw_text_masked += " " + " ".join(raw_list[item[1][0] : item[1][1]])
raw_text_masked_list = raw_text_masked.strip().split()
raw_text_mask_idx = [idx for idx, x in enumerate(raw_text_masked_list) if x == SEMIOTIC_TAG]
if verbose:
print("+" * 50)
print("raw_pred_spans:")
for item in spans_merged_neighbors:
print(f"{raw.split()[item[1][0]: item[1][1]]} -- {pred_text.split()[item[2][0]: item[2][1]]}")
print("+" * 50)
print("spans_merged_neighbors:")
for item in spans_merged_neighbors:
print(f"{raw.split()[item[1][0]: item[1][1]]} -- {pred_text.split()[item[2][0]: item[2][1]]}")
print("+" * 50)
print("raw_norm_spans_corrected_for_pred_text:")
for item in raw_norm_spans_corrected_for_pred_text:
print(f"{raw.split()[item[1][0]: item[1][1]]} -- {pred_text.split()[item[2][0]: item[2][1]]}")
print("+" * 50)
return semiotic_spans, pred_texts, norm_spans, raw_text_masked_list, raw_text_mask_idx
def get_alignment(raw: str, norm: str, pred_text: str, verbose: bool = False):
"""
Aligns raw text with deterministically normalized text and ASR output, finds semiotic spans
"""
for value in [raw, norm, pred_text]:
if value is None or value == "":
return [], [], [], [], []
norm_pred_diffs = _get_alignment(norm, pred_text)
norm_raw_diffs = _get_alignment(norm, raw)
semiotic_spans, pred_texts, norm_spans, raw_text_masked_list, raw_text_mask_idx = adjust_boundaries(
norm_raw_diffs, norm_pred_diffs, raw, norm, pred_text, verbose
)
if verbose:
for i in range(len(semiotic_spans)):
print("=" * 40)
# print(i)
print(f"semiotic : {semiotic_spans[i]}")
print(f"pred text: {pred_texts[i]}")
print(f"norm : {norm_spans[i]}")
print("=" * 40)
return semiotic_spans, pred_texts, norm_spans, raw_text_masked_list, raw_text_mask_idx
if __name__ == "__main__":
raw = 'This is a #4 ranking on G.S.K.T.'
pred_text = 'this iss p k for ranking on g k p'
    norm = 'This is number four ranking on GSKT'
output = get_alignment(raw, norm, pred_text, True)
print(output)
| NeMo-text-processing-main | nemo_text_processing/text_normalization/utils_audio_based.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from nemo_text_processing.text_normalization.data_loader_utils import (
evaluate,
known_types,
load_files,
training_data_to_sentences,
training_data_to_tokens,
)
from nemo_text_processing.text_normalization.normalize import Normalizer
'''
Runs evaluation on data in the format: <semiotic class>\t<unnormalized text>\t<`self` if trivial class or normalized text>
like the Google text normalization data https://www.kaggle.com/richardwilliamsproat/text-normalization-for-english-russian-and-polish
'''
def parse_args():
parser = ArgumentParser()
parser.add_argument("--input", help="input file path", type=str)
parser.add_argument(
"--lang",
help="language",
choices=['ar', 'de', 'en', 'es', 'fr', 'hu', 'it', 'ru', 'sv', 'zh'],
default="en",
type=str,
)
parser.add_argument(
"--input_case", help="input capitalization", choices=["lower_cased", "cased"], default="cased", type=str
)
parser.add_argument(
"--cat",
dest="category",
help="focus on class only (" + ", ".join(known_types) + ")",
type=str,
default=None,
choices=known_types,
)
parser.add_argument("--filter", action='store_true', help="clean data for normalization purposes")
return parser.parse_args()
if __name__ == "__main__":
# Example usage:
# python run_evaluate.py --input=<INPUT> --cat=<CATEGORY> --filter
args = parse_args()
if args.lang == 'en':
from nemo_text_processing.text_normalization.en.clean_eval_data import filter_loaded_data
file_path = args.input
normalizer = Normalizer(input_case=args.input_case, lang=args.lang)
print("Loading training data: " + file_path)
training_data = load_files([file_path])
if args.filter:
training_data = filter_loaded_data(training_data)
if args.category is None:
print("Sentence level evaluation...")
sentences_un_normalized, sentences_normalized, _ = training_data_to_sentences(training_data)
print("- Data: " + str(len(sentences_normalized)) + " sentences")
sentences_prediction = normalizer.normalize_list(sentences_un_normalized)
print("- Normalized. Evaluating...")
sentences_accuracy = evaluate(
preds=sentences_prediction, labels=sentences_normalized, input=sentences_un_normalized
)
print("- Accuracy: " + str(sentences_accuracy))
print("Token level evaluation...")
tokens_per_type = training_data_to_tokens(training_data, category=args.category)
token_accuracy = {}
for token_type in tokens_per_type:
print("- Token type: " + token_type)
tokens_un_normalized, tokens_normalized = tokens_per_type[token_type]
print(" - Data: " + str(len(tokens_normalized)) + " tokens")
tokens_prediction = normalizer.normalize_list(tokens_un_normalized)
print(" - Denormalized. Evaluating...")
token_accuracy[token_type] = evaluate(
preds=tokens_prediction, labels=tokens_normalized, input=tokens_un_normalized
)
print(" - Accuracy: " + str(token_accuracy[token_type]))
token_count_per_type = {token_type: len(tokens_per_type[token_type][0]) for token_type in tokens_per_type}
token_weighted_accuracy = [
token_count_per_type[token_type] * accuracy for token_type, accuracy in token_accuracy.items()
]
print("- Accuracy: " + str(sum(token_weighted_accuracy) / sum(token_count_per_type.values())))
print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
print(" - Total: " + str(sum(token_count_per_type.values())), '\n')
for token_type in token_accuracy:
if token_type not in known_types:
raise ValueError("Unexpected token type: " + token_type)
if args.category is None:
c1 = ['Class', 'sent level'] + known_types
c2 = ['Num Tokens', len(sentences_normalized)] + [
token_count_per_type[known_type] if known_type in tokens_per_type else '0' for known_type in known_types
]
c3 = ['Normalization', sentences_accuracy] + [
token_accuracy[known_type] if known_type in token_accuracy else '0' for known_type in known_types
]
for i in range(len(c1)):
print(f'{str(c1[i]):10s} | {str(c2[i]):10s} | {str(c3[i]):5s}')
else:
print(f'numbers\t{token_count_per_type[args.category]}')
print(f'Normalization\t{token_accuracy[args.category]}')
| NeMo-text-processing-main | nemo_text_processing/text_normalization/run_evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from collections import OrderedDict
from typing import Dict, List, Union
PRESERVE_ORDER_KEY = "preserve_order"
EOS = "<EOS>"
class TokenParser:
"""
Parses tokenized/classified text, e.g. 'tokens { money { integer: "20" currency: "$" } } tokens { name: "left"}'
Args
text: tokenized text
"""
def __call__(self, text):
"""
Setup function
Args:
text: text to be parsed
"""
self.text = text
self.len_text = len(text)
self.char = text[0] # cannot handle empty string
self.index = 0
def parse(self) -> List[dict]:
"""
Main function. Implements grammar:
A -> space F space F space F ... space
Returns list of dictionaries
"""
l = list()
while self.parse_ws():
token = self.parse_token()
if not token:
break
l.append(token)
return l
def parse_token(self) -> Dict[str, Union[str, dict]]:
"""
Implements grammar:
F-> no_space KG no_space
Returns: K, G as dictionary values
"""
d = OrderedDict()
key = self.parse_string_key()
if key is None:
return None
self.parse_ws()
if key == PRESERVE_ORDER_KEY:
self.parse_char(":")
self.parse_ws()
value = self.parse_chars("true")
else:
value = self.parse_token_value()
d[key] = value
return d
def parse_token_value(self) -> Union[str, dict]:
"""
Implements grammar:
G-> no_space :"VALUE" no_space | no_space {A} no_space
Returns: string or dictionary
"""
if self.char == ":":
self.parse_char(":")
self.parse_ws()
self.parse_char("\"")
value_string = self.parse_string_value()
self.parse_char("\"")
return value_string
elif self.char == "{":
d = OrderedDict()
self.parse_char("{")
list_token_dicts = self.parse()
# flatten tokens
for tok_dict in list_token_dicts:
for k, v in tok_dict.items():
d[k] = v
self.parse_char("}")
return d
else:
raise ValueError()
def parse_char(self, exp) -> bool:
"""
Parses character
Args:
exp: character to read in
Returns true if successful
"""
assert self.char == exp
self.read()
return True
def parse_chars(self, exp) -> bool:
"""
Parses characters
Args:
exp: characters to read in
Returns true if successful
"""
ok = False
for x in exp:
ok |= self.parse_char(x)
return ok
def parse_string_key(self) -> str:
"""
Parses string key, can only contain ascii and '_' characters
Returns parsed string key
"""
assert self.char not in string.whitespace and self.char != EOS
incl_criterium = string.ascii_letters + "_"
l = []
while self.char in incl_criterium:
l.append(self.char)
if not self.read():
raise ValueError()
if not l:
return None
return "".join(l)
def parse_string_value(self) -> str:
"""
Parses string value, ends with quote followed by space
Returns parsed string value
"""
# assert self.char not in string.whitespace and self.char != EOS
assert self.char != EOS
l = []
while self.char != "\"" or self.text[self.index + 1] != " ":
l.append(self.char)
if not self.read():
raise ValueError()
if not l:
return None
return "".join(l)
def parse_ws(self):
"""
Deletes whitespaces.
Returns true if not EOS after parsing
"""
not_eos = self.char != EOS
while not_eos and self.char == " ":
not_eos = self.read()
return not_eos
def read(self):
"""
Reads in next char.
Returns true if not EOS
"""
if self.index < self.len_text - 1: # should be unique
self.index += 1
self.char = self.text[self.index]
return True
self.char = EOS
return False
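if __name__ == "__main__":
    # A minimal usage sketch (the tagged string below is illustrative; real input comes
    # from the tagger FST and always has a space after each closing quote).
    parser = TokenParser()
    parser('tokens { money { integer: "20" currency: "$" } } tokens { name: "left" } ')
    # each top-level "tokens { ... }" group becomes one (possibly nested) OrderedDict
    print(parser.parse())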
| NeMo-text-processing-main | nemo_text_processing/text_normalization/token_parser.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright 2015 and onwards Google, Inc.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import delete_space, insert_space
from pynini.lib import byte, pynutil
from .utils import get_abs_path, load_labels
_ALPHA_UPPER = "ABCDEFGHIJKLMNOPQRSTUVWXYZÅÄÖÜÉ"
_ALPHA_LOWER = "abcdefghijklmnopqrstuvwxyzåäöüé"
TO_LOWER = pynini.union(*[pynini.cross(x, y) for x, y in zip(_ALPHA_UPPER, _ALPHA_LOWER)])
TO_UPPER = pynini.invert(TO_LOWER)
SV_LOWER = pynini.union(*_ALPHA_LOWER).optimize()
SV_UPPER = pynini.union(*_ALPHA_UPPER).optimize()
SV_ALPHA = pynini.union(SV_LOWER, SV_UPPER).optimize()
SV_ALNUM = pynini.union(byte.DIGIT, SV_ALPHA).optimize()
bos_or_space = pynini.union("[BOS]", " ")
eos_or_space = pynini.union("[EOS]", " ")
ensure_space = pynini.cross(pynini.closure(delete_space, 0, 1), " ")
def roman_to_int(fst: 'pynini.FstLike') -> 'pynini.FstLike':
"""
Alters given fst to convert Roman integers (lower and upper cased) into Arabic numerals. Valid for values up to 1000.
e.g.
"V" -> "5"
"i" -> "1"
Args:
fst: Any fst. Composes fst onto Roman conversion outputs.
"""
def _load_roman(file: str):
roman = load_labels(get_abs_path(file))
roman_numerals = [(x, y) for x, y in roman] + [(x.upper(), y) for x, y in roman]
return pynini.string_map(roman_numerals)
digit = _load_roman("data/roman/digit.tsv")
ties = _load_roman("data/roman/ties.tsv")
hundreds = _load_roman("data/roman/hundreds.tsv")
graph = (
digit
| ties + (digit | pynutil.add_weight(pynutil.insert("0"), 0.01))
| (
hundreds
+ (ties | pynutil.add_weight(pynutil.insert("0"), 0.01))
+ (digit | pynutil.add_weight(pynutil.insert("0"), 0.01))
)
).optimize()
return graph @ fst
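# A minimal usage sketch (assumes the data/roman/*.tsv files map lowercase Roman
# numerals to single Arabic digits, as elsewhere in NeMo). Composing onto a plain
# digit acceptor turns a Roman-numeral string into its Arabic form:
#   arabic = pynini.closure(byte.DIGIT, 1)
#   roman = roman_to_int(arabic)
#   pynini.shortestpath("XIV" @ roman).string()  # expected: "14"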
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/graph_utils.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import os
def get_abs_path(rel_path):
"""
Get absolute path
Args:
rel_path: relative path to this file
Returns absolute path
"""
return os.path.dirname(os.path.abspath(__file__)) + '/' + rel_path
def load_labels(abs_path):
"""
loads relative path file as dictionary
Args:
abs_path: absolute path
Returns dictionary of mappings
"""
with open(abs_path, encoding="utf-8") as label_tsv:
labels = list(csv.reader(label_tsv, delimiter="\t"))
return labels
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
convert_space,
delete_extra_space,
insert_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import ensure_space
from nemo_text_processing.text_normalization.sv.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for classifying time, e.g.
12:30 e.m. est -> time { hours: "tolv" minutes: "trettio" suffix: "eftermiddag" zone: "e s t" }
2.30 e.m. -> time { hours: "två" minutes: "trettio" suffix: "eftermiddag" }
02.30 e.m. -> time { hours: "två" minutes: "trettio" suffix: "eftermiddag" }
2.00 e.m. -> time { hours: "två" suffix: "eftermiddag" }
kl. 2 e.m. -> time { hours: "två" suffix: "eftermiddag" }
02:00 -> time { hours: "två" }
2:00 -> time { hours: "två" }
10:00:05 e.m. -> time { hours: "tio" minutes: "noll" seconds: "fem" suffix: "eftermiddag" }
Args:
cardinal: CardinalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, deterministic: bool = True):
super().__init__(name="time", kind="classify", deterministic=deterministic)
suffix_graph = pynini.string_map(load_labels(get_abs_path("data/time/suffix.tsv")))
time_zone_graph = pynini.string_file(get_abs_path("data/time/time_zone.tsv"))
# only used for < 1000 thousand -> 0 weight
cardinal = cardinal.graph
labels_hour = [str(x) for x in range(0, 24)]
labels_minute_single = [str(x) for x in range(1, 10)]
labels_minute_double = [str(x) for x in range(10, 60)]
delete_leading_zero_to_double_digit = (NEMO_DIGIT + NEMO_DIGIT) | (
pynini.closure(pynutil.delete("0"), 0, 1) + NEMO_DIGIT
)
time_sep = pynutil.delete(pynini.union(":", "."))
klockan = pynini.union(pynini.cross("kl.", "klockan"), pynini.cross("kl", "klockan"), "klockan", "klockan är")
klockan_graph_piece = pynutil.insert("hours: \"") + klockan
graph_hour = delete_leading_zero_to_double_digit @ pynini.union(*labels_hour) @ cardinal
graph_minute_single = pynini.union(*labels_minute_single) @ cardinal
graph_minute_double = pynini.union(*labels_minute_double) @ cardinal
klockan_hour_graph = klockan_graph_piece + ensure_space + graph_hour + pynutil.insert("\"")
final_graph_hour = pynutil.insert("hours: \"") + graph_hour + pynutil.insert("\"")
final_graph_minute = (
pynutil.insert("minutes: \"")
+ (pynutil.delete("0") + insert_space + graph_minute_single | graph_minute_double)
+ pynutil.insert("\"")
)
if not deterministic:
final_graph_minute |= (
pynutil.insert("minutes: \"")
+ (pynutil.delete("0") + insert_space + graph_minute_single | graph_minute_double)
+ pynutil.insert("\"")
)
final_graph_minute |= (
pynutil.insert("minutes: \"") + pynini.cross("00", "noll noll") + pynutil.insert("\"")
)
final_graph_second = (
pynutil.insert("seconds: \"")
+ (pynutil.delete("0") + insert_space + graph_minute_single | graph_minute_double)
+ pynutil.insert("\"")
)
if not deterministic:
final_graph_second |= (
pynutil.insert("seconds: \"")
+ (pynini.cross("0", "noll") + insert_space + graph_minute_single | graph_minute_double)
+ pynutil.insert("\"")
)
final_graph_second |= (
pynutil.insert("seconds: \"") + pynini.cross("00", "noll noll") + pynutil.insert("\"")
)
final_suffix = pynutil.insert("suffix: \"") + convert_space(suffix_graph) + pynutil.insert("\"")
final_suffix_optional = pynini.closure(ensure_space + final_suffix, 0, 1)
final_time_zone = pynutil.insert("zone: \"") + convert_space(time_zone_graph) + pynutil.insert("\"")
final_time_zone_optional = pynini.closure(NEMO_SPACE + final_time_zone, 0, 1,)
# 2:30 pm, 02:30, 2:00
graph_hm_kl = (
klockan_hour_graph
+ time_sep
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ final_suffix_optional
+ final_time_zone_optional
)
graph_hm_sfx = (
final_graph_hour
+ time_sep
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ ensure_space
+ (final_suffix + final_time_zone_optional | final_time_zone)
)
graph_hm = graph_hm_kl | graph_hm_sfx
# 10:30:05 pm,
graph_hms_sfx = (
final_graph_hour
+ time_sep
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ time_sep
+ (pynini.cross("00", " seconds: \"noll\"") | insert_space + final_graph_second)
+ ensure_space
+ (final_suffix + final_time_zone_optional | final_time_zone)
)
graph_hms_sfx |= (
final_graph_hour
+ pynutil.delete(".")
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ pynutil.delete(".")
+ (pynini.cross("00", " seconds: \"noll\"") | insert_space + final_graph_second)
+ ensure_space
+ (final_suffix + final_time_zone_optional | final_time_zone)
)
graph_hms_kl = (
klockan_hour_graph
+ pynutil.delete(":")
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ pynutil.delete(":")
+ (pynini.cross("00", " seconds: \"noll\"") | insert_space + final_graph_second)
+ final_suffix_optional
+ final_time_zone_optional
)
graph_hms_kl |= (
klockan_hour_graph
+ pynutil.delete(".")
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ pynutil.delete(".")
+ (pynini.cross("00", " seconds: \"noll\"") | insert_space + final_graph_second)
+ final_suffix_optional
+ final_time_zone_optional
)
graph_hms = graph_hms_kl | graph_hms_sfx
if not deterministic:
graph_hms |= (
final_graph_hour
+ pynutil.delete(".")
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ pynutil.delete(".")
+ (pynini.cross("00", " seconds: \"noll\"") | insert_space + final_graph_second)
)
graph_hms |= (
final_graph_hour
+ pynutil.delete(":")
+ (pynini.cross("00", " minutes: \"noll\"") | insert_space + final_graph_minute)
+ pynutil.delete(":")
+ (pynini.cross("00", " seconds: \"noll\"") | insert_space + final_graph_second)
)
self.graph_hms = graph_hms
self.graph_hm = graph_hm
# 2 pm est
ins_minutes = pynutil.insert(" minutes: \"noll\"")
graph_h = (
final_graph_hour + ins_minutes + ensure_space + (final_suffix + final_time_zone_optional | final_time_zone)
)
graph_h |= klockan_hour_graph + ins_minutes + final_suffix_optional + final_time_zone_optional
self.graph_h = graph_h
final_graph = (graph_hm | graph_h | graph_hms).optimize() @ pynini.cdrewrite(
delete_extra_space, "", "", NEMO_SIGMA
)
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
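# A minimal usage sketch (illustrative; assumes the Swedish CardinalFst from this package):
#   from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
#   time = TimeFst(cardinal=CardinalFst(deterministic=True), deterministic=True)
#   pynini.shortestpath("kl. 14:30" @ time.fst).string()
#   # expected to yield a token string of the form: time { hours: "..." minutes: "..." }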
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/time.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NON_BREAKING_SPACE,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_space,
delete_zero_or_one_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import SV_ALPHA, TO_LOWER
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
class MeasureFst(GraphFst):
"""
Finite state transducer for classifying measure, suppletive aware, e.g.
-12kg -> measure { negative: "true" cardinal { integer: "tolv" } units: "kilogram" }
1kg -> measure { cardinal { integer: "ett" } units: "kilogram" }
,5kg -> measure { decimal { fractional_part: "fem" } units: "kilogram" }
Args:
cardinal: CardinalFst
decimal: DecimalFst
fraction: FractionFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst, fraction: GraphFst, deterministic: bool = True):
super().__init__(name="measure", kind="classify", deterministic=deterministic)
cardinal_graph_ett = cardinal.graph
cardinal_graph_en = cardinal.graph_en
graph_unit = pynini.string_file(get_abs_path("data/measure/unit.tsv"))
graph_unit_ett = pynini.string_file(get_abs_path("data/measure/unit_neuter.tsv"))
graph_plurals = pynini.string_file(get_abs_path("data/measure/unit_plural.tsv"))
greek_lower = pynini.string_file(get_abs_path("data/measure/greek_lower.tsv"))
greek_upper = pynutil.insert("stort ") + pynini.string_file(get_abs_path("data/measure/greek_upper.tsv"))
greek = greek_lower | greek_upper
graph_unit |= pynini.compose(
pynini.closure(TO_LOWER, 1) + (SV_ALPHA | TO_LOWER) + pynini.closure(SV_ALPHA | TO_LOWER), graph_unit
).optimize()
graph_unit_ett |= pynini.compose(
pynini.closure(TO_LOWER, 1) + (SV_ALPHA | TO_LOWER) + pynini.closure(SV_ALPHA | TO_LOWER), graph_unit_ett
).optimize()
graph_unit_plural = convert_space(graph_unit @ graph_plurals)
graph_unit_plural_ett = convert_space(graph_unit_ett @ graph_plurals)
graph_unit = convert_space(graph_unit)
graph_unit_ett = convert_space(graph_unit_ett)
self.unit_plural_en = graph_unit_plural
self.unit_plural_ett = graph_unit_plural_ett
self.unit_en = graph_unit
self.unit_ett = graph_unit_ett
optional_graph_negative = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
graph_unit2 = (
pynini.cross("/", "per")
+ delete_zero_or_one_space
+ pynutil.insert(NEMO_NON_BREAKING_SPACE)
+ (graph_unit | graph_unit_ett)
)
optional_graph_unit2 = pynini.closure(
delete_zero_or_one_space + pynutil.insert(NEMO_NON_BREAKING_SPACE) + graph_unit2, 0, 1,
)
unit_plural = (
pynutil.insert("units: \"")
+ (graph_unit_plural + optional_graph_unit2 | graph_unit2)
+ pynutil.insert("\"")
)
unit_plural_ett = (
pynutil.insert("units: \"")
+ (graph_unit_plural_ett + optional_graph_unit2 | graph_unit2)
+ pynutil.insert("\"")
)
unit_singular = (
pynutil.insert("units: \"") + (graph_unit + optional_graph_unit2 | graph_unit2) + pynutil.insert("\"")
)
unit_singular_ett = (
pynutil.insert("units: \"") + (graph_unit_ett + optional_graph_unit2 | graph_unit2) + pynutil.insert("\"")
)
subgraph_decimal = (
pynutil.insert("decimal { ")
+ optional_graph_negative
+ decimal.final_graph_wo_negative_en
+ delete_space
+ pynutil.insert(" } ")
+ unit_plural
)
subgraph_decimal |= (
pynutil.insert("decimal { ")
+ optional_graph_negative
+ decimal.final_graph_wo_negative
+ delete_space
+ pynutil.insert(" } ")
+ unit_plural_ett
)
# support radio FM/AM
subgraph_decimal |= (
pynutil.insert("decimal { ")
+ decimal.final_graph_wo_negative
+ delete_space
+ pynutil.insert(" } ")
+ pynutil.insert("units: \"")
+ pynini.union("AM", "FM")
+ pynutil.insert("\"")
)
subgraph_cardinal = (
pynutil.insert("cardinal { ")
+ optional_graph_negative
+ pynutil.insert("integer: \"")
+ ((NEMO_SIGMA - "1") @ cardinal_graph_en)
+ delete_space
+ pynutil.insert("\"")
+ pynutil.insert(" } ")
+ unit_plural
)
subgraph_cardinal |= (
pynutil.insert("cardinal { ")
+ optional_graph_negative
+ pynutil.insert("integer: \"")
+ ((NEMO_SIGMA - "1") @ cardinal_graph_ett)
+ delete_space
+ pynutil.insert("\"")
+ pynutil.insert(" } ")
+ unit_plural_ett
)
subgraph_cardinal |= (
pynutil.insert("cardinal { ")
+ optional_graph_negative
+ pynutil.insert("integer: \"")
+ pynini.cross("1", "ett")
+ delete_space
+ pynutil.insert("\"")
+ pynutil.insert(" } ")
+ unit_singular_ett
)
subgraph_cardinal |= (
pynutil.insert("cardinal { ")
+ optional_graph_negative
+ pynutil.insert("integer: \"")
+ pynini.cross("1", "en")
+ delete_space
+ pynutil.insert("\"")
+ pynutil.insert(" } ")
+ unit_singular
)
self.subgraph_cardinal = subgraph_cardinal
unit_graph = (
pynutil.insert("cardinal { integer: \"-\" } units: \"")
+ ((pynini.cross("/", "per") + delete_zero_or_one_space) | (pynini.accep("per") + pynutil.delete(" ")))
+ pynutil.insert(NEMO_NON_BREAKING_SPACE)
+ graph_unit
+ pynutil.insert("\" preserve_order: true")
)
decimal_dash_alpha = (
pynutil.insert("decimal { ")
+ decimal.final_graph_wo_negative
+ pynini.cross('-', '')
+ pynutil.insert(" } units: \"")
+ pynini.closure(SV_ALPHA, 1)
+ pynutil.insert("\"")
)
decimal_times = (
pynutil.insert("decimal { ")
+ decimal.final_graph_wo_negative
+ pynutil.insert(" } units: \"")
+ (pynini.cross(pynini.union('x', "X"), 'x') | pynini.cross(pynini.union('x', "X"), ' times'))
+ pynutil.insert("\"")
)
alpha_dash_decimal = (
pynutil.insert("units: \"")
+ pynini.closure(SV_ALPHA, 1)
+ pynini.accep('-')
+ pynutil.insert("\"")
+ pynutil.insert(" decimal { ")
+ decimal.final_graph_wo_negative
+ pynutil.insert(" } preserve_order: true")
)
subgraph_fraction = (
pynutil.insert("fraction { ") + fraction.graph + delete_space + pynutil.insert(" } ") + unit_plural
)
math_operations = pynini.string_file(get_abs_path("data/math_operations.tsv"))
delimiter = pynini.accep(" ") | pynutil.insert(" ")
equals = pynini.cross("=", "är")
if not deterministic:
equals |= pynini.cross("=", "är lika med")
math = (
(cardinal_graph_ett | SV_ALPHA | greek)
+ delimiter
+ math_operations
+ (delimiter | SV_ALPHA)
+ cardinal_graph_ett
+ delimiter
+ equals
+ delimiter
+ (cardinal_graph_ett | SV_ALPHA | greek)
)
math |= (
(cardinal_graph_ett | SV_ALPHA | greek)
+ delimiter
+ equals
+ delimiter
+ (cardinal_graph_ett | SV_ALPHA)
+ delimiter
+ math_operations
+ delimiter
+ cardinal_graph_ett
)
math = (
pynutil.insert("units: \"math\" cardinal { integer: \"")
+ math
+ pynutil.insert("\" } preserve_order: true")
)
final_graph = (
subgraph_decimal
| subgraph_cardinal
| unit_graph
| decimal_dash_alpha
| decimal_times
| alpha_dash_decimal
| subgraph_fraction
| math
)
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
def get_range(self, cardinal: GraphFst):
"""
Returns range forms for measure tagger, e.g. 2-3, 2x3, 2*2
Args:
cardinal: cardinal GraphFst
"""
range_graph = cardinal + pynini.cross(pynini.union("-", " - "), " till ") + cardinal
for x in [" x ", "x"]:
range_graph |= cardinal + pynini.cross(x, " gånger ") + cardinal
for x in ["*", " * "]:
range_graph |= cardinal + pynini.cross(x, " gånger ") + cardinal
return range_graph.optimize()
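# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# A minimal way to exercise MeasureFst on its own, assuming this file is run from a
# NeMo-text-processing checkout so the data/*.tsv files above resolve. The exact
# tagged output (e.g. measure { cardinal { integer: "tolv" } units: "kilogram" })
# depends on those data files, hence the try/except instead of hard assertions.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.sv.taggers.decimal import DecimalFst
    from nemo_text_processing.text_normalization.sv.taggers.fraction import FractionFst
    from nemo_text_processing.text_normalization.sv.taggers.ordinal import OrdinalFst

    _cardinal = CardinalFst(deterministic=True)
    _ordinal = OrdinalFst(cardinal=_cardinal, deterministic=True)
    _decimal = DecimalFst(cardinal=_cardinal, deterministic=True)
    _fraction = FractionFst(cardinal=_cardinal, ordinal=_ordinal, deterministic=True)
    _measure = MeasureFst(cardinal=_cardinal, decimal=_decimal, fraction=_fraction, deterministic=True)
    for _text in ["-12 kg", "1 kg", ",5 kg"]:
        try:
            # lowest-weight tagging of the raw token
            print(_text, "->", pynini.shortestpath(_text @ _measure.fst).string())
        except pynini.FstOpError:
            print(_text, "-> no parse with the current grammar data")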
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/measure.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2022, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from nemo_text_processing.text_normalization.sv.graph_utils import ensure_space
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
class FractionFst(GraphFst):
"""
Finite state transducer for classifying fraction
"23 4/5" ->
tokens { fraction { integer_part: "tjugotre" numerator: "fyra" denominator: "femtedel" } }
# en åttondel (1/8)
Args:
cardinal: CardinalFst
ordinal: OrdinalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True):
super().__init__(name="fraction", kind="classify", deterministic=deterministic)
cardinal_graph = cardinal.graph
ordinal_graph = ordinal.graph
numerator_graph = cardinal.graph_en
fractional_endings = pynini.string_map(
[
("ljarte", "ljarddel"),
("tionde", "tiondel"),
("tonde", "tondel"),
("ljonte", "ljondel"),
("lliarte", "lliarddel"),
("llionte", "lliondel"),
("tusende", "tusendel"),
("te", "tedel"),
("de", "dedel"),
("je", "jedel"),
("drade", "dradel"),
("a", "adel"),
]
)
alt_fractional_endings = pynini.string_map([("tondel", "tondedel"), ("tiondel", "tiondedel")])
lexicalised = pynini.string_map([("andradel", "halv"), ("fjärdedel", "kvart")])
alt_lexicalised = pynini.string_map([("halv", "andradel"), ("kvart", "fjärdedel"), ("kvart", "kvarts")])
fractions = (
ordinal_graph
@ pynini.cdrewrite(fractional_endings, "", "[EOS]", NEMO_SIGMA)
@ pynini.cdrewrite(lexicalised, "[BOS]", "[EOS]", NEMO_SIGMA)
)
fractions_alt = (
fractions
@ pynini.cdrewrite(alt_fractional_endings, "", "[EOS]", NEMO_SIGMA)
@ pynini.cdrewrite(alt_lexicalised, "[BOS]", "[EOS]", NEMO_SIGMA)
)
if not deterministic:
fractions |= fractions_alt
self.fractions = fractions
fractional_pl_endings = pynini.string_map([("kvart", "kvartar"), ("halv", "halva"), ("del", "delar")])
fractions_pl = fractions @ pynini.cdrewrite(fractional_pl_endings, "", "[EOS]", NEMO_SIGMA)
self.fractional_plural_endings = fractional_pl_endings
self.fractions_plural = fractions_pl
self.fractions_any = self.fractions | self.fractions_plural
integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
numerator = (
pynutil.insert("numerator: \"") + numerator_graph + (pynini.cross("/", "\" ") | pynini.cross(" / ", "\" "))
)
denominator = pynutil.insert("denominator: \"") + fractions + pynutil.insert("\"")
graph = pynini.closure(integer + pynini.accep(" "), 0, 1) + (numerator + denominator)
graph |= pynini.closure(integer + ensure_space, 0, 1) + pynini.compose(
pynini.string_file(get_abs_path("data/numbers/fraction.tsv")), (numerator + denominator)
)
self.graph = graph
final_graph = self.add_tokens(self.graph)
self.fst = final_graph.optimize()
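# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Assumes a NeMo-text-processing checkout so the ordinal/cardinal data files resolve;
# "23 4/5" is expected to tag roughly as
# fraction { integer_part: "tjugotre" numerator: "fyra" denominator: "femtedel" }.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.sv.taggers.ordinal import OrdinalFst

    _cardinal = CardinalFst(deterministic=True)
    _ordinal = OrdinalFst(cardinal=_cardinal, deterministic=True)
    _fraction = FractionFst(cardinal=_cardinal, ordinal=_ordinal, deterministic=True)
    for _text in ["23 4/5", "1/8"]:
        try:
            print(_text, "->", pynini.shortestpath(_text @ _fraction.fst).string())
        except pynini.FstOpError:
            print(_text, "-> no parse with the current grammar data")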
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/fraction.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SPACE,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import ensure_space
from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
"""
Finite state transducer for classifying telephone numbers, e.g. "tfn. 08-789 52 25",
123-123-5678 -> { number_part: "ett två tre ett två tre fem sex sju åtta" }.
Swedish numbers are written in the following formats:
0X-XXX XXX XX
0X-XXX XX XX
0X-XX XX XX
0XX-XXX XX XX
0XX-XX XX XX
0XX-XXX XX
0XXX-XX XX XX
0XXX-XXX XX
See:
https://en.wikipedia.org/wiki/National_conventions_for_writing_telephone_numbers#Sweden
https://codegolf.stackexchange.com/questions/195787/format-a-swedish-phone-number
Args:
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="telephone", kind="classify", deterministic=deterministic)
cardinal = CardinalFst(deterministic)
add_separator = pynutil.insert(", ")
zero_space = cardinal.zero_space
digit = cardinal.digit
two_digits = cardinal.two_digits_read
three_digits = cardinal.three_digits_read
two_or_three_digits = (two_digits | three_digits).optimize()
one_two_or_three_digits = (digit | two_or_three_digits).optimize()
zero_after_country_code = pynini.union(pynini.cross("(0)", "noll "), zero_space)
bracketed = pynutil.delete("(") + one_two_or_three_digits + pynutil.delete(")")
zero = pynini.cross("0", "noll")
digit |= zero
special_numbers = pynini.string_file(get_abs_path("data/telephone/special_numbers.tsv"))
passable = pynini.union(":", ": ", " ")
prompt_pass = pynini.closure(pynutil.delete(passable) + insert_space, 0, 1)
telephone_abbr = pynini.string_file(get_abs_path("data/telephone/telephone_abbr.tsv"))
telephone_abbr = telephone_abbr + prompt_pass
telephone_prompt = pynini.string_file(get_abs_path("data/telephone/telephone_prompt.tsv"))
prompt_as_code = pynutil.insert("country_code: \"") + telephone_prompt + pynutil.insert("\"")
prompt_as_code |= pynutil.insert("country_code: \"") + telephone_abbr + pynutil.insert("\"")
prompt_as_code |= (
pynutil.insert("country_code: \"") + telephone_prompt + NEMO_SPACE + telephone_abbr + pynutil.insert("\"")
)
prompt_inner = telephone_prompt | telephone_abbr
prompt_inner |= telephone_prompt + NEMO_SPACE + telephone_abbr
country = pynini.closure(pynini.cross("+", "plus "), 0, 1) + one_two_or_three_digits
country_code = pynutil.insert("country_code: \"") + country + pynutil.insert("\"")
country_code |= prompt_as_code
country_code |= pynutil.insert("country_code: \"") + prompt_inner + NEMO_SPACE + country + pynutil.insert("\"")
opt_dash = pynini.closure(pynutil.delete("-"), 0, 1)
area_part = zero_after_country_code + one_two_or_three_digits + opt_dash + add_separator
area_part |= bracketed + add_separator
base_number_part = pynini.union(
three_digits + NEMO_SPACE + three_digits + NEMO_SPACE + two_digits,
three_digits + NEMO_SPACE + two_digits + NEMO_SPACE + two_digits,
three_digits + NEMO_SPACE + two_digits + insert_space + two_digits,
two_digits + NEMO_SPACE + two_digits + NEMO_SPACE + two_digits,
two_digits + NEMO_SPACE + two_digits + insert_space + two_digits,
three_digits + NEMO_SPACE + two_digits,
)
number_part = area_part + delete_space + base_number_part
self.number_graph = number_part
number_part = pynutil.insert("number_part: \"") + number_part + pynutil.insert("\"")
extension = pynutil.insert("extension: \"") + one_two_or_three_digits + pynutil.insert("\"")
extension = pynini.closure(insert_space + extension, 0, 1)
ext_prompt = NEMO_SPACE + pynutil.delete(pynini.union("ankn", "ankn.", "anknytning")) + ensure_space
passable = pynini.union(":", ": ", " ")
prompt_pass = pynutil.delete(passable) + insert_space
special_numbers = pynutil.insert("number_part: \"") + special_numbers + pynutil.insert("\"")
graph = pynini.union(
country_code + ensure_space + number_part,
country_code + ensure_space + number_part + ext_prompt + extension,
number_part + ext_prompt + extension,
country_code + number_part,
country_code + special_numbers,
country_code + number_part + ext_prompt + extension,
)
self.tel_graph = graph.optimize()
# No need to be so exact here, but better for ITN to have it
three_digit_area_code_digit_two = pynini.union("1", "2", "3", "4", "7")
three_digit_area_code_no_zero = (three_digit_area_code_digit_two + NEMO_DIGIT) @ cardinal.two_digits_read
three_digit_area_code = zero_space + three_digit_area_code_no_zero
four_digit_area_code_digit_two = pynini.union("5", "6", "9")
four_digit_area_code_no_zero = (four_digit_area_code_digit_two + NEMO_DIGIT) @ cardinal.three_digits_read
four_digit_area_code = zero_space + four_digit_area_code_no_zero
two_digit_area_code = "08" @ cardinal.two_digits_read
self.area_codes = two_digit_area_code | three_digit_area_code | four_digit_area_code
self.area_codes_no_zero = (
three_digit_area_code_no_zero | four_digit_area_code_no_zero | pynini.cross("8", "åtta")
)
country_code_lead = pynini.cross("+", "plus") | pynini.cross("00", "noll noll")
raw_country_codes = pynini.string_file(get_abs_path("data/telephone/country_codes.tsv"))
self.country_code = country_code_lead + insert_space + (raw_country_codes @ cardinal.any_read_digit)
self.country_plus_area_code = self.country_code + NEMO_SPACE + self.area_codes_no_zero
# ip
ip_prompts = pynini.string_file(get_abs_path("data/telephone/ip_prompt.tsv"))
ip_graph = one_two_or_three_digits + (pynini.cross(".", " punkt ") + one_two_or_three_digits) ** 3
graph |= (
pynini.closure(
pynutil.insert("country_code: \"") + ip_prompts + pynutil.insert("\"") + delete_extra_space, 0, 1
)
+ pynutil.insert("number_part: \"")
+ ip_graph.optimize()
+ pynutil.insert("\"")
)
# ssn
ssn_prompts = pynini.string_file(get_abs_path("data/telephone/ssn_prompt.tsv"))
four_digit_part = digit + (pynutil.insert(" ") + digit) ** 3
ssn_separator = pynini.cross("-", ", ")
ssn_graph = three_digits + ssn_separator + two_digits + ssn_separator + four_digit_part
graph |= (
pynini.closure(
pynutil.insert("country_code: \"") + ssn_prompts + pynutil.insert("\"") + delete_extra_space, 0, 1
)
+ pynutil.insert("number_part: \"")
+ ssn_graph.optimize()
+ pynutil.insert("\"")
)
final_graph = self.add_tokens(graph)
self.fst = final_graph.optimize()
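# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Whether a given string parses depends on the prompt/abbreviation data under
# data/telephone/, so nothing is asserted here; the sample input is taken from the
# class docstring above and is simply printed if the grammar accepts it.
if __name__ == "__main__":
    _telephone = TelephoneFst(deterministic=True)
    _text = "tfn. 08-789 52 25"
    try:
        print(_text, "->", pynini.shortestpath(_text @ _telephone.fst).string())
    except pynini.FstOpError:
        print(_text, "-> no parse with the current grammar data")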
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/telephone.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2022, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_ALPHA,
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.sv.taggers.cardinal import filter_punctuation, make_million
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
"""
Finite state transducer for classifying ordinal
"21:a" -> ordinal { integer: "tjugoförsta" }
Args:
cardinal: CardinalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, deterministic: bool = True):
super().__init__(name="ordinal", kind="classify")
digit = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/digit.tsv")))
teens = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/teen.tsv")))
ties = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/ties.tsv")))
zero = pynini.invert(pynini.string_file(get_abs_path("data/ordinals/zero.tsv")))
card_ties = pynini.invert(pynini.string_file(get_abs_path("data/numbers/ties.tsv")))
card_digit = pynini.invert(pynini.string_file(get_abs_path("data/numbers/digit.tsv")))
graph_digit = digit.optimize()
graph_teens = teens.optimize()
graph_ties = ties.optimize()
graph_card_ties = card_ties.optimize()
graph_card_digit = card_digit.optimize()
digits_no_one = (NEMO_DIGIT - "1") @ graph_card_digit
if not deterministic:
graph_ties |= pynini.cross("4", "förtionde")
graph_teens |= pynini.cross("18", "adertonde")
graph_tens_component = graph_teens | graph_card_ties + graph_digit | graph_ties + pynutil.delete('0')
self.graph_tens_component = graph_tens_component
graph_tens = graph_tens_component
digit_or_space = pynini.closure(NEMO_DIGIT | pynini.accep(" "))
cardinal_format = (NEMO_DIGIT - "0") + pynini.closure(digit_or_space + NEMO_DIGIT, 0, 1)
a_format = (
(pynini.closure(cardinal_format + (NEMO_DIGIT - "1"), 0, 1) + pynini.union("1", "2"))
| (NEMO_DIGIT - "1") + pynini.union("1", "2")
| pynini.union("1", "2")
) + pynutil.delete(pynini.union(":a", ":A"))
e_format = pynini.closure(
(NEMO_DIGIT - "1" - "2")
| (cardinal_format + "1" + NEMO_DIGIT)
| (cardinal_format + (NEMO_DIGIT - "1") + (NEMO_DIGIT - "1" - "2")),
1,
) + pynutil.delete(pynini.union(":e", ":E"))
suffixed_ordinal = a_format | e_format
self.suffixed_ordinal = suffixed_ordinal.optimize()
bare_hundreds = digits_no_one + pynini.cross("00", "hundrade")
bare_hundreds |= pynini.cross("100", "hundrade")
if not deterministic:
bare_hundreds |= pynini.cross("100", "etthundrade")
bare_hundreds |= pynini.cross("100", "ett hundrade")
bare_hundreds |= digit + pynutil.insert(NEMO_SPACE) + pynini.cross("00", "hundrade")
hundreds = digits_no_one + pynutil.insert("hundra")
hundreds |= pynini.cross("1", "hundra")
if not deterministic:
hundreds |= pynini.cross("1", "etthundra")
hundreds |= pynini.cross("1", "ett hundra")
hundreds |= digit + pynutil.insert(NEMO_SPACE) + pynutil.insert("hundra")
graph_hundreds = hundreds + pynini.union(graph_tens, (pynutil.delete("0") + graph_digit),)
if not deterministic:
graph_hundreds |= hundreds + pynini.union(
(graph_teens | pynutil.insert(NEMO_SPACE) + graph_teens), (pynini.cross("0", NEMO_SPACE) + graph_digit)
)
graph_hundreds |= bare_hundreds
graph_hundreds_component = pynini.union(graph_hundreds, pynutil.delete("0") + graph_tens)
graph_hundreds_component_at_least_one_non_zero_digit = graph_hundreds_component | (
pynutil.delete("00") + graph_digit
)
graph_hundreds_component_at_least_one_non_zero_digit_no_one = graph_hundreds_component | (
pynutil.delete("00") + digits_no_one
)
self.hundreds = graph_hundreds.optimize()
tusen = pynutil.insert("tusen")
if not deterministic:
tusen |= pynutil.insert(" tusen")
tusen |= pynutil.insert("ettusen")
tusen |= pynutil.insert(" ettusen")
tusen |= pynutil.insert("ett tusen")
tusen |= pynutil.insert(" ett tusen")
graph_thousands_component_at_least_one_non_zero_digit = pynini.union(
pynutil.delete("000") + graph_hundreds_component_at_least_one_non_zero_digit,
cardinal.graph_hundreds_component_at_least_one_non_zero_digit_no_one
+ tusen
+ ((insert_space + graph_hundreds_component_at_least_one_non_zero_digit) | pynutil.delete("000")),
pynini.cross("001", tusen)
+ ((insert_space + graph_hundreds_component_at_least_one_non_zero_digit) | pynutil.delete("000")),
)
self.graph_thousands_component_at_least_one_non_zero_digit = (
graph_thousands_component_at_least_one_non_zero_digit.optimize()
)
graph_thousands_component_at_least_one_non_zero_digit_no_one = pynini.union(
pynutil.delete("000") + graph_hundreds_component_at_least_one_non_zero_digit_no_one,
cardinal.graph_hundreds_component_at_least_one_non_zero_digit_no_one
+ tusen
+ ((insert_space + graph_hundreds_component_at_least_one_non_zero_digit) | pynutil.delete("000")),
pynini.cross("001", tusen)
+ ((insert_space + graph_hundreds_component_at_least_one_non_zero_digit) | pynutil.delete("000")),
)
self.graph_thousands_component_at_least_one_non_zero_digit_no_one = (
graph_thousands_component_at_least_one_non_zero_digit_no_one.optimize()
)
non_zero_no_one = cardinal.graph_hundreds_component_at_least_one_non_zero_digit_no_one
graph_million = make_million("miljon", non_zero_no_one, deterministic)
graph_milliard = make_million("miljard", non_zero_no_one, deterministic)
graph_billion = make_million("biljon", non_zero_no_one, deterministic)
graph_billiard = make_million("biljard", non_zero_no_one, deterministic)
graph_trillion = make_million("triljon", non_zero_no_one, deterministic)
graph_trilliard = make_million("triljard", non_zero_no_one, deterministic)
graph = (
graph_trilliard
+ graph_trillion
+ graph_billiard
+ graph_billion
+ graph_milliard
+ graph_million
+ (graph_thousands_component_at_least_one_non_zero_digit | pynutil.delete("000000"))
)
ordinal_endings = pynini.string_map(
[
("ljon", "ljonte"),
("ljoner", "ljonte"),
("llion", "llionte"),
("llioner", "llionte"),
("ljard", "ljarte"),
("ljarder", "ljarte"),
("lliard", "lliarte"),
("lliarder", "lliarte"),
("tusen", "tusende"),
]
)
self.graph = (
((NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 0))
@ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", NEMO_SIGMA)
@ NEMO_DIGIT ** 24
@ graph
@ pynini.cdrewrite(delete_space, "[BOS]", "", NEMO_SIGMA)
@ pynini.cdrewrite(delete_space, "", "[EOS]", NEMO_SIGMA)
@ pynini.cdrewrite(ordinal_endings, "", "[EOS]", NEMO_SIGMA)
@ pynini.cdrewrite(
pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 2), NEMO_SPACE), NEMO_ALPHA, NEMO_ALPHA, NEMO_SIGMA
)
)
cleaned_graph = self.graph
self.graph |= zero
self.graph = filter_punctuation(self.graph).optimize()
self.suffixed_to_words = self.suffixed_ordinal @ self.graph
self.bare_ordinals = cleaned_graph
kapitlet_word = pynini.union("kapitlet", pynini.cross("kap", "kapitlet"))
kapitlet = cleaned_graph + NEMO_SPACE + kapitlet_word
tok_graph = (
pynutil.insert("integer: \"")
+ (cleaned_graph + pynutil.delete(".") | self.suffixed_to_words | kapitlet)
+ pynutil.insert("\"")
)
final_graph = self.add_tokens(tok_graph)
self.fst = final_graph.optimize()
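# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Assumes a NeMo-text-processing checkout so data/ordinals/*.tsv and data/numbers/*.tsv
# resolve; "21:a" is expected to tag roughly as ordinal { integer: "tjugoförsta" }.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst

    _ordinal = OrdinalFst(cardinal=CardinalFst(deterministic=True), deterministic=True)
    for _text in ["21:a", "3:e"]:
        try:
            print(_text, "->", pynini.shortestpath(_text @ _ordinal.fst).string())
        except pynini.FstOpError:
            print(_text, "-> no parse with the current grammar data")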
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/ordinal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, insert_space
from nemo_text_processing.text_normalization.sv.graph_utils import SV_UPPER
from pynini.lib import pynutil
class AbbreviationFst(GraphFst):
"""
Finite state transducer for classifying abbreviations,
e.g. "ABC" -> tokens { abbreviation { value: "A B C" } }
Args:
whitelist: whitelist FST
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, whitelist: 'pynini.FstLike', deterministic: bool = True):
super().__init__(name="abbreviation", kind="classify", deterministic=deterministic)
dot = pynini.accep(".")
# A.B.C. -> A. B. C.
graph = SV_UPPER + dot + pynini.closure(insert_space + SV_UPPER + dot, 1)
# A.B.C. -> A.B.C.
graph |= SV_UPPER + dot + pynini.closure(SV_UPPER + dot, 1)
# ABC -> A B C
graph |= SV_UPPER + pynini.closure(insert_space + SV_UPPER, 1)
# exclude words that are included in the whitelist
if whitelist is not None:
graph = pynini.compose(
pynini.difference(pynini.project(graph, "input"), pynini.project(whitelist.graph, "input")), graph
)
graph = pynutil.insert("value: \"") + graph.optimize() + pynutil.insert("\"")
graph = self.add_tokens(graph)
self.fst = graph.optimize()
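# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# AbbreviationFst needs no external data files; passing whitelist=None skips the
# whitelist filtering. "ABC" is expected to tag roughly as
# abbreviation { value: "A B C" }.
if __name__ == "__main__":
    _abbr = AbbreviationFst(whitelist=None, deterministic=False)
    for _text in ["ABC", "A.B.C."]:
        try:
            print(_text, "->", pynini.shortestpath(_text @ _abbr.fst).string())
        except pynini.FstOpError:
            print(_text, "-> no parse")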
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/abbreviation.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, convert_space
from nemo_text_processing.text_normalization.sv.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class WhiteListFst(GraphFst):
"""
Finite state transducer for classifying whitelist, e.g.
"s:t" -> tokens { name: "sankt" }
This class has the highest priority among all classifier grammars. Whitelisted tokens are defined and loaded from "data/whitelist.tsv".
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
input_file: path to a file with whitelist replacements
"""
def __init__(self, input_case: str, deterministic: bool = True, input_file: str = None):
super().__init__(name="whitelist", kind="classify", deterministic=deterministic)
def _get_whitelist_graph(input_case, file):
whitelist = load_labels(file)
if input_case == "lower_cased":
whitelist = [[x[0].lower()] + x[1:] for x in whitelist]
graph = pynini.string_map(whitelist)
return graph
graph = _get_whitelist_graph(input_case, get_abs_path("data/whitelist.tsv"))
if not deterministic and input_case != "lower_cased":
graph |= pynutil.add_weight(
_get_whitelist_graph("lower_cased", get_abs_path("data/whitelist.tsv")), weight=0.0001
)
if input_file:
whitelist_provided = _get_whitelist_graph(input_case, input_file)
if not deterministic:
graph |= whitelist_provided
else:
graph = whitelist_provided
if not deterministic:
units_graph = _get_whitelist_graph(input_case, file=get_abs_path("data/measure/unit.tsv"))
units_graph |= _get_whitelist_graph(input_case, file=get_abs_path("data/measure/unit_neuter.tsv"))
units_graph |= _get_whitelist_graph(input_case, file=get_abs_path("data/abbreviations_nondet.tsv"))
graph |= units_graph
self.graph = graph
self.final_graph = convert_space(self.graph).optimize()
self.fst = (pynutil.insert("name: \"") + self.final_graph + pynutil.insert("\"")).optimize()
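# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Assumes a NeMo-text-processing checkout so data/whitelist.tsv resolves; "s:t" is
# expected to map roughly to name: "sankt" (the surrounding tokens { } wrapper is
# added later by the tokenize_and_classify grammar).
if __name__ == "__main__":
    _whitelist = WhiteListFst(input_case="cased", deterministic=True)
    _text = "s:t"
    try:
        print(_text, "->", pynini.shortestpath(_text @ _whitelist.fst).string())
    except pynini.FstOpError:
        print(_text, "-> not covered by the whitelist data")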
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/whitelist.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import time
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.sv.taggers.abbreviation import AbbreviationFst
from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.sv.taggers.date import DateFst
from nemo_text_processing.text_normalization.sv.taggers.decimal import DecimalFst
from nemo_text_processing.text_normalization.sv.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.sv.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.sv.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.sv.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.sv.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.sv.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.sv.taggers.time import TimeFst
from nemo_text_processing.text_normalization.sv.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.sv.taggers.word import WordFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
"""
Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details to deployment at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str,
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = False,
whitelist: str = None,
):
super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
whitelist_file = os.path.basename(whitelist) if whitelist else ""
far_file = os.path.join(
cache_dir, f"sv_tn_{deterministic}_deterministic_{input_case}_{whitelist_file}_tokenize.far"
)
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
logging.info(f'ClassifyFst.fst was restored from {far_file}.')
else:
logging.info(f"Creating ClassifyFst grammars.")
start_time = time.time()
cardinal = CardinalFst(deterministic=deterministic)
cardinal_graph = cardinal.fst
logging.debug(f"cardinal: {time.time() - start_time: .2f}s -- {cardinal_graph.num_states()} nodes")
start_time = time.time()
ordinal = OrdinalFst(cardinal=cardinal, deterministic=deterministic)
ordinal_graph = ordinal.fst
logging.debug(f"ordinal: {time.time() - start_time: .2f}s -- {ordinal_graph.num_states()} nodes")
start_time = time.time()
decimal = DecimalFst(cardinal=cardinal, deterministic=deterministic)
decimal_graph = decimal.fst
logging.debug(f"decimal: {time.time() - start_time: .2f}s -- {decimal_graph.num_states()} nodes")
start_time = time.time()
fraction = FractionFst(deterministic=deterministic, ordinal=ordinal, cardinal=cardinal)
fraction_graph = fraction.fst
logging.debug(f"fraction: {time.time() - start_time: .2f}s -- {fraction_graph.num_states()} nodes")
start_time = time.time()
measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)
measure_graph = measure.fst
logging.debug(f"measure: {time.time() - start_time: .2f}s -- {measure_graph.num_states()} nodes")
start_time = time.time()
date_graph = DateFst(cardinal=cardinal, ordinal=ordinal, deterministic=deterministic).fst
logging.debug(f"date: {time.time() - start_time: .2f}s -- {date_graph.num_states()} nodes")
start_time = time.time()
time_graph = TimeFst(cardinal=cardinal, deterministic=deterministic).fst
logging.debug(f"time: {time.time() - start_time: .2f}s -- {time_graph.num_states()} nodes")
start_time = time.time()
telephone_graph = TelephoneFst(deterministic=deterministic).fst
logging.debug(f"telephone: {time.time() - start_time: .2f}s -- {telephone_graph.num_states()} nodes")
start_time = time.time()
electronic_graph = ElectronicFst(deterministic=deterministic).fst
logging.debug(f"electronic: {time.time() - start_time: .2f}s -- {electronic_graph.num_states()} nodes")
start_time = time.time()
money_graph = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=deterministic).fst
logging.debug(f"money: {time.time() - start_time: .2f}s -- {money_graph.num_states()} nodes")
start_time = time.time()
whitelist_graph = WhiteListFst(
input_case=input_case, deterministic=deterministic, input_file=whitelist
).fst
logging.debug(f"whitelist: {time.time() - start_time: .2f}s -- {whitelist_graph.num_states()} nodes")
start_time = time.time()
punctuation = PunctuationFst(deterministic=deterministic)
punct_graph = punctuation.fst
logging.debug(f"punct: {time.time() - start_time: .2f}s -- {punct_graph.num_states()} nodes")
start_time = time.time()
word_graph = WordFst(deterministic=deterministic).fst
logging.debug(f"word: {time.time() - start_time: .2f}s -- {word_graph.num_states()} nodes")
classify = (
pynutil.add_weight(whitelist_graph, 1.01)
| pynutil.add_weight(time_graph, 1.1)
| pynutil.add_weight(date_graph, 1.09)
| pynutil.add_weight(decimal_graph, 1.1)
| pynutil.add_weight(measure_graph, 1.1)
| pynutil.add_weight(cardinal_graph, 1.1)
| pynutil.add_weight(ordinal_graph, 1.1)
| pynutil.add_weight(money_graph, 1.1)
| pynutil.add_weight(telephone_graph, 1.1)
| pynutil.add_weight(electronic_graph, 1.1)
| pynutil.add_weight(fraction_graph, 1.1)
)
if not deterministic:
abbreviation_graph = AbbreviationFst(whitelist, deterministic=deterministic).fst
classify |= pynutil.add_weight(abbreviation_graph, 100)
punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=2.1) + pynutil.insert(" }")
punct = pynini.closure(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct),
1,
)
classify |= pynutil.add_weight(word_graph, 100)
token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
)
graph = token_plus_punct + pynini.closure(
(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct + pynutil.insert(" "))
)
+ token_plus_punct
)
graph = delete_space + graph + delete_space
graph |= punct
self.fst = graph.optimize()
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f"ClassifyFst grammars are saved to {far_file}.")
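# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Builds the full Swedish classifier and tags a short sentence. Without cache_dir the
# grammar is compiled from scratch, which can take a while; the tagged token stream
# printed here is what the verbalizer grammars consume downstream.
if __name__ == "__main__":
    _classify = ClassifyFst(input_case="cased", deterministic=True)
    _text = "det kostar 100 kronor"
    try:
        print(pynini.shortestpath(_text @ _classify.fst).string())
    except pynini.FstOpError:
        print(f"no parse for {_text!r}")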
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/tokenize_and_classify.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_SIGMA, GraphFst
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
def get_quantity(
decimal: 'pynini.FstLike',
decimal_ett: 'pynini.FstLike',
cardinal_up_to_thousand: 'pynini.FstLike',
cardinal_up_to_thousand_ett: 'pynini.FstLike',
include_abbr: bool,
itn: bool = False,
) -> 'pynini.FstLike':
"""
Returns FST that transforms either a cardinal or decimal followed by a quantity into a numeral,
e.g. 1 miljon -> integer_part: "en" quantity: "miljon"
e.g. 1,5 miljoner -> integer_part: "en" fractional_part: "fem" quantity: "miljoner"
Args:
decimal: decimal FST (common-gender, "en" forms)
decimal_ett: decimal FST (neuter, "ett" forms)
cardinal_up_to_thousand: cardinal FST for the multiplier (common-gender, "en" forms)
cardinal_up_to_thousand_ett: cardinal FST for the multiplier (neuter, "ett" forms)
include_abbr: whether to also accept abbreviated quantities (data/numbers/millions_abbr.tsv)
itn: if True, build the graph for inverse text normalization
"""
quantities = pynini.string_file(get_abs_path("data/numbers/millions.tsv"))
quantities_abbr = pynini.string_file(get_abs_path("data/numbers/millions_abbr.tsv"))
quantities_pl = quantities + "er"
quantities_pl |= quantities @ pynini.cdrewrite(pynini.cross("", "er"), "", "[EOS]", NEMO_SIGMA)
if include_abbr or not itn:
quantity = quantities | quantities_abbr
quantities_pl |= quantities_abbr + pynutil.insert("er")
else:
quantity = quantities
one_en = pynini.cross("1", "en")
one_ett = pynini.cross("1", "ett")
if itn:
# accept both here, even if wrong
one_en = pynini.cross("en", "1")
one_en |= pynini.cross("ett", "1")
res = (
pynutil.insert("integer_part: \"")
+ cardinal_up_to_thousand
+ pynutil.insert("\"")
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynutil.insert(" quantity: \"")
+ quantities_pl
+ pynutil.insert("\"")
)
if not itn:
res |= (
pynutil.insert("integer_part: \"")
+ cardinal_up_to_thousand_ett
+ pynutil.insert("\"")
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynutil.insert(" quantity: \"")
+ "tusen"
+ pynutil.insert("\"")
)
res |= (
pynutil.insert("integer_part: \"")
+ one_ett
+ pynutil.insert("\"")
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynutil.insert(" quantity: \"")
+ "tusen"
+ pynutil.insert("\"")
)
res |= (
pynutil.insert("integer_part: \"")
+ one_en
+ pynutil.insert("\"")
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynutil.insert(" quantity: \"")
+ quantity
+ pynutil.insert("\"")
)
res |= (
decimal
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynutil.insert(" quantity: \"")
+ quantities_pl
+ pynutil.insert("\"")
)
if not itn:
res |= (
decimal_ett
+ pynini.closure(pynutil.delete(" "), 0, 1)
+ pynutil.insert(" quantity: \"")
+ "tusen"
+ pynutil.insert("\"")
)
return res
class DecimalFst(GraphFst):
"""
Finite state transducer for classifying decimal, e.g.
-12,5006 biljon -> decimal { negative: "true" integer_part: "tolv" fractional_part: "fem noll noll sex" quantity: "biljon" }
1 biljon -> decimal { integer_part: "en" quantity: "biljon" }
Args:
cardinal: CardinalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, deterministic: bool):
super().__init__(name="decimal", kind="classify", deterministic=deterministic)
cardinal_graph = cardinal.graph
cardinal_graph_en = cardinal.graph_en
cardinal_graph_hundreds_one_non_zero = cardinal.graph_hundreds_component_at_least_one_non_zero_digit_no_one
cardinal_graph_hundreds_one_non_zero_en = (
cardinal.graph_hundreds_component_at_least_one_non_zero_digit_no_one_en
)
self.cardinal_graph_hundreds_one_non_zero_en = cardinal_graph_hundreds_one_non_zero_en
self.cardinal_graph_hundreds_one_non_zero = cardinal_graph_hundreds_one_non_zero
self.graph = cardinal.two_or_three_digits_read_frac
self.graph_itn = pynini.invert(cardinal.two_or_three_digits_read_frac_both).optimize()
if not deterministic:
self.graph |= cardinal.single_digits_graph.optimize()
self.graph |= cardinal_graph
point = pynutil.delete(",")
optional_graph_negative = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
self.graph_fractional = pynutil.insert("fractional_part: \"") + self.graph + pynutil.insert("\"")
self.graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
self.graph_integer_en = pynutil.insert("integer_part: \"") + cardinal_graph_en + pynutil.insert("\"")
final_graph_wo_sign = (
pynini.closure(self.graph_integer + pynutil.insert(" "), 0, 1)
+ point
+ pynutil.insert(" ")
+ self.graph_fractional
)
self.final_graph_wo_sign = final_graph_wo_sign
final_graph_wo_sign_en = (
pynini.closure(self.graph_integer_en + pynutil.insert(" "), 0, 1)
+ point
+ pynutil.insert(" ")
+ self.graph_fractional
)
self.final_graph_wo_sign_en = final_graph_wo_sign_en
quantity_w_abbr = get_quantity(
final_graph_wo_sign_en,
final_graph_wo_sign,
cardinal_graph_hundreds_one_non_zero_en,
cardinal_graph_hundreds_one_non_zero,
include_abbr=True,
)
quantity_wo_abbr = get_quantity(
final_graph_wo_sign_en,
final_graph_wo_sign,
cardinal_graph_hundreds_one_non_zero_en,
cardinal_graph_hundreds_one_non_zero,
include_abbr=False,
)
self.final_graph_wo_negative_w_abbr = final_graph_wo_sign | quantity_w_abbr
self.final_graph_wo_negative_w_abbr_en = final_graph_wo_sign_en | quantity_w_abbr
self.final_graph_wo_negative = final_graph_wo_sign | quantity_wo_abbr
self.final_graph_wo_negative_en = final_graph_wo_sign_en | quantity_wo_abbr
final_graph = optional_graph_negative + self.final_graph_wo_negative_w_abbr
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
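# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Assumes a NeMo-text-processing checkout; "-12,5006" is expected to tag roughly as
# decimal { negative: "true" integer_part: "tolv" fractional_part: ... }, with the
# exact fractional reading depending on the cardinal grammar's data files.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst

    _decimal = DecimalFst(cardinal=CardinalFst(deterministic=True), deterministic=True)
    for _text in ["-12,5006", "1,5 miljoner"]:
        try:
            print(_text, "->", pynini.shortestpath(_text @ _decimal.fst).string())
        except pynini.FstOpError:
            print(_text, "-> no parse with the current grammar data")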
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/decimal.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
GraphFst,
convert_space,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import SV_ALPHA, ensure_space
from nemo_text_processing.text_normalization.sv.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class MoneyFst(GraphFst):
"""
Finite state transducer for classifying money, suppletive aware, e.g.
$12,05 -> money { integer_part: "tolv" currency_maj: "dollar" fractional_part: "fem" currency_min: "cent" preserve_order: true }
$12,0500 -> money { integer_part: "tolv" currency_maj: "dollar" fractional_part: "fem" currency_min: "cent" preserve_order: true }
$1 -> money { currency_maj: "dollar" integer_part: "en" }
$1,00 -> money { currency_maj: "dollar" integer_part: "en" }
$0,05 -> money { fractional_part: "fem" currency_min: "cent" preserve_order: true }
$1 miljon -> money { currency_maj: "dollar" integer_part: "en" quantity: "miljon" }
$1,2 miljon -> money { currency_maj: "dollar" integer_part: "en" fractional_part: "två" quantity: "miljon" }
$1,2320 -> money { currency_maj: "dollar" integer_part: "en" fractional_part: "två tre två" }
Args:
cardinal: CardinalFst
decimal: DecimalFst
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, decimal: GraphFst, deterministic: bool = True):
super().__init__(name="money", kind="classify", deterministic=deterministic)
cardinal_graph = cardinal.graph_no_one_en
cardinal_graph_ett = cardinal.graph_no_one
graph_decimal_final = decimal.final_graph_wo_negative_w_abbr_en
graph_decimal_final_ett = decimal.final_graph_wo_negative_w_abbr
min_singular = pynini.string_file(get_abs_path("data/money/currency_minor_singular.tsv"))
min_plural = pynini.string_file(get_abs_path("data/money/currency_minor_plural.tsv"))
maj_singular = pynini.string_file(get_abs_path("data/money/currency_major.tsv"))
maj_singular_nt = pynini.string_file(get_abs_path("data/money/currency_major_nt.tsv"))
maj_plural = pynini.string_file(get_abs_path("data/money/currency_plurals.tsv"))
maj_singular_labels = load_labels(get_abs_path("data/money/currency_major.tsv"))
maj_singular_labels_nt = load_labels(get_abs_path("data/money/currency_major_nt.tsv"))
maj_unit_plural = convert_space(maj_singular @ maj_plural)
maj_unit_plural_nt = convert_space(maj_singular_nt @ maj_plural)
maj_unit_singular = convert_space(maj_singular)
maj_unit_singular_nt = convert_space(maj_singular_nt)
self.maj_unit_plural = maj_unit_plural
self.maj_unit_plural_nt = maj_unit_plural_nt
self.maj_unit_singular = maj_unit_singular
self.maj_unit_singular_nt = maj_unit_singular_nt
graph_maj_singular = pynutil.insert("currency_maj: \"") + maj_unit_singular + pynutil.insert("\"")
graph_maj_plural = pynutil.insert("currency_maj: \"") + maj_unit_plural + pynutil.insert("\"")
graph_maj_singular_nt = pynutil.insert("currency_maj: \"") + maj_unit_singular_nt + pynutil.insert("\"")
graph_maj_plural_nt = pynutil.insert("currency_maj: \"") + maj_unit_plural_nt + pynutil.insert("\"")
optional_delete_fractional_zeros = pynini.closure(
pynutil.delete(",") + pynini.closure(pynutil.delete("0"), 1), 0, 1
)
graph_integer_sg_en = pynutil.insert("integer_part: \"") + pynini.cross("1", "en") + pynutil.insert("\"")
graph_integer_sg_ett = pynutil.insert("integer_part: \"") + pynini.cross("1", "ett") + pynutil.insert("\"")
# only for decimals where third decimal after comma is non-zero or with quantity
decimal_delete_last_zeros = (
pynini.closure(NEMO_DIGIT | pynutil.delete(" "))
+ pynini.accep(",")
+ pynini.closure(NEMO_DIGIT, 2)
+ (NEMO_DIGIT - "0")
+ pynini.closure(pynutil.delete("0"))
)
decimal_with_quantity = NEMO_SIGMA + SV_ALPHA
decimal_part = (decimal_delete_last_zeros | decimal_with_quantity) @ graph_decimal_final
decimal_part_ett = (decimal_delete_last_zeros | decimal_with_quantity) @ graph_decimal_final_ett
graph_decimal = pynini.union(
graph_maj_plural + ensure_space + decimal_part,
graph_maj_plural_nt + ensure_space + decimal_part_ett,
decimal_part_ett + ensure_space + graph_maj_plural_nt,
decimal_part + ensure_space + graph_maj_plural,
)
graph_integer = pynutil.insert("integer_part: \"") + cardinal_graph + pynutil.insert("\"")
graph_integer_ett = pynutil.insert("integer_part: \"") + cardinal_graph_ett + pynutil.insert("\"")
graph_integer_only = graph_maj_singular + ensure_space + graph_integer_sg_en
graph_integer_only |= graph_maj_singular_nt + ensure_space + graph_integer_sg_ett
graph_integer_only |= graph_maj_plural + ensure_space + graph_integer
graph_integer_only |= graph_maj_plural_nt + ensure_space + graph_integer_ett
graph_integer_only |= graph_integer_sg_en + ensure_space + graph_maj_singular
graph_integer_only |= graph_integer_sg_ett + ensure_space + graph_maj_singular_nt
graph_integer_only |= graph_integer + ensure_space + graph_maj_plural
graph_integer_only |= graph_integer_ett + ensure_space + graph_maj_plural_nt
final_graph = (graph_integer_only + optional_delete_fractional_zeros) | graph_decimal
# remove trailing zeros of non zero number in the first 2 digits and fill up to 2 digits
# e.g. 2000 -> 20, 0200->02, 01 -> 01, 10 -> 10
# not accepted: 002, 00, 0,
two_digits_fractional_part = (
pynini.closure(NEMO_DIGIT) + (NEMO_DIGIT - "0") + pynini.closure(pynutil.delete("0"))
) @ (
(pynutil.delete("0") + (NEMO_DIGIT - "0"))
| ((NEMO_DIGIT - "0") + pynutil.insert("0"))
| ((NEMO_DIGIT - "0") + NEMO_DIGIT)
)
graph_min_singular = pynutil.insert(" currency_min: \"") + min_singular + pynutil.insert("\"")
graph_min_plural = pynutil.insert(" currency_min: \"") + min_plural + pynutil.insert("\"")
maj_singular_labels_all = [(x[0], "en") for x in maj_singular_labels]
maj_singular_labels_all += [(x[0], "ett") for x in maj_singular_labels_nt]
# format ** dollars ** cent
decimal_graph_with_minor = None
integer_graph_reordered = None
decimal_default_reordered = None
for curr_symbol, one_form in maj_singular_labels_all:
preserve_order = pynutil.insert(" preserve_order: true")
if one_form == "en":
integer_plus_maj = graph_integer + insert_space + (pynutil.insert(curr_symbol) @ graph_maj_plural)
integer_plus_maj |= (
graph_integer_sg_en + insert_space + (pynutil.insert(curr_symbol) @ graph_maj_singular)
)
else:
integer_plus_maj = (
graph_integer_ett + insert_space + (pynutil.insert(curr_symbol) @ graph_maj_plural_nt)
)
integer_plus_maj |= (
graph_integer_sg_ett + insert_space + (pynutil.insert(curr_symbol) @ graph_maj_singular_nt)
)
integer_plus_maj_with_comma = pynini.compose(
NEMO_DIGIT - "0" + pynini.closure(NEMO_DIGIT | delete_space), integer_plus_maj
)
integer_plus_maj = pynini.compose(pynini.closure(NEMO_DIGIT) - "0", integer_plus_maj)
integer_plus_maj |= integer_plus_maj_with_comma
# all of the minor currency units are "en"
graph_fractional_one = two_digits_fractional_part @ pynini.cross("1", "en")
graph_fractional_one = pynutil.insert("fractional_part: \"") + graph_fractional_one + pynutil.insert("\"")
graph_fractional = (
two_digits_fractional_part
@ (pynini.closure(NEMO_DIGIT, 1, 2) - "1")
@ cardinal.graph_hundreds_component_at_least_one_non_zero_digit_en
)
graph_fractional = pynutil.insert("fractional_part: \"") + graph_fractional + pynutil.insert("\"")
fractional_plus_min = graph_fractional + ensure_space + (pynutil.insert(curr_symbol) @ graph_min_plural)
fractional_plus_min |= (
graph_fractional_one + ensure_space + (pynutil.insert(curr_symbol) @ graph_min_singular)
)
decimal_graph_with_minor_curr = integer_plus_maj + pynini.cross(",", " ") + fractional_plus_min
if not deterministic:
decimal_graph_with_minor_curr |= pynutil.add_weight(
integer_plus_maj
+ pynini.cross(",", " ")
+ pynutil.insert("fractional_part: \"")
+ two_digits_fractional_part @ cardinal.graph_hundreds_component_at_least_one_non_zero_digit_en
+ pynutil.insert("\""),
weight=0.0001,
)
default_fraction_graph = (decimal_delete_last_zeros | decimal_with_quantity) @ graph_decimal_final
decimal_graph_with_minor_curr |= (
pynini.closure(pynutil.delete("0"), 0, 1) + pynutil.delete(",") + fractional_plus_min
)
decimal_graph_with_minor_curr = (
pynutil.delete(curr_symbol) + decimal_graph_with_minor_curr + preserve_order
)
decimal_graph_with_minor = (
decimal_graph_with_minor_curr
if decimal_graph_with_minor is None
else pynini.union(decimal_graph_with_minor, decimal_graph_with_minor_curr).optimize()
)
if not deterministic:
integer_graph_reordered_curr = (
pynutil.delete(curr_symbol) + integer_plus_maj + preserve_order
).optimize()
integer_graph_reordered = (
integer_graph_reordered_curr
if integer_graph_reordered is None
else pynini.union(integer_graph_reordered, integer_graph_reordered_curr).optimize()
)
decimal_default_reordered_curr = (
pynutil.delete(curr_symbol)
+ default_fraction_graph
+ ensure_space
+ pynutil.insert(curr_symbol) @ graph_maj_plural
)
decimal_default_reordered = (
decimal_default_reordered_curr
if decimal_default_reordered is None
else pynini.union(decimal_default_reordered, decimal_default_reordered_curr)
).optimize()
# weight for SH
final_graph |= pynutil.add_weight(decimal_graph_with_minor, -0.0001)
if not deterministic:
final_graph |= integer_graph_reordered | decimal_default_reordered
# to handle "$2.00" cases
final_graph |= pynini.compose(
NEMO_SIGMA + pynutil.delete(",") + pynini.closure(pynutil.delete("0"), 1), integer_graph_reordered
)
final_graph = self.add_tokens(final_graph.optimize())
self.fst = final_graph.optimize()
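# --- Hedged usage sketch (editor's addition, not part of the upstream module) ---
# Assumes a NeMo-text-processing checkout; whether "$12,05" parses exactly as in the
# class docstring depends on the currency data under data/money/, hence the
# try/except instead of a hard-coded expected string.
if __name__ == "__main__":
    from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
    from nemo_text_processing.text_normalization.sv.taggers.decimal import DecimalFst

    _cardinal = CardinalFst(deterministic=True)
    _decimal = DecimalFst(cardinal=_cardinal, deterministic=True)
    _money = MoneyFst(cardinal=_cardinal, decimal=_decimal, deterministic=True)
    for _text in ["$12,05", "100 kr"]:
        try:
            print(_text, "->", pynini.shortestpath(_text @ _money.fst).string())
        except pynini.FstOpError:
            print(_text, "-> no parse with the current grammar data")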
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/money.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2022, 2023 Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SIGMA,
NEMO_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import SV_ALPHA
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
def make_million(number: str, non_zero_no_one: 'pynini.FstLike', deterministic: bool = True) -> 'pynini.FstLike':
"""
Helper function for millions/milliards and higher
Args:
number: the string of the number
non_zero_no_one: An fst of digits excluding 0 and 1, to prefix to the number
deterministic: if True, generate a deterministic fst
Returns:
graph: A pynini.FstLike object
"""
old_orth = number.replace("lj", "lli")
graph = pynutil.add_weight(pynini.cross("001", number), -0.001)
if not deterministic:
graph |= pynutil.add_weight(pynini.cross("001", old_orth), -0.001)
# 'ett' is usually wrong for these numbers, but it occurs
for one in ["en", "ett"]:
graph |= pynutil.add_weight(pynini.cross("001", f"{one} {number}"), -0.001)
graph |= pynutil.add_weight(pynini.cross("001", f"{one} {old_orth}"), -0.001)
graph |= pynutil.add_weight(pynini.cross("001", f"{one}{number}"), -0.001)
graph |= pynutil.add_weight(pynini.cross("001", f"{one}{old_orth}"), -0.001)
graph |= non_zero_no_one + pynutil.insert(f" {number}er")
if not deterministic:
graph |= pynutil.add_weight(non_zero_no_one + pynutil.insert(f" {old_orth}er"), -0.001)
graph |= pynutil.add_weight(non_zero_no_one + pynutil.insert(f"{old_orth}er"), -0.001)
graph |= pynutil.delete("000")
graph += insert_space
return graph
def filter_punctuation(fst: 'pynini.FstLike') -> 'pynini.FstLike':
"""
Helper function for parsing number strings. Converts common cardinal strings (groups of three digits delimited by space)
into a plain string of digits:
"1 000" -> "1000"
Args:
fst: Any pynini.FstLike object. Function composes fst onto string parser fst
Returns:
fst: A pynini.FstLike object
"""
exactly_three_digits = NEMO_DIGIT ** 3 # for blocks of three
up_to_three_digits = pynini.closure(NEMO_DIGIT, 1, 3) # for start of string
cardinal_separator = NEMO_SPACE
cardinal_string = pynini.closure(
NEMO_DIGIT, 1
) # For string w/o punctuation (used for page numbers, thousand series)
cardinal_string |= (
up_to_three_digits
+ pynutil.delete(cardinal_separator)
+ pynini.closure(exactly_three_digits + pynutil.delete(cardinal_separator))
+ exactly_three_digits
)
return cardinal_string @ fst
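# --- Hedged illustration (editor's addition, not part of the upstream module) ---
# filter_punctuation strips the space used as a Swedish thousands separator before the
# wrapped FST sees the digits; composing it with a plain digit acceptor shows the
# effect without needing any data files.
if __name__ == "__main__":
    _digits_only = pynini.closure(NEMO_DIGIT)
    print(pynini.shortestpath("1 000" @ filter_punctuation(_digits_only)).string())  # expected: 1000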
class CardinalFst(GraphFst):
"""
Finite state transducer for classifying cardinals, e.g.
"1000" -> cardinal { integer: "tusen" }
"2 000 000" -> cardinal { integer: "två miljon" }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transduction are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="cardinal", kind="classify", deterministic=deterministic)
zero = pynini.invert(pynini.string_file(get_abs_path("data/numbers/zero.tsv")))
digit = pynini.invert(pynini.string_file(get_abs_path("data/numbers/digit.tsv")))
teen = pynini.invert(pynini.string_file(get_abs_path("data/numbers/teen.tsv")))
ties = pynini.invert(pynini.string_file(get_abs_path("data/numbers/ties.tsv")))
ett_to_en = pynini.cross("ett", "en")
ties_alt_endings = pynini.string_map([("go", "gi"), ("tio", "ti")])
# Any single digit
graph_digit = digit
digits_no_one = (NEMO_DIGIT - "1") @ graph_digit
if deterministic:
final_digit = digit
else:
final_digit = digit | pynini.cross("1", "en")
graph_digit = final_digit
self.digit = final_digit
single_digits_graph = graph_digit | zero
self.single_digits_graph = single_digits_graph + pynini.closure(insert_space + single_digits_graph)
# these shortened endings (e.g. "femtio" -> "femti") occur in speech, so they are useful for e2e ASR
alt_ties = ties @ pynini.cdrewrite(ties_alt_endings, "", "[EOS]", NEMO_SIGMA)
if not deterministic:
ties |= pynutil.add_weight(alt_ties, -0.001)
ties |= pynutil.add_weight(pynini.cross("4", "förtio"), -0.001)
ties |= pynutil.add_weight(pynini.cross("4", "förti"), -0.001)
ties |= pynutil.add_weight(pynini.cross("2", "tju"), -0.001)
# Any double digit
graph_tens = teen
graph_ties = ties
if deterministic:
graph_tens |= graph_ties + (pynutil.delete('0') | graph_digit)
else:
graph_tens |= pynutil.add_weight(pynini.cross("18", "aderton"), -0.001)
graph_tens |= pynutil.add_weight(
graph_ties + (pynutil.delete('0') | (graph_digit | insert_space + graph_digit)), -0.001
)
hundreds = digits_no_one + pynutil.insert("hundra")
hundreds |= pynini.cross("1", "hundra")
if not deterministic:
hundreds |= pynutil.add_weight(pynini.cross("1", "etthundra"), -0.001)
hundreds |= pynutil.add_weight(digit + pynutil.insert(NEMO_SPACE) + pynutil.insert("hundra"), -0.001)
self.tens = graph_tens.optimize()
graph_two_digit_non_zero = pynini.union(graph_digit, graph_tens, (pynutil.delete("0") + graph_digit))
if not deterministic:
graph_two_digit_non_zero |= pynutil.add_weight(
pynini.union(graph_digit, graph_tens, (pynini.cross("0", NEMO_SPACE) + graph_digit)), -0.001
)
self.two_digit_non_zero = graph_two_digit_non_zero.optimize()
graph_final_two_digit_non_zero = pynini.union(final_digit, graph_tens, (pynutil.delete("0") + final_digit))
if not deterministic:
graph_final_two_digit_non_zero |= pynutil.add_weight(
pynini.union(final_digit, graph_tens, (pynini.cross("0", NEMO_SPACE) + final_digit)), -0.001
)
self.final_two_digit_non_zero = graph_final_two_digit_non_zero.optimize()
# Three digit strings
graph_hundreds = hundreds + pynini.union(pynutil.delete("00"), graph_tens, (pynutil.delete("0") + final_digit))
if not deterministic:
graph_hundreds |= pynutil.add_weight(
hundreds
+ pynini.union(
pynutil.delete("00"),
(graph_tens | pynutil.insert(NEMO_SPACE) + graph_tens),
(pynini.cross("0", NEMO_SPACE) + final_digit),
),
-0.001,
)
self.hundreds = graph_hundreds.optimize()
# For all three digit strings with leading zeroes (graph appends '0's to manage place in string)
graph_hundreds_component = pynini.union(graph_hundreds, pynutil.delete("0") + graph_tens)
graph_hundreds_component_at_least_one_non_zero_digit = graph_hundreds_component | (
pynutil.delete("00") + graph_digit
)
graph_hundreds_component_at_least_one_non_zero_digit_no_one = graph_hundreds_component | (
pynutil.delete("00") + digits_no_one
)
self.graph_hundreds_component_at_least_one_non_zero_digit_no_one = (
graph_hundreds_component_at_least_one_non_zero_digit_no_one.optimize()
)
tusen = pynutil.insert("tusen")
etttusen = pynini.cross("001", "tusen")
if not deterministic:
tusen |= pynutil.add_weight(pynutil.insert(" tusen"), -0.001)
etttusen |= pynutil.add_weight(pynini.cross("001", " tusen"), -0.001)
etttusen |= pynutil.add_weight(pynini.cross("001", "etttusen"), -0.001)
etttusen |= pynutil.add_weight(pynini.cross("001", " etttusen"), -0.001)
etttusen |= pynutil.add_weight(pynini.cross("001", "ett tusen"), -0.001)
etttusen |= pynutil.add_weight(pynini.cross("001", " ett tusen"), -0.001)
following_hundred = insert_space + graph_hundreds_component_at_least_one_non_zero_digit
if not deterministic:
following_hundred |= graph_hundreds_component_at_least_one_non_zero_digit
graph_thousands_component_at_least_one_non_zero_digit = pynini.union(
pynutil.delete("000") + graph_hundreds_component_at_least_one_non_zero_digit,
graph_hundreds_component_at_least_one_non_zero_digit_no_one
+ tusen
+ (following_hundred | pynutil.delete("000")),
etttusen + (following_hundred | pynutil.delete("000")),
)
self.graph_thousands_component_at_least_one_non_zero_digit = (
graph_thousands_component_at_least_one_non_zero_digit.optimize()
)
graph_thousands_component_at_least_one_non_zero_digit_no_one = pynini.union(
pynutil.delete("000") + graph_hundreds_component_at_least_one_non_zero_digit_no_one,
graph_hundreds_component_at_least_one_non_zero_digit_no_one
+ tusen
+ (following_hundred | pynutil.delete("000")),
etttusen + (following_hundred | pynutil.delete("000")),
)
self.graph_thousands_component_at_least_one_non_zero_digit_no_one = (
graph_thousands_component_at_least_one_non_zero_digit_no_one.optimize()
)
non_zero_no_one = graph_hundreds_component_at_least_one_non_zero_digit_no_one
graph_million = make_million("miljon", non_zero_no_one, deterministic)
graph_milliard = make_million("miljard", non_zero_no_one, deterministic)
graph_billion = make_million("biljon", non_zero_no_one, deterministic)
graph_billiard = make_million("biljard", non_zero_no_one, deterministic)
graph_trillion = make_million("triljon", non_zero_no_one, deterministic)
graph_trilliard = make_million("triljard", non_zero_no_one, deterministic)
graph = (
graph_trilliard
+ graph_trillion
+ graph_billiard
+ graph_billion
+ graph_milliard
+ graph_million
+ (graph_thousands_component_at_least_one_non_zero_digit | pynutil.delete("000000"))
)
self.graph = (
((NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 0))
@ pynini.cdrewrite(pynini.closure(pynutil.insert("0")), "[BOS]", "", NEMO_SIGMA)
@ NEMO_DIGIT ** 24
@ graph
@ pynini.cdrewrite(delete_space, "[BOS]", "", NEMO_SIGMA)
@ pynini.cdrewrite(delete_space, "", "[EOS]", NEMO_SIGMA)
@ pynini.cdrewrite(
pynini.cross(pynini.closure(NEMO_WHITE_SPACE, 2), NEMO_SPACE), SV_ALPHA, SV_ALPHA, NEMO_SIGMA
)
)
self.graph_hundreds_component_at_least_one_non_zero_digit = (
pynini.closure(NEMO_DIGIT, 2, 3) | pynini.difference(NEMO_DIGIT, pynini.accep("0"))
) @ self.graph
self.graph_hundreds_component_at_least_one_non_zero_digit_en = (
self.graph_hundreds_component_at_least_one_non_zero_digit
@ pynini.cdrewrite(ett_to_en, "", "[EOS]", NEMO_SIGMA)
)
# For plurals, because the 'one' in 21, etc. still needs to agree
self.graph_hundreds_component_at_least_one_non_zero_digit_no_one = (
pynini.project(self.graph_hundreds_component_at_least_one_non_zero_digit, "input") - "1"
) @ self.graph_hundreds_component_at_least_one_non_zero_digit
self.graph_hundreds_component_at_least_one_non_zero_digit_no_one_en = (
pynini.project(self.graph_hundreds_component_at_least_one_non_zero_digit_en, "input") - "1"
) @ self.graph_hundreds_component_at_least_one_non_zero_digit_en
zero_space = zero + insert_space
self.zero_space = zero_space
self.three_digits_read = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one,
zero_space + ((NEMO_DIGIT ** 2) @ graph_tens),
zero_space + zero_space + digit,
)
self.three_digits_read_en = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one_en,
zero_space + ((NEMO_DIGIT ** 2) @ graph_tens),
zero_space + zero_space + digit,
)
self.three_digits_read_frac = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one,
zero_space + digit + insert_space + digit,
)
self.three_digits_read_frac_en = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one_en,
zero_space + digit + insert_space + digit,
)
self.two_or_three_digits_read_frac = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one,
((NEMO_DIGIT - "0") + NEMO_DIGIT) @ graph_tens,
zero_space + single_digits_graph + pynini.closure(insert_space + digit, 0, 1),
single_digits_graph + pynini.closure(insert_space + single_digits_graph, 3),
zero_space + zero_space + zero,
single_digits_graph,
)
self.two_or_three_digits_read_frac_en = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one_en,
((NEMO_DIGIT - "0") + NEMO_DIGIT) @ (graph_tens @ pynini.cdrewrite(ett_to_en, "", "[EOS]", NEMO_SIGMA)),
zero_space + single_digits_graph + pynini.closure(insert_space + single_digits_graph, 0, 1),
single_digits_graph + pynini.closure(insert_space + single_digits_graph, 3),
zero_space + zero_space + zero,
single_digits_graph,
)
self.two_or_three_digits_read_frac_both = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one,
((NEMO_DIGIT - "0") + (NEMO_DIGIT ** 2))
@ self.graph_hundreds_component_at_least_one_non_zero_digit_no_one_en,
((NEMO_DIGIT - "0") + NEMO_DIGIT) @ graph_tens,
((NEMO_DIGIT - "0") + NEMO_DIGIT) @ (graph_tens @ pynini.cdrewrite(ett_to_en, "", "[EOS]", NEMO_SIGMA)),
zero_space + single_digits_graph + pynini.closure(insert_space + digit, 0, 1),
zero_space + single_digits_graph + pynini.closure(insert_space + single_digits_graph, 0, 1),
single_digits_graph + pynini.closure(insert_space + single_digits_graph, 3),
zero_space + zero_space + zero,
single_digits_graph,
).optimize()
self.two_digits_read = pynini.union(((NEMO_DIGIT - "0") + NEMO_DIGIT) @ graph_tens, zero_space + digit)
self.two_digits_read_en = pynini.union(
((NEMO_DIGIT - "0") + NEMO_DIGIT) @ (graph_tens @ pynini.cdrewrite(ett_to_en, "", "[EOS]", NEMO_SIGMA)),
zero_space + digit,
)
self.any_read_digit = ((NEMO_DIGIT - "0") @ digit) + pynini.closure(insert_space + digit)
if not deterministic:
self.three_digits_read |= pynutil.add_weight(digit + insert_space + digit + insert_space + digit, -0.001)
self.three_digits_read |= pynutil.add_weight(
((NEMO_DIGIT - "0") + NEMO_DIGIT) @ graph_tens + insert_space + digit, -0.001
)
self.three_digits_read |= pynutil.add_weight(
digit + insert_space + ((NEMO_DIGIT - "0") + NEMO_DIGIT) @ graph_tens, -0.001
)
self.two_digits_read |= pynutil.add_weight(digit + insert_space + digit, -0.001)
self.any_read_digit |= self.two_digits_read
self.any_read_digit |= self.three_digits_read
self.graph |= zero
self.graph_unfiltered = self.graph
self.graph = filter_punctuation(self.graph).optimize()
self.graph_en = self.graph @ pynini.cdrewrite(ett_to_en, "", "[EOS]", NEMO_SIGMA)
self.graph_no_one = (pynini.project(self.graph, "input") - "1") @ self.graph
self.graph_no_one_en = (pynini.project(self.graph_en, "input") - "1") @ self.graph_en
joiner_chars = pynini.union("-", "–", "—")
joiner = pynini.cross(joiner_chars, " till ")
self.range = self.graph + joiner + self.graph
if not deterministic:
either_one = self.graph | self.graph_en
self.range = either_one + joiner + either_one
optional_minus_graph = pynini.closure(pynutil.insert("negative: ") + pynini.cross("-", "\"true\" "), 0, 1)
final_graph = optional_minus_graph + pynutil.insert("integer: \"") + self.graph + pynutil.insert("\"")
if not deterministic:
final_graph |= pynutil.add_weight(
optional_minus_graph + pynutil.insert("integer: \"") + self.graph_en + pynutil.insert("\""), -0.001
)
final_graph |= pynutil.add_weight(
pynutil.insert("integer: \"") + self.single_digits_graph + pynutil.insert("\""), -0.001
)
final_graph = self.add_tokens(final_graph)
self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/cardinal.py |
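A minimal usage sketch for the cardinal tagger above (not part of the repository; it assumes nemo_text_processing and pynini are importable, and the expected outputs follow the class docstring):
import pynini
from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
# Compiling the grammar takes a few seconds.
cardinal = CardinalFst(deterministic=True)
def read_number(text: str) -> str:
    # cardinal.graph maps a (possibly space-grouped) digit string to its Swedish reading.
    lattice = pynini.accep(text) @ cardinal.graph
    return pynini.shortestpath(lattice).string()
print(read_number("1000"))       # expected: "tusen"
print(read_number("2 000 000"))  # expected: "två miljoner"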
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_CHAR,
NEMO_DIGIT,
NEMO_NOT_SPACE,
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.en.verbalizers.abbreviation import AbbreviationFst as vAbbreviationFst
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst as vWordFst
from nemo_text_processing.text_normalization.sv.taggers.abbreviation import AbbreviationFst
from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.sv.taggers.date import DateFst
from nemo_text_processing.text_normalization.sv.taggers.decimal import DecimalFst
from nemo_text_processing.text_normalization.sv.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.sv.taggers.fraction import FractionFst
from nemo_text_processing.text_normalization.sv.taggers.measure import MeasureFst
from nemo_text_processing.text_normalization.sv.taggers.money import MoneyFst
from nemo_text_processing.text_normalization.sv.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.sv.taggers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.sv.taggers.time import TimeFst
from nemo_text_processing.text_normalization.sv.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.sv.taggers.word import WordFst
from nemo_text_processing.text_normalization.sv.verbalizers.cardinal import CardinalFst as vCardinalFst
from nemo_text_processing.text_normalization.sv.verbalizers.date import DateFst as vDateFst
from nemo_text_processing.text_normalization.sv.verbalizers.decimals import DecimalFst as vDecimalFst
from nemo_text_processing.text_normalization.sv.verbalizers.electronic import ElectronicFst as vElectronicFst
from nemo_text_processing.text_normalization.sv.verbalizers.fraction import FractionFst as vFractionFst
from nemo_text_processing.text_normalization.sv.verbalizers.measure import MeasureFst as vMeasureFst
from nemo_text_processing.text_normalization.sv.verbalizers.money import MoneyFst as vMoneyFst
from nemo_text_processing.text_normalization.sv.verbalizers.ordinal import OrdinalFst as vOrdinalFst
from nemo_text_processing.text_normalization.sv.verbalizers.telephone import TelephoneFst as vTelephoneFst
from nemo_text_processing.text_normalization.sv.verbalizers.time import TimeFst as vTimeFst
from pynini.lib import pynutil
class ClassifyFst(GraphFst):
"""
Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details on deployment can be found at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str,
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = True,
whitelist: str = None,
):
super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
whitelist_file = os.path.basename(whitelist) if whitelist else ""
far_file = os.path.join(
cache_dir, f"_{input_case}_sv_tn_{deterministic}_deterministic_{whitelist_file}.far"
)
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
logging.info(f'ClassifyFst.fst was restored from {far_file}.')
else:
cardinal = CardinalFst(deterministic=deterministic)
cardinal_graph = cardinal.fst
ordinal = OrdinalFst(cardinal=cardinal, deterministic=deterministic)
ordinal_graph = ordinal.fst
decimal = DecimalFst(cardinal=cardinal, deterministic=deterministic)
decimal_graph = decimal.fst
fraction = FractionFst(deterministic=deterministic, ordinal=ordinal, cardinal=cardinal)
fraction_graph = fraction.fst
measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)
measure_graph = measure.fst
date_graph = DateFst(cardinal=cardinal, ordinal=ordinal, deterministic=deterministic).fst
punctuation = PunctuationFst(deterministic=True)
punct_graph = punctuation.graph
time_graph = TimeFst(cardinal=cardinal, deterministic=deterministic).fst
word_graph = WordFst(deterministic=deterministic).graph
telephone_graph = TelephoneFst(deterministic=deterministic).fst
electronic_graph = ElectronicFst(deterministic=deterministic).fst
money_graph = MoneyFst(cardinal=cardinal, decimal=decimal, deterministic=deterministic).fst
whitelist = WhiteListFst(input_case=input_case, deterministic=deterministic, input_file=whitelist)
whitelist_graph = whitelist.graph
v_cardinal = vCardinalFst(deterministic=deterministic)
v_cardinal_graph = v_cardinal.fst
v_decimal = vDecimalFst(deterministic=deterministic)
v_decimal_graph = v_decimal.fst
v_ordinal = vOrdinalFst(deterministic=deterministic)
v_ordinal_graph = v_ordinal.fst
v_fraction = vFractionFst(deterministic=deterministic)
v_fraction_graph = v_fraction.fst
v_telephone_graph = vTelephoneFst(deterministic=deterministic).fst
v_electronic_graph = vElectronicFst(deterministic=deterministic).fst
v_measure = vMeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction, deterministic=deterministic)
v_measure_graph = v_measure.fst
v_time_graph = vTimeFst(deterministic=deterministic).fst
v_date_graph = vDateFst(deterministic=deterministic).fst
v_money_graph = vMoneyFst(decimal=decimal, deterministic=deterministic).fst
v_abbreviation = vAbbreviationFst(deterministic=deterministic).fst
v_word_graph = vWordFst(deterministic=deterministic).fst
sem_w = 1
word_w = 100
punct_w = 2
classify_and_verbalize = (
pynutil.add_weight(whitelist_graph, sem_w)
| pynutil.add_weight(pynini.compose(time_graph, v_time_graph), sem_w)
| pynutil.add_weight(pynini.compose(decimal_graph, v_decimal_graph), sem_w)
| pynutil.add_weight(pynini.compose(measure_graph, v_measure_graph), sem_w)
| pynutil.add_weight(pynini.compose(cardinal_graph, v_cardinal_graph), sem_w)
| pynutil.add_weight(pynini.compose(ordinal_graph, v_ordinal_graph), sem_w)
| pynutil.add_weight(pynini.compose(telephone_graph, v_telephone_graph), sem_w)
| pynutil.add_weight(pynini.compose(electronic_graph, v_electronic_graph), sem_w)
| pynutil.add_weight(pynini.compose(fraction_graph, v_fraction_graph), sem_w)
| pynutil.add_weight(pynini.compose(money_graph, v_money_graph), sem_w)
| pynutil.add_weight(word_graph, word_w)
| pynutil.add_weight(pynini.compose(date_graph, v_date_graph), sem_w - 0.01)
| pynutil.add_weight(v_word_graph, 1.1001)
).optimize()
if not deterministic:
abbreviation_graph = AbbreviationFst(whitelist=whitelist, deterministic=deterministic).fst
classify_and_verbalize |= pynutil.add_weight(
pynini.compose(abbreviation_graph, v_abbreviation), word_w
)
punct_only = pynutil.add_weight(punct_graph, weight=punct_w)
punct = pynini.closure(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct_only),
1,
)
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" "))
+ classify_and_verbalize
+ pynini.closure(pynutil.insert(" ") + punct)
)
graph = token_plus_punct + pynini.closure(
(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct + pynutil.insert(" "))
)
+ token_plus_punct
)
graph |= punct_only + pynini.closure(punct)
graph = delete_space + graph + delete_space
remove_extra_spaces = pynini.closure(NEMO_NOT_SPACE, 1) + pynini.closure(
delete_extra_space + pynini.closure(NEMO_NOT_SPACE, 1)
)
remove_extra_spaces |= (
pynini.closure(pynutil.delete(" "), 1)
+ pynini.closure(NEMO_NOT_SPACE, 1)
+ pynini.closure(delete_extra_space + pynini.closure(NEMO_NOT_SPACE, 1))
)
graph = pynini.compose(graph.optimize(), remove_extra_spaces).optimize()
self.fst = graph
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f'ClassifyFst grammars are saved to {far_file}.')
# to remove normalization options that still contain digits and some special symbols
# e.g., "P&E" -> {P and E, P&E}, "P & E" will be removed from the list of normalization options
no_digits = pynini.closure(pynini.difference(NEMO_CHAR, pynini.union(NEMO_DIGIT, "&")))
self.fst_no_digits = pynini.compose(self.fst, no_digits).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/tokenize_and_classify_with_audio.py |
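Because each tagger is composed with its verbalizer here, the resulting FST maps raw text directly to normalized text. A rough sketch of driving it by hand; the sentence and the expected output are illustrative, not taken from the repository:
import pynini
from nemo_text_processing.text_normalization.sv.taggers.tokenize_and_classify_with_audio import ClassifyFst
# deterministic=False is the intended mode for this audio-based grammar; compilation is slow.
classify = ClassifyFst(input_case="cased", deterministic=False, cache_dir=None, overwrite_cache=False)
sentence = "han läste 100 böcker"
lattice = pynini.accep(sentence) @ classify.fst
# The shortest path is the lowest-weight normalization option.
print(pynini.shortestpath(lattice).string())  # expected roughly: "han läste hundra böcker"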
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_ALPHA, NEMO_DIGIT, GraphFst, insert_space
from nemo_text_processing.text_normalization.sv.utils import get_abs_path, load_labels
from pynini.lib import pynutil
class ElectronicFst(GraphFst):
"""
Finite state transducer for classifying electronic: email addresses and URLs
e.g. "[email protected]" -> electronic { username: "abc" domain: "hotmail.com" preserve_order: true }
e.g. "www.abc.com/123" -> electronic { protocol: "www." domain: "abc.com/123" preserve_order: true }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="electronic", kind="classify", deterministic=deterministic)
common_domains = [x[0] for x in load_labels(get_abs_path("data/electronic/domain.tsv"))]
symbols = [x[0] for x in load_labels(get_abs_path("data/electronic/symbols.tsv"))]
dot = pynini.accep(".")
accepted_common_domains = pynini.union(*common_domains)
accepted_symbols = pynini.union(*symbols) - dot
accepted_characters = pynini.closure(NEMO_ALPHA | NEMO_DIGIT | accepted_symbols)
accepted_characters_with_dot = pynini.closure(NEMO_ALPHA | NEMO_DIGIT | accepted_symbols | dot)
# email
username = (
pynutil.insert("username: \"")
+ accepted_characters_with_dot
+ pynutil.insert("\"")
+ pynini.cross('@', ' ')
)
domain_graph = accepted_characters + dot + accepted_characters
domain_graph = pynutil.insert("domain: \"") + domain_graph + pynutil.insert("\"")
domain_common_graph = (
pynutil.insert("domain: \"")
+ accepted_characters
+ accepted_common_domains
+ pynini.closure((accepted_symbols | dot) + pynini.closure(accepted_characters, 1), 0, 1)
+ pynutil.insert("\"")
)
graph = (username + domain_graph) | domain_common_graph
# url
protocol_start = pynini.accep("https://") | pynini.accep("http://")
protocol_end = (
pynini.accep("www.")
if deterministic
else pynini.accep("www.") | pynini.cross("www.", "dubbel ve dubbel ve dubbel ve.")
)
protocol = protocol_start | protocol_end | (protocol_start + protocol_end)
protocol = pynutil.insert("protocol: \"") + protocol + pynutil.insert("\"")
graph |= protocol + insert_space + (domain_graph | domain_common_graph)
self.graph = graph
final_graph = self.add_tokens(self.graph + pynutil.insert(" preserve_order: true"))
self.fst = final_graph.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/electronic.py |
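A small sketch of running the electronic tagger on its own (the address is made up):
import pynini
from nemo_text_processing.text_normalization.sv.taggers.electronic import ElectronicFst
electronic = ElectronicFst(deterministic=True)
lattice = pynini.accep("nisse@example.com") @ electronic.fst
print(pynini.shortestpath(lattice).string())
# expected: electronic { username: "nisse" domain: "example.com" preserve_order: true }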
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_DIGIT,
NEMO_SPACE,
GraphFst,
delete_space,
insert_space,
)
from nemo_text_processing.text_normalization.sv.graph_utils import SV_ALPHA
from nemo_text_processing.text_normalization.sv.utils import get_abs_path
from pynini.lib import pynutil
class DateFst(GraphFst):
"""
Finite state transducer for classifying date, e.g.
"2:a januari, 2020" -> date { day: "andra" month: "januari" year: "tjugotjugotvå" }
"2022.01.02" -> date { year: "tjugotjugotvå" month: "januari" day: "andra" }
Args:
cardinal: cardinal GraphFst
ordinal: ordinal GraphFst
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, cardinal: GraphFst, ordinal: GraphFst, deterministic: bool = True):
super().__init__(name="date", kind="classify", deterministic=deterministic)
delete_leading_zero = (pynutil.delete("0") | (NEMO_DIGIT - "0")) + NEMO_DIGIT
month_numbers = pynini.string_file(get_abs_path("data/dates/months.tsv"))
month_abbr = pynini.string_file(get_abs_path("data/dates/month_abbr.tsv"))
era_suffix = pynini.string_file(get_abs_path("data/dates/era_suffix.tsv"))
era_words = pynini.string_file(get_abs_path("data/dates/era_words.tsv"))
number_to_month = month_numbers.optimize()
self.month_abbr = month_abbr.optimize()
self.era_words = era_words.optimize()
era_norm = era_suffix @ era_words
era_names = pynini.project(era_words, "output")
month_graph = pynini.project(number_to_month, "output")
plain_space = delete_space + insert_space
numbers = cardinal.graph
optional_leading_zero = delete_leading_zero | NEMO_DIGIT
optional_dot = pynini.closure(pynutil.delete("."), 0, 1)
optional_comma = pynini.closure(pynutil.delete(","), 0, 1)
# 01, 31, 1
self.digit_day = pynini.union(*[str(x) for x in range(1, 32)]) @ ordinal.bare_ordinals
digit_day = pynini.union(
pynutil.delete("0") + (NEMO_DIGIT @ self.digit_day), ((NEMO_DIGIT - "0") + NEMO_DIGIT) @ self.digit_day
)
self.digit_day_zero = (
pynini.project(digit_day, "input") - pynini.project((NEMO_DIGIT @ self.digit_day), "input")
) @ digit_day
digit_day |= NEMO_DIGIT @ self.digit_day
digit_words = pynini.project(digit_day, "output")
day_only = (pynutil.insert("day: \"") + digit_day + pynutil.insert("\"")).optimize()
day = (pynutil.insert("day: \"") + digit_day + optional_dot + pynutil.insert("\"")).optimize()
day_sfx = (pynutil.insert("day: \"") + ordinal.suffixed_to_words + pynutil.insert("\"")).optimize()
day_words = (pynutil.insert("day: \"") + digit_words + pynutil.insert("\"")).optimize()
digit_month = optional_leading_zero @ pynini.union(*[str(x) for x in range(1, 13)])
number_to_month = digit_month @ number_to_month
self.number_to_month = number_to_month
month_name = (pynutil.insert("month: \"") + month_graph + pynutil.insert("\"")).optimize()
month_number = (pynutil.insert("month: \"") + number_to_month + pynutil.insert("\"")).optimize()
month_abbreviation = (
pynutil.insert("month: \"") + self.month_abbr + optional_dot + pynutil.insert("\"")
).optimize()
# prefer cardinal over year
year_first = ((NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 0, 1)) @ numbers
year_second = pynini.union(
((NEMO_DIGIT - "0") + (NEMO_DIGIT - "0")) @ numbers,
pynini.cross("0", "hundra") + ((NEMO_DIGIT - "0") @ numbers),
((NEMO_DIGIT - "0") + "0") @ numbers,
)
year_hundra = year_first + pynutil.insert("hundra") + year_second
if not deterministic:
year_hundra |= year_first + pynutil.insert(" hundra") + year_second
year_hundra |= year_first + pynutil.insert(" hundra ") + year_second
year_hundra |= year_first + pynutil.insert("hundra ") + year_second
year_second |= pynini.cross("00", "hundra")
year_cardinal = ((NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 1, 3)) @ numbers
year = pynini.union(year_first + year_second, year_first) # 90, 990, 1990
if not deterministic:
year |= year_cardinal
year |= year_hundra
year |= year_first + plain_space + year_second
self.year = year
self.year_cardinal = year_cardinal
sou_number = self.year + pynini.cross(":", " kolon ") + numbers
sou_word = pynini.accep("SOU")
if not deterministic:
sou_word |= pynini.cross("SOU", "statens offentliga utredningar")
self.sou = sou_word + plain_space + sou_number
year_second_decades = ((NEMO_DIGIT - "0") + "0") @ numbers
year_second_decades |= pynini.cross("00", "hundra")
decade_num = pynini.union(year_first + year_second_decades, year_second_decades)
decade_word = pynini.union("tal", "talet", "tals")
tals_word = "tals" + pynini.closure(SV_ALPHA, 1)
tal_hyphen = pynutil.delete("-")
if not deterministic:
tal_hyphen |= pynini.cross("-", " ")
decade_num |= ((NEMO_DIGIT - "0") + pynini.closure(NEMO_DIGIT, 1, 2) + "0") @ numbers
decade = (decade_num + tal_hyphen + (decade_word | tals_word)).optimize()
# decade_only = pynutil.insert("decade: \"") + decade + pynutil.insert("\"")
self.decade = decade
decade_only = pynutil.insert("year: \"") + decade + pynutil.insert("\"")
year_only = pynutil.insert("year: \"") + year + pynutil.insert("\"")
era_piece = era_norm | era_names
era_only = pynutil.insert("era: \"") + era_piece + pynutil.insert("\"")
optional_era = pynini.closure(plain_space + era_only, 0, 1)
year_era = year_only + plain_space + era_only + pynutil.insert(" preserve_order: true")
year_opt_era = year_only + optional_era + pynutil.insert(" preserve_order: true")
graph_dmy = (
(day | day_sfx | day_words)
+ plain_space
+ (month_name | month_abbreviation)
+ optional_comma
+ pynini.closure(plain_space + year_opt_era, 0, 1)
)
graph_my = (month_name | month_abbreviation) + optional_comma + plain_space + year_opt_era
day_optional = pynini.closure(pynini.cross("-", NEMO_SPACE) + day, 0, 1)
graph_ymd = year_only + pynini.cross("-", NEMO_SPACE) + month_number + day_optional
separators = [".", "-", "/"]
for sep in separators:
day_optional = pynini.closure(pynini.cross(sep, NEMO_SPACE) + day_only, 0, 1)
year_optional = pynini.closure(pynini.cross(sep, NEMO_SPACE) + year_only + optional_era)
new_graph = day_only + pynini.cross(sep, NEMO_SPACE) + month_number + year_optional
graph_dmy |= new_graph
graph_ymd |= year_only + pynini.cross(sep, NEMO_SPACE) + month_number + day_optional
final_graph = graph_ymd | graph_dmy | year_era | decade_only | graph_my
self.final_graph = final_graph.optimize()
self.fst = self.add_tokens(self.final_graph).optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/date.py |
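The date tagger needs the cardinal and ordinal taggers as dependencies; a usage sketch (the expected output mirrors the class docstring but is not verified here):
import pynini
from nemo_text_processing.text_normalization.sv.taggers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.sv.taggers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.sv.taggers.date import DateFst
cardinal = CardinalFst(deterministic=True)
ordinal = OrdinalFst(cardinal=cardinal, deterministic=True)
date = DateFst(cardinal=cardinal, ordinal=ordinal, deterministic=True)
lattice = pynini.accep("2022-01-02") @ date.fst
print(pynini.shortestpath(lattice).string())
# expected roughly: date { year: "tjugotjugotvå" month: "januari" day: "andra" }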
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_SPACE, GraphFst
from pynini.lib import pynutil
class WordFst(GraphFst):
"""
Finite state transducer for classifying word.
e.g. hund -> tokens { name: "hund" }
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="word", kind="classify")
word = pynutil.insert("name: \"") + pynini.closure(NEMO_NOT_SPACE, 1) + pynutil.insert("\"")
self.fst = word.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/taggers/word.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_extra_space,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TimeFst(GraphFst):
"""
Finite state transducer for verbalizing time, e.g.
time { hours: "tolv" minutes: "trettio" suffix: "förmiddag" zone: "e s t" } -> tolv trettio förmiddag e s t
time { hours: "tolv" } -> tolv
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="time", kind="verbalize", deterministic=deterministic)
ANY_NOT_QUOTE = pynini.closure(NEMO_NOT_QUOTE, 1)
NOT_NOLL = pynini.difference(ANY_NOT_QUOTE, "noll")
hour = pynutil.delete("hours:") + delete_space + pynutil.delete("\"") + ANY_NOT_QUOTE + pynutil.delete("\"")
minute = pynutil.delete("minutes:") + delete_space + pynutil.delete("\"") + NOT_NOLL + pynutil.delete("\"")
minute |= (
pynutil.delete("minutes:")
+ delete_space
+ pynutil.delete("\"")
+ pynutil.delete("noll")
+ pynutil.delete("\"")
)
if not deterministic:
minute |= (
pynutil.delete("minutes:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.cross("noll", "noll noll")
+ pynutil.delete("\"")
)
suffix = pynutil.delete("suffix:") + delete_space + pynutil.delete("\"") + ANY_NOT_QUOTE + pynutil.delete("\"")
optional_suffix = pynini.closure(delete_space + insert_space + suffix, 0, 1)
zone = (
pynutil.delete("zone:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
optional_zone = pynini.closure(delete_space + insert_space + zone, 0, 1)
second = (
pynutil.delete("seconds:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
# graph_hms = (
# hour
# + pynutil.insert(" hours ")
# + delete_space
# + minute
# + pynutil.insert(" minutes and ")
# + delete_space
# + second
# + pynutil.insert(" seconds")
# + optional_suffix
# + optional_zone
# )
# graph_hms @= pynini.cdrewrite(
# pynutil.delete("o ")
# | pynini.cross("one minutes", "one minute")
# | pynini.cross("one seconds", "one second")
# | pynini.cross("one hours", "one hour"),
# pynini.union(" ", "[BOS]"),
# "",
# NEMO_SIGMA,
# )
graph = hour + NEMO_SPACE + minute + optional_suffix + optional_zone
graph |= hour + NEMO_SPACE + minute + NEMO_SPACE + second + optional_suffix + optional_zone
graph |= hour + NEMO_SPACE + suffix + optional_zone
graph |= hour + optional_zone
graph = (
graph
@ pynini.cdrewrite(delete_extra_space, "", "", NEMO_SIGMA)
@ pynini.cdrewrite(delete_space, "", "[EOS]", NEMO_SIGMA)
)
# graph |= graph_hms
self.graph = graph
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/time.py |
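The verbalizer consumes the serialized token fields produced by the matching tagger; a minimal sketch with a hand-written token string:
import pynini
from nemo_text_processing.text_normalization.sv.verbalizers.time import TimeFst
time_verbalizer = TimeFst(deterministic=True)
tagged = 'time { hours: "tolv" minutes: "trettio" }'
lattice = pynini.accep(tagged) @ time_verbalizer.fst
print(pynini.shortestpath(lattice).string())  # expected: "tolv trettio"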
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space, insert_space
from pynini.lib import pynutil
class MeasureFst(GraphFst):
"""
Finite state transducer for verbalizing measure, e.g.
measure { negative: "true" cardinal { integer: "twelve" } units: "kilograms" } -> minus twelve kilograms
measure { decimal { integer_part: "twelve" fractional_part: "five" } units: "kilograms" } -> twelve point five kilograms
tokens { measure { units: "covid" decimal { integer_part: "nineteen" fractional_part: "five" } } } -> covid nineteen point five
Args:
decimal: DecimalFst
cardinal: CardinalFst
fraction: FractionFst
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, decimal: GraphFst, cardinal: GraphFst, fraction: GraphFst, deterministic: bool = True):
super().__init__(name="measure", kind="verbalize", deterministic=deterministic)
optional_sign = cardinal.optional_sign
unit = (
pynutil.delete("units: \"")
+ pynini.difference(pynini.closure(NEMO_NOT_QUOTE, 1), pynini.union("address", "math"))
+ pynutil.delete("\"")
+ delete_space
)
if not deterministic:
unit |= pynini.compose(unit, pynini.cross(pynini.union("inch", "inches"), "\""))
graph_decimal = (
pynutil.delete("decimal {")
+ delete_space
+ optional_sign
+ delete_space
+ decimal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph_cardinal = (
pynutil.delete("cardinal {")
+ delete_space
+ optional_sign
+ delete_space
+ cardinal.numbers
+ delete_space
+ pynutil.delete("}")
)
graph_fraction = (
pynutil.delete("fraction {") + delete_space + fraction.graph + delete_space + pynutil.delete("}")
)
graph = (graph_cardinal | graph_decimal | graph_fraction) + delete_space + insert_space + unit
# Sparrowhawk (SH) adds "preserve_order: true" by default
preserve_order = pynutil.delete("preserve_order:") + delete_space + pynutil.delete("true") + delete_space
graph |= unit + insert_space + (graph_cardinal | graph_decimal) + delete_space + pynini.closure(preserve_order)
# for only unit
graph |= (
pynutil.delete("cardinal { integer: \"-\"")
+ delete_space
+ pynutil.delete("}")
+ delete_space
+ unit
+ pynini.closure(preserve_order)
)
math = (
pynutil.delete("units: \"math\" ")
+ delete_space
+ graph_cardinal
+ delete_space
+ pynini.closure(preserve_order)
)
graph |= math
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/measure.py |
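Construction mirrors verbalize.py below: the measure verbalizer is assembled from the cardinal, decimal and fraction verbalizers. A sketch with an illustrative hand-written token string; the expected output is an assumption, not verified here:
import pynini
from nemo_text_processing.text_normalization.sv.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.sv.verbalizers.decimals import DecimalFst
from nemo_text_processing.text_normalization.sv.verbalizers.fraction import FractionFst
from nemo_text_processing.text_normalization.sv.verbalizers.measure import MeasureFst
cardinal = CardinalFst(deterministic=True)
decimal = DecimalFst(deterministic=True)
fraction = FractionFst(deterministic=True)
measure = MeasureFst(decimal=decimal, cardinal=cardinal, fraction=fraction, deterministic=True)
tagged = 'measure { cardinal { integer: "tolv" } units: "kg" }'
lattice = pynini.accep(tagged) @ measure.fst
print(pynini.shortestpath(lattice).string())  # expected roughly: "tolv kg"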
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2023, Jim O'Regan for Språkbanken Tal
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, GraphFst, insert_space
from pynini.lib import pynutil
class FractionFst(GraphFst):
"""
Finite state transducer for verbalizing fraction
e.g. tokens { fraction { integer_part: "tjugotre" numerator: "fyra" denominator: "femtedel" } } ->
tjugotre och fyra femtedelar
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True, lm: bool = False):
super().__init__(name="fraction", kind="verbalize", deterministic=deterministic)
plurals = pynini.string_map([("kvart", "kvartar"), ("halv", "halva"), ("del", "delar")])
integer = pynutil.delete("integer_part: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\" ")
denominators_sg = pynutil.delete("denominator: \"") + pynini.closure(NEMO_NOT_QUOTE) + pynutil.delete("\"")
denominators_pl = (
pynutil.delete("denominator: \"")
+ (pynini.closure(NEMO_NOT_QUOTE) @ pynini.cdrewrite(plurals, "", "[EOS]", NEMO_SIGMA))
+ pynutil.delete("\"")
)
self.denominators = denominators_sg | denominators_pl
either_one = pynini.union("en", "ett")
numerator_one = pynutil.delete("numerator: \"") + pynutil.delete(either_one) + pynutil.delete("\" ")
if not deterministic:
numerator_one |= pynutil.delete("numerator: \"") + either_one + pynutil.delete("\" ") + insert_space
numerator_rest = (
pynutil.delete("numerator: \"")
+ (
(pynini.closure(NEMO_NOT_QUOTE) - either_one)
@ pynini.cdrewrite(pynini.cross("ett", "en"), "[BOS]", "[EOS]", NEMO_SIGMA)
)
+ pynutil.delete("\" ")
)
graph_sg = numerator_one + denominators_sg
graph_pl = numerator_rest + insert_space + denominators_pl
graph = graph_sg | graph_pl
conjunction = pynutil.insert("och ")
if not deterministic and not lm:
conjunction = pynini.closure(conjunction, 0, 1)
integer = pynini.closure(integer + insert_space + conjunction, 0, 1)
graph = integer + graph
self.graph = graph
delete_tokens = self.delete_tokens(self.graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/fraction.py |
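A sketch showing the pluralization behaviour described in the docstring (the token string is hand-written for illustration):
import pynini
from nemo_text_processing.text_normalization.sv.verbalizers.fraction import FractionFst
fraction = FractionFst(deterministic=True)
tagged = 'fraction { integer_part: "tjugotre" numerator: "fyra" denominator: "femtedel" }'
lattice = pynini.accep(tagged) @ fraction.fst
print(pynini.shortestpath(lattice).string())  # expected: "tjugotre och fyra femtedelar"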
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SPACE,
GraphFst,
delete_space,
insert_space,
)
from pynini.lib import pynutil
class TelephoneFst(GraphFst):
"""
Finite state transducer for verbalizing telephone numbers, e.g.
telephone { country_code: "one" number_part: "one two three, one two three, five six seven eight" extension: "one" }
-> one, one two three, one two three, five six seven eight, anknytning one
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="telephone", kind="verbalize", deterministic=deterministic)
country_code = pynutil.delete("country_code: \"") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete("\"")
optional_country_code = pynini.closure(country_code + delete_space + insert_space, 0, 1,)
number_part = (
pynutil.delete("number_part: \"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynini.closure(pynutil.add_weight(pynutil.delete(" "), -0.0001), 0, 1)
+ pynutil.delete("\"")
)
optional_extension = pynini.closure(
delete_space
+ insert_space
+ pynini.cross("extension: \"", "anknytning ")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\""),
0,
1,
)
graph = pynini.union(optional_country_code + number_part + optional_extension)
delete_tokens = self.delete_tokens(graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/telephone.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, GraphFst, delete_space
from pynini.lib import pynutil
class OrdinalFst(GraphFst):
"""
Finite state transducer for verbalizing ordinal, e.g.
ordinal { integer: "trettonde" } -> trettonde
Args:
deterministic: if True will provide a single transduction option,
for False multiple transductions are generated (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="ordinal", kind="verbalize", deterministic=deterministic)
graph = (
pynutil.delete("integer:")
+ delete_space
+ pynutil.delete("\"")
+ pynini.closure(NEMO_NOT_QUOTE, 1)
+ pynutil.delete("\"")
)
self.graph = graph
delete_tokens = self.delete_tokens(self.graph)
self.fst = delete_tokens.optimize()
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/ordinal.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.sv.verbalizers.cardinal import CardinalFst
from nemo_text_processing.text_normalization.sv.verbalizers.date import DateFst
from nemo_text_processing.text_normalization.sv.verbalizers.decimals import DecimalFst
from nemo_text_processing.text_normalization.sv.verbalizers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.sv.verbalizers.fraction import FractionFst
from nemo_text_processing.text_normalization.sv.verbalizers.measure import MeasureFst
from nemo_text_processing.text_normalization.sv.verbalizers.money import MoneyFst
from nemo_text_processing.text_normalization.sv.verbalizers.ordinal import OrdinalFst
from nemo_text_processing.text_normalization.sv.verbalizers.telephone import TelephoneFst
from nemo_text_processing.text_normalization.sv.verbalizers.time import TimeFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
For deployment, this grammar will be compiled and exported to OpenFst Finite State Archive (FAR) File.
More details on deployment can be found at NeMo/tools/text_processing_deployment.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
cardinal = CardinalFst(deterministic=deterministic)
cardinal_graph = cardinal.fst
ordinal = OrdinalFst(deterministic=deterministic)
ordinal_graph = ordinal.fst
decimal = DecimalFst(deterministic=deterministic)
decimal_graph = decimal.fst
fraction = FractionFst(deterministic=deterministic)
fraction_graph = fraction.fst
date = DateFst(deterministic=deterministic)
date_graph = date.fst
measure = MeasureFst(cardinal=cardinal, decimal=decimal, fraction=fraction, deterministic=deterministic)
measure_graph = measure.fst
electronic = ElectronicFst(deterministic=deterministic)
electronic_graph = electronic.fst
whitelist_graph = WhiteListFst(deterministic=deterministic).fst
money_graph = MoneyFst(decimal=decimal, deterministic=deterministic).fst
telephone_graph = TelephoneFst(deterministic=deterministic).fst
time_graph = TimeFst(deterministic=deterministic).fst
graph = (
cardinal_graph
| measure_graph
| decimal_graph
| ordinal_graph
| date_graph
| electronic_graph
| money_graph
| fraction_graph
| whitelist_graph
| telephone_graph
| time_graph
)
self.fst = graph
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/verbalize.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from nemo_text_processing.text_normalization.sv.verbalizers.verbalize import VerbalizeFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
"""
Finite state transducer that verbalizes an entire sentence
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
"""
def __init__(self, deterministic: bool = True, cache_dir: str = None, overwrite_cache: bool = False):
super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
far_file = os.path.join(cache_dir, f"sv_tn_{deterministic}_deterministic_verbalizer.far")
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["verbalize"]
logging.info(f'VerbalizeFinalFst graph was restored from {far_file}.')
else:
verbalize = VerbalizeFst(deterministic=deterministic).fst
word = WordFst(deterministic=deterministic).fst
types = verbalize | word
graph = (
pynutil.delete("tokens")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ types
+ delete_space
+ pynutil.delete("}")
)
graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space
self.fst = graph.optimize()
if far_file:
generator_main(far_file, {"verbalize": self.fst})
logging.info(f"VerbalizeFinalFst grammars are saved to {far_file}.")
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/verbalize_final.py |
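In the two-stage pipeline this final verbalizer consumes the full tokens { ... } string emitted by the tagger side. A minimal sketch with a hand-written token string (not taken from the repository):
import pynini
from nemo_text_processing.text_normalization.sv.verbalizers.verbalize_final import VerbalizeFinalFst
verbalizer = VerbalizeFinalFst(deterministic=True, cache_dir=None, overwrite_cache=False)
tagged = 'tokens { time { hours: "tolv" minutes: "trettio" } }'
lattice = pynini.accep(tagged) @ verbalizer.fst
print(pynini.shortestpath(lattice).string())  # expected: "tolv trettio"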
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-text-processing-main | nemo_text_processing/text_normalization/sv/verbalizers/__init__.py |