python_code | repo_name | file_path
---|---|---|
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import Tacotron2Model
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
# hydra_runner is a thin NeMo wrapper around Hydra.
# It looks for a config named tacotron2.yaml inside the conf folder.
# Hydra parses the yaml and returns it as an OmegaConf DictConfig.
@hydra_runner(config_path="conf", config_name="tacotron2")
def main(cfg):
    # Define the Lightning trainer
    trainer = pl.Trainer(**cfg.trainer)
    # exp_manager is a NeMo construct that helps with logging and checkpointing
    exp_manager(trainer, cfg.get("exp_manager", None))
    # Define the Tacotron 2 model; this will construct the model as well as
    # define the training and validation dataloaders
    model = Tacotron2Model(cfg=cfg.model, trainer=trainer)
    # Add a few more callbacks
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    # Call the Lightning trainer's fit() to train the model
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/tacotron2.py |
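Once a checkpoint like the one trained above exists, the companion inference path is short. The sketch below chains a spectrogram generator with a vocoder through the same base-class APIs that `test_tts_infer.py` (later in this collection) relies on; the pretrained model names come from that script, and the output path is a placeholder.

```python
import soundfile
import torch

from nemo.collections.tts.models.base import SpectrogramGenerator, Vocoder

# Pretrained names as used in test_tts_infer.py; any compatible pair works.
spec_gen = SpectrogramGenerator.from_pretrained(model_name="tts_en_tacotron2").eval()
vocoder = Vocoder.from_pretrained(model_name="tts_en_waveglow_88m").eval()

with torch.no_grad():
    tokens = spec_gen.parse("Hello world.")                  # text -> token ids
    spec = spec_gen.generate_spectrogram(tokens=tokens)      # tokens -> mel spectrogram
    audio = vocoder.convert_spectrogram_to_audio(spec=spec)  # mel -> waveform

soundfile.write("hello.wav", audio.squeeze().cpu().numpy(), samplerate=22050)
```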
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import ssl_tts
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="ssl_tts_22050")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = ssl_tts.SSLDisentangler(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/ssl_tts.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import UnivNetModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf/univnet", config_name="univnet")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = UnivNetModel(cfg=cfg.model, trainer=trainer)
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/univnet.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import FastPitchModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="fastpitch_align_44100")
def main(cfg):
    if hasattr(cfg.model.optim, 'sched'):
        logging.warning("You are using an optimizer scheduler while finetuning. Are you sure this is intended?")
    if cfg.model.optim.lr > 1e-3 or cfg.model.optim.lr < 1e-5:
        logging.warning("The recommended learning rate for finetuning is 2e-4")

    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = FastPitchModel(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/fastpitch_finetune.py |
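The `maybe_init_from_pretrained_checkpoint` call above does nothing unless the top-level config carries an `init_from_*` key. A minimal sketch of that convention follows; the key names match NeMo's `ModelPT` API, while the checkpoint paths and model name are placeholders.

```python
from omegaconf import OmegaConf

# Top-level config fragment that maybe_init_from_pretrained_checkpoint inspects.
# Set exactly one of these keys; all values below are placeholders.
cfg = OmegaConf.create(
    {
        "init_from_nemo_model": "pretrained_fastpitch.nemo",  # restore from a local .nemo file
        # "init_from_pretrained_model": "tts_en_fastpitch",  # ...or from a pretrained model by name
        # "init_from_ptl_ckpt": "last.ckpt",                 # ...or from a Lightning checkpoint
    }
)
# model.maybe_init_from_pretrained_checkpoint(cfg=cfg) then loads those weights
# before trainer.fit() starts finetuning.
```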
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import Tacotron2Model
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
# hydra_runner is a thin NeMo wrapper around Hydra.
# It looks for a config named tacotron2_44100.yaml inside the conf folder.
# Hydra parses the yaml and returns it as an OmegaConf DictConfig.
@hydra_runner(config_path="conf", config_name="tacotron2_44100")
def main(cfg):
    # Define the Lightning trainer
    trainer = pl.Trainer(**cfg.trainer)
    # exp_manager is a NeMo construct that helps with logging and checkpointing
    exp_manager(trainer, cfg.get("exp_manager", None))
    # Define the Tacotron 2 model; this will construct the model as well as
    # define the training and validation dataloaders
    model = Tacotron2Model(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    # Add a few more callbacks
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    # Call the Lightning trainer's fit() to train the model
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/tacotron2_finetune.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import MixerTTSModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path='conf', config_name='mixer-tts')
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get('exp_manager', None))
    model = MixerTTSModel(cfg=cfg.model, trainer=trainer)
    trainer.callbacks.extend([pl.callbacks.LearningRateMonitor(), LogEpochTimeCallback()])  # noqa
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/mixer_tts.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.tts.models import HifiGanModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf/hifigan", config_name="hifigan")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = HifiGanModel(cfg=cfg.model, trainer=trainer)
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/hifigan.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import is_dataclass
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf, open_dict
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import FastPitchModel
from nemo.core import adapter_mixins
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
def update_model_config_to_support_adapter(config) -> DictConfig:
    with open_dict(config):
        enc_adapter_metadata = adapter_mixins.get_registered_adapter(config.input_fft._target_)
        if enc_adapter_metadata is not None:
            config.input_fft._target_ = enc_adapter_metadata.adapter_class_path

        dec_adapter_metadata = adapter_mixins.get_registered_adapter(config.output_fft._target_)
        if dec_adapter_metadata is not None:
            config.output_fft._target_ = dec_adapter_metadata.adapter_class_path

        pitch_predictor_adapter_metadata = adapter_mixins.get_registered_adapter(config.pitch_predictor._target_)
        if pitch_predictor_adapter_metadata is not None:
            config.pitch_predictor._target_ = pitch_predictor_adapter_metadata.adapter_class_path

        duration_predictor_adapter_metadata = adapter_mixins.get_registered_adapter(config.duration_predictor._target_)
        if duration_predictor_adapter_metadata is not None:
            config.duration_predictor._target_ = duration_predictor_adapter_metadata.adapter_class_path

        aligner_adapter_metadata = adapter_mixins.get_registered_adapter(config.alignment_module._target_)
        if aligner_adapter_metadata is not None:
            config.alignment_module._target_ = aligner_adapter_metadata.adapter_class_path

    return config


def add_global_adapter_cfg(model, global_adapter_cfg):
    # Convert to DictConfig from dict or Dataclass
    if is_dataclass(global_adapter_cfg):
        global_adapter_cfg = OmegaConf.structured(global_adapter_cfg)

    if not isinstance(global_adapter_cfg, DictConfig):
        global_adapter_cfg = DictConfig(global_adapter_cfg)

    # Update the model.cfg with information about the new adapter global cfg
    with open_dict(global_adapter_cfg), open_dict(model.cfg):
        if 'adapters' not in model.cfg:
            model.cfg.adapters = OmegaConf.create({})

        # Add the global config for adapters to the model's internal config
        model.cfg.adapters[model.adapter_global_cfg_key] = global_adapter_cfg

        # Update all adapter modules (that already exist) with this global adapter config
        model.update_adapter_cfg(model.cfg.adapters)


@hydra_runner(config_path="conf", config_name="fastpitch_align_44100_adapter")
def main(cfg):
    if hasattr(cfg.model.optim, 'sched'):
        logging.warning("You are using an optimizer scheduler while finetuning. Are you sure this is intended?")
    if cfg.model.optim.lr > 1e-3 or cfg.model.optim.lr < 1e-5:
        logging.warning("The recommended learning rate for finetuning is 2e-4")

    trainer = pl.Trainer(**cfg.trainer)
    exp_log_dir = exp_manager(trainer, cfg.get("exp_manager", None))
    # Initialize FastPitchModel
    model = FastPitchModel(cfg=update_model_config_to_support_adapter(cfg.model), trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)

    # Extract adapter parameters
    with open_dict(cfg.model.adapter):
        # Extract the name of the adapter (must be given for training)
        adapter_name = cfg.model.adapter.pop("adapter_name", "adapter")
        # Extract the name of the modules where adapters need to be added (must be given for training)
        adapter_module_name = cfg.model.adapter.pop("adapter_module_name", None)
        # Name of the adapter checkpoint which will be saved after training
        adapter_state_dict_name = cfg.model.adapter.pop("adapter_state_dict_name", None)

        # Augment the adapter name with the module name, if not provided by the user
        if adapter_module_name is not None and ':' not in adapter_name:
            adapter_name = f'{adapter_module_name}:{adapter_name}'

        # Extract the global adapter config, if provided
        adapter_global_cfg = cfg.model.adapter.pop(model.adapter_global_cfg_key, None)

    # Freeze model
    model.freeze()

    # Setup adapters
    if adapter_global_cfg is not None:
        add_global_adapter_cfg(model, adapter_global_cfg)

    if cfg.model.get("unfreeze_aligner", False):
        for name, param in model.fastpitch.aligner.named_parameters():
            param.requires_grad = True

    if cfg.model.get("unfreeze_duration_predictor", False):
        for name, param in model.fastpitch.duration_predictor.named_parameters():
            param.requires_grad = True

    if cfg.model.get("unfreeze_pitch_predictor", False):
        for name, param in model.fastpitch.pitch_predictor.named_parameters():
            param.requires_grad = True

    # Add adapters
    model.add_adapter(name=adapter_name, cfg=cfg.model.adapter)
    assert model.is_adapter_available()

    # Disable all adapters, then enable just the one added above
    model.set_enabled_adapters(enabled=False)
    model.set_enabled_adapters(adapter_name, enabled=True)

    # Set model to training mode.
    model = model.train()
    # Then, unfreeze just the adapter weights that were enabled above (not the rest of the model)
    model.unfreeze_enabled_adapters()

    # Summarize the model
    model.summarize()

    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    trainer.fit(model)

    # Save the adapter state dict after training has completed
    if adapter_state_dict_name is not None:
        state_path = exp_log_dir if exp_log_dir is not None else os.getcwd()
        ckpt_path = os.path.join(state_path, "checkpoints")
        if os.path.exists(ckpt_path):
            state_path = ckpt_path

        # Save the adapter modules in a separate file
        model.save_adapters(os.path.join(state_path, adapter_state_dict_name))


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/fastpitch_finetune_adapters.py |
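After training, the file written by `save_adapters` can be restored onto a base model. A sketch assuming NeMo's adapter mixin API follows; it presumes the base checkpoint was saved from an adapter-ready model (i.e., one built with `update_model_config_to_support_adapter`), and both paths plus the adapter name are placeholders matching the defaults above.

```python
from nemo.collections.tts.models import FastPitchModel

# Placeholder paths: an adapter-ready FastPitch checkpoint and the file
# produced by model.save_adapters(...) in the training script above.
model = FastPitchModel.restore_from("base_fastpitch.nemo")
model.load_adapters("adapters.pt")

model.set_enabled_adapters(enabled=False)             # disable everything first
model.set_enabled_adapters("adapter", enabled=True)   # then enable the trained adapter
model.eval()
```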
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import fastpitch_ssl, hifigan
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="fastpitch_ssl")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    vocoder = hifigan.HifiGanModel.load_from_checkpoint(cfg.hifi_ckpt_path).cpu()
    vocoder.eval()
    model = fastpitch_ssl.FastPitchModel_SSL(cfg=cfg.model, trainer=trainer, vocoder=vocoder)
    if cfg.get("finetune", False):
        model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    lr_logger = pl.callbacks.LearningRateMonitor()
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([lr_logger, epoch_time_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/fastpitch_ssl.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.tts.models import HifiGanModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf/hifigan", config_name="hifigan_44100")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = HifiGanModel(cfg=cfg.model, trainer=trainer)
    model.maybe_init_from_pretrained_checkpoint(cfg=cfg)
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/hifigan_finetune.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.tts.models.vits import VitsModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="vits")
def main(cfg):
    trainer = pl.Trainer(use_distributed_sampler=False, **cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = VitsModel(cfg=cfg.model, trainer=trainer)
    trainer.callbacks.extend([pl.callbacks.LearningRateMonitor()])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/vits.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models import WaveGlowModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="waveglow")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = WaveGlowModel(cfg=cfg.model, trainer=trainer)
    epoch_time_logger = LogEpochTimeCallback()
    trainer.callbacks.extend([epoch_time_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/waveglow.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from nemo.collections.tts.models import SpectrogramEnhancerModel
from nemo.core.config import hydra_runner
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="spectrogram-enhancer")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg=cfg.get("exp_manager", None))
    model = SpectrogramEnhancerModel(cfg=cfg.model, trainer=trainer)
    lr_logger = pl.callbacks.LearningRateMonitor()
    trainer.callbacks.extend([lr_logger])
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/spectrogram_enhancer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import OmegaConf
from nemo.collections.tts.models import AudioCodecModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf/audio_codec", config_name="audio_codec")
def main(cfg):
    logging.info('\nConfig Params:\n%s', OmegaConf.to_yaml(cfg, resolve=True))
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))
    model = AudioCodecModel(cfg=cfg.model, trainer=trainer)
    trainer.fit(model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/audio_codec.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import re
import librosa
import soundfile as sf
import torch
from nemo.collections.tts.models import AlignerModel
from nemo.collections.tts.parts.utils.tts_dataset_utils import general_padding
"""
G2P disambiguation using an Aligner model's input embedding distances.
Does not handle OOV and leaves them as graphemes.
The output will have each token's phonemes (or graphemes) bracketed, e.g.
<\"><M AH1 L ER0><, ><M AH1 L ER0><, ><HH IY1 Z>< ><DH AH0>< ><M AE1 N><.\">
Example:
python aligner_heteronym_disambiguation.py \
--model=<model_path> \
--manifest=<manifest_path> \
--out=<output_json_path> \
--confidence=0.02 \
--verbose
"""
def get_args():
    """Retrieve arguments for disambiguation."""
    parser = argparse.ArgumentParser("G2P disambiguation using Aligner input embedding distances.")
    # TODO(jocelynh): Make this required=False with default download from NGC once ckpt uploaded
    parser.add_argument('--model', required=True, type=str, help="Path to Aligner model checkpoint (.nemo file).")
    parser.add_argument(
        '--manifest',
        required=True,
        type=str,
        help="Path to data manifest. Each entry should contain the path to the audio file as well as the text in graphemes.",
    )
    parser.add_argument(
        '--out', required=True, type=str, help="Path to output file where disambiguations will be written."
    )
    parser.add_argument(
        '--sr',
        required=False,
        default=22050,
        type=int,
        help="Target sample rate to load the dataset. Should match what the model was trained on.",
    )
    parser.add_argument(
        '--heteronyms',
        required=False,
        type=str,
        default='../../scripts/tts_dataset_files/heteronyms-052722',
        help="Heteronyms file to specify which words should be disambiguated. All others will use default pron.",
    )
    parser.add_argument(
        '--confidence', required=False, type=float, default=0.0, help="Confidence threshold to keep a disambiguation."
    )
    parser.add_argument(
        '--verbose',
        action='store_true',
        help="If set to True, logs scores for each disambiguated word in disambiguation_logs.txt.",
    )
    args = parser.parse_args()
    return args
def load_and_prepare_audio(aligner, audio_path, target_sr, device):
    """Loads and resamples audio to target sample rate (if necessary), and preprocesses for Aligner input."""
    # Load audio and get length for preprocessing
    audio_data, orig_sr = sf.read(audio_path)
    if orig_sr != target_sr:
        audio_data = librosa.core.resample(audio_data, orig_sr=orig_sr, target_sr=target_sr)

    audio = torch.tensor(audio_data, dtype=torch.float, device=device).unsqueeze(0)
    audio_len = torch.tensor(audio_data.shape[0], device=device).long().unsqueeze(0)

    # Generate spectrogram
    spec, spec_len = aligner.preprocessor(input_signal=audio, length=audio_len)

    return spec, spec_len
def disambiguate_candidates(aligner, text, spec, spec_len, confidence, device, heteronyms, log_file=None):
    """Retrieves and disambiguates all candidate pronunciations for a given text.

    Assumes that the max number of candidates per word is a reasonable batch size.

    Note: This could be sped up if multiple words' candidates were batched, but this is conceptually easier.
    """
    # Grab original G2P result
    aligner_g2p = aligner.tokenizer.g2p
    base_g2p = aligner_g2p(text)

    # Tokenize text
    words = [word for word, _ in aligner_g2p.word_tokenize_func(text)]

    ### Loop Through Words ###
    result_g2p = []
    word_start_idx = 0
    has_heteronym = False

    for word in words:
        # Retrieve the length of the word in the default G2P conversion
        g2p_default_len = len(aligner_g2p(word))

        # Check if word needs to be disambiguated
        if word in heteronyms:
            has_heteronym = True

            # Add candidate for each ambiguous pronunciation
            word_candidates = []
            candidate_prons_and_lengths = []

            for pron in aligner_g2p.phoneme_dict[word]:
                # Replace graphemes in the base G2P result with the current variant
                candidate = base_g2p[:word_start_idx] + pron + base_g2p[word_start_idx + g2p_default_len :]
                candidate_tokens = aligner.tokenizer.encode_from_g2p(candidate)

                word_candidates.append(candidate_tokens)
                candidate_prons_and_lengths.append((pron, len(pron)))

            ### Inference ###
            num_candidates = len(word_candidates)

            # If only one candidate, just convert and continue
            if num_candidates == 1:
                has_heteronym = False
                result_g2p.append(f"<{' '.join(candidate_prons_and_lengths[0][0])}>")
                word_start_idx += g2p_default_len
                continue

            text_len = [len(toks) for toks in word_candidates]
            text_len_in = torch.tensor(text_len, device=device).long()

            # Have to pad text tokens in case different pronunciations have different lengths
            max_text_len = max(text_len)
            text_stack = []
            for i in range(num_candidates):
                padded_tokens = general_padding(
                    torch.tensor(word_candidates[i], device=device).long(), text_len[i], max_text_len
                )
                text_stack.append(padded_tokens)
            text_in = torch.stack(text_stack)

            # Repeat spectrogram and spec_len tensors to match batch size
            spec_in = spec.repeat([num_candidates, 1, 1])
            spec_len_in = spec_len.repeat([num_candidates])

            with torch.no_grad():
                soft_attn, _ = aligner(spec=spec_in, spec_len=spec_len_in, text=text_in, text_len=text_len_in)

                # Need embedding distances and duration preds to calculate mean distance for just the one word
                text_embeddings = aligner.embed(text_in).transpose(1, 2)
                l2_dists = aligner.alignment_encoder.get_dist(keys=text_embeddings, queries=spec_in).sqrt()
                durations = aligner.alignment_encoder.get_durations(soft_attn, text_len_in, spec_len_in).int()

                # Retrieve average embedding distances
                min_dist = float('inf')
                max_dist = 0.0
                best_candidate = None
                for i in range(num_candidates):
                    candidate_mean_dist = aligner.alignment_encoder.get_mean_distance_for_word(
                        l2_dists=l2_dists[i],
                        durs=durations[i],
                        start_token=word_start_idx + (1 if aligner.tokenizer.pad_with_space else 0),
                        num_tokens=candidate_prons_and_lengths[i][1],
                    )
                    if log_file:
                        log_file.write(f"{candidate_prons_and_lengths[i][0]} -- {candidate_mean_dist}\n")

                    if candidate_mean_dist < min_dist:
                        min_dist = candidate_mean_dist
                        best_candidate = candidate_prons_and_lengths[i][0]
                    if candidate_mean_dist > max_dist:
                        max_dist = candidate_mean_dist

            # Calculate confidence score. If below threshold, skip and use graphemes.
            disamb_conf = (max_dist - min_dist) / ((max_dist + min_dist) / 2.0)
            if disamb_conf < confidence:
                if log_file:
                    log_file.write(f"Below confidence threshold: {best_candidate} ({disamb_conf})\n")

                has_heteronym = False
                result_g2p.append(f"<{' '.join(aligner_g2p(word))}>")
                word_start_idx += g2p_default_len
                continue

            # Otherwise, can write disambiguated word
            if log_file:
                log_file.write(f"best candidate: {best_candidate} (confidence: {disamb_conf})\n")
            result_g2p.append(f"<{' '.join(best_candidate)}>")
        else:
            if re.search("[a-zA-Z]", word) is None:
                # Punctuation or space
                result_g2p.append(f"<{word}>")
            elif word in aligner_g2p.phoneme_dict:
                # Take the default pronunciation for everything else in the dictionary
                result_g2p.append(f"<{' '.join(aligner_g2p.phoneme_dict[word][0])}>")
            else:
                # OOV
                result_g2p.append(f"<{' '.join(aligner_g2p(word))}>")

        # Advance to phoneme index of next word
        word_start_idx += g2p_default_len

    if log_file and has_heteronym:
        log_file.write(f"{text}\n")
        log_file.write(f"===\n{''.join(result_g2p)}\n===\n")
        log_file.write("===============================\n")

    return result_g2p, has_heteronym
def disambiguate_dataset(
    aligner, manifest_path, out_path, sr, heteronyms, confidence, device, verbose, heteronyms_only=True
):
    """Disambiguates the phonemes for all words with ambiguous pronunciations in the given manifest."""
    log_file = open('disambiguation_logs.txt', 'w') if verbose else None

    with open(out_path, 'w') as f_out:
        with open(manifest_path, 'r') as f_in:
            count = 0

            for line in f_in:
                # Retrieve entry and base G2P conversion for full text
                entry = json.loads(line)
                # Set punct_post_process=True in order to preserve words with apostrophes
                text = aligner.normalizer.normalize(entry['text'], punct_post_process=True)
                text = aligner.tokenizer.text_preprocessing_func(text)

                # Load and preprocess audio
                audio_path = entry['audio_filepath']
                spec, spec_len = load_and_prepare_audio(aligner, audio_path, sr, device)

                # Get pronunciation candidates and disambiguate
                disambiguated_text, has_heteronym = disambiguate_candidates(
                    aligner, text, spec, spec_len, confidence, device, heteronyms, log_file
                )

                # Skip writing entry if user only wants samples with heteronyms
                if heteronyms_only and not has_heteronym:
                    continue

                # Save entry with disambiguation
                entry['disambiguated_text'] = ''.join(disambiguated_text)
                f_out.write(f"{json.dumps(entry)}\n")

                count += 1
                if count % 100 == 0:
                    print(f"Finished {count} entries.")

    print(f"Finished all entries, with a total of {count}.")

    if log_file:
        log_file.close()
def main():
    args = get_args()

    # Check file paths from arguments
    if not os.path.exists(args.model):
        print("Could not find model checkpoint file: ", args.model)
    if not os.path.exists(args.manifest):
        print("Could not find data manifest file: ", args.manifest)
    if os.path.exists(args.out):
        print("Output file already exists: ", args.out)
        overwrite = input("Is it okay to overwrite it? (Y/N): ")
        if overwrite.lower() != 'y':
            print("Not overwriting output file, quitting.")
            quit()
    if not os.path.exists(args.heteronyms):
        print("Could not find heteronyms list: ", args.heteronyms)

    # Read heteronyms list, one per line
    heteronyms = set()
    with open(args.heteronyms, 'r') as f_het:
        for line in f_het:
            heteronyms.add(line.strip().lower())

    # Load model
    print("Restoring Aligner model from checkpoint...")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    aligner = AlignerModel.restore_from(args.model, map_location=device)

    # Disambiguation
    print("Beginning disambiguation...")
    disambiguate_dataset(aligner, args.manifest, args.out, args.sr, heteronyms, args.confidence, device, args.verbose)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/aligner_heteronym_disambiguation.py |
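The `--confidence` flag above gates disambiguations on a spread-over-mean score computed in `disambiguate_candidates`. A tiny worked example with invented distances shows the scale of typical values:

```python
# Invented per-candidate mean embedding distances for one heteronym:
min_dist, max_dist = 1.8, 2.2

# Same formula as disambiguate_candidates():
disamb_conf = (max_dist - min_dist) / ((max_dist + min_dist) / 2.0)
print(disamb_conf)  # 0.4 / 2.0 = 0.2, well above the example threshold of 0.02
```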
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used as a CI test and shows how to chain TTS and ASR models
"""
from argparse import ArgumentParser
from math import ceil
from pathlib import Path
import librosa
import soundfile
import torch
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.asr.models import EncDecCTCModel
from nemo.collections.common.parts.preprocessing import parsers
from nemo.collections.tts.models.base import SpectrogramGenerator, Vocoder
from nemo.utils import logging
LIST_OF_TEST_STRINGS = [
    "Hey, this is a test of the speech synthesis system.",
    "roupell received the announcement with a cheerful countenance.",
    "with thirteen dollars, eighty-seven cents when considerably greater resources were available to him.",
    "Two other witnesses were able to offer partial descriptions of a man they saw in the southeast corner window.",
    "'just to steady their legs a little' in other words, to add his weight to that of the hanging bodies.",
    "The discussion above has already set forth examples of his expression of hatred for the United States.",
    "At two:thirty-eight p.m., Eastern Standard Time, Lyndon Baines Johnson took the oath of office as the thirty-sixth President of the United States.",
    "or, quote, other high government officials in the nature of a complaint coupled with an expressed or implied determination to use a means.",
    "As for my return entrance visa please consider it separately. End quote.",
    "it appears that Marina Oswald also complained that her husband was not able to provide more material things for her.",
    "appeared in The Dallas Times Herald on November fifteen, nineteen sixty-three.",
    "The only exit from the office in the direction Oswald was moving was through the door to the front stairway.",
]
def main():
    parser = ArgumentParser()
    parser.add_argument(
        "--asr_model",
        type=str,
        default="QuartzNet15x5Base-En",
        choices=[x.pretrained_model_name for x in EncDecCTCModel.list_available_models()],
    )
    parser.add_argument(
        "--tts_model_spec",
        type=str,
        default="tts_en_tacotron2",
        choices=[x.pretrained_model_name for x in SpectrogramGenerator.list_available_models()],
    )
    parser.add_argument(
        "--tts_model_vocoder",
        type=str,
        default="tts_en_waveglow_88m",
        choices=[x.pretrained_model_name for x in Vocoder.list_available_models()],
    )
    parser.add_argument("--wer_tolerance", type=float, default=1.0, help="used by test")
    parser.add_argument("--trim", action="store_true")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    torch.set_grad_enabled(False)

    if args.debug:
        logging.set_verbosity(logging.DEBUG)

    logging.info(f"Using NGC cloud ASR model {args.asr_model}")
    asr_model = EncDecCTCModel.from_pretrained(model_name=args.asr_model)
    logging.info(f"Using NGC cloud TTS Spectrogram Generator model {args.tts_model_spec}")
    tts_model_spec = SpectrogramGenerator.from_pretrained(model_name=args.tts_model_spec)
    logging.info(f"Using NGC cloud TTS Vocoder model {args.tts_model_vocoder}")
    tts_model_vocoder = Vocoder.from_pretrained(model_name=args.tts_model_vocoder)

    models = [asr_model, tts_model_spec, tts_model_vocoder]
    if torch.cuda.is_available():
        for i, m in enumerate(models):
            models[i] = m.cuda()
    for m in models:
        m.eval()
    asr_model, tts_model_spec, tts_model_vocoder = models

    parser = parsers.make_parser(
        labels=asr_model.decoder.vocabulary, name="en", unk_id=-1, blank_id=-1, do_normalize=True,
    )
    labels_map = dict([(i, asr_model.decoder.vocabulary[i]) for i in range(len(asr_model.decoder.vocabulary))])

    tts_input = []
    asr_references = []
    longest_tts_input = 0
    for test_str in LIST_OF_TEST_STRINGS:
        tts_parsed_input = tts_model_spec.parse(test_str)
        if len(tts_parsed_input[0]) > longest_tts_input:
            longest_tts_input = len(tts_parsed_input[0])
        tts_input.append(tts_parsed_input.squeeze())

        asr_parsed = parser(test_str)
        asr_parsed = ''.join([labels_map[c] for c in asr_parsed])
        asr_references.append(asr_parsed)

    # Pad TTS Inputs
    for i, text in enumerate(tts_input):
        pad = (0, longest_tts_input - len(text))
        tts_input[i] = torch.nn.functional.pad(text, pad, value=68)

    logging.debug(tts_input)

    # Do TTS
    tts_input = torch.stack(tts_input)
    if torch.cuda.is_available():
        tts_input = tts_input.cuda()
    specs = tts_model_spec.generate_spectrogram(tokens=tts_input)
    audio = []
    step = ceil(len(specs) / 4)
    for i in range(4):
        audio.append(tts_model_vocoder.convert_spectrogram_to_audio(spec=specs[i * step : i * step + step]))
    audio = [item for sublist in audio for item in sublist]

    audio_file_paths = []
    # Save audio
    logging.debug(f"args.trim: {args.trim}")
    for i, aud in enumerate(audio):
        aud = aud.cpu().numpy()
        if args.trim:
            aud = librosa.effects.trim(aud, top_db=40)[0]
        soundfile.write(f"{i}.wav", aud, samplerate=22050)
        audio_file_paths.append(str(Path(f"{i}.wav")))

    # Do ASR
    hypotheses = asr_model.transcribe(audio_file_paths)
    for i, _ in enumerate(hypotheses):
        logging.debug(f"{i}")
        logging.debug(f"ref:'{asr_references[i]}'")
        logging.debug(f"hyp:'{hypotheses[i]}'")
    wer_value = word_error_rate(hypotheses=hypotheses, references=asr_references)
    if wer_value > args.wer_tolerance:
        raise ValueError(f"Got WER of {wer_value}. It was higher than {args.wer_tolerance}")
    logging.info(f'Got WER of {wer_value}. Tolerance was {args.wer_tolerance}')


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/test_tts_infer.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from utils import get_metrics
from nemo.collections.tts.models.base import G2PModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
python g2p_inference.py \
pretrained_model=<Path to .nemo file or pretrained model name for G2PModel from list_available_models()>" \
manifest_filepath="<Path to .json manifest>" \
output_file="<Path to .json manifest to save prediction>" \
batch_size=32 \
num_workers=4 \
pred_field=pred_text
"""
@dataclass
class TranscriptionConfig:
    # Required configs
    pretrained_model: str  # Path to a .nemo file or name of a pretrained model
    manifest_filepath: str  # Path to .json manifest file
    # Name of the field in manifest_filepath for ground truth phonemes; the default during training is "text"
    phoneme_field: Optional[str] = None
    # Name of the field in manifest_filepath for input grapheme text
    grapheme_field: Optional[str] = "text_graphemes"

    # General configs
    # Path to .json manifest file to save predictions; predictions will be saved in "pred_field"
    output_file: Optional[str] = None
    pred_field: Optional[str] = "pred_text"  # Name of the field in the output_file to save predictions
    batch_size: int = 32  # Batch size to use for inference
    num_workers: int = 0  # Number of workers to use for DataLoader during inference

    # Config for heteronyms correction
    # Path to a .nemo file or name of a pretrained model to disambiguate heteronyms (optional)
    pretrained_heteronyms_model: Optional[str] = None
@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig)
def main(cfg: TranscriptionConfig) -> TranscriptionConfig:
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if not cfg.pretrained_model:
raise ValueError(
'To run evaluation and inference script a pre-trained model or .nemo file must be provided.'
f'Choose from {G2PModel.list_available_models()} or "pretrained_model"="your_model.nemo"'
)
logging.info(
'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and \
no DDP to obtain accurate results'
)
# setup GPU
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
trainer = pl.Trainer(devices=device, accelerator=accelerator, logger=False, enable_checkpointing=False)
if os.path.exists(cfg.pretrained_model):
model = G2PModel.restore_from(cfg.pretrained_model, map_location=map_location)
elif cfg.pretrained_model in G2PModel.get_available_model_names():
model = G2PModel.from_pretrained(cfg.pretrained_model, map_location=map_location)
else:
raise ValueError(
f'Provide path to the pre-trained .nemo checkpoint or choose from {G2PModel.list_available_models()}'
)
model._cfg.max_source_len = 512
model.set_trainer(trainer)
model = model.eval()
if cfg.output_file is None:
cfg.output_file = cfg.manifest_filepath.replace(".json", "_phonemes.json")
with torch.no_grad():
model.convert_graphemes_to_phonemes(
manifest_filepath=cfg.manifest_filepath,
output_manifest_filepath=cfg.output_file,
grapheme_field=cfg.grapheme_field,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
pred_field=cfg.pred_field,
)
print(f"IPA predictions saved in {cfg.output_file}")
if cfg.phoneme_field is not None:
get_metrics(cfg.output_file, phoneme_field=cfg.phoneme_field, grapheme_field=cfg.grapheme_field)
if __name__ == '__main__':
main()
| NeMo-main | examples/tts/g2p/g2p_inference.py |
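The manifest consumed above is JSON-lines. A sketch of one entry using the default field names from `TranscriptionConfig` follows; the sentence and the optional reference phonemes are made up.

```python
import json

entry = {
    "text_graphemes": "Hello world.",    # input graphemes (grapheme_field default)
    "text": "HH AH0 L OW1 W ER1 L D .",  # optional reference phonemes (phoneme_field)
}
with open("manifest.json", "w") as f:
    f.write(json.dumps(entry) + "\n")
```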
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass, is_dataclass
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf
from nemo.collections.tts.g2p.models.heteronym_classification import HeteronymClassificationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
"""
This script runs inference with HeteronymClassificationModel
If the input manifest contains target "word_id", evaluation will be also performed.
To prepare dataset, see NeMo/scripts/dataset_processing/g2p/export_wikihomograph_data_to_manifest.py
Inference form manifest:
python g2p_heteronym_classification_inference.py \
manifest="<Path to .json manifest>" \
pretrained_model="<Path to .nemo file or pretrained model name from list_available_models()>" \
output_manifest="<Path to .json manifest to save prediction>" \
wordid_to_phonemes_file="<Path to a file with mapping from wordid predicted by the model to phonemes>"
Interactive inference:
python g2p_heteronym_classification_inference.py \
pretrained_model="<Path to .nemo file or pretrained model name from list_available_models()>" \
wordid_to_phonemes_file="<Path to a file with mapping from wordid predicted by the model to phonemes>" # Optional
"""
@dataclass
class TranscriptionConfig:
    # Required configs
    pretrained_model: str  # Path to a .nemo file or name of a pretrained model

    # Path to .json manifest for inference; if not provided, interactive mode will be enabled
    manifest: Optional[str] = None
    # Path to .json manifest to save predictions, which will be saved in the "pred_text" field
    output_manifest: Optional[str] = "predictions.json"
    grapheme_field: str = "text_graphemes"  # Name of the field in the .json manifest for input grapheme text

    # Mapping from wordid predicted by the model to phonemes, e.g.,
    # "../../../scripts/tts_dataset_files/wordid_to_ipa-0.7b_nv22.10.tsv"
    wordid_to_phonemes_file: Optional[str] = None

    # If "word_id" targets are present in the manifest, evaluation will be performed and errors saved in errors_file
    errors_file: Optional[str] = None  # Path to a file to save prediction errors
    batch_size: int = 32
    num_workers: int = 0
@hydra_runner(config_name="TranscriptionConfig", schema=TranscriptionConfig)
def main(cfg):
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if not cfg.pretrained_model:
raise ValueError(
'To run evaluation and inference script a pre-trained model or .nemo file must be provided.'
f'Choose from {HeteronymClassificationModel.list_available_models()} or "pretrained_model"="your_model.nemo"'
)
logging.info(
'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and \
no DDP to obtain accurate results'
)
# setup GPU
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
trainer = pl.Trainer(devices=device, accelerator=accelerator, logger=False, enable_checkpointing=False)
if os.path.exists(cfg.pretrained_model):
model = HeteronymClassificationModel.restore_from(cfg.pretrained_model, map_location=map_location)
elif cfg.pretrained_model in HeteronymClassificationModel.get_available_model_names():
model = HeteronymClassificationModel.from_pretrained(cfg.pretrained_model, map_location=map_location)
else:
raise ValueError(
f'Provide path to the pre-trained .nemo checkpoint or choose from {HeteronymClassificationModel.list_available_models()}'
)
model.set_trainer(trainer)
model = model.eval()
logging.info(f'Config Params: {model._cfg}')
if cfg.manifest is not None:
if not os.path.exists(cfg.manifest):
raise ValueError(f"{cfg.manifest} not found.")
with torch.no_grad():
model.disambiguate_manifest(
manifest=cfg.manifest,
output_manifest=cfg.output_manifest,
grapheme_field=cfg.grapheme_field,
batch_size=cfg.batch_size,
num_workers=cfg.num_workers,
)
# save predictions to a file
if cfg.errors_file is None:
cfg.errors_file = cfg.output_manifest.replace(".json", "_errors.txt")
save_errors = True
correct = 0
total = 0
with open(cfg.output_manifest, "r", encoding="utf-8") as f_preds, open(
cfg.errors_file, "w", encoding="utf-8"
) as f_errors:
for line in f_preds:
line = json.loads(line)
predictions = line["pred_wordid"]
# run evaluation if target word_id is available in the input manifest
if "word_id" in line:
targets = line["word_id"]
if isinstance(targets, str):
targets = [targets]
for idx, target_ in enumerate(targets):
total += 1
if idx >= len(predictions) or target_ != predictions[idx]:
f_errors.write(f"INPUT: {line[cfg.grapheme_field]}\n")
f_errors.write(f"PRED : {predictions[idx]} -- GT: {target_}\n")
f_errors.write("===========================\n")
else:
correct += 1
else:
save_errors = False
if save_errors:
logging.info(f"Accuracy: {round(correct / total * 100, 2)}% ({total - correct} errors out of {total})")
logging.info(f"Errors saved at {cfg.errors_file}")
else:
logging.info("No 'word_id' values found, skipping evaluation.")
if os.path.exists(cfg.errors_file):
os.remove(cfg.errors_file)
else:
print('Entering interactive mode.')
done = False
while not done:
print('Type "STOP" to exit.')
test_input = input('Input a test input:')
if test_input == "STOP":
done = True
if not done:
with torch.no_grad():
_, sentences = model.disambiguate(
sentences=[test_input],
batch_size=1,
num_workers=cfg.num_workers,
wordid_to_phonemes_file=cfg.wordid_to_phonemes_file,
)
print(sentences[0])
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/g2p/g2p_heteronym_classification_inference.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
import torch
from utils import get_model
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.models.base import G2PModel
from nemo.core.config import hydra_runner
from nemo.utils import logging, model_utils
from nemo.utils.exp_manager import exp_manager
"""
This script supports training of G2PModels
(for T5G2PModel use g2p_t5.yaml, for CTCG2PModel use either g2p_conformer.yaml or g2p_t5_ctc.yaml)
# Training T5G2PModel and evaluation at the end of training:
python examples/text_processing/g2p/g2p_train_and_evaluate.py \
# (Optional: --config-path=<Path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<Path to manifest file>" \
model.validation_ds.manifest_filepath="<Path to manifest file>" \
model.test_ds.manifest_filepath="<Path to manifest file>" \
trainer.devices=1 \
do_training=True \
do_testing=True
Example of the config file: NeMo/examples/tts/g2p/conf/g2p_t5.yaml
# Training Conformer-G2P Model and evaluation at the end of training:
python examples/text_processing/g2p/g2p_train_and_evaluate.py \
# (Optional: --config-path=<Path to dir of configs> --config-name=<name of config without .yaml>) \
model.train_ds.manifest_filepath="<Path to manifest file>" \
model.validation_ds.manifest_filepath="<Path to manifest file>" \
model.test_ds.manifest_filepath="<Path to manifest file>" \
model.tokenizer.dir=<Path to pretrained tokenizer> \
trainer.devices=1 \
do_training=True \
do_testing=True
Example of the config file: NeMo/examples/text_processing/g2p/conf/g2p_conformer_ctc.yaml
# Run evaluation of the pretrained model:
python examples/text_processing/g2p/g2p_train_and_evaluate.py \
# (Optional: --config-path=<Path to dir of configs> --config-name=<name of config without .yaml>) \
pretrained_model="<Path to .nemo file or pretrained model name from list_available_models()>" \
model.test_ds.manifest_filepath="<Path to manifest file>" \
trainer.devices=1 \
do_training=False \
do_testing=True
"""
@hydra_runner(config_path="conf", config_name="g2p_t5")
def main(cfg):
    trainer = pl.Trainer(**cfg.trainer)
    exp_manager(trainer, cfg.get("exp_manager", None))

    g2p_model = None
    if cfg.do_training:
        g2p_model = get_model(cfg, trainer)
        lr_logger = pl.callbacks.LearningRateMonitor()
        epoch_time_logger = LogEpochTimeCallback()
        trainer.callbacks.extend([lr_logger, epoch_time_logger])
        trainer.fit(g2p_model)

    if cfg.do_testing:
        logging.info(
            'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and '
            'no DDP to obtain accurate results'
        )
        # setup GPU
        if torch.cuda.is_available():
            device = [0]  # use 0th CUDA device
            accelerator = 'gpu'
        else:
            device = 1
            accelerator = 'cpu'
        map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
        trainer = pl.Trainer(devices=device, accelerator=accelerator, logger=False, enable_checkpointing=False)

        if g2p_model is None:
            if os.path.exists(cfg.pretrained_model):
                # restore g2p_model from .nemo file path
                model_cfg = G2PModel.restore_from(restore_path=cfg.pretrained_model, return_config=True)
                classpath = model_cfg.target  # original class path
                imported_class = model_utils.import_class_by_path(classpath)
                logging.info(f"Restoring g2p_model : {imported_class.__name__}")
                g2p_model = imported_class.restore_from(restore_path=cfg.pretrained_model, map_location=map_location)
                model_name = os.path.splitext(os.path.basename(cfg.pretrained_model))[0]
                logging.info(f"Restored {model_name} g2p_model from {cfg.pretrained_model}.")
            elif cfg.pretrained_model in G2PModel.get_available_model_names():
                # restore g2p_model by name
                g2p_model = G2PModel.from_pretrained(cfg.pretrained_model, map_location=map_location)
            else:
                raise ValueError(
                    f'Provide path to the pre-trained .nemo checkpoint or choose from {G2PModel.list_available_models()}'
                )

        if hasattr(cfg.model, "test_ds") and cfg.model.test_ds.manifest_filepath is not None:
            g2p_model.setup_multiple_test_data(cfg.model.test_ds)
            if g2p_model.prepare_test(trainer):
                trainer.test(g2p_model)


if __name__ == '__main__':
    main()  # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/g2p/g2p_train_and_evaluate.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from nemo.collections.asr.metrics.wer import word_error_rate
from nemo.collections.tts.g2p.models.ctc import CTCG2PModel
from nemo.collections.tts.g2p.models.t5 import T5G2PModel
from nemo.utils import logging
def get_model(cfg, trainer):
"""
Get model instance
Args:
cfg: model's config file
trainer: trainer
Return:
G2PModel instance
"""
if "CTC" in cfg.name:
model = CTCG2PModel(cfg=cfg.model, trainer=trainer)
elif cfg.name == "T5G2P":
model = T5G2PModel(cfg=cfg.model, trainer=trainer)
else:
raise ValueError(f"{cfg.name} is not supported. Choose from [G2P-Conformer-CTC, T5G2P]")
return model
def get_metrics(manifest: str, pred_field="pred_text", phoneme_field="text", grapheme_field="text_graphemes"):
"""
Calculates WER and PER metrics. For duplicated grapheme entries with multiple reference values,
the best matching prediction is used for evaluation.
Args:
    manifest: path to the .json manifest file
    pred_field: name of the manifest field containing model predictions
    phoneme_field: name of the manifest field containing ground truth phonemes
    grapheme_field: name of the manifest field containing input grapheme text
Returns: WER and PER values
"""
all_preds = []
all_references = []
all_graphemes = {}
with open(manifest, "r") as f:
for i, line in enumerate(f):
line = json.loads(line)
all_preds.append(line[pred_field])
all_references.append(line[phoneme_field])
if line[grapheme_field] not in all_graphemes:
all_graphemes[line[grapheme_field]] = []
all_graphemes[line[grapheme_field]].append(i)
# collect all examples with multiple phoneme options and same grapheme form, choose the one with min PER
all_graphemes = {k: v for k, v in all_graphemes.items() if len(v) > 1}
lines_to_drop = []
for phon_amb_indices in all_graphemes.values():
refs, preds = [], []
for phon_amb_indices_ in phon_amb_indices:
refs.append(all_references[phon_amb_indices_])
preds.append(all_preds[phon_amb_indices_])
pers = []
for ref_, pred_ in zip(refs, preds):
pers.append(word_error_rate(hypotheses=[pred_], references=[ref_], use_cer=True))
min_idx = pers.index(min(pers))
phon_amb_indices.pop(min_idx)
lines_to_drop.extend(phon_amb_indices)
# drop duplicated examples, keeping only the one with min PER
all_preds = [x for i, x in enumerate(all_preds) if i not in lines_to_drop]
all_references = [x for i, x in enumerate(all_references) if i not in lines_to_drop]
wer = word_error_rate(hypotheses=all_preds, references=all_references)
per = word_error_rate(hypotheses=all_preds, references=all_references, use_cer=True)
logging.info(f"{manifest}: PER: {per * 100:.2f}%, WER: {wer * 100:.2f}%, lines: {len(all_references)}")
return wer, per
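# Illustrative usage, assuming "predictions.json" is a manifest produced by inference where
# each line is a JSON object such as
# {"text_graphemes": "hello", "text": "HH AH0 L OW1", "pred_text": "HH AH0 L OW1"}:
#   wer, per = get_metrics("predictions.json")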
| NeMo-main | examples/tts/g2p/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
import torch
from nemo.collections.common.callbacks import LogEpochTimeCallback
from nemo.collections.tts.g2p.models.heteronym_classification import HeteronymClassificationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
This script runs training and evaluation of HeteronymClassificationModel
To prepare dataset, see NeMo/scripts/dataset_processing/g2p/export_wikihomograph_data_to_manifest.py
To run training:
python g2p_heteronym_classification_train_and_evaluate.py \
train_manifest="<Path to train manifest file>" \
validation_manifest="<Path to validation manifest file>" \
model.wordids="<Path to wordids.tsv file>" \
do_training=True
To run training and testing (once the training is complete):
python g2p_heteronym_classification_train_and_evaluate.py \
train_manifest="<Path to train manifest file>" \
validation_manifest="<Path to validation manifest file>" \
model.test_ds.dataset.manifest="<Path to test manifest file>" \
model.wordids="<Path to wordids.tsv file>" \
do_training=True \
do_testing=True
To run testing:
python g2p_heteronym_classification_train_and_evaluate.py \
do_training=False \
do_testing=True \
model.test_ds.dataset.manifest="<Path to test manifest file>" \
pretrained_model=<Path to pretrained .nemo model or from list_available_models()>
See https://github.com/google-research-datasets/WikipediaHomographData/blob/master/data/wordids.tsv for wordids file
format example
See https://github.com/NVIDIA/NeMo/blob/main/scripts/dataset_processing/g2p/export_wikihomograph_data_to_manifest.py
on how to convert WikiHomograph data for HeteronymClassificationModel training/evaluation
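An illustrative manifest line (field names are assumptions based on the export script above,
shown only as an example):
    {"text_graphemes": "How to read a book", "start_end": [7, 11], "homograph_span": "read", "word_id": "read_vrb"}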
"""
@hydra_runner(config_path="conf", config_name="g2p_heteronym_classification.yaml")
def main(cfg):
# PTL 2.0 has find_unused_parameters set to False by default, so it's required to set it to True
# when there are unused parameters, as in this model
if cfg.trainer.strategy == 'ddp':
cfg.trainer.strategy = "ddp_find_unused_parameters_true"
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
model = None
if cfg.do_training:
model = HeteronymClassificationModel(cfg=cfg.model, trainer=trainer)
lr_logger = pl.callbacks.LearningRateMonitor()
epoch_time_logger = LogEpochTimeCallback()
trainer.callbacks.extend([lr_logger, epoch_time_logger])
trainer.fit(model)
logging.info("Training is complete")
if cfg.do_testing:
logging.info(
'During evaluation/testing, it is currently advisable to construct a new Trainer with single GPU and \
no DDP to obtain accurate results'
)
# setup GPU
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
trainer = pl.Trainer(devices=device, accelerator=accelerator, logger=False, enable_checkpointing=False)
if model is None:
if os.path.exists(cfg.pretrained_model):
# restore model from .nemo file path
model = HeteronymClassificationModel.restore_from(restore_path=cfg.pretrained_model)
elif cfg.pretrained_model in HeteronymClassificationModel.get_available_model_names():
# restore model by name
model = HeteronymClassificationModel.from_pretrained(cfg.pretrained_model, map_location=map_location)
else:
raise ValueError(
f'Provide path to the pre-trained .nemo checkpoint or choose from {HeteronymClassificationModel.list_available_models()}'
)
if hasattr(cfg.model, "test_ds") and cfg.model.test_ds.dataset.manifest is not None:
model.setup_test_data(cfg.model.test_ds)
trainer.test(model)
else:
logging.info("test_ds not found, skipping evaluation")
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/tts/g2p/g2p_heteronym_classification_train_and_evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import ZeroShotIntentModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="zero_shot_intent_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params:\n {OmegaConf.to_yaml(cfg)}')
# initialize the model using the config file
if cfg.pretrained_model and os.path.exists(cfg.pretrained_model):
model = ZeroShotIntentModel.restore_from(cfg.pretrained_model, strict=False)
else:
raise ValueError('Provide path to the pre-trained .nemo checkpoint')
# predicting an intent of a query
queries = [
"I'd like a veggie burger and fries",
"Turn off the lights in the living room",
]
candidate_labels = ['Food order', 'Play music', 'Request for directions', 'Change lighting', 'Calendar query']
predictions = model.predict(queries, candidate_labels, batch_size=4, multi_label=True)
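# Each element of `predictions` is a dict that pairs the input query with the candidate
# labels and their scores; an illustrative (not exact) shape, assuming the usual NeMo keys:
#   {"sentence": "Turn off the lights in the living room",
#    "labels": ["Change lighting", ...], "scores": [0.97, ...]}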
logging.info('The prediction results of some sample queries with the trained model:')
for prediction in predictions:
    logging.info(json.dumps(prediction, indent=4))
logging.info("Inference finished!")
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/zero_shot_intent_recognition/zero_shot_intent_infer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import ZeroShotIntentModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="zero_shot_intent_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params:\n {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
# initialize the model using the config file
model = ZeroShotIntentModel(cfg.model, trainer=trainer)
# training
logging.info("================================================================================================")
logging.info('Starting training...')
trainer.fit(model)
logging.info('Training finished!')
if cfg.model.nemo_path:
model.save_to(cfg.model.nemo_path)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/zero_shot_intent_recognition/zero_shot_intent_train.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle as pkl
import random
from argparse import ArgumentParser
import h5py
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from sklearn.decomposition import PCA
from tqdm import tqdm
from nemo.collections.nlp.models import EntityLinkingModel
from nemo.utils import logging
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss is required for building the index. Please install faiss-gpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def build_index(cfg: DictConfig, model: object):
"""
Builds faiss index from index dataset specified in the config.
Args:
cfg (DictConfig): Config file specifying index parameters
model (object): Encoder model
"""
# Get index dataset embeddings
# PCA model exists and index embeddings have already been PCAed, no need to re-extract/PCA them
if cfg.apply_pca and os.path.isfile(cfg.pca.pca_save_name) and os.path.isfile(cfg.pca_embeddings_save_name):
logging.info("Loading reduced dimensionality embeddings")
embeddings = h5py.File(cfg.pca_embeddings_save_name, "r")
embeddings = embeddings[cfg.index_ds.name][:]
elif os.path.isfile(cfg.embedding_save_name):
logging.info("Loading previously extracted index dataset embeddings")
embeddings = h5py.File(cfg.embedding_save_name, "r")
embeddings = embeddings[cfg.index_ds.name][:]
else:
logging.info("Encoding index dataset, this may take a while")
index_dataloader = model.setup_dataloader(cfg.index_ds, is_index_data=True)
embeddings, concept_ids = get_index_embeddings(cfg, index_dataloader, model)
# Create pca model to reduce dimensionality of index dataset and decrease memory footprint
if cfg.apply_pca:
# Need to train PCA model and apply PCA transformation with newly trained model
if not os.path.isfile(cfg.pca.pca_save_name):
logging.info("Fitting PCA model for embedding dimensionality reduction")
pca_train_set = random.sample(list(embeddings), k=int(len(embeddings) * cfg.pca.sample_fraction))
pca = PCA(n_components=cfg.pca.output_dim)
pca.fit(pca_train_set)
pkl.dump(pca, open(cfg.pca.pca_save_name, "wb"))
embeddings = reduce_embedding_dim(pca, embeddings, cfg)
# PCA model already trained, just need to reduce dimensionality of all embeddings
elif not os.path.isfile(cfg.pca_embeddings_save_name):
pca = pkl.load(open(cfg.pca.pca_save_name, "rb"))
embeddings = reduce_embedding_dim(pca, embeddings, cfg)
# Build faiss index from embeddings
logging.info(f"Training index with embedding dim size {cfg.dims} using {faiss.get_num_gpus()} gpus")
quantizer = faiss.IndexFlatL2(cfg.dims)
index = faiss.IndexIVFFlat(quantizer, cfg.dims, cfg.nlist)
index = faiss.index_cpu_to_all_gpus(index)
index.train(embeddings)
logging.info("Adding dataset embeddings to index")
for i in tqdm(range(0, embeddings.shape[0], cfg.index_batch_size)):
index.add(embeddings[i : i + cfg.index_batch_size])
logging.info("Saving index")
faiss.write_index(faiss.index_gpu_to_cpu(index), cfg.index_save_name)
logging.info("Index built and saved")
def reduce_embedding_dim(pca, embeddings, cfg):
"""Apply PCA transformation to index dataset embeddings"""
logging.info("Applying PCA transformation to entire index dataset")
embeddings = np.array(pca.transform(embeddings), dtype=np.float32)
emb_file = h5py.File(cfg.pca_embeddings_save_name, "w")
emb_file.create_dataset(cfg.index_ds.name, data=embeddings)
emb_file.close()
return embeddings
def get_index_embeddings(cfg: DictConfig, dataloader: object, model: object):
"""Use entity linking encoder to get embeddings for full index dataset"""
embeddings = []
concept_ids = []
with torch.no_grad():
for batch in tqdm(dataloader):
input_ids, token_type_ids, input_mask, batch_concept_ids = batch
input_ids = input_ids.to(device)
token_type_ids = token_type_ids.to(device)
input_mask = input_mask.to(device)
batch_embeddings = model.forward(
input_ids=input_ids, token_type_ids=token_type_ids, attention_mask=input_mask
)
embeddings.extend(batch_embeddings.detach().cpu().numpy())
concept_ids.extend(batch_concept_ids.numpy())
emb_file = h5py.File(cfg.embedding_save_name, "w")
emb_file.create_dataset(cfg.index_ds.name, data=embeddings)
emb_file.close()
pkl.dump(concept_ids, open(cfg.concept_id_save_name, "wb"))
return embeddings, concept_ids
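# `map_idx_to_ids` is called in main() below but is not defined or imported in this file.
# The following is a minimal reconstruction, assuming the concept ids were pickled in index
# order by get_index_embeddings() above, and that the mapping is saved to cfg.idx_to_id
# (the pickled dict later loaded by query_index.py).
def map_idx_to_ids(cfg: DictConfig):
    """Map each embedding's index position to its concept id and save the mapping"""
    concept_ids = pkl.load(open(cfg.concept_id_save_name, "rb"))
    idx2id = {idx: concept_id for idx, concept_id in enumerate(concept_ids)}
    pkl.dump(idx2id, open(cfg.idx_to_id, "wb"))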
def load_model(cfg: DictConfig, restore: bool):
"""
Loads encoder model.
Args:
cfg: Config file specifying model parameters
restore: Whether to restore model weights trained
by the user. Otherwise will load weights
used before self alignment pretraining.
"""
if restore:
model = EntityLinkingModel.restore_from(cfg.nemo_path)
else:
cfg.train_ds = None
cfg.validation_ds = None
cfg.test_ds = None
model = EntityLinkingModel(cfg)
model = model.to(device)
return model
def main(cfg: DictConfig, restore: bool):
"""
Builds new index if one hasn't been built yet.
Args:
cfg: Config file specifying index parameters
restore: Whether to restore model weights trained
by the user. Otherwise will load weights
used before self alignment pretraining.
"""
logging.info("Loading entity linking encoder model")
model = load_model(cfg.model, restore)
if not os.path.isfile(cfg.index.index_save_name) or (
cfg.apply_pca and not os.path.isfile(cfg.index.pca.pca_save_name)
):
logging.info("Building index")
build_index(cfg.index, model)
else:
logging.info("Index and pca model (if required) already exists. Skipping build index step.")
if not os.path.isfile(cfg.index.idx_to_id):
logging.info("Mapping entity index postions to ids")
map_idx_to_ids(cfg.index)
else:
logging.info("Map from concept index to id already exists. Skipping mapping step.")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--restore", action="store_true", help="Whether to restore encoder model weights from nemo path"
)
parser.add_argument("--project_dir", required=False, type=str, default=".")
parser.add_argument("--cfg", required=False, type=str, default="./conf/umls_medical_entity_linking_config.yaml")
args = parser.parse_args()
cfg = OmegaConf.load(args.cfg)
cfg.project_dir = args.project_dir
main(cfg, args.restore)
| NeMo-main | examples/nlp/entity_linking/build_index.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Please see tutorial at Nemo/tutorials/nlp/Entity_Linking_Medical.ipynb for
# more information on entity linking and self alignment pretraining.
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.nlp.models import EntityLinkingModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="umls_medical_entity_linking_config.yaml")
def main(cfg: DictConfig) -> None:
# PTL 2.0 has find_unused_parameters set to False by default, so it's required to set it to True
# when there are unused parameters, as in this model
if cfg.trainer.strategy == 'ddp':
cfg.trainer.strategy = "ddp_find_unused_parameters_true"
logging.info(f"\nConfig Params:\n{OmegaConf.to_yaml(cfg)}")
trainer = Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
logging.info(f"Loading weights from pretrained model {cfg.model.language_model.pretrained_model_name}")
model = EntityLinkingModel(cfg=cfg.model, trainer=trainer)
logging.info("===========================================================================================")
logging.info('Starting training...')
trainer.fit(model)
logging.info('Training finished!')
logging.info("===========================================================================================")
if cfg.model.nemo_path:
# '.nemo' file contains the last checkpoint and the params to initialize the model
model.save_to(cfg.model.nemo_path)
logging.info(f'Model is saved into `.nemo` file: {cfg.model.nemo_path}')
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/entity_linking/self_alignment_pretraining.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle as pkl
from argparse import ArgumentParser
from collections import OrderedDict
from typing import Dict
import numpy as np
import torch
from build_index import load_model
from omegaconf import DictConfig, OmegaConf
from nemo.utils import logging
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss is required for building the index. Please install faiss-gpu")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_query_embedding(query, model):
"""Use entity linking encoder to get embedding for index query"""
model_input = model.tokenizer(
query,
add_special_tokens=True,
padding=True,
truncation=True,
max_length=512,
return_token_type_ids=True,
return_attention_mask=True,
)
query_emb = model.forward(
input_ids=torch.LongTensor([model_input["input_ids"]]).to(device),
token_type_ids=torch.LongTensor([model_input["token_type_ids"]]).to(device),
attention_mask=torch.LongTensor([model_input["attention_mask"]]).to(device),
)
return query_emb
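# Illustrative usage: `get_query_embedding("aspirin", model)` is expected to return a
# tensor of shape (1, hidden_size) suitable for comparison against the index embeddings.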
def query_index(
query: str, cfg: DictConfig, model: object, index: object, pca: object, idx2id: dict, id2string: dict,
) -> Dict:
"""
Query the nearest neighbor index of entities to find the
concepts in the index dataset that are most similar to the
query.
Args:
query (str): entity to look up in the index
cfg (DictConfig): config object to specify query parameters
model (EntityLinkingModel): entity linking encoder model
index (object): faiss index
pca (object): sklearn pca transformation to be applied to queries
idx2id (dict): dictionary mapping unique concept dataset index to
its CUI
id2string (dict): dictionary mapping each unique CUI to a
representative English description of
the concept
Returns:
A dictionary with the concept ids of the index's most similar
entities as the keys and a tuple containing the string
representation of that concept and its cosine similarity to
the query as the values.
"""
query_emb = get_query_embedding(query, model).detach().cpu().numpy()
if cfg.apply_pca:
query_emb = pca.transform(query_emb)
dist, neighbors = index.search(query_emb.astype(np.float32), cfg.query_num_factor * cfg.top_n)
dist, neighbors = dist[0], neighbors[0]
unique_ids = OrderedDict()
neighbor_idx = 0
# Many of the nearest neighbors could map to the same concept id; their index is their unique identifier
while len(unique_ids) < cfg.top_n and neighbor_idx < len(neighbors):
concept_id_idx = neighbors[neighbor_idx]
concept_id = idx2id[concept_id_idx]
# Only want one instance of each unique concept
if concept_id not in unique_ids:
concept = id2string[concept_id]
unique_ids[concept_id] = (concept, 1 - dist[neighbor_idx])
neighbor_idx += 1
unique_ids = dict(unique_ids)
return unique_ids
def main(cfg: DictConfig, restore: bool):
"""
Loads faiss index and allows commandline queries
to the index. Builds new index if one hasn't been built yet.
Args:
cfg: Config file specifying index parameters
restore: Whether to restore model weights trained
by the user. Otherwise will load weights
used before self alignment pretraining.
"""
if not os.path.isfile(cfg.index.index_save_name) or (
cfg.apply_pca and not os.path.isfile(cfg.index.pca.pca_save_name) or not os.path.isfile(cfg.index.idx_to_id)
):
logging.warning("Either no index and/or no mapping from entity idx to ids exists. Please run `build_index.py`")
return
logging.info("Loading entity linking encoder model")
model = load_model(cfg.model, restore)
logging.info("Loading index and associated files")
index = faiss.read_index(cfg.index.index_save_name)
idx2id = pkl.load(open(cfg.index.idx_to_id, "rb"))
id2string = pkl.load(open(cfg.index.id_to_string, "rb")) # Should be created during dataset prep
pca = None
if cfg.index.apply_pca:
    pca = pkl.load(open(cfg.index.pca.pca_save_name, "rb"))
while True:
    query = input("enter index query: ")
    if query == "exit":
        break
    output = query_index(query, cfg.index, model, index, pca, idx2id, id2string)
for concept_id in output:
concept_details = output[concept_id]
concept_id = "C" + str(concept_id).zfill(7)
print(concept_id, concept_details)
print("----------------\n")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--restore", action="store_true", help="Whether to restore encoder model weights from nemo path"
)
parser.add_argument("--project_dir", required=False, type=str, default=".")
parser.add_argument("--cfg", required=False, type=str, default="./conf/umls_medical_entity_linking_config.yaml")
args = parser.parse_args()
cfg = OmegaConf.load(args.cfg)
cfg.project_dir = args.project_dir
main(cfg, args.restore)
| NeMo-main | examples/nlp/entity_linking/query_index.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import pickle as pkl
import random
from argparse import ArgumentParser
import pandas as pd
from omegaconf import OmegaConf
from tqdm import tqdm
# Info on these headers can be found here on the UMLS website https://www.ncbi.nlm.nih.gov/books/NBK9685/
# section 3.3.4 Table 1
HEADERS = [
'CUI',
'LAT',
'TS',
'LUI',
'STT',
'SUI',
'ISPREF',
'AUI',
'SAUI',
'SCUI',
'SDUI',
'SAB',
'TTY',
'CODE',
'STR',
'SRL',
'SUPPRESS',
'CVF',
]
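# Each MRCONSO.RRF row is pipe-delimited, with one value per header above. An illustrative
# (not verbatim) row:
# C0000005|ENG|P|L0000005|PF|S0007492|Y|A26634265||M0019694|D012711|MSH|PEP|D012711|(131)I-Macroaggregated Albumin|0|N|256|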
def process_umls_training_dataset(data_path, train_save_name, val_save_name, max_pairs, train_split, headers):
"""
Generates and saves UMLS self alignment pretraining train and validation data. Takes the raw .RRF UMLS
data file and creates different pair combinations for entities with the same CUI. Each row in the output
will be formatted as 'CUI EntitySynonym1 EntitySynonym2' with each item in a row separated by tabs.
Saves two .tsv output files, one for the train split and one for the validation split.
Only data marked as English is added to the train and val splits.
Arguments:
data_path (str): path to MRCONSO.RRF UMLS data file
train_save_name (str): path to where training data will be saved
val_save_name (str): path to where validation data will be saved
max_pairs (int): max number of pairs for any one CUI added to the train
or validation splits
train_split (float): percentage of raw data to be added to train set split
headers (list): column labels within MRCONSO.RRF
"""
print("Loading training data file...")
df = pd.read_table(data_path, names=headers, index_col=False, delimiter='|')
train_file = open(train_save_name, 'w')
val_file = open(val_save_name, 'w')
cui = df["CUI"].iloc[0]
names = []
random.seed(2021)
for idx in tqdm(range(len(df))):
# Address incorrectly formatted data
if type(df["STR"].iloc[idx]) != str or "|" in df["STR"].iloc[idx]:
continue
# Collect all english concept strings matching the current CUI
if df["CUI"].iloc[idx] == cui and df["LAT"].iloc[idx] == "ENG":
concept_string = df["STR"].iloc[idx]
names.append(concept_string)
else:
# Pair off concept synonyms to make training and val sets
pairs = list(itertools.combinations(names, 2))
if len(pairs) == 0:
# Not enough concepts gathered to make a pair
cui = df["CUI"].iloc[idx]
names = [df["STR"].iloc[idx]]
continue
# Removing leading C to convert label string to int
cui = int(cui[1:])
random.shuffle(pairs)
# Keep up to max pairs number pairs for any one concept
for pair in pairs[:max_pairs]:
# Want concepts in train and val splits to be randomly selected and mutually exclusive
add_to_train = random.random()
if add_to_train <= train_split:
train_file.write(f'{cui}\t{pair[0]}\t{pair[1]}\n')
else:
val_file.write(f'{cui}\t{pair[0]}\t{pair[1]}\n')
# Switch to next concept
cui = df["CUI"].iloc[idx]
names = [df["STR"].iloc[idx]]
train_file.close()
val_file.close()
print("Finished making training and validation data")
def process_umls_index_dataset(data_path, data_savename, id2string_savename, headers):
"""
Generates data file needed to build a UMLS index and a hash table mapping each
CUI to one canonical concept string. Takes the raw .RRF data file and saves
a .tsv index concept file as well as a .pkl file of CUI to concept string
mappings. Only data marked as English is added to the index data file.
Arguments:
data_path (str): path to MRCONSO.RRF UMLS data file
data_savename (str): path to where .tsv index data will be saved
id2string_savename (str): path to where .pkl cui to string mapping will
be saved
headers (list): column labels within MRCONSO.RRF
"""
print("Loading index data file...")
df = pd.read_table(data_path, names=headers, index_col=False, delimiter='|')
id2string = {}
with open(data_savename, "w") as outfile:
for idx, row in tqdm(df.iterrows(), total=df.shape[0]):
# Address incorrectly formatted data
if type(row["STR"]) != str or "|" in row["STR"]:
continue
cui = row["CUI"]
sent = row["STR"]
# Removing leading C to convert label string to int
cui = int(cui[1:])
# Only keeping english concepts
if row["LAT"] == "ENG":
outfile.write(f'{cui}\t{sent}\n')
# Matching each cui to one canonical string representation
if cui not in id2string and ":" not in sent:
id2string[cui] = sent
outfile.close()
pkl.dump(id2string, open(id2string_savename, "wb"))
print("Finished saving index data and id to concept mapping")
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--index", action="store_true", help="Whether to process data for building an index")
parser.add_argument("--project_dir", required=False, type=str, default=".")
parser.add_argument("--cfg", required=False, type=str, default="conf/umls_medical_entity_linking_config.yaml")
parser.add_argument(
"--max_pairs", required=False, type=int, default=50, help="Max number of train pairs for a single concepts"
)
parser.add_argument(
"--train_split", required=False, type=float, default=0.99, help="Precentage of data to add to train set"
)
args = parser.parse_args()
cfg = OmegaConf.load(args.cfg)
cfg.project_dir = args.project_dir
if args.index:
process_umls_index_dataset(cfg.index.raw_data, cfg.index.index_ds.data_file, cfg.index.id_to_string, HEADERS)
else:
process_umls_training_dataset(
cfg.model.raw_data,
cfg.model.train_ds.data_file,
cfg.model.validation_ds.data_file,
args.max_pairs,
args.train_split,
HEADERS,
)
| NeMo-main | examples/nlp/entity_linking/data/umls_dataset_processing.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.question_answering.qa_bert_model import BERTQAModel
from nemo.collections.nlp.models.question_answering.qa_gpt_model import GPTQAModel
from nemo.collections.nlp.models.question_answering.qa_s2s_model import S2SQAModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="qa_conf")
def main(cfg: DictConfig) -> None:
pl.seed_everything(42)
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_dir = exp_manager(trainer, cfg.get("exp_manager", None))
if "bert" in cfg.model.language_model.pretrained_model_name.lower():
model_class = BERTQAModel
elif "gpt" in cfg.model.language_model.pretrained_model_name.lower():
model_class = GPTQAModel
elif (
    "bart" in cfg.model.language_model.pretrained_model_name.lower()
    or "t5" in cfg.model.language_model.pretrained_model_name.lower()
):
    model_class = S2SQAModel
else:
    raise ValueError(
        f"Unsupported pretrained model name: {cfg.model.language_model.pretrained_model_name}. "
        "Expected a BERT, GPT, BART, or T5 based model."
    )
if cfg.pretrained_model or (cfg.model.nemo_path and os.path.exists(cfg.model.nemo_path)):
if cfg.pretrained_model:
logging.info(f'Loading pretrained model {cfg.pretrained_model}')
model = model_class.from_pretrained(cfg.pretrained_model)
else:
logging.info(f'Restoring model from {cfg.model.nemo_path}')
model = model_class.restore_from(cfg.model.nemo_path)
if cfg.do_training:
model.setup_training_data(train_data_config=cfg.model.train_ds)
model.setup_multiple_validation_data(val_data_config=cfg.model.validation_ds)
else:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = model_class(cfg.model, trainer=trainer)
if cfg.do_training:
trainer.fit(model)
if cfg.model.nemo_path:
model.save_to(cfg.model.nemo_path)
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.file is not None:
eval_device = [cfg.trainer.devices[0]] if isinstance(cfg.trainer.devices, list) else 1
trainer = pl.Trainer(devices=eval_device, accelerator=cfg.trainer.accelerator, precision=16)
model.setup_test_data(test_data_config=cfg.model.test_ds)
trainer.test(model)
# specifiy .json file to dump predictions. e.g. os.path.join(exp_dir, "output_nbest_file.json")
output_nbest_file = None
# specifiy .json file to dump predictions. e.g. os.path.join(exp_dir, "output_prediction_file.json")
output_prediction_file = None
inference_samples = 5 # for test purposes. To use entire inference dataset set to -1
all_preds, all_nbest = model.inference(
cfg.model.test_ds.file,
output_prediction_file=output_prediction_file,
output_nbest_file=output_nbest_file,
num_samples=inference_samples,
)
for question_id in all_preds:
print(all_preds[question_id])
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/question_answering/question_answering.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from ast import literal_eval
from tqdm import tqdm
def load_json(filepath):
with open(filepath, "r") as f:
data = json.load(f)
return data
def dump_json(filepath, data):
with open(filepath, "w") as f:
json.dump(data, f)
def get_context_from_passages(passages, keep_only_relevant_passages):
contexts = []
if keep_only_relevant_passages:
for passage in passages:
if passage["is_selected"] == 1:
contexts.append(passage["passage_text"])
else:
contexts = [passage["passage_text"] for passage in passages]
return " ".join(contexts)
def format_answers_into_squad_format(answers):
is_impossible = True if "No Answer Present." in answers else False
if is_impossible:
answers = []
else:
answers = [{"text": ans, "answer_start": -1} for ans in answers]
return answers
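# For example:
#   format_answers_into_squad_format(["No Answer Present."]) -> []
#   format_answers_into_squad_format(["Paris"]) -> [{"text": "Paris", "answer_start": -1}]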
def convert_msmarco_to_squad_format(msmarco_data, args):
ids = list(msmarco_data["query"])
squad_data = {"data": [{"title": "MSMARCO", "paragraphs": []}], "version": "v2.1"}
for index, _id in enumerate(tqdm(ids)):
context = get_context_from_passages(msmarco_data["passages"][_id], args.keep_only_relevant_passages)
if not context:
continue
query = msmarco_data["query"][_id]
# use well formed answers if present, else use the 'answers' field
well_formed_answers = msmarco_data['wellFormedAnswers'][_id]
well_formed_answers = (
well_formed_answers if isinstance(well_formed_answers, list) else literal_eval(well_formed_answers)
)
answers = well_formed_answers if well_formed_answers else msmarco_data["answers"][_id]
answers = format_answers_into_squad_format(answers)
if args.exclude_negative_samples and (not answers):
continue
squad_data["data"][0]["paragraphs"].append(
{
"context": context,
"qas": [
{"id": index, "question": query, "answers": answers, "is_impossible": False if answers else True,}
],
}
)
return squad_data
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--msmarco_train_input_filepath", default=None, type=str, required=True)
parser.add_argument("--msmarco_dev_input_filepath", default=None, type=str, required=True)
parser.add_argument("--converted_train_save_path", default=None, type=str, required=True)
parser.add_argument("--converted_dev_save_path", default=None, type=str, required=True)
parser.add_argument(
    "--exclude_negative_samples",
    default=False,
    # argparse's type=bool treats any non-empty string (including "False") as True,
    # so parse the flag value explicitly
    type=lambda x: str(x).lower() == "true",
    help="if True, exclude samples without answers (negative samples) from the dataset",
    required=False,
)
parser.add_argument(
    "--keep_only_relevant_passages",
    default=False,
    type=lambda x: str(x).lower() == "true",
    help="if True, will only use passages with is_selected=True for context",
    required=False,
)
args = parser.parse_args()
print("converting MS-MARCO train dataset...")
msmarco_train_data = load_json(args.msmarco_train_input_filepath)
squad_train_data = convert_msmarco_to_squad_format(msmarco_train_data, args)
dump_json(args.converted_train_save_path, squad_train_data)
print("converting MS-MARCO dev dataset...")
msmarco_dev_data = load_json(args.msmarco_dev_input_filepath)
squad_dev_data = convert_msmarco_to_squad_format(msmarco_dev_data, args)
dump_json(args.converted_dev_save_path, squad_dev_data)
if __name__ == "__main__":
"""
Please agree to the Terms of Use at:
https://microsoft.github.io/msmarco/
Download data at:
https://msmarco.blob.core.windows.net/msmarco/train_v2.1.json.gz
https://msmarco.blob.core.windows.net/msmarco/dev_v2.1.json.gz
Example usage:
python convert_msmarco_to_squad_format.py \
--msmarco_train_input_filepath=/path/to/msmarco_train_v2.1.json \
--msmarco_dev_input_filepath=/path/to/msmarco_dev_v2.1.json \
--converted_train_save_path=/path/to/msmarco_squad_format_train.json \
--converted_dev_save_path=/path/to/msmarco_squad_format_dev.json \
--exclude_negative_samples=False \
--keep_only_relevant_passages=False
"""
main()
| NeMo-main | examples/nlp/question_answering/convert_msmarco_to_squad_format.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import urllib.request
from nemo.utils import logging
class SquadDownloader:
def __init__(self, save_path):
self.save_path = save_path + '/squad'
if not os.path.exists(self.save_path):
os.makedirs(self.save_path)
if not os.path.exists(self.save_path + '/v1.1'):
os.makedirs(self.save_path + '/v1.1')
if not os.path.exists(self.save_path + '/v2.0'):
os.makedirs(self.save_path + '/v2.0')
self.download_urls = {
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v1.1.json': 'v1.1/train-v1.1.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v1.1.json': 'v1.1/dev-v1.1.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json': 'v2.0/train-v2.0.json',
'https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json': 'v2.0/dev-v2.0.json',
}
def download(self):
for item in self.download_urls:
url = item
file = self.download_urls[item]
logging.info('Downloading: %s', url)
if os.path.isfile(self.save_path + '/' + file):
logging.info('** Download file already exists, skipping download')
else:
response = urllib.request.urlopen(url)
with open(self.save_path + '/' + file, "wb") as handle:
handle.write(response.read())
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Download Squad')
parser.add_argument(
'--destDir',
type=str,
required=False,
help='directory to store data',
default=os.path.split(os.path.abspath(__file__))[0],
)
args = parser.parse_args()
logging.info(args.destDir)
squad_dl = SquadDownloader(args.destDir)
squad_dl.download()
| NeMo-main | examples/nlp/question_answering/get_squad.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import IntentSlotClassificationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="intent_slot_classification_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params:\n {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
# initialize the model using the config file
model = IntentSlotClassificationModel(cfg.model, trainer=trainer)
# training
logging.info("================================================================================================")
logging.info('Starting training...')
trainer.fit(model)
logging.info('Training finished!')
# Stop further testing as fast_dev_run does not save checkpoints
if trainer.fast_dev_run:
return
# after model training is done, you can load the model from the saved checkpoint
# and evaluate it on a data file or on given queries.
logging.info("================================================================================================")
logging.info("Starting the testing of the trained model on test set...")
logging.info("We will load the latest model saved checkpoint from the training...")
# for evaluation and inference you can load the previously trained model saved in .nemo file
# like this in your code, but we will just reuse the trained model here
# eval_model = IntentSlotClassificationModel.restore_from(restore_path=checkpoint_path)
eval_model = model
# we will setup testing data reusing the same config (test section)
eval_model.update_data_dir_for_testing(data_dir=cfg.model.data_dir)
eval_model.setup_test_data(test_data_config=cfg.model.test_ds)
trainer.test(model=eval_model, ckpt_path=None, verbose=False)
logging.info("Testing finished!")
# run an inference on a few examples
logging.info("======================================================================================")
logging.info("Evaluate the model on the given queries...")
# this will work well if you train the model on Assistant dataset
# for your own dataset change the examples appropriately
queries = [
'set alarm for seven thirty am',
'lower volume by fifty percent',
'what is my schedule for tomorrow',
]
pred_intents, pred_slots = eval_model.predict_from_examples(queries, cfg.model.test_ds)
logging.info('The prediction results of some sample queries with the trained model:')
for query, intent, slots in zip(queries, pred_intents, pred_slots):
logging.info(f'Query : {query}')
logging.info(f'Predicted Intent: {intent}')
logging.info(f'Predicted Slots: {slots}')
logging.info("Inference finished!")
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/intent_slot_classification/intent_slot_classification.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Sample command to run the script:
python multi_label_intent_slot_classification.py \
model.data_dir=/home/user/multiatis \
model.validation_ds.prefix=dev \
model.test_ds.prefix=dev \
trainer.devices=[0] \
+trainer.fast_dev_run=true \
exp_manager.exp_dir=checkpoints
fast_dev_run=false will save checkpoints for the model
"""
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import MultiLabelIntentSlotClassificationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="multi_label_intent_slot_classification_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params:\n {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
# initialize the model using the config file
model = MultiLabelIntentSlotClassificationModel(cfg.model, trainer=trainer)
# training
logging.info("================================================================================================")
logging.info('Starting training...')
trainer.fit(model)
logging.info('Training finished!')
# Stop further testing as fast_dev_run does not save checkpoints
if trainer.fast_dev_run:
return
# after model training is done, you can load the model from the saved checkpoint
# and evaluate it on a data file or on given queries.
logging.info("================================================================================================")
logging.info("Starting the testing of the trained model on test set...")
logging.info("We will load the latest model saved checkpoint from the training...")
# for evaluation and inference you can load the previously trained model saved in .nemo file
# like this in your code, but we will just reuse the trained model here
# eval_model = MultiLabelIntentSlotClassificationModel.restore_from(restore_path=checkpoint_path)
eval_model = model
# we will setup testing data reusing the same config (test section)
eval_model.update_data_dir_for_testing(data_dir=cfg.model.data_dir)
eval_model.setup_test_data(test_data_config=cfg.model.test_ds)
trainer.test(model=eval_model, ckpt_path=None, verbose=False)
logging.info("Testing finished!")
# Optimize Threshold
eval_model.optimize_threshold(cfg.model.test_ds, 'dev')
# run an inference on a few examples
logging.info("======================================================================================")
logging.info("Evaluate the model on the given queries...")
# this will work well if you train the model on ATIS dataset
# for your own dataset change the examples appropriately
queries = [
'i would like to find a flight from charlotte to las vegas that makes a stop in st. louis',
'on april first i need a ticket from tacoma to san jose departing before 7 am',
'how much is the limousine service in boston',
]
# We use the optimized threshold for predictions
pred_intents, pred_slots, pred_list = eval_model.predict_from_examples(queries, cfg.model.test_ds)
logging.info('The prediction results of some sample queries with the trained model:')
for query, intent, slots in zip(queries, pred_intents, pred_slots):
logging.info(f'Query : {query}')
logging.info(f'Predicted Intents: {intent}')
logging.info(f'Predicted Slots: {slots}')
logging.info("Inference finished!")
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/intent_slot_classification/multi_label_intent_slot_classification.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script converts checkpoint .ckpt to .nemo file.
This script uses the `examples/nlp/spellchecking_asr_customization/conf/spellchecking_asr_customization_config.yaml`
config file by default. The other option is to set another config file via command
line arguments by `--config-name=CONFIG_FILE_PATH'.
"""
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import SpellcheckingAsrCustomizationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="spellchecking_asr_customization_config")
def main(cfg: DictConfig) -> None:
logging.debug(f'Config Params: {OmegaConf.to_yaml(cfg)}')
SpellcheckingAsrCustomizationModel.load_from_checkpoint(cfg.checkpoint_path).save_to(cfg.target_nemo_path)
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/spellchecking_asr_customization/checkpoint_to_nemo.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to train SpellMapper (SpellcheckingAsrCustomizationModel).
It uses the `examples/nlp/spellchecking_asr_customization/conf/spellchecking_asr_customization_config.yaml`
config file by default. The other option is to set another config file via command
line arguments by `--config-name=CONFIG_FILE_PATH'. Probably it is worth looking
at the example config file to see the list of parameters used for training.
USAGE Example:
See `examples/nlp/spellchecking_asr_customization/run_training.sh` for training on non-tarred data.
and
`examples/nlp/spellchecking_asr_customization/run_training_tarred.sh` for training on tarred data.
One (non-tarred) training example should consist of 4 tab-separated columns:
1. text of ASR-hypothesis
2. texts of 10 candidates separated by semicolon
3. 1-based ids of correct candidates, or 0 if none
4. start/end coordinates of correct candidates (correspond to ids in third column)
Example (in one line):
a s t r o n o m e r s _ d i d i e _ s o m o n _ a n d _ t r i s t i a n _ g l l o
d i d i e r _ s a u m o n;a s t r o n o m i e;t r i s t a n _ g u i l l o t;t r i s t e s s e;m o n a d e;c h r i s t i a n;a s t r o n o m e r;s o l o m o n;d i d i d i d i d i;m e r c y
1 3
CUSTOM 12 23;CUSTOM 28 41
"""
from helpers import MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="spellchecking_asr_customization_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params: {OmegaConf.to_yaml(cfg)}')
# Train the model
if cfg.model.do_training:
logging.info(
"================================================================================================"
)
logging.info('Start training...')
trainer, model = instantiate_model_and_trainer(cfg, MODEL, True)
spellchecking_exp_manager = cfg.get('exp_manager', None)
exp_manager(trainer, spellchecking_exp_manager)
trainer.fit(model)
logging.info('Training finished!')
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/spellchecking_asr_customization/spellchecking_asr_customization_train.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to postprocess SpellMapper results and generate an updated nemo ASR manifest.
See "examples/nlp/spellchecking_asr_customization/run_infer.sh" for the whole inference pipeline.
"""
from argparse import ArgumentParser
from nemo.collections.nlp.data.spellchecking_asr_customization.utils import (
update_manifest_with_spellmapper_corrections,
)
parser = ArgumentParser(description="Postprocess SpellMapper results and generate an updated nemo ASR manifest")
parser.add_argument("--input_manifest", required=True, type=str, help="Path to input nemo ASR manifest")
parser.add_argument(
"--field_name", default="pred_text", type=str, help="Name of json field with original ASR hypothesis text"
)
parser.add_argument(
"--short2full_name",
required=True,
type=str,
help="Path to input file with correspondence between sentence fragments and full sentences",
)
parser.add_argument(
"--spellmapper_results", required=True, type=str, help="Path to input file with SpellMapper inference results"
)
parser.add_argument("--output_manifest", required=True, type=str, help="Path to output nemo ASR manifest")
parser.add_argument("--min_prob", default=0.5, type=float, help="Threshold on replacement probability")
parser.add_argument(
"--use_dp",
action="store_true",
help="Whether to use additional replacement filtering by using dynamic programming",
)
parser.add_argument(
"--replace_hyphen_to_space",
action="store_true",
help="Whether to use space instead of hyphen in replaced fragments",
)
parser.add_argument(
"--ngram_mappings", type=str, required=True, help="File with ngram mappings, only needed if use_dp=true"
)
parser.add_argument(
"--min_dp_score_per_symbol",
default=-1.5,
type=float,
help="Minimum dynamic programming sum score averaged by hypothesis length",
)
args = parser.parse_args()
update_manifest_with_spellmapper_corrections(
input_manifest_name=args.input_manifest,
short2full_name=args.short2full_name,
output_manifest_name=args.output_manifest,
spellmapper_results_name=args.spellmapper_results,
min_prob=args.min_prob,
replace_hyphen_to_space=args.replace_hyphen_to_space,
field_name=args.field_name,
use_dp=args.use_dp,
ngram_mappings=args.ngram_mappings,
min_dp_score_per_symbol=args.min_dp_score_per_symbol,
)
print("Resulting manifest saved to: ", args.output_manifest)
| NeMo-main | examples/nlp/spellchecking_asr_customization/postprocess_and_update_manifest.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example of how to run inference with the SpellcheckingAsrCustomizationModel.
An input line should consist of 4 tab-separated columns:
1. text of ASR-hypothesis
2. texts of 10 candidates separated by semicolon
3. 1-based ids of non-dummy candidates
4. approximate start/end coordinates of non-dummy candidates (corresponding to ids in the third column)
Example input (in one line):
t h e _ t a r a s i c _ o o r d a _ i s _ a _ p a r t _ o f _ t h e _ a o r t a _ l o c a t e d _ i n _ t h e _ t h o r a x
h e p a t i c _ c i r r h o s i s;u r a c i l;c a r d i a c _ a r r e s t;w e a n;a p g a r;p s y c h o m o t o r;t h o r a x;t h o r a c i c _ a o r t a;a v f;b l o c k a d e d
1 2 6 7 8 9 10
CUSTOM 6 23;CUSTOM 4 10;CUSTOM 4 15;CUSTOM 56 62;CUSTOM 5 19;CUSTOM 28 31;CUSTOM 39 48
Each line in SpellMapper output is tab-separated and consists of 4 columns:
1. ASR-hypothesis (same as in input)
2. 10 candidates separated with semicolon (same as in input)
3. fragment predictions, separated with semicolon, each prediction is a tuple (start, end, candidate_id, probability)
4. letter predictions - candidate_id predicted for each letter (this is only for debug purposes)
Example output (in one line):
t h e _ t a r a s i c _ o o r d a _ i s _ a _ p a r t _ o f _ t h e _ a o r t a _ l o c a t e d _ i n _ t h e _ t h o r a x
h e p a t i c _ c i r r h o s i s;u r a c i l;c a r d i a c _ a r r e s t;w e a n;a p g a r;p s y c h o m o t o r;t h o r a x;t h o r a c i c _ a o r t a;a v f;b l o c k a d e d
56 62 7 0.99998;4 20 8 0.95181;12 20 8 0.44829;4 17 8 0.99464;12 17 8 0.97645
8 8 8 0 8 8 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 7 7 7 7 7 7
USAGE Example:
1. Train a model, or use a pretrained checkpoint.
2. Run on a single file:
python nemo/examples/nlp/spellchecking_asr_customization/spellchecking_asr_customization_infer.py \
pretrained_model=${PRETRAINED_NEMO_CHECKPOINT} \
model.max_sequence_len=512 \
inference.from_file=input.txt \
inference.out_file=output.txt \
inference.batch_size=16 \
lang=en
or on multiple files:
python ${NEMO_PATH}/examples/nlp/spellchecking_asr_customization/spellchecking_asr_customization_infer.py \
pretrained_model=${PRETRAINED_NEMO_CHECKPOINT} \
model.max_sequence_len=512 \
+inference.from_filelist=filelist.txt \
+inference.output_folder=output_folder \
inference.batch_size=16 \
lang=en
This script uses the `/examples/nlp/spellchecking_asr_customization/conf/spellchecking_asr_customization_config.yaml`
config file by default. Alternatively, another config file can be specified via the command
line argument `--config-name=CONFIG_FILE_PATH`.
"""
import os
from helpers import MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.core.config import hydra_runner
from nemo.utils import logging
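

# Illustrative only (not used by this script): a minimal sketch of parsing one
# SpellMapper output line into structured predictions, assuming the 4-column
# tab-separated output format described in the module docstring above.
def _parse_spellmapper_output_line(line: str):
    hypothesis, candidates_str, fragments_str, _letter_predictions = line.rstrip("\n").split("\t")
    candidates = candidates_str.split(";")
    fragment_predictions = []
    if fragments_str:
        for prediction in fragments_str.split(";"):
            start, end, candidate_id, probability = prediction.split(" ")
            # candidate_id is 1-based and points into the candidates list
            fragment_predictions.append((int(start), int(end), int(candidate_id), float(probability)))
    return hypothesis, candidates, fragment_predictions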
@hydra_runner(config_path="conf", config_name="spellchecking_asr_customization_config")
def main(cfg: DictConfig) -> None:
logging.debug(f'Config Params: {OmegaConf.to_yaml(cfg)}')
if cfg.pretrained_model is None:
raise ValueError("A pre-trained model should be provided.")
_, model = instantiate_model_and_trainer(cfg, MODEL, False)
if cfg.model.max_sequence_len != model.max_sequence_len:
model.max_sequence_len = cfg.model.max_sequence_len
model.builder._max_seq_length = cfg.model.max_sequence_len
input_filenames = []
output_filenames = []
if "from_filelist" in cfg.inference and "output_folder" in cfg.inference:
filelist_file = cfg.inference.from_filelist
output_folder = cfg.inference.output_folder
with open(filelist_file, "r", encoding="utf-8") as f:
for line in f:
path = line.strip()
input_filenames.append(path)
folder, name = os.path.split(path)
output_filenames.append(os.path.join(output_folder, name))
else:
text_file = cfg.inference.from_file
logging.info(f"Running inference on {text_file}...")
if not os.path.exists(text_file):
raise ValueError(f"{text_file} not found.")
input_filenames.append(text_file)
output_filenames.append(cfg.inference.out_file)
dataloader_cfg = {
"batch_size": cfg.inference.get("batch_size", 8),
"num_workers": cfg.inference.get("num_workers", 4),
"pin_memory": cfg.inference.get("num_workers", False),
}
for input_filename, output_filename in zip(input_filenames, output_filenames):
if not os.path.exists(input_filename):
logging.info(f"Skip non-existing {input_filename}.")
continue
model.infer(dataloader_cfg, input_filename, output_filename)
logging.info(f"Predictions saved to {output_filename}.")
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/spellchecking_asr_customization/spellchecking_asr_customization_infer.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example of how to prepare input for SpellMapper inference from a nemo ASR manifest.
It splits sentences into shorter fragments, runs candidate retrieval and generates input in the required format.
It produces two output files:
1. File with correspondence between sentence fragments and full sentences.
2. File that will serve as input for SpellMapper inference.
See "examples/nlp/spellchecking_asr_customization/run_infer.sh" for the whole inference pipeline.
"""
from argparse import ArgumentParser
from nemo.collections.nlp.data.spellchecking_asr_customization.utils import (
extract_and_split_text_from_manifest,
get_candidates,
load_index,
)
parser = ArgumentParser(description="Prepare input for SpellMapper inference from a nemo ASR manifest")
parser.add_argument("--manifest", required=True, type=str, help="Path to input manifest file")
parser.add_argument(
"--custom_vocab_index", required=True, type=str, help="Path to input file with custom vocabulary index"
)
parser.add_argument(
"--big_sample",
required=True,
type=str,
help="Path to input file with big sample of phrases to sample dummy candidates if there less than 10 are found by retrieval",
)
parser.add_argument(
"--short2full_name",
required=True,
type=str,
help="Path to output file with correspondence between sentence fragments and full sentences",
)
parser.add_argument(
"--output_name",
required=True,
type=str,
help="Path to output file that will serve as input for SpellMapper inference",
)
parser.add_argument("--field_name", default="pred_text", type=str, help="Name of json field with ASR hypothesis text")
parser.add_argument("--len_in_words", default=16, type=int, help="Maximum fragment length in words")
parser.add_argument(
"--step_in_words",
default=8,
type=int,
help="Step in words for moving to next fragment. If less than len_in_words, fragments will intersect",
)
args = parser.parse_args()
# Split ASR hypotheses into shorter fragments, because SpellMapper can't handle arbitrarily long sequences.
# The correspondence between short and original fragments is saved to a file and will be used at post-processing.
extract_and_split_text_from_manifest(
input_name=args.manifest,
output_name=args.short2full_name,
field_name=args.field_name,
len_in_words=args.len_in_words,
step_in_words=args.step_in_words,
)
# Load index of custom vocabulary from file
phrases, ngram2phrases = load_index(args.custom_vocab_index)
# Load a big sample of phrases, used to sample dummy candidates if fewer than 10 are found by retrieval
big_sample_of_phrases = set()
with open(args.big_sample, "r", encoding="utf-8") as f:
for line in f:
phrase, freq = line.strip().split("\t")
if int(freq) > 50: # do not want to use frequent phrases as dummy candidates
continue
if len(phrase) < 6 or len(phrase) > 15: # do not want to use too short or too long phrases as dummy candidates
continue
big_sample_of_phrases.add(phrase)
big_sample_of_phrases = list(big_sample_of_phrases)
# Generate input for SpellMapper inference
out = open(args.output_name, "w", encoding="utf-8")
with open(args.short2full_name, "r", encoding="utf-8") as f:
for line in f:
short_sent, _ = line.strip().split("\t")
sent = "_".join(short_sent.split())
letters = list(sent)
candidates = get_candidates(ngram2phrases, phrases, letters, big_sample_of_phrases)
if len(candidates) == 0:
continue
        if len(candidates) != 10:
            raise ValueError(f"Expected 10 candidates, got {len(candidates)}")
# We add two columns with targets and span_info.
# They have same format as during training, but start and end positions are APPROXIMATE, they will be adjusted when constructing BertExample.
targets = []
span_info = []
for idx, c in enumerate(candidates):
if c[1] == -1:
continue
targets.append(str(idx + 1)) # targets are 1-based
start = c[1]
# ensure that end is not outside sentence length (it can happen because c[2] is candidate length used as approximation)
end = min(c[1] + c[2], len(letters))
span_info.append("CUSTOM " + str(start) + " " + str(end))
out.write(
" ".join(letters)
+ "\t"
+ ";".join([x[0] for x in candidates])
+ "\t"
+ " ".join(targets)
+ "\t"
+ ";".join(span_info)
+ "\n"
)
out.close()
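
# Illustrative usage (file names below are hypothetical; see run_infer.sh for the full pipeline):
# python prepare_input_from_manifest.py \
#     --manifest pred_manifest.json \
#     --custom_vocab_index index.txt \
#     --big_sample big_sample.txt \
#     --short2full_name short2full.txt \
#     --output_name spellmapper_input.txt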
| NeMo-main | examples/nlp/spellchecking_asr_customization/prepare_input_from_manifest.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to create an index of custom vocabulary and save it to file.
See "examples/nlp/spellchecking_asr_customization/run_infer.sh" for the whole inference pipeline.
"""
from argparse import ArgumentParser
from nemo.collections.nlp.data.spellchecking_asr_customization.utils import get_index, load_ngram_mappings
parser = ArgumentParser(description="Create an index of custom vocabulary and save it to file")
parser.add_argument(
"--input_name", required=True, type=str, help="Path to input file with custom vocabulary (plain text)"
)
parser.add_argument(
"--ngram_mappings", required=True, type=str, help="Path to input file with n-gram mapping vocabulary"
)
parser.add_argument("--output_name", required=True, type=str, help="Path to output file with custom vocabulary index")
parser.add_argument("--min_log_prob", default=-4.0, type=float, help="Threshold on log probability")
parser.add_argument(
"--max_phrases_per_ngram",
default=500,
type=int,
help="Threshold on number of phrases that can be stored for one n-gram key in index. Keys with more phrases are discarded.",
)
parser.add_argument(
"--max_misspelled_freq", default=125000, type=int, help="Threshold on maximum frequency of misspelled n-gram"
)
args = parser.parse_args()
# Load custom vocabulary
custom_phrases = set()
with open(args.input_name, "r", encoding="utf-8") as f:
for line in f:
phrase = line.strip()
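        # e.g. "thoracic aorta" -> "t h o r a c i c _ a o r t a"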
custom_phrases.add(" ".join(list(phrase.replace(" ", "_"))))
print("Size of customization vocabulary:", len(custom_phrases))
# Load n-gram mappings vocabulary
ngram_mapping_vocab, ban_ngram = load_ngram_mappings(args.ngram_mappings, max_misspelled_freq=args.max_misspelled_freq)
# Generate index of custom phrases
phrases, ngram2phrases = get_index(
custom_phrases,
ngram_mapping_vocab,
ban_ngram,
min_log_prob=args.min_log_prob,
max_phrases_per_ngram=args.max_phrases_per_ngram,
)
# Save index to file
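# Each output line: ngram <TAB> phrase <TAB> begin <TAB> size <TAB> logprob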
with open(args.output_name, "w", encoding="utf-8") as out:
for ngram in ngram2phrases:
for phrase_id, begin, size, logprob in ngram2phrases[ngram]:
phrase = phrases[phrase_id]
out.write(ngram + "\t" + phrase + "\t" + str(begin) + "\t" + str(size) + "\t" + str(logprob) + "\n")
| NeMo-main | examples/nlp/spellchecking_asr_customization/create_custom_vocab_index.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Tuple
import pytorch_lightning as pl
from omegaconf import DictConfig
from nemo.collections.nlp.models import SpellcheckingAsrCustomizationModel
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.utils import logging
__all__ = ["MODEL", "MODEL_NAMES", "instantiate_model_and_trainer"]
MODEL = "spellchecking"
MODEL_NAMES = [MODEL]
def instantiate_model_and_trainer(
cfg: DictConfig, model_name: str, do_training: bool
) -> Tuple[pl.Trainer, SpellcheckingAsrCustomizationModel]:
""" Function for instantiating a model and a trainer
Args:
cfg: The config used to instantiate the model and the trainer.
        model_name: A str indicating the model type, currently only 'spellchecking'.
        do_training: A boolean flag indicating whether the model will be trained or evaluated.
Returns:
trainer: A PyTorch Lightning trainer
model: A SpellcheckingAsrCustomizationModel
"""
if model_name not in MODEL_NAMES:
raise ValueError(f"{model_name} is unknown model type")
# Get configs for the corresponding models
trainer_cfg = cfg.get("trainer")
model_cfg = cfg.get("model")
pretrained_cfg = cfg.get("pretrained_model", None)
trainer = pl.Trainer(**trainer_cfg)
if not pretrained_cfg:
logging.info(f"Initializing {model_name} model")
if model_name == MODEL:
model = SpellcheckingAsrCustomizationModel(model_cfg, trainer=trainer)
else:
raise ValueError(f"{model_name} is unknown model type")
elif os.path.exists(pretrained_cfg):
logging.info(f"Restoring pretrained {model_name} model from {pretrained_cfg}")
save_restore_connector = NLPSaveRestoreConnector()
model = SpellcheckingAsrCustomizationModel.restore_from(
pretrained_cfg, save_restore_connector=save_restore_connector
)
else:
logging.info(f"Loading pretrained model {pretrained_cfg}")
if model_name == MODEL:
if pretrained_cfg not in SpellcheckingAsrCustomizationModel.get_available_model_names():
raise (
ValueError(
f"{pretrained_cfg} not in the list of available Tagger models."
f"Select from {SpellcheckingAsrCustomizationModel.list_available_models()}"
)
)
model = SpellcheckingAsrCustomizationModel.from_pretrained(pretrained_cfg)
else:
raise ValueError(f"{model_name} is unknown model type")
# Setup train and validation data
if do_training:
model.setup_training_data(train_data_config=cfg.data.train_ds)
model.setup_validation_data(val_data_config=cfg.data.validation_ds)
logging.info(f"Model {model_name} -- Device {model.device}")
return trainer, model
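
# Example (mirrors how the inference script in this folder calls this helper):
#     _, model = instantiate_model_and_trainer(cfg, MODEL, False)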
| NeMo-main | examples/nlp/spellchecking_asr_customization/helpers.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to create a tarred dataset for SpellcheckingAsrCustomizationModel.
This script uses the `/examples/nlp/spellchecking_asr_customization/conf/spellchecking_asr_customization_config.yaml`
config file by default. Alternatively, another config file can be specified via the command
line argument `--config-name=CONFIG_FILE_PATH`. It is worth looking
at the example config file to see the list of parameters used for training.
USAGE Example:
1. Obtain a processed dataset
2. Run:
python ${NEMO_PATH}/examples/nlp/spellchecking_asr_customization/create_tarred_dataset.py \
lang=${LANG} \
data.train_ds.data_path=${DATA_PATH}/train.tsv \
model.language_model.pretrained_model_name=${LANGUAGE_MODEL} \
model.label_map=${DATA_PATH}/label_map.txt \
+output_tar_file=tarred/part1.tar \
+take_first_n_lines=100000
"""
import pickle
import tarfile
from io import BytesIO
from helpers import MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="spellchecking_asr_customization_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params: {OmegaConf.to_yaml(cfg)}')
logging.info("Start creating tar file from " + cfg.data.train_ds.data_path + " ...")
_, model = instantiate_model_and_trainer(
cfg, MODEL, True
    )  # instantiate the model as if for training because we may not have a pretrained model
dataset = model._train_dl.dataset
archive = tarfile.open(cfg.output_tar_file, mode="w")
max_lines = int(cfg.take_first_n_lines)
for i in range(len(dataset)):
if i >= max_lines:
logging.info("Reached " + str(max_lines) + " examples")
break
(
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
spans,
) = dataset[i]
# do not store masks as they are just arrays of 1
content = {
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
"input_ids_for_subwords": input_ids_for_subwords,
"input_mask_for_subwords": input_mask_for_subwords,
"segment_ids_for_subwords": segment_ids_for_subwords,
"character_pos_to_subword_pos": character_pos_to_subword_pos,
"labels_mask": labels_mask,
"labels": labels,
"spans": spans,
}
b = BytesIO()
pickle.dump(content, b)
b.seek(0)
tarinfo = tarfile.TarInfo(name="example_" + str(i) + ".pkl")
tarinfo.size = b.getbuffer().nbytes
archive.addfile(tarinfo=tarinfo, fileobj=b)
archive.close()
logging.info("Tar file " + cfg.output_tar_file + " created!")
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/spellchecking_asr_customization/create_tarred_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given NMT model's .nemo file(s), this script can be used to translate text.
USAGE Example:
1. Obtain text file in src language. You can use sacrebleu to obtain standard test sets like so:
sacrebleu -t wmt14 -l de-en --echo src > wmt14-de-en.src
2. Translate:
python nmt_transformer_infer.py --model=[Path to .nemo file(s)] --srctext=wmt14-de-en.src --tgtout=wmt14-de-en.pre
"""
import json
from argparse import ArgumentParser
import torch
import nemo.collections.nlp as nemo_nlp
from nemo.collections.nlp.modules.common.transformer import (
BeamSearchSequenceGenerator,
BeamSearchSequenceGeneratorWithLanguageModel,
EnsembleBeamSearchSequenceGenerator,
)
from nemo.utils import logging
def translate_text(
models, args, src_text, tgt_text, tgt_text_all, src_texts, all_scores, all_timing, ensemble_generator
):
if len(models) > 1:
src_ids, src_mask = models[0].prepare_inference_batch(src_text)
best_translations = ensemble_generator(src_ids, src_mask, return_beam_scores=args.write_scores)
if args.write_scores:
all_results, scores, best_translations = (
best_translations[0],
best_translations[1],
best_translations[2],
)
scores = scores.view(-1).data.cpu().numpy().tolist()
all_scores += scores
src_texts += [item for item in src_text for i in range(args.beam_size)]
all_results = models[0].ids_to_postprocessed_text(
all_results, models[0].decoder_tokenizer, models[0].target_processor
)
tgt_text_all += all_results
best_translations = models[0].ids_to_postprocessed_text(
best_translations, models[0].decoder_tokenizer, models[0].target_processor
)
tgt_text += best_translations
else:
model = models[0]
best_translations = model.translate(
text=src_text,
source_lang=args.source_lang,
target_lang=args.target_lang,
return_beam_scores=args.write_scores,
log_timing=args.write_timing,
)
if args.write_timing:
*best_translations, timing_dict = best_translations
all_timing.append(timing_dict)
else:
best_translations = (best_translations,)
if args.write_scores:
all_results, scores, best_translations = (
best_translations[0],
best_translations[1],
best_translations[2],
)
all_scores += scores
src_texts += [item for item in src_text for i in range(args.beam_size)]
tgt_text_all += all_results
else:
best_translations = best_translations[0]
tgt_text += best_translations
print(f"Translated {len(tgt_text)} sentences")
def main():
parser = ArgumentParser()
parser.add_argument(
"--model",
type=str,
required=True,
help="Path to .nemo model file(s). If ensembling, provide comma separated paths to multiple models.",
)
parser.add_argument("--srctext", type=str, required=True, help="Path to the file to translate.")
parser.add_argument(
"--tgtout", type=str, required=True, help="Path to the file where translations are to be written."
)
parser.add_argument(
"--batch_size", type=int, default=256, help="Number of sentences to batch together while translatiing."
)
parser.add_argument("--beam_size", type=int, default=4, help="Beam size.")
parser.add_argument(
"--len_pen", type=float, default=0.6, help="Length Penalty. Ref: https://arxiv.org/abs/1609.08144"
)
parser.add_argument(
"--max_delta_length",
type=int,
default=5,
help="Stop generating if target sequence length exceeds source length by this number.",
)
parser.add_argument(
"--target_lang",
type=str,
default=None,
help="Target language identifier ex: en,de,fr,es etc. If both `--target_lang` and `--source_lang` are "
"not set, then target language processing will be done the same way as during model training. If "
"`--target_lang` parameter is not set but `--source_lang` parameter is set, then target language "
"processing will not be performed. If `--target_lang` equals 'ignore', then target language processing "
"will not be performed regardless of value of `--source_lang` parameter.",
)
parser.add_argument(
"--source_lang",
type=str,
default=None,
help="Source language identifier ex: en,de,fr,es etc. If both `--target_lang` and `--source_lang` are "
"not set, then source language processing will be done the same way as during model training. If "
"`--source_lang` parameter is not set but `--target_lang` parameter is set, then source language "
"processing will not be performed. If `--source_lang` equals 'ignore', then source language processing "
"will not be performed regardless of value of `--target_lang` parameter.",
)
parser.add_argument(
"--write_scores",
action="store_true",
help="Whether to write a separate file with scores not including length penalties corresponding to each beam hypothesis (.score suffix)",
)
parser.add_argument(
"--write_timing",
action="store_true",
help="Whether to write a separate file with detailed timing info (.timing.json suffix)",
)
# shallow fusion specific parameters
parser.add_argument(
"--lm_model",
type=str,
default=None,
help="Optional path to an LM model that has the same tokenizer as NMT models for shallow fuison. Note: If using --write_scores, it will add LM scores as well.",
)
parser.add_argument(
"--fusion_coef", type=float, default=0.07, help="Weight assigned to LM scores during shallow fusion."
)
args = parser.parse_args()
torch.set_grad_enabled(False)
logging.info("Attempting to initialize from .nemo file")
models = []
for model_path in args.model.split(','):
if not model_path.endswith('.nemo'):
raise NotImplementedError(f"Only support .nemo files, but got: {model_path}")
model = nemo_nlp.models.machine_translation.MTEncDecModel.restore_from(restore_path=model_path).eval()
models.append(model)
if (len(models) > 1) and (args.write_timing):
raise RuntimeError("Cannot measure timing when more than 1 model is used")
src_text = []
tgt_text = []
tgt_text_all = []
src_texts = []
all_scores = []
all_timing = []
if torch.cuda.is_available():
models = [model.cuda() for model in models]
if args.lm_model is not None:
lm_model = nemo_nlp.models.language_modeling.TransformerLMModel.restore_from(restore_path=args.lm_model).eval()
else:
lm_model = None
if len(models) > 1:
ensemble_generator = EnsembleBeamSearchSequenceGenerator(
encoders=[model.encoder for model in models],
embeddings=[model.decoder.embedding for model in models],
decoders=[model.decoder.decoder for model in models],
log_softmaxes=[model.log_softmax for model in models],
max_sequence_length=512,
beam_size=args.beam_size,
bos=models[0].decoder_tokenizer.bos_id,
pad=models[0].decoder_tokenizer.pad_id,
eos=models[0].decoder_tokenizer.eos_id,
len_pen=args.len_pen,
max_delta_length=args.max_delta_length,
language_model=lm_model,
fusion_coef=args.fusion_coef,
)
else:
model = models[0]
ensemble_generator = None
if lm_model is not None:
model.beam_search = BeamSearchSequenceGeneratorWithLanguageModel(
embedding=model.decoder.embedding,
decoder=model.decoder.decoder,
log_softmax=model.log_softmax,
bos=model.decoder_tokenizer.bos_id,
pad=model.decoder_tokenizer.pad_id,
eos=model.decoder_tokenizer.eos_id,
language_model=lm_model,
fusion_coef=args.fusion_coef,
max_sequence_length=model.decoder.max_sequence_length,
beam_size=args.beam_size,
len_pen=args.len_pen,
max_delta_length=args.max_delta_length,
)
else:
model.beam_search = BeamSearchSequenceGenerator(
embedding=model.decoder.embedding,
decoder=model.decoder.decoder,
log_softmax=model.log_softmax,
bos=model.decoder_tokenizer.bos_id,
pad=model.decoder_tokenizer.pad_id,
eos=model.decoder_tokenizer.eos_id,
max_sequence_length=model.decoder.max_sequence_length,
beam_size=args.beam_size,
len_pen=args.len_pen,
max_delta_length=args.max_delta_length,
)
logging.info(f"Translating: {args.srctext}")
with open(args.srctext, 'r') as src_f:
for line in src_f:
src_text.append(line.strip())
if len(src_text) == args.batch_size:
# warmup when measuring timing
if args.write_timing and (not all_timing):
print("running a warmup batch")
translate_text(
models=models,
args=args,
src_text=src_text,
tgt_text=[],
tgt_text_all=[],
src_texts=[],
all_scores=[],
all_timing=[],
ensemble_generator=ensemble_generator,
)
translate_text(
models=models,
args=args,
src_text=src_text,
tgt_text=tgt_text,
tgt_text_all=tgt_text_all,
src_texts=src_texts,
all_scores=all_scores,
all_timing=all_timing,
ensemble_generator=ensemble_generator,
)
src_text = []
if len(src_text) > 0:
translate_text(
models=models,
args=args,
src_text=src_text,
tgt_text=tgt_text,
tgt_text_all=tgt_text_all,
src_texts=src_texts,
all_scores=all_scores,
all_timing=all_timing,
ensemble_generator=ensemble_generator,
)
with open(args.tgtout, 'w') as tgt_f:
for line in tgt_text:
tgt_f.write(line + "\n")
if args.write_scores:
with open(args.tgtout + '.score', 'w') as tgt_f_scores:
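            # Each output line: source <TAB> hypothesis <TAB> beam score (length penalty not included)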
for line, score, inp in zip(tgt_text_all, all_scores, src_texts):
tgt_f_scores.write(inp + "\t" + line + "\t" + str(score) + "\n")
if args.write_timing:
# collect list of dicts to a dict of lists
timing_dict = {}
if len(all_timing):
for k in all_timing[0].keys():
timing_dict[k] = [t[k] for t in all_timing]
with open(args.tgtout + '.timing.json', 'w') as timing_fh:
json.dump(timing_dict, timing_fh)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/machine_translation/nmt_transformer_infer.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nemo.collections.nlp.data.machine_translation.preproc_mt_data import MTDataPreproc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='NMT dataset pre-processing')
parser.add_argument(
'--tokenizer_name', type=str, default='yttm', help='Supports yttm, sentencepiece and HuggingFace tokenizers',
)
parser.add_argument('--tokenizer_model', type=str, default=None, help='Path to tokenizer model')
    parser.add_argument('--bpe_dropout', type=float, default=0.0, help='BPE dropout to use')
parser.add_argument('--clean', action="store_true", help='Whether to clean dataset based on length diff')
parser.add_argument('--pkl_file_prefix', type=str, default='parallel', help='Prefix for tar and pickle files')
parser.add_argument('--fname', type=str, required=True, help='Path to monolingual data file')
parser.add_argument('--out_dir', type=str, required=True, help='Path to store dataloader and tokenizer models')
parser.add_argument('--max_seq_length', type=int, default=512, help='Max Sequence Length')
parser.add_argument('--min_seq_length', type=int, default=1, help='Min Sequence Length')
parser.add_argument('--tokens_in_batch', type=int, default=16000, help='# Tokens per batch per GPU')
parser.add_argument(
'--lines_per_dataset_fragment',
type=int,
default=1000000,
help='Number of lines to consider for bucketing and padding',
)
parser.add_argument(
'--num_batches_per_tarfile',
type=int,
default=1000,
help='Number of batches (pickle files) within each tarfile',
)
args = parser.parse_args()
if not os.path.exists(args.out_dir):
os.mkdir(args.out_dir)
if args.tokenizer_name in ["yttm", "sentencepiece"] and not os.path.exists(args.tokenizer_model):
        raise FileNotFoundError("Could not find tokenizer model %s" % (args.tokenizer_model))
tokenizer_model = MTDataPreproc.get_monolingual_tokenizer(
        tokenizer_name=args.tokenizer_name, tokenizer_model=args.tokenizer_model, bpe_dropout=args.bpe_dropout
)
MTDataPreproc.preprocess_monolingual_dataset(
clean=args.clean,
fname=args.fname,
out_dir=args.out_dir,
tokenizer=tokenizer_model,
max_seq_length=args.max_seq_length,
min_seq_length=args.min_seq_length,
tokens_in_batch=args.tokens_in_batch,
lines_per_dataset_fragment=args.lines_per_dataset_fragment,
num_batches_per_tarfile=args.num_batches_per_tarfile,
pkl_file_prefix=args.pkl_file_prefix,
global_rank=0,
world_size=1,
)
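
# Illustrative usage (paths and names below are hypothetical):
# python create_tarred_monolingual_dataset.py \
#     --tokenizer_name yttm \
#     --tokenizer_model tokenizer.model \
#     --fname train.mono.en \
#     --out_dir tarred_mono_en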
| NeMo-main | examples/nlp/machine_translation/create_tarred_monolingual_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.multiprocessing as mp
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import ModelSummary
from pytorch_lightning.plugins.environments import TorchElasticEnvironment
from pytorch_lightning.trainer.connectors.checkpoint_connector import _CheckpointConnector
from nemo.collections.nlp.models.language_modeling.megatron_bart_model import MegatronBARTModel
from nemo.collections.nlp.models.language_modeling.megatron_t5_model import MegatronT5Model
from nemo.collections.nlp.models.machine_translation.megatron_nmt_model import MegatronNMTModel
from nemo.collections.nlp.parts.nlp_overrides import (
GradScaler,
MegatronHalfPrecisionPlugin,
NLPDDPStrategy,
NLPSaveRestoreConnector,
PipelineMixedPrecisionPlugin,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
mp.set_start_method("spawn", force=True)
@hydra_runner(config_path="conf", config_name="aayn_base_megatron")
def main(cfg) -> None:
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'\n{OmegaConf.to_yaml(cfg)}')
megatron_amp_o2 = cfg.model.get('megatron_amp_O2', False)
plugins = []
strategy = NLPDDPStrategy(
no_ddp_communication_hook=True,
gradient_as_bucket_view=cfg.model.gradient_as_bucket_view,
find_unused_parameters=False,
)
if cfg.trainer.precision in [16, '16', 'bf16', '16-mixed', 'bf16-mixed']:
scaler = None
if cfg.trainer.precision in [16, '16', '16-mixed']:
scaler = GradScaler(
init_scale=cfg.model.get('native_amp_init_scale', 2 ** 32),
growth_interval=cfg.model.get('native_amp_growth_interval', 1000),
hysteresis=cfg.model.get('hysteresis', 2),
)
# MixedPrecisionPlugin in PTL >= 2.0 requires precision to be 16-mixed or bf16-mixed
plugin_precision = '16-mixed'
else:
plugin_precision = 'bf16-mixed'
if megatron_amp_o2:
plugins.append(MegatronHalfPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
else:
plugins.append(PipelineMixedPrecisionPlugin(precision=plugin_precision, device='cuda', scaler=scaler))
if cfg.get('cluster_type', None) == 'BCP':
plugins.append(TorchElasticEnvironment())
trainer = Trainer(plugins=plugins, strategy=strategy, **cfg.trainer, callbacks=[ModelSummary(max_depth=3)])
exp_manager(trainer, cfg.exp_manager)
# update resume from checkpoint found by exp_manager
if cfg.model.resume_from_checkpoint is not None:
trainer.ckpt_path = cfg.model.resume_from_checkpoint
logging.info(f'Resuming training from checkpoint: {trainer.ckpt_path}')
trainer._checkpoint_connector = _CheckpointConnector(trainer)
if hasattr(cfg.model, 'pretrained_model_path') and cfg.model.pretrained_model_path is not None:
if not hasattr(cfg.model, 'pretrained_model_type'):
raise ValueError(f"Pretrained model type must be in [T5, BART].")
assert cfg.model.pretrained_model_type in ['T5', 'BART']
if cfg.model.pretrained_model_type == 'T5':
pretrained_cfg = MegatronT5Model.restore_from(
cfg.model.pretrained_model_path, trainer=trainer, return_config=True
)
else:
pretrained_cfg = MegatronBARTModel.restore_from(
cfg.model.pretrained_model_path, trainer=trainer, return_config=True
)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
pretrained_cfg.masked_softmax_fusion = False
# Set source and target language/multilingual
pretrained_cfg.src_language = cfg.model.src_language
pretrained_cfg.tgt_language = cfg.model.tgt_language
pretrained_cfg.multilingual = cfg.model.multilingual
pretrained_cfg.shared_tokenizer = True
# Max generation delta
pretrained_cfg.max_generation_delta = cfg.model.max_generation_delta
# Set label smoothing
pretrained_cfg.label_smoothing = cfg.model.label_smoothing
# Set tokenizer paths:
pretrained_cfg.encoder_tokenizer = pretrained_cfg.tokenizer
pretrained_cfg.decoder_tokenizer = pretrained_cfg.tokenizer
# Pre-trained models should use the legacy sentencepiece tokenizer ex: mT5
pretrained_cfg.encoder_tokenizer.sentencepiece_legacy = True
pretrained_cfg.decoder_tokenizer.sentencepiece_legacy = True
# Override dropout
# Old pre-trained checkpoints do not have separate encoder/decoder configurations, so replicate the config to encoder/decoder.
if not hasattr(pretrained_cfg, 'encoder'):
assert not hasattr(pretrained_cfg, 'decoder')
logging.warning(
"No separate configuration for encoder, found in pretrained model, using encoder dropout settings everywhere."
)
pretrained_cfg.hidden_dropout = cfg.model.encoder.hidden_dropout
pretrained_cfg.attention_dropout = cfg.model.encoder.attention_dropout
else:
assert hasattr(pretrained_cfg, 'decoder') and hasattr(pretrained_cfg, 'encoder')
pretrained_cfg.encoder.hidden_dropout = cfg.model.encoder.hidden_dropout
pretrained_cfg.encoder.attention_dropout = cfg.model.encoder.attention_dropout
pretrained_cfg.decoder.hidden_dropout = cfg.model.decoder.hidden_dropout
pretrained_cfg.decoder.attention_dropout = cfg.model.decoder.attention_dropout
# Override precision
pretrained_cfg.precision = trainer.precision # Set above from trainer.precision
# Override micro/global batch
pretrained_cfg.micro_batch_size = cfg.model.micro_batch_size
pretrained_cfg.global_batch_size = cfg.model.global_batch_size
# O2 AMP
pretrained_cfg.megatron_amp_O2 = cfg.model.get('megatron_amp_O2', False)
# Override data and global/micro batch size.
pretrained_cfg.train_ds = cfg.model.train_ds
pretrained_cfg.train_ds.micro_batch_size = cfg.model.micro_batch_size
pretrained_cfg.train_ds.global_batch_size = cfg.model.global_batch_size
if hasattr(cfg.model, 'validation_ds'):
pretrained_cfg.validation_ds = cfg.model.validation_ds
else:
raise AttributeError(f"No validation dataset found in config.")
if hasattr(cfg.model, 'test_ds'):
pretrained_cfg.test_ds = cfg.model.test_ds
# Class target for the new class being restored.
pretrained_cfg.target = (
"nemo.collections.nlp.models.machine_translation.megatron_nmt_model.MegatronNMTModel"
)
# Optimizer overrides.
pretrained_cfg.optim = cfg.model.optim
model = MegatronNMTModel.restore_from(
cfg.model.pretrained_model_path,
trainer=trainer,
override_config_path=pretrained_cfg,
save_restore_connector=NLPSaveRestoreConnector(),
)
else:
model = MegatronNMTModel(cfg.model, trainer)
trainer.fit(model)
trainer.validate(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/machine_translation/megatron_nmt_training.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from nemo.collections.nlp.data.machine_translation.preproc_mt_data import MTDataPreproc
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='NMT dataset pre-processing')
parser.add_argument('--shared_tokenizer', action="store_true", help='Whether to share encoder/decoder tokenizers')
parser.add_argument('--clean', action="store_true", help='Whether to clean dataset based on length diff')
parser.add_argument('--tar_file_prefix', type=str, default='parallel', help='Prefix for tar files')
parser.add_argument('--src_fname', type=str, required=True, help='Path to the source file')
parser.add_argument('--tgt_fname', type=str, required=True, help='Path to the target file')
parser.add_argument('--out_dir', type=str, required=True, help='Path to store dataloader and tokenizer models')
parser.add_argument('--encoder_model_name', type=str, default=None, help='For use with pretrained encoders')
parser.add_argument(
'--decoder_model_name', type=str, default=None, help='For use with pretrained decoders (not yet supported)'
)
parser.add_argument(
'--encoder_tokenizer_model', type=str, default='None', help='Path to pre-trained encoder tokenizer model'
)
parser.add_argument(
'--encoder_tokenizer_name',
type=str,
default='yttm',
help='Encoder BPE Tokenizer Name, Options: [yttm, sentencepiece]',
)
parser.add_argument('--encoder_tokenizer_vocab_size', type=int, default=32000, help='Encoder Vocab size after BPE')
parser.add_argument(
'--encoder_tokenizer_coverage', type=float, default=0.999, help='Encoder Character coverage for BPE'
)
parser.add_argument('--encoder_tokenizer_bpe_dropout', type=float, default=0.1, help='Encoder BPE dropout prob')
parser.add_argument(
'--encoder_tokenizer_r2l', action="store_true", help='Whether to return encoded sequence from right to left'
)
parser.add_argument(
'--encoder_tokenizer_legacy',
action="store_true",
help='Whether to use legacy tokenizer implementation of sentencepiece',
)
parser.add_argument(
'--decoder_tokenizer_model', type=str, default='None', help='Path to pre-trained decoder tokenizer model'
)
parser.add_argument(
'--decoder_tokenizer_name',
type=str,
default='yttm',
        help='Decoder BPE Tokenizer Name, Options: [yttm, sentencepiece]',
    )
    parser.add_argument('--decoder_tokenizer_vocab_size', type=int, default=32000, help='Decoder Vocab size after BPE')
    parser.add_argument(
        '--decoder_tokenizer_coverage', type=float, default=0.999, help='Decoder Character coverage for BPE'
    )
    parser.add_argument('--decoder_tokenizer_bpe_dropout', type=float, default=0.1, help='Decoder BPE dropout prob')
parser.add_argument(
'--decoder_tokenizer_r2l', action="store_true", help='Whether to return encoded sequence from right to left'
)
parser.add_argument(
'--decoder_tokenizer_legacy',
action="store_true",
help='Whether to use legacy tokenizer implementation of sentencepiece',
)
parser.add_argument('--max_seq_length', type=int, default=512, help='Max Sequence Length')
parser.add_argument('--min_seq_length', type=int, default=1, help='Min Sequence Length')
parser.add_argument('--tokens_in_batch', type=int, default=16000, help='# Tokens per batch per GPU')
parser.add_argument('--coverage', type=float, default=0.999, help='BPE character coverage [0-1]')
parser.add_argument(
'--lines_per_dataset_fragment',
type=int,
default=1000000,
help='Number of lines to consider for bucketing and padding',
)
parser.add_argument(
'--num_batches_per_tarfile',
type=int,
default=1000,
help='Number of batches (pickle files) within each tarfile',
)
parser.add_argument(
'--n_preproc_jobs', type=int, default=-2, help='Number of processes to use for creating the tarred dataset.',
)
parser.add_argument(
'--byte_fallback',
action="store_true",
help='Whether to use byte fallback with sentencepiece for BPE tokenization.',
)
parser.add_argument(
'--split_digits', action="store_true", help='Whether to split digits while tokenizing with sentencepiece.'
)
parser.add_argument(
'--no_split_by_whitespace',
action="store_true",
        help='If True, this will not respect whitespaces while learning BPE merges.',
)
args = parser.parse_args()
if not os.path.exists(args.out_dir):
os.mkdir(args.out_dir)
if (
args.encoder_tokenizer_model != 'None'
and args.decoder_tokenizer_model == 'None'
or args.decoder_tokenizer_model != 'None'
and args.encoder_tokenizer_model == 'None'
):
if args.shared_tokenizer:
raise ValueError(
'''
If using a pre-trained shared tokenizer,
both encoder and decoder tokenizers must be the same
'''
)
else:
raise ValueError('Both encoder and decoder pre-trained tokenizer models must be specified')
if args.encoder_tokenizer_model == 'None' and args.decoder_tokenizer_model == 'None':
encoder_tokenizer_model, decoder_tokenizer_model = MTDataPreproc.train_tokenizers(
out_dir=args.out_dir,
src_fname=args.src_fname,
tgt_fname=args.tgt_fname,
shared_tokenizer=args.shared_tokenizer,
encoder_tokenizer_name=args.encoder_tokenizer_name,
encoder_tokenizer_vocab_size=args.encoder_tokenizer_vocab_size,
encoder_tokenizer_coverage=args.encoder_tokenizer_coverage,
decoder_tokenizer_name=args.decoder_tokenizer_name,
decoder_tokenizer_vocab_size=args.decoder_tokenizer_vocab_size,
decoder_tokenizer_coverage=args.decoder_tokenizer_coverage,
global_rank=0,
byte_fallback=args.byte_fallback,
split_digits=args.split_digits,
split_by_whitespace=not args.no_split_by_whitespace,
)
else:
encoder_tokenizer_model, decoder_tokenizer_model = args.encoder_tokenizer_model, args.decoder_tokenizer_model
encoder_tokenizer, decoder_tokenizer = MTDataPreproc.get_enc_dec_tokenizers(
encoder_tokenizer_name=args.encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=args.encoder_tokenizer_bpe_dropout,
encoder_r2l=args.encoder_tokenizer_r2l,
decoder_tokenizer_name=args.decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=args.decoder_tokenizer_bpe_dropout,
decoder_r2l=args.decoder_tokenizer_r2l,
encoder_tokenizer_legacy=args.encoder_tokenizer_legacy,
decoder_tokenizer_legacy=args.decoder_tokenizer_legacy,
)
_, _ = MTDataPreproc.preprocess_parallel_dataset(
clean=args.clean,
src_fname=args.src_fname,
tgt_fname=args.tgt_fname,
out_dir=args.out_dir,
encoder_tokenizer_name=args.encoder_tokenizer_name,
encoder_model_name=args.encoder_model_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=args.encoder_tokenizer_bpe_dropout,
encoder_tokenizer_r2l=args.encoder_tokenizer_r2l,
decoder_tokenizer_name=args.decoder_tokenizer_name,
decoder_model_name=args.decoder_model_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_tokenizer_r2l=args.decoder_tokenizer_r2l,
decoder_bpe_dropout=args.decoder_tokenizer_bpe_dropout,
max_seq_length=args.max_seq_length,
min_seq_length=args.min_seq_length,
tokens_in_batch=args.tokens_in_batch,
lines_per_dataset_fragment=args.lines_per_dataset_fragment,
num_batches_per_tarfile=args.num_batches_per_tarfile,
tar_file_prefix=args.tar_file_prefix,
global_rank=0,
world_size=1,
n_jobs=args.n_preproc_jobs,
encoder_tokenizer_legacy=args.encoder_tokenizer_legacy,
decoder_tokenizer_legacy=args.decoder_tokenizer_legacy,
)
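
# Illustrative usage (paths below are hypothetical):
# python create_tarred_parallel_dataset.py \
#     --shared_tokenizer \
#     --clean \
#     --src_fname train.de \
#     --tgt_fname train.en \
#     --out_dir tarred_de_en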
| NeMo-main | examples/nlp/machine_translation/create_tarred_parallel_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script implements Noisy Channel Reranking (NCR) - https://arxiv.org/abs/1908.05731
Given .nemo files for a reverse NMT model (target -> source) and a transformer LM (target LM),
this script can be used to re-rank a forward model's (source -> target) beam candidates.
This script can be used in two ways: 1) given the score file generated by `nmt_transformer_infer.py`, re-rank beam candidates, and
2) given the NCR score file generated by 1), re-rank beam candidates based only on the cached scores in that file. The latter is meant for tuning NCR coefficients.
Pre-requisite: Generating translations using `nmt_transformer_infer.py`
1. Obtain text file in src language. You can use sacrebleu to obtain standard test sets like so:
sacrebleu -t wmt14 -l de-en --echo src > wmt14-de-en.src
2. Translate using `nmt_transformer_infer.py` with a large beam size.:
python nmt_transformer_infer.py --model=[Path to .nemo file(s)] --srctext=wmt14-de-en.src --tgtout=wmt14-de-en.translations --beam_size 15 --write_scores
USAGE Example (case 1):
Re-rank beam candidates:
python noisy_channel_reranking.py \
--reverse_model=[Path to .nemo file] \
--language_model=[Path to .nemo file] \
--srctext=wmt14-de-en.translations.scores \
--tgtout=wmt14-de-en.ncr.translations \
--forward_model_coef=1.0 \
--reverse_model_coef=0.7 \
--target_lm_coef=0.05 \
--write_scores \
USAGE Example (case 2):
Re-rank beam candidates using cached score file only
python noisy_channel_reranking.py \
--cached_score_file=wmt14-de-en.ncr.translations.scores \
--forward_model_coef=1.0 \
--reverse_model_coef=0.7 \
--target_lm_coef=0.05 \
--tgtout=wmt14-de-en.ncr.translations \
"""
from argparse import ArgumentParser
import numpy as np
import torch
import nemo.collections.nlp as nemo_nlp
from nemo.utils import logging
def score_fusion(args, forward_scores, rev_scores, lm_scores, src_lens, tgt_lens):
"""
Fuse forward, reverse and language model scores.
"""
fused_scores = []
for forward_score, rev_score, lm_score, src_len, tgt_len in zip(
forward_scores, rev_scores, lm_scores, src_lens, tgt_lens
):
score = 0
forward_score = forward_score / tgt_len if args.length_normalize_scores else forward_score
score += args.forward_model_coef * forward_score
rev_score = rev_score / src_len if args.length_normalize_scores else rev_score
score += args.reverse_model_coef * rev_score
lm_score = lm_score / tgt_len if args.length_normalize_scores else lm_score
score += args.target_lm_coef * lm_score
if args.len_pen is not None:
score = score / (((5 + tgt_len) / 6) ** args.len_pen)
fused_scores.append(score)
return fused_scores
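

def _score_fusion_example():
    """Illustrative only: score_fusion on one hypothetical beam candidate.
    The Namespace mirrors the CLI flags defined in main(); all numbers are made up.
    """
    from argparse import Namespace

    example_args = Namespace(
        length_normalize_scores=True,
        forward_model_coef=1.0,
        reverse_model_coef=0.7,
        target_lm_coef=0.05,
        len_pen=0.6,
    )
    return score_fusion(
        example_args, forward_scores=[-10.0], rev_scores=[-12.0], lm_scores=[-30.0], src_lens=[20], tgt_lens=[18]
    )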
def main():
parser = ArgumentParser()
parser.add_argument(
"--reverse_model",
type=str,
help="Path to .nemo model file(s). If ensembling, provide comma separated paths to multiple models.",
)
parser.add_argument(
"--language_model", type=str, help="Optional path to an LM model that has the same tokenizer as NMT models.",
)
parser.add_argument(
"--forward_model_coef",
type=float,
default=1.0,
help="Weight assigned to the forward NMT model for re-ranking.",
)
parser.add_argument(
"--reverse_model_coef",
type=float,
default=0.7,
help="Weight assigned to the reverse NMT model for re-ranking.",
)
parser.add_argument(
"--target_lm_coef", type=float, default=0.07, help="Weight assigned to the target LM model for re-ranking.",
)
parser.add_argument(
"--srctext",
type=str,
default=None,
help="Path to a TSV file containing forward model scores of the format source \t beam_candidate_i \t forward_score",
)
parser.add_argument(
"--cached_score_file",
type=str,
default=None,
help="Path to a TSV file containing cached scores for each beam candidate. Format source \t target \t forward_score \t reverse_score \t lm_score \t src_len \t tgt_len",
)
parser.add_argument(
"--tgtout", type=str, required=True, help="Path to the file where re-ranked translations are to be written."
)
parser.add_argument(
"--beam_size",
type=int,
default=4,
help="Beam size with which forward model translations were generated. IMPORTANT: mismatch can lead to wrong results and an incorrect number of generated translations.",
)
parser.add_argument(
"--target_lang", type=str, default=None, help="Target language identifier ex: en,de,fr,es etc."
)
parser.add_argument(
"--source_lang", type=str, default=None, help="Source language identifier ex: en,de,fr,es etc."
)
parser.add_argument(
"--write_scores", action="store_true", help="Whether to write forward, reverse and lm scores to a file."
)
parser.add_argument(
"--length_normalize_scores",
action="store_true",
help="If true, it will divide forward, reverse and lm scores by the corresponding sequence length.",
)
parser.add_argument(
"--len_pen",
type=float,
default=None,
help="Apply a length penalty based on target lengths to the final NCR score.",
)
args = parser.parse_args()
torch.set_grad_enabled(False)
if args.cached_score_file is None:
reverse_models = []
for model_path in args.reverse_model.split(','):
if not model_path.endswith('.nemo'):
raise NotImplementedError(f"Only support .nemo files, but got: {model_path}")
model = nemo_nlp.models.machine_translation.MTEncDecModel.restore_from(restore_path=model_path).eval()
model.eval_loss_fn.reduction = 'none'
reverse_models.append(model)
lm_model = nemo_nlp.models.language_modeling.TransformerLMModel.restore_from(
restore_path=args.language_model
).eval()
if args.srctext is not None and args.cached_score_file is not None:
raise ValueError("Only one of --srctext or --cached_score_file must be provided.")
if args.srctext is None and args.cached_score_file is None:
raise ValueError("Neither --srctext nor --cached_score_file were provided.")
if args.srctext is not None:
logging.info(f"Re-ranking: {args.srctext}")
else:
logging.info(f"Re-ranking from cached score file only: {args.cached_score_file}")
if args.cached_score_file is None:
if torch.cuda.is_available():
reverse_models = [model.cuda() for model in reverse_models]
lm_model = lm_model.cuda()
src_text = []
tgt_text = []
all_reverse_scores = []
all_lm_scores = []
all_forward_scores = []
all_src_lens = []
all_tgt_lens = []
    # Check args if re-ranking from cached score file.
if args.cached_score_file is not None:
if args.write_scores:
raise ValueError("--write_scores cannot be provided with a cached score file.")
        if args.reverse_model is not None:
            raise ValueError(
                "--reverse_model cannot be provided with a cached score file since it assumes reverse scores are already present in the cached file."
            )
        if args.language_model is not None:
            raise ValueError(
                "--language_model cannot be provided with a cached score file since it assumes language model scores are already present in the cached file."
            )
if args.srctext is not None:
# Compute reverse scores and LM scores from the provided models since cached scores file is not provided.
with open(args.srctext, 'r') as src_f:
count = 0
for line in src_f:
src_text.append(line.strip().split('\t'))
if len(src_text) == args.beam_size:
# Source and target sequences are flipped for the reverse direction model.
src_texts = [item[1] for item in src_text]
tgt_texts = [item[0] for item in src_text]
src, src_mask = reverse_models[0].prepare_inference_batch(src_texts)
tgt, tgt_mask = reverse_models[0].prepare_inference_batch(tgt_texts, target=True)
src_lens = src_mask.sum(1).data.cpu().tolist()
tgt_lens = tgt_mask.sum(1).data.cpu().tolist()
forward_scores = [float(item[2]) for item in src_text]
# Ensemble of reverse model scores.
nmt_lls = []
for model in reverse_models:
nmt_log_probs = model(src, src_mask, tgt[:, :-1], tgt_mask[:, :-1])
nmt_nll = model.eval_loss_fn(log_probs=nmt_log_probs, labels=tgt[:, 1:])
nmt_ll = nmt_nll.view(nmt_log_probs.size(0), nmt_log_probs.size(1)).sum(1) * -1.0
nmt_ll = nmt_ll.data.cpu().numpy().tolist()
nmt_lls.append(nmt_ll)
reverse_scores = np.stack(nmt_lls).mean(0)
# LM scores.
if lm_model is not None:
# Compute LM score for the src of the reverse model.
lm_log_probs = lm_model(src[:, :-1], src_mask[:, :-1])
lm_nll = model.eval_loss_fn(log_probs=lm_log_probs, labels=src[:, 1:])
lm_ll = lm_nll.view(lm_log_probs.size(0), lm_log_probs.size(1)).sum(1) * -1.0
lm_ll = lm_ll.data.cpu().numpy().tolist()
else:
lm_ll = None
lm_scores = lm_ll
all_reverse_scores.extend(reverse_scores)
all_lm_scores.extend(lm_scores)
all_forward_scores.extend(forward_scores)
# Swapping source and target here back again since this is what gets written to the file.
all_src_lens.extend(tgt_lens)
all_tgt_lens.extend(src_lens)
fused_scores = score_fusion(args, forward_scores, reverse_scores, lm_scores, src_lens, tgt_lens)
tgt_text.append(src_texts[np.argmax(fused_scores)])
src_text = []
count += 1
print(f'Reranked {count} sentences')
else:
# Use reverse and LM scores from the cached scores file to re-rank.
with open(args.cached_score_file, 'r') as src_f:
count = 0
for line in src_f:
src_text.append(line.strip().split('\t'))
if len(src_text) == args.beam_size:
if not all([len(item) == 7 for item in src_text]):
raise IndexError(
"All lines did not contain exactly 5 fields. Format - src_txt \t tgt_text \t forward_score \t reverse_score \t lm_score \t src_len \t tgt_len"
)
src_texts = [item[0] for item in src_text]
tgt_texts = [item[1] for item in src_text]
forward_scores = [float(item[2]) for item in src_text]
reverse_scores = [float(item[3]) for item in src_text]
lm_scores = [float(item[4]) for item in src_text]
src_lens = [float(item[5]) for item in src_text]
tgt_lens = [float(item[6]) for item in src_text]
fused_scores = score_fusion(args, forward_scores, reverse_scores, lm_scores, src_lens, tgt_lens)
tgt_text.append(tgt_texts[np.argmax(fused_scores)])
src_text = []
count += 1
print(f'Reranked {count} sentences')
with open(args.tgtout, 'w') as tgt_f:
for line in tgt_text:
tgt_f.write(line + "\n")
# Write scores file
if args.write_scores:
with open(args.tgtout + '.scores', 'w') as tgt_f, open(args.srctext, 'r') as src_f:
src_lines = []
for line in src_f:
src_lines.append(line.strip().split('\t'))
if not (len(all_reverse_scores) == len(all_lm_scores) == len(all_forward_scores) == len(src_lines)):
raise ValueError(
f"Length of scores files do not match. {len(all_reverse_scores)} != {len(all_lm_scores)} != {len(all_forward_scores)} != {len(src_lines)}. This is most likely because --beam_size is set incorrectly. This needs to be set to the same value that was used to generate translations."
)
for f, r, lm, sl, tl, src in zip(
all_forward_scores, all_reverse_scores, all_lm_scores, all_src_lens, all_tgt_lens, src_lines
):
tgt_f.write(
src[0]
+ '\t'
+ src[1]
+ '\t'
+ str(f)
+ '\t'
+ str(r)
+ '\t'
+ str(lm)
+ '\t'
+ str(sl)
+ '\t'
+ str(tl)
+ '\n'
)
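# Illustrative sketch only: score_fusion (defined earlier in this script) is
# assumed to combine the direct, channel (reverse), and language-model scores in
# the standard noisy-channel fashion. The coefficient names and the length
# normalization below are hypothetical, not the script's actual implementation.
def _noisy_channel_fusion_sketch(forward_scores, reverse_scores, lm_scores, src_lens, tgt_lens, coef_a=1.0, coef_b=0.1):
    fused = []
    for f, r, lm, sl, tl in zip(forward_scores, reverse_scores, lm_scores, src_lens, tgt_lens):
        # direct score + weighted channel score + weighted LM prior,
        # normalized by target length to avoid a bias towards short hypotheses
        fused.append((f + coef_a * r + coef_b * lm) / max(tl, 1))
    return fused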
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/machine_translation/noisy_channel_reranking.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
from omegaconf import OmegaConf
from omegaconf.omegaconf import MISSING
from pytorch_lightning import Trainer
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.core.config.modelPT import NemoConfig
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.utils import logging
from nemo.utils.config_utils import update_model_config
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
"""
Usage:
python enc_dec_nmt_finetune.py \
model_path=/raid/models/de_en_24x6.nemo \
trainer.devices=2 \
~trainer.max_epochs \
+trainer.max_steps=4500 \
+trainer.val_check_interval=500 \
model.train_ds.tgt_file_name=/raid/data/train_lang_filtered.en \
model.train_ds.src_file_name=/raid/data/train_lang_filtered.de \
model.train_ds.tokens_in_batch=6000 \
model.validation_ds.tgt_file_name=/raid/data/2015.norm.tok.en \
model.validation_ds.src_file_name=/raid/data/2015.norm.tok.de \
model.validation_ds.tokens_in_batch=4000 \
model.test_ds.tgt_file_name=/raid/data/2015.en \
model.test_ds.src_file_name=/raid/data/2015.de \
+exp_manager.exp_dir=/raid/results/finetune-test \
+exp_manager.create_checkpoint_callback=True \
+exp_manager.checkpoint_callback_params.monitor=val_sacreBLEU \
+exp_manager.checkpoint_callback_params.mode=max \
+exp_manager.checkpoint_callback_params.save_best_model=true
"""
@dataclass
class MTFineTuneConfig(NemoConfig):
name: Optional[str] = 'MTEncDec'
model_path: str = MISSING
do_training: bool = True
do_testing: bool = False
model: MTEncDecModelConfig = MTEncDecModelConfig()
trainer: Optional[TrainerConfig] = TrainerConfig()
exp_manager: Optional[ExpManagerConfig] = ExpManagerConfig(name='MTEncDec', files_to_copy=[])
@hydra_runner(config_path="conf", config_name="aayn_finetune")
def main(cfg: MTFineTuneConfig) -> None:
# merge default config with user specified config
default_cfg = MTFineTuneConfig()
default_cfg.model = MTEncDecModel.restore_from(restore_path=cfg.model_path, return_config=True)
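    # The restored config still carries the original run's optimizer and dataset
    # sections; drop them so the fine-tuning settings supplied on the command line
    # (or in aayn_finetune.yaml) take effect during the merge below.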
del default_cfg.model.optim, default_cfg.model.train_ds, default_cfg.model.validation_ds, default_cfg.model.test_ds
cfg = update_model_config(default_cfg, cfg, drop_missing_subconfigs=False)
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
# training is managed by PyTorch Lightning
trainer_cfg = OmegaConf.to_container(cfg.trainer)
trainer_cfg.pop('strategy', None)
trainer = Trainer(strategy=NLPDDPStrategy(), **trainer_cfg)
# experiment logs, checkpoints, and auto-resume are managed by exp_manager and PyTorch Lightning
exp_manager(trainer, cfg.exp_manager)
    # everything needed to train translation models is encapsulated in the NeMo MTEncDecModel
mt_model = MTEncDecModel.restore_from(restore_path=cfg.model_path, override_config_path=cfg.model, trainer=trainer)
mt_model.setup_training_data(cfg.model.train_ds)
mt_model.setup_multiple_validation_data(val_data_config=cfg.model.validation_ds)
logging.info("\n\n************** Model parameters and their sizes ***********")
for name, param in mt_model.named_parameters():
print(name, param.size())
logging.info("***********************************************************\n\n")
if cfg.do_training:
trainer.fit(mt_model)
if cfg.do_testing:
mt_model.setup_multiple_test_data(test_data_config=cfg.model.test_ds)
trainer.test(mt_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/machine_translation/enc_dec_nmt_finetune.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given NMT model's .nemo file(s), this script can be used to translate text.
USAGE Example:
1. Obtain text file in src language. You can use sacrebleu to obtain standard test sets like so:
sacrebleu -t wmt14 -l de-en --echo src > wmt14-de-en.src
2. Translate:
python nmt_transformer_infer.py --model=[Path to .nemo file(s)] --srctext=wmt14-de-en.src --tgtout=wmt14-de-en.pre
"""
import os
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.models.machine_translation.megatron_nmt_model import MegatronNMTModel
from nemo.collections.nlp.modules.common.megatron.megatron_init import fake_initialize_model_parallel
from nemo.collections.nlp.modules.common.megatron.utils import ApexGuardDefaults
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.model_utils import inject_model_parallel_rank
try:
from apex.transformer.pipeline_parallel.utils import _reconfigure_microbatch_calculator
HAVE_APEX = True
except (ImportError, ModuleNotFoundError):
ModelType = ApexGuardDefaults()
HAVE_APEX = False
@hydra_runner(config_path="conf", config_name="nmt_megatron_infer")
def main(cfg) -> None:
# trainer required for restoring model parallel models
trainer = Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
assert (
cfg.trainer.devices * cfg.trainer.num_nodes
== cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
), "devices * num_nodes should equal tensor_model_parallel_size * pipeline_model_parallel_size"
app_state = AppState()
app_state.model_parallel_size = cfg.tensor_model_parallel_size * cfg.pipeline_model_parallel_size
(
app_state.tensor_model_parallel_rank,
app_state.pipeline_model_parallel_rank,
app_state.model_parallel_size,
app_state.data_parallel_size,
app_state.pipeline_model_parallel_split_rank,
app_state.virtual_pipeline_model_parallel_rank,
) = fake_initialize_model_parallel(
world_size=app_state.model_parallel_size,
rank=trainer.global_rank,
tensor_model_parallel_size_=cfg.tensor_model_parallel_size,
pipeline_model_parallel_size_=cfg.pipeline_model_parallel_size,
pipeline_model_parallel_split_rank_=cfg.pipeline_model_parallel_split_rank,
)
if cfg.model_file is not None:
if not os.path.exists(cfg.model_file):
raise ValueError(f"Model file {cfg.model_file} does not exist")
pretrained_cfg = MegatronNMTModel.restore_from(cfg.model_file, trainer=trainer, return_config=True)
OmegaConf.set_struct(pretrained_cfg, True)
with open_dict(pretrained_cfg):
pretrained_cfg.precision = trainer.precision
model = MegatronNMTModel.restore_from(
restore_path=cfg.model_file,
trainer=trainer,
save_restore_connector=NLPSaveRestoreConnector(),
override_config_path=pretrained_cfg,
)
elif cfg.checkpoint_dir is not None:
checkpoint_path = inject_model_parallel_rank(os.path.join(cfg.checkpoint_dir, cfg.checkpoint_name))
model = MegatronNMTModel.load_from_checkpoint(checkpoint_path, hparams_file=cfg.hparams_file, trainer=trainer)
else:
raise ValueError("need at least a nemo file or checkpoint dir")
model.freeze()
logging.info(f"Translating: {cfg.srctext}")
src_text = []
translations = []
with open(cfg.srctext, 'r') as src_f, open(cfg.tgtout, 'w') as tgt_f:
for line in src_f:
src_text.append(line.strip())
if len(src_text) == cfg.batch_size:
translations = model.translate(
text=src_text, source_lang=cfg.source_lang, target_lang=cfg.target_lang,
)
for translation in translations:
tgt_f.write(translation + "\n")
src_text = []
if len(src_text) > 0:
translations = model.translate(text=src_text, source_lang=cfg.source_lang, target_lang=cfg.target_lang,)
for translation in translations:
tgt_f.write(translation + "\n")
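# Equivalent generator-based batching (illustrative only; the loop above already
# does the same thing inline). Such a helper keeps reading and translating
# separate if this script grows:
def _chunked(lines, batch_size):
    """Yield successive lists of at most batch_size stripped lines."""
    batch = []
    for line in lines:
        batch.append(line.strip())
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch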
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | examples/nlp/machine_translation/nmt_transformer_infer_megatron.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.nlp.data.machine_translation.preproc_mt_data import MTDataPreproc
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.core.config.modelPT import NemoConfig
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.utils import logging
from nemo.utils.config_utils import update_model_config
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
"""
Usage:
1. If you need to start docker and install NeMo, otherwise skip this step:
a. ```docker run --gpus all -it --rm -v /home/okuchaiev/repos/NeMo/:/NeMo -p 6006:6006 -v /mnt:/mnt --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:20.11-py3```
b. ```cd /NeMo```
c. ```./reinstall.sh```
2. Train a new tokenizer (or use pre-trained one):
```yttm bpe --data /mnt/D1/Data/NMT/wmt16_de_en/train.clean.en-de.shuffled.common --model tokenizer.BPE.8192.model --vocab_size 8192```
(To use WANDB, optionally, do login first)
``wandb login [YOUR WANDB login]``
3. Start training:
(This example for "base" model on 2 GPUs for 150000 steps with batch size of 12500 tokens per GPU)
python enc_dec_nmt.py \
--config-path=conf \
--config-name=aayn_base \
trainer.devices=[0,1] \
~trainer.max_epochs \
+trainer.max_steps=150000 \
model.beam_size=4 \
model.max_generation_delta=5 \
model.label_smoothing=0.1 \
model.encoder_tokenizer.tokenizer_model=tokenizer.BPE.8192.model \
model.decoder_tokenizer.tokenizer_model=tokenizer.BPE.8192.model \
model.encoder.num_layers=6 \
model.encoder.hidden_size=512 \
model.encoder.inner_size=2048 \
model.encoder.num_attention_heads=8 \
model.encoder.ffn_dropout=0.1 \
model.decoder.num_layers=6 \
model.decoder.hidden_size=512 \
model.decoder.inner_size=2048 \
model.decoder.num_attention_heads=8 \
model.decoder.ffn_dropout=0.1 \
model.train_ds.src_file_name=/mnt/D1/Data/NMT/wmt16_de_en/train.clean.de.shuffled \
model.train_ds.tgt_file_name=/mnt/D1/Data/NMT/wmt16_de_en/train.clean.en.shuffled \
model.train_ds.tokens_in_batch=12500 \
model.validation_ds.src_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.ref \
model.validation_ds.tgt_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.src \
model.validation_ds.tokens_in_batch=8192 \
model.test_ds.src_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.ref \
model.test_ds.tgt_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.src \
model.optim.lr=0.001 \
model.optim.sched.warmup_ratio=0.05 \
+exp_manager.create_wandb_logger=True \
+exp_manager.wandb_logger_kwargs.name=TEST-nmt-base \
+exp_manager.wandb_logger_kwargs.project=nmt-de-en \
+exp_manager.create_checkpoint_callback=True \
+exp_manager.checkpoint_callback_params.monitor=val_sacreBLEU \
+exp_manager.exp_dir=nmt_base \
+exp_manager.checkpoint_callback_params.mode=max
"""
@dataclass
class MTEncDecConfig(NemoConfig):
name: Optional[str] = 'MTEncDec'
do_training: bool = True
do_testing: bool = False
model: MTEncDecModelConfig = MTEncDecModelConfig()
trainer: Optional[TrainerConfig] = TrainerConfig()
exp_manager: Optional[ExpManagerConfig] = ExpManagerConfig(name='MTEncDec', files_to_copy=[])
@hydra_runner(config_path="conf", config_name="aayn_base")
def main(cfg: MTEncDecConfig) -> None:
# merge default config with user specified config
default_cfg = MTEncDecConfig()
cfg = update_model_config(default_cfg, cfg)
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
# training is managed by PyTorch Lightning
trainer_cfg = OmegaConf.to_container(cfg.trainer)
trainer_cfg.pop('strategy', None)
trainer = Trainer(strategy=NLPDDPStrategy(), **trainer_cfg)
    # tokenizers will be trained and tarred training data will be created if needed
# model config is then updated
if cfg.model.preproc_out_dir is not None:
MTDataPreproc(cfg=cfg.model, trainer=trainer)
# experiment logs, checkpoints, and auto-resume are managed by exp_manager and PyTorch Lightning
exp_manager(trainer, cfg.exp_manager)
    # everything needed to train translation models is encapsulated in the NeMo MTEncDecModel
mt_model = MTEncDecModel(cfg.model, trainer=trainer)
logging.info("\n\n************** Model parameters and their sizes ***********")
for name, param in mt_model.named_parameters():
print(name, param.size())
logging.info("***********************************************************\n\n")
if cfg.do_training:
trainer.fit(mt_model)
# Reset for PTL 2.0 as test uses the same ckpt as train via previously set self._ckpt_path
trainer.ckpt_path = None
if cfg.do_testing:
trainer.test(mt_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/machine_translation/enc_dec_nmt.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import torch
import torch.multiprocessing as mp
from torch.utils.data import DataLoader
from nemo.collections.nlp.data.language_modeling import TarredSentenceDataset
from nemo.collections.nlp.data.machine_translation import TarredTranslationDataset
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
from nemo.utils import logging
def get_args():
parser = ArgumentParser(description='Batch translation of sentences from a pre-trained model on multiple GPUs')
parser.add_argument("--model", type=str, required=True, help="Path to the .nemo translation model file")
parser.add_argument(
"--text2translate", type=str, required=True, help="Path to the pre-processed tarfiles for translation"
)
parser.add_argument("--result_dir", type=str, required=True, help="Folder to write translation results")
parser.add_argument(
"--twoside", action="store_true", help="Set flag when translating the source side of a parallel dataset"
)
parser.add_argument(
'--metadata_path', type=str, required=True, help="Path to the JSON file that contains dataset info"
)
parser.add_argument('--topk', type=int, default=500, help="Value of k for topk sampling")
parser.add_argument('--src_language', type=str, required=True, help="Source lang ID for detokenization")
parser.add_argument('--tgt_language', type=str, required=True, help="Target lang ID for detokenization")
parser.add_argument(
'--reverse_lang_direction',
action="store_true",
help="Reverse source and target language direction for parallel dataset",
)
parser.add_argument('--n_gpus', type=int, default=1, help="Number of GPUs to use")
args = parser.parse_args()
return args
def translate(rank, world_size, args):
if args.model.endswith(".nemo"):
logging.info("Attempting to initialize from .nemo file")
model = MTEncDecModel.restore_from(restore_path=args.model, map_location=f"cuda:{rank}")
    elif args.model.endswith(".ckpt"):
        logging.info("Attempting to initialize from .ckpt file")
        model = MTEncDecModel.load_from_checkpoint(checkpoint_path=args.model, map_location=f"cuda:{rank}")
    else:
        raise ValueError("--model must point to a .nemo or .ckpt file")
model.replace_beam_with_sampling(topk=args.topk)
model.eval()
if args.twoside:
dataset = TarredTranslationDataset(
text_tar_filepaths=args.text2translate,
metadata_path=args.metadata_path,
encoder_tokenizer=model.encoder_tokenizer,
decoder_tokenizer=model.decoder_tokenizer,
shuffle_n=100,
shard_strategy="scatter",
world_size=world_size,
global_rank=rank,
reverse_lang_direction=args.reverse_lang_direction,
)
else:
dataset = TarredSentenceDataset(
text_tar_filepaths=args.text2translate,
metadata_path=args.metadata_path,
tokenizer=model.encoder_tokenizer,
shuffle_n=100,
shard_strategy="scatter",
world_size=world_size,
global_rank=rank,
)
loader = DataLoader(dataset, batch_size=1)
result_dir = os.path.join(args.result_dir, f'rank{rank}')
os.makedirs(result_dir, exist_ok=True)
originals_file_name = os.path.join(result_dir, 'originals.txt')
translations_file_name = os.path.join(result_dir, 'translations.txt')
num_translated_sentences = 0
with open(originals_file_name, 'w') as of, open(translations_file_name, 'w') as tf:
for batch_idx, batch in enumerate(loader):
for i in range(len(batch)):
if batch[i].ndim == 3:
batch[i] = batch[i].squeeze(dim=0)
batch[i] = batch[i].to(rank)
if args.twoside:
src_ids, src_mask, _, _, _ = batch
else:
src_ids, src_mask = batch
if batch_idx % 100 == 0:
logging.info(
f"{batch_idx} batches ({num_translated_sentences} sentences) were translated by process with "
f"rank {rank}"
)
num_translated_sentences += len(src_ids)
inputs, translations = model.batch_translate(src=src_ids, src_mask=src_mask)
for src, translation in zip(inputs, translations):
of.write(src + '\n')
tf.write(translation + '\n')
def main() -> None:
args = get_args()
world_size = torch.cuda.device_count() if args.n_gpus == -1 else args.n_gpus
mp.spawn(translate, args=(world_size, args), nprocs=world_size, join=True)
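# Each rank writes its outputs to a separate subdirectory, so a typical
# post-processing step is to concatenate the shards. A minimal sketch of such a
# helper (hypothetical, not invoked by this script); note that with
# shard_strategy="scatter" the merged order differs from the original corpus order:
def merge_rank_outputs(result_dir, world_size):
    for fname in ('originals.txt', 'translations.txt'):
        with open(os.path.join(result_dir, fname), 'w') as merged:
            for rank in range(world_size):
                with open(os.path.join(result_dir, f'rank{rank}', fname)) as shard:
                    merged.write(shard.read())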
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/machine_translation/translate_ddp.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional
from omegaconf import OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.nlp.data.machine_translation.preproc_mt_data import MTDataPreproc
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_bottleneck_model import MTBottleneckModel
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTBottleneckModelConfig
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.core.config.modelPT import NemoConfig
from nemo.core.config.pytorch_lightning import TrainerConfig
from nemo.utils import logging
from nemo.utils.config_utils import update_model_config
from nemo.utils.exp_manager import ExpManagerConfig, exp_manager
"""
Usage:
1. If you need to start docker and install NeMo, otherwise skip this step:
a. ```docker run --gpus all -it --rm -v /home/okuchaiev/repos/NeMo/:/NeMo -p 6006:6006 -v /mnt:/mnt --shm-size=16g --ulimit memlock=-1 --ulimit stack=67108864 --device=/dev/snd nvcr.io/nvidia/pytorch:20.11-py3```
b. ```cd /NeMo```
c. ```./reinstall.sh```
2. Train a new tokenizer (or use pre-trained one):
```yttm bpe --data /mnt/D1/Data/NMT/wmt16_de_en/train.clean.en-de.shuffled.common --model tokenizer.BPE.8192.model --vocab_size 8192```
(To use WANDB, optionally, do login first)
``wandb login [YOUR WANDB login]``
3. Start training:
(This example for "base" model on 2 GPUs for 150000 steps with batch size of 12500 tokens per GPU)
python enc_dec_nmt-bottleneck.py \
--config-path=conf \
--config-name=aayn_bottleneck \
trainer.devices=[0,1] \
~trainer.max_epochs \
+trainer.max_steps=150000 \
model.beam_size=4 \
model.max_generation_delta=256 \
model.label_smoothing=0.1 \
model.model_type=nll \
model.non_recon_warmup_batches=7500 \
model.encoder_tokenizer.tokenizer_model=tokenizer.BPE.8192.model \
model.decoder_tokenizer.tokenizer_model=tokenizer.BPE.8192.model \
model.encoder.arch=perceiver \
model.encoder.hidden_steps=32 \
model.encoder.hidden_blocks=2 \
model.encoder.hidden_init_method=bridge \
model.encoder.num_layers=6 \
model.encoder.hidden_size=512 \
model.encoder.inner_size=2048 \
model.encoder.num_attention_heads=8 \
model.encoder.ffn_dropout=0.1 \
model.decoder.num_layers=6 \
model.decoder.hidden_size=512 \
model.decoder.inner_size=2048 \
model.decoder.num_attention_heads=8 \
model.decoder.ffn_dropout=0.1 \
model.train_ds.src_file_name=/mnt/D1/Data/NMT/wmt16_de_en/train.clean.de.shuffled \
model.train_ds.tgt_file_name=/mnt/D1/Data/NMT/wmt16_de_en/train.clean.en.shuffled \
model.train_ds.tokens_in_batch=12500 \
model.validation_ds.src_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.ref \
model.validation_ds.tgt_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.src \
model.validation_ds.tokens_in_batch=8192 \
model.test_ds.src_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.ref \
model.test_ds.tgt_file_name=/mnt/D1/Data/NMT/wmt16_de_en/wmt14-en-de.src \
model.optim.lr=0.001 \
model.optim.sched.warmup_ratio=0.05 \
+exp_manager.create_wandb_logger=True \
+exp_manager.wandb_logger_kwargs.name=TEST-nmt-base \
+exp_manager.wandb_logger_kwargs.project=nmt-de-en \
+exp_manager.create_checkpoint_callback=True \
+exp_manager.checkpoint_callback_params.monitor=val_sacreBLEU \
+exp_manager.exp_dir=nmt_base \
+exp_manager.checkpoint_callback_params.mode=max
"""
@dataclass
class MTBottleneckConfig(NemoConfig):
name: Optional[str] = 'MTBottleneck'
do_training: bool = True
do_testing: bool = False
model: MTBottleneckModelConfig = MTBottleneckModelConfig()
trainer: Optional[TrainerConfig] = TrainerConfig()
exp_manager: Optional[ExpManagerConfig] = ExpManagerConfig(name='MTBottleneck', files_to_copy=[])
@hydra_runner(config_path="conf", config_name="aayn_bottleneck")
def main(cfg: MTBottleneckConfig) -> None:
# merge default config with user specified config
default_cfg = MTBottleneckConfig()
cfg = update_model_config(default_cfg, cfg)
logging.info("\n\n************** Experiment configuration ***********")
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
# training is managed by PyTorch Lightning
trainer_cfg = OmegaConf.to_container(cfg.trainer)
trainer_cfg.pop('strategy', None)
trainer = Trainer(strategy=NLPDDPStrategy(), **trainer_cfg)
    # tokenizers will be trained and tarred training data will be created if needed
# model config is then updated
if cfg.model.preproc_out_dir is not None:
MTDataPreproc(cfg=cfg.model, trainer=trainer)
# experiment logs, checkpoints, and auto-resume are managed by exp_manager and PyTorch Lightning
exp_manager(trainer, cfg.exp_manager)
    # everything needed to train translation models is encapsulated in the NeMo MTBottleneckModel
mt_model = MTBottleneckModel(cfg.model, trainer=trainer)
logging.info("\n\n************** Model parameters and their sizes ***********")
for name, param in mt_model.named_parameters():
print(name, param.size())
logging.info("***********************************************************\n\n")
if cfg.do_training:
trainer.fit(mt_model)
if cfg.do_testing:
trainer.test(mt_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/machine_translation/enc_dec_nmt-bottleneck.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import BertDPRModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="bert_ir_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
bert_dpr_model = BertDPRModel(cfg.model, trainer=trainer)
trainer.fit(bert_dpr_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/information_retrieval/bert_dpr.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import BertJointIRModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="bert_ir_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
bert_joint_ir_model = BertJointIRModel(cfg.model, trainer=trainer)
trainer.fit(bert_joint_ir_model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/information_retrieval/bert_joint_ir.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
import pandas as pd
def construct_negatives(input_file, output_file, num_passages, num_negatives):
qrels = pd.read_csv(input_file, delimiter="\t", header=None)
with open(output_file, "w") as f:
for i in range(len(qrels)):
query_id, rel_passage_id = qrels[0][i], qrels[2][i]
negatives = np.random.randint(num_passages, size=num_negatives)
output_ids = [query_id, rel_passage_id] + negatives.tolist()
output_str = [str(id_) for id_ in output_ids]
print("\t".join(output_str), file=f)
def main():
parser = argparse.ArgumentParser(description="Negative passages construction")
parser.add_argument("--data", type=str, default="msmarco_dataset", help="path to folder with data")
parser.add_argument("--num_passages", type=int, default=8841823, help="total number of passages")
parser.add_argument("--num_negatives", type=int, default=10, help="number of negatives per positive")
args = parser.parse_args()
for mode in ["train", "dev"]:
construct_negatives(
input_file=f"{args.data}/qrels.{mode}.tsv",
output_file=f"{args.data}/query2passages.{mode}.tsv",
num_passages=args.num_passages,
num_negatives=args.num_negatives,
)
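# Note: np.random.randint above samples uniformly over all passages, so a
# "negative" can occasionally collide with the relevant passage. A hedged variant
# that resamples such collisions (illustrative, not part of the original pipeline):
def sample_negatives_excluding(rel_passage_id, num_passages, num_negatives):
    negatives = []
    while len(negatives) < num_negatives:
        candidate = int(np.random.randint(num_passages))
        if candidate != rel_passage_id:
            negatives.append(candidate)
    return negatives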
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/information_retrieval/construct_random_negatives.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from ast import literal_eval
import ijson
def main(filename):
with open(filename, 'r') as file:
objects = ijson.kvitems(file, 'wellFormedAnswers')
valid_old_key_to_new_key = {}
new_key = 0
for key, well_formed_answer in objects:
value = well_formed_answer if isinstance(well_formed_answer, list) else literal_eval(well_formed_answer)
if len(value) > 0:
valid_old_key_to_new_key[key] = str(new_key)
new_key += 1
filtered_data = {}
fieldnames = ['query', 'query_type', 'answers', 'wellFormedAnswers', 'passages']
for fieldname in fieldnames:
add_data(filename, filtered_data, fieldname, valid_old_key_to_new_key)
with open(filename, 'w') as fw:
json.dump(filtered_data, fw)
def add_data(filename, filtered_data, fieldname, valid_old_key_to_new_key):
with open(filename, 'r') as f:
objects = ijson.kvitems(f, fieldname)
filtered_data[fieldname] = {
valid_old_key_to_new_key[key]: query for key, query in objects if key in valid_old_key_to_new_key
}
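# For reference, the MS MARCO JSON handled here is assumed to be column-oriented
# and keyed by example id, roughly:
# {
#   "query":             {"0": "...", ...},
#   "query_type":        {"0": "...", ...},
#   "answers":           {"0": ["..."], ...},
#   "wellFormedAnswers": {"0": "[]", "1": "['...']", ...},
#   "passages":          {"0": [...], ...}
# }
# ijson.kvitems streams the (key, value) pairs of one top-level field at a time,
# which keeps memory usage flat even for multi-gigabyte files.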
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--filename")
args = parser.parse_args()
main(args.filename)
| NeMo-main | examples/nlp/dialogue/remove_ms_marco_samples_without_wellFormedAnswers.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
import numpy as np
from nemo.collections.nlp.metrics.dialogue_metrics import DialogueGenerationMetrics
def read_jsonl(filename):
with open(filename, 'r', encoding="UTF-8") as f:
docs = [json.loads(line) for line in f.readlines()]
return docs
def get_incorrect_labels(docs):
incorrect_labels_docs = []
for doc in docs:
if doc["ground_truth_labels"] != doc["generated_labels"]:
incorrect_labels_docs.append(
{
"input": doc["input"],
"ground_truth_labels": doc["ground_truth_labels"],
"generated_labels": doc["generated_labels"],
}
)
return incorrect_labels_docs
def get_incorrect_slots(docs):
incorrect_slots_docs = []
for doc in docs:
if doc["ground_truth_slots"] != doc["generated_slots"]:
incorrect_slots_docs.append(
{
"input": doc["input"],
"ground_truth_slots": doc["ground_truth_slots"],
"generated_slots": doc["generated_slots"],
}
)
return incorrect_slots_docs
def sort_by_f1(docs):
for i in range(len(docs)):
doc = docs[i]
generated_field = doc["generated"]
ground_truth_field = doc["ground_truth"]
        generated_field = remove_punctuation(generated_field.lower())
        ground_truth_field = remove_punctuation(ground_truth_field.lower())
p, r, f1 = DialogueGenerationMetrics._get_one_f1(generated_field, ground_truth_field)
docs[i]["f1"] = f1
docs[i]["generated"] = generated_field
docs[i]["ground_truth"] = ground_truth_field
docs.sort(key=lambda x: x["f1"])
return docs
def remove_punctuation(sentence):
return re.sub(r'[^\w\s]', '', sentence)
def generation_main(filename):
docs = read_jsonl(filename)
docs = sort_by_f1(docs)
bleu = DialogueGenerationMetrics.get_bleu(
[doc["generated"] for doc in docs], [doc["ground_truth"] for doc in docs]
)
acc = np.mean([int(doc["generated"] == doc["ground_truth"]) for doc in docs]) * 100
f1 = np.mean([doc["f1"] for doc in docs])
print("Token level F1 is {:.3}".format(f1))
print("BLEU is {:.3}".format(bleu))
print("Exact match accuracy is {:.3}".format(acc))
    # Increase this range (left at 0, i.e. disabled) to print the lowest-F1 examples.
    for i in range(0):
        print(docs[i])
def classification_main(filename):
docs = read_jsonl(filename)
incorrect_labels_docs = get_incorrect_labels(docs)
incorrect_slots_docs = get_incorrect_slots(docs)
print("{} / {} have incorrect labels".format(len(incorrect_labels_docs), len(docs)))
print("{} / {} have incorrect slots".format(len(incorrect_slots_docs), len(docs)))
for doc in incorrect_labels_docs:
print(doc)
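# Hedged sketch (an assumption, not the actual NeMo implementation) of the
# token-level F1 computed by DialogueGenerationMetrics._get_one_f1 used above:
# precision/recall over the multiset overlap of whitespace-separated tokens.
def _token_f1_sketch(generated, ground_truth):
    from collections import Counter
    gen_tokens = generated.split()
    gt_tokens = ground_truth.split()
    common = sum((Counter(gen_tokens) & Counter(gt_tokens)).values())
    if common == 0:
        return 0.0, 0.0, 0.0
    precision = common / len(gen_tokens)
    recall = common / len(gt_tokens)
    return precision, recall, 2 * precision * recall / (precision + recall)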
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--prediction_filename")
parser.add_argument("--mode", choices=['generation', 'classification'], default='classification')
args = parser.parse_args()
if args.mode == 'classification':
classification_main(args.prediction_filename)
else:
generation_main(args.prediction_filename)
| NeMo-main | examples/nlp/dialogue/analyse_prediction_results.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example of how to train and test dialogue models in NeMo.
***Setting the configs***
The model and the PT trainer are defined in a config file that declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - model, loss, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `/examples/nlp/dialogue/conf/dialogue_config.yaml` config file
by default. You may update the config directly in that file, or select another config file via the command-line argument `--config-name=CONFIG_FILE_PATH`.
***Model Training***
python dialogue.py
do_training=True
model.dataset.data_dir=<DATA_DIR_WITH_JSON_DATA>
model.dataset.dialogues_example_dir=<DATA_DIR_FOR_CACHING_INTERMEDIATE_AND_SAVING_PREDICTIONS>
model.dataset.task=<TASK - see conf/dialogue_config.yaml for full list> e.g. sgd
model.language_model.pretrained_model_name=<MODEL_NAME - see conf/dialogue_config.yaml for full list> e.g. gpt2
trainer.devices=[<DEVICE_IDS_TO_USE>]
***Model Evaluation***
command as above, change do_training=False
"""
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.dialogue.dialogue_gpt_classification_model import DialogueGPTClassificationModel
from nemo.collections.nlp.models.dialogue.dialogue_gpt_generation_model import DialogueGPTGenerationModel
from nemo.collections.nlp.models.dialogue.dialogue_nearest_neighbour_model import DialogueNearestNeighbourModel
from nemo.collections.nlp.models.dialogue.dialogue_s2s_generation_model import DialogueS2SGenerationModel
from nemo.collections.nlp.models.dialogue.dialogue_zero_shot_intent_model import DialogueZeroShotIntentModel
from nemo.collections.nlp.models.dialogue.intent_slot_classification_model import IntentSlotClassificationModel
from nemo.collections.nlp.models.dialogue.sgdqa_model import SGDQAModel
from nemo.collections.nlp.modules.common.megatron.megatron_utils import compute_model_parallel_rank
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.app_state import AppState
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="dialogue_config")
def main(cfg: DictConfig) -> None:
pl.seed_everything(42)
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
try:
strategy = NLPDDPStrategy(no_ddp_communication_hook=True, find_unused_parameters=True,)
except (ImportError, ModuleNotFoundError):
strategy = None
trainer = pl.Trainer(**cfg.trainer, strategy=strategy)
exp_manager(trainer, cfg.get("exp_manager", None))
app_state = AppState()
app_state.data_parallel_size = cfg.model.data_parallel_size
if cfg.model.tensor_model_parallel_size > 1:
app_state.model_parallel_size = cfg.model.tensor_model_parallel_size
app_state.tensor_model_parallel_rank = compute_model_parallel_rank(
trainer.local_rank, app_state.model_parallel_size
)
if 'bert' in cfg.model.language_model.pretrained_model_name:
if cfg.model.dataset.task == 'sgd':
if cfg.model.original_nemo_checkpoint is not None:
model_class = DialogueZeroShotIntentModel
else:
model_class = SGDQAModel
elif cfg.model.dataset.task in ['zero_shot', 'design']:
model_class = DialogueZeroShotIntentModel
else:
model_class = IntentSlotClassificationModel
elif 'gpt' in cfg.model.language_model.pretrained_model_name.lower():
if cfg.model.dataset.task in ['ms_marco', 'mellon_qa']:
model_class = DialogueGPTGenerationModel
else:
model_class = DialogueGPTClassificationModel
elif (
'bart' in cfg.model.language_model.pretrained_model_name.lower()
or 't5' in cfg.model.language_model.pretrained_model_name.lower()
):
# please use bf16/32 with t5-large and above
# see https://github.com/huggingface/transformers/pull/10956
model_class = DialogueS2SGenerationModel
    elif 'sentence-transformers' in cfg.model.language_model.pretrained_model_name.lower():
        model_class = DialogueNearestNeighbourModel
    else:
        raise ValueError(f"Unsupported pretrained model name: {cfg.model.language_model.pretrained_model_name}")
if cfg.pretrained_model or (cfg.model.nemo_path and os.path.exists(cfg.model.nemo_path)):
if cfg.pretrained_model:
logging.info(f'Loading pretrained model {cfg.pretrained_model}')
model = model_class.from_pretrained(cfg.pretrained_model)
else:
logging.info(f'Restoring model from {cfg.model.nemo_path}')
model = model_class.restore_from(cfg.model.nemo_path, trainer=trainer)
if cfg.do_training:
model.setup_training_data(train_data_config=cfg.model.train_ds)
model.setup_multiple_validation_data(val_data_config=cfg.model.validation_ds)
else:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = model_class(cfg.model, trainer=trainer)
if cfg.do_training:
trainer.fit(model)
if cfg.model.nemo_path:
if not os.path.exists(cfg.model.nemo_path):
model.save_to(cfg.model.nemo_path)
else:
updated_nemo_path = cfg.model.nemo_path.replace(".nemo", "_new.nemo")
logging.warning("nemo path exists, saving at {} instead".format(updated_nemo_path))
model.save_to(updated_nemo_path)
else:
data_dir = cfg.model.dataset.get('data_dir', None)
dialogues_example_dir = cfg.model.dataset.get('dialogues_example_dir', None)
        if data_dir is None or dialogues_example_dir is None:
            raise ValueError('No dataset directory provided; cannot run evaluation.')
        elif not os.path.exists(data_dir):
            raise ValueError(f'{data_dir} not found; cannot run evaluation on the test set.')
else:
if hasattr(model, "update_data_dirs"):
model.update_data_dirs(data_dir=data_dir, dialogues_example_dir=dialogues_example_dir)
model._cfg.dataset = cfg.model.dataset
if hasattr(cfg.model, 'test_ds') and cfg.model.test_ds.ds_item is not None:
model.setup_multiple_test_data(test_data_config=cfg.model.test_ds)
trainer.test(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/dialogue/dialogue.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to train a ThutmoseTaggerModel for inverse text normalization(ITN).
This script uses the `/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml`
config file by default. Another config file can be selected via the command-line
argument `--config-name=CONFIG_FILE_PATH`. It is worth looking at the example
config file to see the list of parameters used for training.
USAGE Example:
1. Obtain a processed dataset
2. Run:
python ${NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py \
lang=${LANG} \
data.validation_ds.data_path=${DATA_PATH}/valid.tsv \
data.train_ds.data_path=${DATA_PATH}/train.tsv \
data.train_ds.batch_size=128 \
data.train_ds.num_workers=8 \
model.language_model.pretrained_model_name=${LANGUAGE_MODEL} \
model.label_map=${DATA_PATH}/label_map.txt \
model.semiotic_classes=${DATA_PATH}/semiotic_classes.txt \
model.optim.lr=3e-5 \
trainer.devices=[1] \
trainer.num_nodes=1 \
trainer.accelerator=gpu \
trainer.strategy=ddp \
trainer.max_epochs=5
Information on the arguments:
Most arguments in the example config file are quite self-explanatory (e.g.,
`model.optim.lr` refers to the learning rate for training the model).
Some arguments we want to mention are:
+ lang: The language of the dataset.
+ model.language_model.pretrained_model_name: This is the backbone BERT model (depends on the language)
e.g. bert-base-uncased (English), DeepPavlov/rubert-base-cased (Russian)
"""
from helpers import ITN_MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="thutmose_tagger_itn_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params: {OmegaConf.to_yaml(cfg)}')
# Train the model
if cfg.model.do_training:
logging.info(
"================================================================================================"
)
logging.info('Start training...')
trainer, model = instantiate_model_and_trainer(cfg, ITN_MODEL, True)
thutmose_tagger_exp_manager = cfg.get('exp_manager', None)
exp_manager(trainer, thutmose_tagger_exp_manager)
trainer.fit(model)
logging.info('Training finished!')
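# For orientation (illustrative; the authoritative format is produced by the
# dataset preparation scripts): each line of train.tsv / valid.tsv is expected to
# be a tab-separated pair of spoken-domain text and one space-separated tag per
# input token, where <SELF> keeps the token as is, <DELETE> drops it, and any
# other tag is the written-domain fragment that replaces the spoken token.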
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/text_normalization_as_tagging/normalization_as_tagging_train.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to run itn inference with the ThutmoseTaggerModel.
The inference works on a raw file (no labels required).
Each line of the input file represents a single example for inference.
Specify inference.from_file and inference.batch_size parameters.
USAGE Example:
1. Train a model, or use a pretrained checkpoint.
2. Run:
export TOKENIZERS_PARALLELISM=false
python ${NEMO_PATH}/examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py \
pretrained_model=./training.nemo \
inference.from_file=./input.txt \
inference.out_file=./output.txt \
model.max_sequence_len=1024 \
inference.batch_size=128
This script uses the `/examples/nlp/text_normalization_as_tagging/conf/thutmose_tagger_itn_config.yaml`
config file by default. Another config file can be selected via the command-line
argument `--config-name=CONFIG_FILE_PATH`.
"""
import os
from helpers import ITN_MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="thutmose_tagger_itn_config")
def main(cfg: DictConfig) -> None:
logging.debug(f'Config Params: {OmegaConf.to_yaml(cfg)}')
if cfg.pretrained_model is None:
raise ValueError("A pre-trained model should be provided.")
_, model = instantiate_model_and_trainer(cfg, ITN_MODEL, False)
text_file = cfg.inference.from_file
logging.info(f"Running inference on {text_file}...")
if not os.path.exists(text_file):
raise ValueError(f"{text_file} not found.")
with open(text_file, "r", encoding="utf-8") as f:
lines = f.readlines()
batch_size = cfg.inference.get("batch_size", 8)
batch, all_preds = [], []
for i, line in enumerate(lines):
s = spoken_preprocessing(line) # this is the same input transformation as in corpus preparation
batch.append(s.strip())
if len(batch) == batch_size or i == len(lines) - 1:
outputs = model._infer(batch)
for x in outputs:
all_preds.append(x)
batch = []
if len(all_preds) != len(lines):
raise ValueError(
"number of input lines and predictions is different: predictions="
+ str(len(all_preds))
+ "; lines="
+ str(len(lines))
)
out_file = cfg.inference.out_file
with open(f"{out_file}", "w", encoding="utf-8") as f_out:
f_out.write("\n".join(all_preds))
logging.info(f"Predictions saved to {out_file}.")
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/text_normalization_as_tagging/normalization_as_tagging_infer.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Tuple
import pytorch_lightning as pl
from omegaconf import DictConfig
from nemo.collections.nlp.models import ThutmoseTaggerModel
from nemo.utils import logging
__all__ = ["ITN_MODEL", "MODEL_NAMES", "instantiate_model_and_trainer"]
ITN_MODEL = "itn"
MODEL_NAMES = [ITN_MODEL]
def instantiate_model_and_trainer(
cfg: DictConfig, model_name: str, do_training: bool
) -> Tuple[pl.Trainer, ThutmoseTaggerModel]:
""" Function for instantiating a model and a trainer
Args:
cfg: The config used to instantiate the model and the trainer.
model_name: A str indicates the model direction, currently only 'itn'.
do_training: A boolean flag indicates whether the model will be trained or evaluated.
Returns:
trainer: A PyTorch Lightning trainer
model: A ThutmoseTaggerModel
"""
if model_name not in MODEL_NAMES:
raise ValueError(f"{model_name} is unknown model type")
# Get configs for the corresponding models
trainer_cfg = cfg.get("trainer")
model_cfg = cfg.get("model")
pretrained_cfg = cfg.get("pretrained_model", None)
trainer = pl.Trainer(**trainer_cfg)
if not pretrained_cfg:
logging.info(f"Initializing {model_name} model")
if model_name == ITN_MODEL:
model = ThutmoseTaggerModel(model_cfg, trainer=trainer)
else:
raise ValueError(f"{model_name} is unknown model type")
elif os.path.exists(pretrained_cfg):
logging.info(f"Restoring pretrained {model_name} model from {pretrained_cfg}")
model = ThutmoseTaggerModel.restore_from(pretrained_cfg)
else:
logging.info(f"Loading pretrained model {pretrained_cfg}")
if model_name == ITN_MODEL:
if pretrained_cfg not in ThutmoseTaggerModel.get_available_model_names():
raise (
ValueError(
f"{pretrained_cfg} not in the list of available Tagger models."
f"Select from {ThutmoseTaggerModel.list_available_models()}"
)
)
model = ThutmoseTaggerModel.from_pretrained(pretrained_cfg)
else:
raise ValueError(f"{model_name} is unknown model type")
# Setup train and validation data
if do_training:
model.setup_training_data(train_data_config=cfg.data.train_ds)
model.setup_validation_data(val_data_config=cfg.data.validation_ds)
logging.info(f"Model {model_name} -- Device {model.device}")
return trainer, model
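# Example usage (illustrative; `cfg` is the Hydra DictConfig with the `trainer`,
# `model`, `data`, and optional `pretrained_model` sections described above):
#
#     trainer, model = instantiate_model_and_trainer(cfg, ITN_MODEL, do_training=True)
#     trainer.fit(model)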
| NeMo-main | examples/nlp/text_normalization_as_tagging/helpers.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to sample each label from the labeled files.
"""
import sys
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Sample labels")
parser.add_argument("--filename", required=True, type=str, help='File with input data')
parser.add_argument("--max_count", required=True, type=int, help='Count')
args = parser.parse_args()
vocab = Counter()
out_sample = open(args.filename + ".sample_" + str(args.max_count), "w", encoding="utf-8")
out_rest = open(args.filename + ".rest_" + str(args.max_count), "w", encoding="utf-8")
n = 0
with open(args.filename, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) < 2:
print("Warning: bad format in line: " + str(n) + ": " + line, file=sys.stderr)
continue
tags = parts[1].split(" ")
ok = False
for t in tags:
if t not in vocab:
vocab[t] = 0
if vocab[t] < args.max_count:
ok = True
vocab[t] += 1
if ok:
out_sample.write(line)
else:
out_rest.write(line)
n += 1
out_sample.close()
out_rest.close()
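# Effect (illustrative): with --max_count=5, a line goes to <filename>.sample_5
# as long as at least one of its tags has been seen fewer than 5 times so far;
# otherwise it goes to <filename>.rest_5. Every tag occurrence is counted
# regardless of which file the line is written to, so frequent tags can appear
# somewhat more than max_count times in the sample.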
| NeMo-main | examples/nlp/text_normalization_as_tagging/dataset_preparation/sample_each_label.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to get label vocab from train and dev labeled files.
"""
import sys
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Get label vocab")
parser.add_argument("--train_filename", required=True, type=str, help='File with training data')
parser.add_argument("--dev_filename", required=True, type=str, help='File with development data')
parser.add_argument("--out_filename", required=True, type=str, help='Output file')
args = parser.parse_args()
vocab = Counter()
n = 0
for fn in [args.train_filename, args.dev_filename]:
with open(fn, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) < 2:
print("Warning: bad format in line: " + str(n) + ": " + line, file=sys.stderr)
continue
tags = parts[1].split(" ")
for t in tags:
if t == "<SELF>":
vocab["KEEP"] += 1
elif t == "<DELETE>":
vocab["DELETE"] += 1
else:
vocab["DELETE|" + t] += 1
n += 1
print("len(vocab)=", len(vocab))
with open(args.out_filename, "w", encoding="utf-8") as out:
out.write("KEEP\n")
out.write("DELETE\n")
for t, freq in vocab.most_common(10000000):
if t == "KEEP":
continue
if t == "DELETE":
continue
out.write(t + "\n")
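# Example of the resulting mapping (hypothetical input): the tag sequence
# "<SELF> <SELF> 3" contributes KEEP twice and DELETE|3 once, so the output file
# starts with KEEP and DELETE followed by replacement labels such as DELETE|3,
# ordered by frequency.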
| NeMo-main | examples/nlp/text_normalization_as_tagging/dataset_preparation/get_label_vocab.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used after google_data_preprocessing_before_alignment.py
to obtain separate "parallel" corpora for each semiotic class.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`).
Then there will be a folder named `en_with_types`.
3. Run python google_data_preprocessing_before_alignment.py
which will produce a file data.tsv in its --output-dir
4. [Optional] Deduplicate lines in data.tsv (e.g. with `sort -u`) and rewrite it.
5. Clone https://github.com/moses-smt/giza-pp.git, run "make" from its root folder.
6. Run this script
python ${NEMO}/examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py \
--data_dir=<--output-dir from the previous step> \
--out_dir=<destination directory for giza alignment folders> \
--giza_dir=/.../giza-pp/GIZA++-v2 \
--mckls_binary=/.../giza-pp/mkcls-v2/mkcls \
--lang={en,ru}
Each corpus will be stored within <--out_dir> in a subdirectory named after the semiotic class,
containing files ready to be fed to Giza++:
src - written form, tokenized as characters
dst - spoken form, tokenized as words
run.sh - script for running Giza++
"""
from argparse import ArgumentParser
from collections import Counter
from os import listdir, mkdir
from os.path import isdir, join
from shutil import rmtree
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment
parser = ArgumentParser(description='Split corpus to subcorpora for giza alignment')
parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
parser.add_argument('--out_dir', type=str, required=True, help='Path to output folder')
parser.add_argument('--giza_dir', type=str, required=True, help='Path to folder with GIZA++ binaries')
parser.add_argument('--mckls_binary', type=str, required=True, help='Path to mkcls binary')
parser.add_argument('--lang', type=str, required=True, help='Language')
args = parser.parse_args()
def prepare_subcorpora_from_data() -> None:
"""Preprocess a corpus in Google TN Dataset format, extract TN-ITN phrase pairs, prepare input for GIZA++ alignment.
"""
semiotic_vcb = Counter()
cache_vcb = {}
filenames = []
for fn in listdir(args.data_dir + "/train"):
filenames.append(args.data_dir + "/train/" + fn)
for fn in listdir(args.data_dir + "/dev"):
filenames.append(args.data_dir + "/dev/" + fn)
for fn in filenames:
with open(fn, "r", encoding="utf-8") as f:
# Loop through each line of the file
for line in f:
parts = line.strip().split("\t")
if len(parts) < 3:
continue
if len(parts) != 3:
raise ValueError("Expect 3 parts, got " + str(len(parts)))
semiotic_class, written, spoken = parts[0], parts[1].strip(), parts[2].strip()
if spoken == "<self>":
continue
semiotic_class = semiotic_class.casefold()
semiotic_vcb[semiotic_class] += 1
classdir = join(args.out_dir, semiotic_class)
if not isdir(classdir):
mkdir(classdir)
src, dst, _, _ = get_src_and_dst_for_alignment(semiotic_class, written, spoken, args.lang)
if src == "" or dst == "":
continue
if len(src.split(" ")) >= 100:
continue
if semiotic_class not in cache_vcb:
cache_vcb[semiotic_class] = Counter()
cache_vcb[semiotic_class][(src, dst)] += 1
for sem in semiotic_vcb:
classdir = join(args.out_dir, sem)
if not isdir(classdir):
raise ValueError("No such directory: " + classdir)
print(classdir, " has ", semiotic_vcb[sem], " instances")
with open(join(classdir, "run.sh"), "w") as out:
out.write("GIZA_PATH=\"" + args.giza_dir + "\"\n")
out.write("MKCLS=\"" + args.mckls_binary + "\"\n")
out.write("\n")
out.write("${GIZA_PATH}/plain2snt.out src dst\n")
out.write("${MKCLS} -m2 -psrc -c15 -Vsrc.classes opt >& mkcls1.log\n")
out.write("${MKCLS} -m2 -pdst -c15 -Vdst.classes opt >& mkcls2.log\n")
out.write("${GIZA_PATH}/snt2cooc.out src.vcb dst.vcb src_dst.snt > src_dst.cooc\n")
out.write(
"${GIZA_PATH}/GIZA++ -S src.vcb -T dst.vcb -C src_dst.snt -coocurrencefile src_dst.cooc -p0 0.98 -o GIZA++ >& GIZA++.log\n"
)
out.write("##reverse direction\n")
out.write("${GIZA_PATH}/snt2cooc.out dst.vcb src.vcb dst_src.snt > dst_src.cooc\n")
out.write(
"${GIZA_PATH}/GIZA++ -S dst.vcb -T src.vcb -C dst_src.snt -coocurrencefile dst_src.cooc -p0 0.98 -o GIZA++reverse >& GIZA++reverse.log\n"
)
out_src = open(join(classdir, "src"), 'w', encoding="utf-8")
out_dst = open(join(classdir, "dst"), 'w', encoding="utf-8")
out_freq = open(join(classdir, "freq"), 'w', encoding="utf-8")
for src, dst in cache_vcb[sem]:
freq = cache_vcb[sem][(src, dst)]
out_src.write(src + "\n")
out_dst.write(dst + "\n")
out_freq.write(str(freq) + "\n")
out_freq.close()
out_dst.close()
out_src.close()
# Main code
if __name__ == '__main__':
for name in listdir(args.out_dir):
path = join(args.out_dir, name)
if isdir(path):
rmtree(path)
# Processing
prepare_subcorpora_from_data()
| NeMo-main | examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_for_alignment.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to combine joined giza alignments and Google Text Normalization dataset
to produce training corpus for the ThutmoseTaggerModel.
"""
import glob
import os
from argparse import ArgumentParser
from collections import Counter
from typing import Dict, Optional, TextIO, Tuple
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import get_src_and_dst_for_alignment
from nemo.utils import logging
parser = ArgumentParser(description="Produce data for the ThutmoseTaggerModel")
parser.add_argument(
"--mode",
required=True,
type=str,
help='Mode, one of ["get_replacement_vocab", "filter_by_vocab", "get_labeled_corpus"]',
)
parser.add_argument(
"--data_dir", required=True, type=str, help='Path to data directory with files like output-00000-of-00100.tsv'
)
parser.add_argument(
"--giza_dir", required=True, type=str, help='Path to directory with class folders like ordinal, date etc'
)
parser.add_argument(
"--alignment_filename", required=True, type=str, help='Name of alignment file, like "itn.out", "itn.out.vocab2000"'
)
parser.add_argument("--out_filename", required=True, type=str, help='Output file')
parser.add_argument("--vocab_filename", required=True, type=str, help='Vocab name')
parser.add_argument("--lang", required=True, type=str, help="Language")
args = parser.parse_args()
def process_file_itn(inputname: str, out: TextIO, keys2replacements: Dict[str, str]) -> None:
"""Processes one file in Google TN Dataset format to get the labeled data for ThutmoseTaggerModel
Args:
inputname: name of input file
out: output stream
keys2replacements: Mapping from (semiotic class, spoken, written) to the segmented written form,
which is aligned one-to-one to spoken words (this is the result obtained from Giza++ alignment pipeline)
"""
words = []
tags = []
semiotic_info = []
sent_is_ok = True
with open(inputname, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("<eos>"):
if sent_is_ok and len(words) > 0:
out.write(" ".join(words) + "\t" + " ".join(tags) + "\t" + ";".join(semiotic_info) + "\n")
words = []
tags = []
semiotic_info = []
sent_is_ok = True
else:
cls, written, spoken = line.strip().split("\t")
if spoken == "sil":
continue
if spoken == "<self>":
words.append(written.casefold())
tags.append("<SELF>")
continue
src, dst, same_begin, same_end = get_src_and_dst_for_alignment(
cls.casefold(), written, spoken, args.lang
)
same_from_begin = [] if same_begin == "" else same_begin.split(" ")
same_from_end = [] if same_end == "" else same_end.split(" ")
key = cls.casefold() + "\t" + src + "\t" + dst
if key in keys2replacements:
replacements = keys2replacements[key].split(" ")
spoken_words = dst.split(" ")
for w, r in zip(
same_from_begin + spoken_words + same_from_end, same_from_begin + replacements + same_from_end
):
words.append(w)
if cls == "LETTERS" or cls == "PLAIN":
if w == r:
tags.append("<SELF>")
else:
tags.append(r)
elif w == r.replace("_", ""):
tags.append("<SELF>")
else:
tags.append(r)
semiotic_info.append(
cls
+ " "
+ str(len(words) - len(spoken_words) - len(same_from_begin) - len(same_from_end))
+ " "
+ str(len(words))
)
else:
sent_is_ok = False
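# process_line below expects alignment-file lines with 6 tab-separated fields:
#   <frequency><TAB>good:<TAB><src phrase><TAB><dst phrase><TAB><left-side alignment><TAB><right-side alignment>
# Lines with other markers ("ban:", "-mon:", "-exception:") are skipped.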
def process_line(semiotic_class: str, line: str) -> Optional[Tuple[str, str, str, int]]:
"""A helper function to read the file with alignment results"""
parts = line.strip().split("\t")
if len(parts) != 6:
return None
freq = int(parts[0])
if parts[1] != "good:":
return None
src, dst, leftside_align, rightside_align = parts[2], parts[3], parts[4], parts[5]
align = rightside_align
if semiotic_class == "letters" or semiotic_class == "plain":
align = leftside_align
return src, dst, align, freq
def get_replacement_vocab() -> None:
"""Loops through the files with alignment results in each semiotic class subfolder, counts frequencies of different
replacement segments.
"""
full_vocab = Counter()
alignment_files = glob.glob(args.giza_dir + "/*/" + args.alignment_filename)
for fn in alignment_files:
fn_parts = fn.split("/")
if len(fn_parts) < 2:
raise ValueError("Bad filename: " + fn)
semiotic_class = fn_parts[-2]
class_vocab = Counter()
with open(fn, "r", encoding="utf-8") as f:
for line in f:
t = process_line(semiotic_class, line)
if t is None:
continue
src, dst, replacement, freq = t
inputs = src.split(" ")
replacements = replacement.split(" ")
if len(inputs) != len(replacements):
raise ValueError("Length mismatch in: " + line)
for inp, rep in zip(inputs, replacements):
if inp == rep: # skip same words
continue
full_vocab[rep] += freq
class_vocab[rep] += freq
with open(args.vocab_filename + "." + semiotic_class, "w", encoding="utf-8") as out:
            for k, v in class_vocab.most_common():
out.write(k + "\t" + str(v) + "\n")
with open(args.vocab_filename, "w", encoding="utf-8") as out:
        for k, v in full_vocab.most_common():
out.write(k + "\t" + str(v) + "\n")
def filter_by_vocab() -> None:
"""Given a restricted vocabulary of replacements,
loops through the files with alignment results in each semiotic class subfolder,
discards the examples containing a replacement which is not in our restricted vocabulary.
"""
    if not os.path.exists(args.vocab_filename):
        raise ValueError(f"Vocab file {args.vocab_filename} does not exist")
# load vocab from file
vocab = {}
with open(args.vocab_filename, "r", encoding="utf-8") as f:
for line in f:
k, v = line.strip().split("\t")
vocab[k] = int(v)
print("len(vocab)=", len(vocab))
alignment_files = glob.glob(args.giza_dir + "/*/" + args.alignment_filename)
for fn in alignment_files:
fn_parts = fn.split("/")
if len(fn_parts) < 2:
raise ValueError("Bad filename: " + fn)
semiotic_class = fn_parts[-2]
out = open(args.giza_dir + "/" + semiotic_class + "/" + args.out_filename, "w", encoding="utf-8")
with open(fn, "r", encoding="utf-8") as f:
for line in f:
t = process_line(semiotic_class, line)
if t is None:
continue
src, dst, replacement, freq = t
ok = True
for s, r in zip(src.split(" "), replacement.split(" ")):
if s != r and r not in vocab:
ok = False
if ok:
out.write(semiotic_class + "\t" + src + "\t" + dst + "\t" + replacement + "\n")
out.close()
def get_labeled_corpus() -> None:
"""Loops through the files with alignment results in each semiotic class subfolder,
collects a mapping from (semiotic class, spoken, written) to the segmented written form,
which is aligned one-to-one to spoken words.
Then loops through the files in Google TN Dataset format to get the labeled data for ThutmoseTaggerModel.
It extracts the whole sentences and substitutes the semiotic spans to their aligned form from the dictionary.
"""
if not os.path.exists(args.data_dir):
raise ValueError(f"Data dir {args.data_dir} does not exist")
keys2replacements = {}
alignment_files = glob.glob(args.giza_dir + "/*/" + args.alignment_filename)
if len(alignment_files) == 0:
raise ValueError("Did not found any such files: " + args.giza_dir + "/*/" + args.alignment_filename)
for af in alignment_files:
with open(af, "r", encoding="utf-8") as f:
for line in f:
cls, src, dst, replacements = line.strip().split("\t")
key = cls + "\t" + dst + "\t" + src
if key in keys2replacements and keys2replacements[key] != replacements:
                    logging.warning(
                        "Conflicting replacements for key '%s': '%s' vs '%s'", key, keys2replacements[key], replacements
                    )
keys2replacements[key] = replacements
print("size of phrase-to-replacements dictionary =", len(keys2replacements))
out = open(args.out_filename, "w", encoding="utf-8")
input_paths = sorted([os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir)])
for inputname in input_paths:
process_file_itn(inputname, out, keys2replacements)
out.close()
def main() -> None:
if not os.path.exists(args.giza_dir):
raise ValueError(f"Alignments dir {args.giza_dir} does not exist")
if args.mode == "get_replacement_vocab":
get_replacement_vocab()
elif args.mode == "filter_by_vocab":
filter_by_vocab()
elif args.mode == "get_labeled_corpus":
get_labeled_corpus()
else:
raise ValueError("unknown mode: " + args.mode)
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/text_normalization_as_tagging/dataset_preparation/prepare_corpora_after_alignment.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to filter sentences containing bad examples from Google TN Dataset.
"""
from argparse import ArgumentParser
from os import listdir, mkdir
from os.path import exists, isfile, join
from typing import Set
parser = ArgumentParser(description="Filter Google TN Dataset by error vocabulary")
parser.add_argument(
"--data_dir", required=True, type=str, help='Path to data directory with files like output-00000-of-00100.tsv'
)
parser.add_argument(
"--out_dir", required=True, type=str, help='Output data directory, same files (with some sentences filtered)'
)
parser.add_argument("--errors_vocab_filename", required=True, type=str, help='File with error vocabulary')
parser.add_argument("--lang", required=True, type=str, help="Language")
args = parser.parse_args()
def filter_file(inp_filename: str, out_filename: str, error_vcb: Set) -> None:
"""Filter out whole sentences containing bad itn conversions. The output format is the same as input.
Args:
inp_filename: Name of input file in Google TN Dataset format.
out_filename: Name of output file in Google TN Dataset format.
error_vcb: Set of tuples with erroneous conversion, e.g. ("CARDINAL", "two", "132")
"""
out = open(out_filename, "w", encoding="utf-8")
sent_lines = []
sent_is_ok = True
with open(inp_filename, "r", encoding="utf-8") as f:
for line in f:
sent_lines.append(line.strip())
if line.startswith("<eos>"):
if sent_is_ok and len(sent_lines) > 1: # there should be at least one line except <eos>
out.write("\n".join(sent_lines) + "\n")
sent_lines = []
sent_is_ok = True
else:
cls, written, spoken = line.strip().split("\t")
k = (cls, spoken.casefold(), written.casefold())
if k in error_vcb:
sent_is_ok = False
out.close()
def main() -> None:
if not exists(args.data_dir):
raise ValueError(f"Data dir {args.data_dir} does not exist")
# load errors vocabulary
error_vcb = set()
with open(args.errors_vocab_filename, "r", encoding="utf-8") as f:
for line in f:
cls, spoken, written = line.strip().split("\t")
k = (cls, spoken, written)
error_vcb.add(k)
for subdir in listdir(args.data_dir):
mkdir(join(args.out_dir, subdir))
for filename in listdir(join(args.data_dir, subdir)):
if not filename.startswith('output'):
continue
inp_filename = join(args.data_dir, subdir, filename)
out_filename = join(args.out_dir, subdir, filename)
if not isfile(inp_filename):
continue
filter_file(inp_filename, out_filename, error_vcb)
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/text_normalization_as_tagging/dataset_preparation/filter_sentences_with_errors.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used after GIZA++ alignment to extract final alignments for each semiotic class.
"""
import re
from argparse import ArgumentParser
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import (
check_monotonicity,
fill_alignment_matrix,
get_targets,
get_targets_from_back,
)
parser = ArgumentParser(description='Extract final alignments from GIZA++ alignments')
parser.add_argument('--mode', type=str, required=True, help='tn or itn')
parser.add_argument('--giza_dir', type=str, required=True, help='Path to folder with GIZA++ alignment')
parser.add_argument(
'--giza_suffix', type=str, required=True, help='suffix of alignment files, e.g. \"Ahmm.5\", \"A3.final\"'
)
parser.add_argument('--out_filename', type=str, required=True, help='Output file')
parser.add_argument('--lang', type=str, required=True, help="Language")
args = parser.parse_args()
def main() -> None:
g = open(args.giza_dir + "/GIZA++." + args.giza_suffix, "r", encoding="utf-8")
f = open(args.giza_dir + "/GIZA++reverse." + args.giza_suffix, "r", encoding="utf-8")
target_inner_delimiter = ""
if args.mode == "tn":
g, f = f, g
target_inner_delimiter = "_"
out = open(args.giza_dir + "/" + args.out_filename, "w", encoding="utf-8")
cache = {}
good_count, not_mono_count, not_covered_count, exception_count = 0, 0, 0, 0
n = 0
while True:
n += 3
if n % 10000 == 0:
print(n, "lines processed")
fline1 = f.readline().strip()
fline2 = f.readline().strip()
fline3 = f.readline().strip()
gline1 = g.readline().strip()
gline2 = g.readline().strip()
gline3 = g.readline().strip()
if fline1 == "" and gline1 == "":
break
cache_key = fline1 + "\t" + fline2 + "\t" + gline1 + "\t" + gline2
if cache_key in cache:
out.write(cache[cache_key] + "\n")
continue
if fline1 == "" or gline1 == "" or fline2 == "" or gline2 == "" or fline3 == "" or gline3 == "":
raise ValueError("Empty line: " + str(n))
try:
matrix, srctokens, dsttokens = fill_alignment_matrix(fline2, fline3, gline2, gline3)
except Exception:
print(fline1)
print(fline2)
print(fline3)
print(gline1)
print(gline2)
print(gline3)
exception_count += 1
out_str = "-exception:\t" + fline2 + "\t" + gline2
out.write(out_str + "\n")
continue
else:
matrix[matrix <= 2] = 0 # leave only 1-to-1 alignment points
if check_monotonicity(matrix):
targets = get_targets(matrix, dsttokens, delimiter=target_inner_delimiter)
targets_from_back = get_targets_from_back(matrix, dsttokens, delimiter=target_inner_delimiter)
if len(targets) != len(srctokens):
raise ValueError(
"targets length doesn't match srctokens length: len(targets)="
+ str(len(targets))
+ "; len(srctokens)="
+ str(len(srctokens))
)
leftside_align = " ".join(targets)
rightside_align = " ".join(targets_from_back)
rightside_align = rightside_align.replace("<DELETE> <DELETE> _11100_", "_11 <DELETE> 100_")
leftside_align = leftside_align.replace("<DELETE> <DELETE> _11100_", "_11 <DELETE> 100_")
# _1 4000_ => _14 000_
# 1 5,000 => 15 ,000
rightside_align = re.sub(r"^_1 ([\d])(,?000)", r"_1\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"^_1 ([\d])(,?000)", r"_1\g<1> \g<2>", leftside_align)
# "_2 10 0_" => "_2 <DELETE> 100_"
rightside_align = re.sub(r"([\d]) 10 0_", r"\g<1> <DELETE> 100_", rightside_align)
leftside_align = re.sub(r"([\d]) 10 0_", r"\g<1> <DELETE> 100_", leftside_align)
if srctokens[0] in [
"ten",
"twenty",
"thirty",
"forty",
"fifty",
"sixty",
"seventy",
"eighty",
"ninety",
]:
# ten thousand sixty _1 00 60_ => _10 0 60_
rightside_align = re.sub(r"^(_\d) 00 (\d)", r"\g<1>0 0 \g<2>", rightside_align)
leftside_align = re.sub(r"^(_\d) 00 (\d)", r"\g<1>0 0 \g<2>", leftside_align)
# ten thousand sixty three _1 0, 06 3_ => _10 ,0 6 3_
rightside_align = re.sub(r"([ _]\d) 0, 0(\d)", r"\g<1>0 ,0 \g<2>", rightside_align)
leftside_align = re.sub(r"([ _]\d) 0, 0(\d)", r"\g<1>0 ,0 \g<2>", leftside_align)
# _3 0, 7 7 4=> _30 , 7 7 4_
rightside_align = re.sub(r"(\d) 0, ", r"\g<1>0 , ", rightside_align)
leftside_align = re.sub(r"(\d) 0, ", r"\g<1>0 , ", leftside_align)
# _1 1, 1 <DELETE> 40_ => _11 , 1 <DELETE> 40_
rightside_align = re.sub(r"1 1, (\d)", r"11 , \g<1>", rightside_align)
leftside_align = re.sub(r"1 1, (\d)", r"11 , \g<1>", leftside_align)
if re.match(r".+надцат", srctokens[0]) or srctokens[0] in [
"ten",
"eleven",
"twelve",
"thirteen",
"fourteen",
"fifteen",
"sixteen",
"seventeen",
"eighteen",
"nineteen",
]:
# "_1 <DELETE> 12 14_" -> "_11 <DELETE> 2 14_"
rightside_align = re.sub(
r"^(_1) (<DELETE>) ([\d])([\d])", r"\g<1>\g<3> \g<2> \g<4>", rightside_align
)
leftside_align = re.sub(
r"^(_1) (<DELETE>) ([\d])([\d])", r"\g<1>\g<3> \g<2> \g<4>", leftside_align
)
# "_1 10 10_" -> "_11 0 10_"
rightside_align = re.sub(r"^_1 ([\d])0 ([\d] ?[\d])", r"_1\g<1> 0 \g<2>", rightside_align)
leftside_align = re.sub(r"^_1 ([\d])0 ([\d] ?[\d])", r"_1\g<1> 0 \g<2>", leftside_align)
if args.giza_dir.endswith("decimal") and args.lang == "ru":
# "_1 <DELETE> 0, 5_" => "_10 <DELETE> , 5_" #десять целых и пять десятых
rightside_align = re.sub(
r"(\d) (<DELETE>) ([0123456789])(,) ([\d])", r"\g<1>\g<3> \g<2> \g<4> \g<5>", rightside_align
)
leftside_align = re.sub(
r"(\d) (<DELETE>) ([0123456789])(,) ([\d])", r"\g<1>\g<3> \g<2> \g<4> \g<5>", leftside_align
)
if args.giza_dir.endswith("decimal") and args.lang == "en":
# "_7 0. 7_" => _70 . 7_
rightside_align = re.sub(r"^(_\d) 0\. (\d)", r"\g<1>0 . \g<2>", rightside_align)
leftside_align = re.sub(r"^(_\d) 0\. (\d)", r"\g<1>0 . \g<2>", leftside_align)
if args.giza_dir.endswith("money") and args.lang == "en":
# "_1 , 000__£<<" => "_1 ,000_ _£<<"
rightside_align = re.sub(r"(\d) , 000_(_[£$€])", r"\g<1> ,000_ \g<2>", rightside_align)
leftside_align = re.sub(r"(\d) , 000_(_[£$€])", r"\g<1> ,000_ \g<2>", leftside_align)
if args.giza_dir.endswith("money"):
# "_5 <DELETE> 000000__иен_" => "_5 000000_ _иен_"
rightside_align = re.sub(
r"([\d]) <DELETE> 000000_(_[^\d])", r"\g<1> 000000_ \g<2>", rightside_align
)
leftside_align = re.sub(r"([\d]) <DELETE> 000000_(_[^\d])", r"\g<1> 000000_ \g<2>", leftside_align)
# _5_ <DELETE> _m__£<< => "_5_ _m_ _£<<"
rightside_align = re.sub(
r"([\d]_) <DELETE> (_[mk]_)(_[^\d])", r"\g<1> \g<2> \g<3>", rightside_align
)
leftside_align = re.sub(r"([\d]_) <DELETE> (_[mk]_)(_[^\d])", r"\g<1> \g<2> \g<3>", leftside_align)
# "_3 <DELETE> 0__m__£<<" => "_30 _m_ _£<<"
rightside_align = re.sub(
r"([\d]) <DELETE> 0_(_[mk]_)(_[^\d])", r"\g<1>0 \g<2> \g<3>", rightside_align
)
leftside_align = re.sub(
r"([\d]) <DELETE> 0_(_[mk]_)(_[^\d])", r"\g<1>0 \g<2> \g<3>", leftside_align
)
# "_15 <DELETE> 000__руб._" => "_15 000_ _руб._"
rightside_align = re.sub(r"([\d]) <DELETE> (000_)(_[^\d])", r"\g<1> \g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) <DELETE> (000_)(_[^\d])", r"\g<1> \g<2> \g<3>", leftside_align)
# "_2 5 0 000__$<<" => "_2 50 000_ _$<<"
rightside_align = re.sub(r"([\d]) 0 000_(_[^\d])", r"\g<1>0 000_ \g<2>", rightside_align)
leftside_align = re.sub(r"([\d]) 0 000_(_[^\d])", r"\g<1>0 000_ \g<2>", leftside_align)
# "_5 0 0000__$_" => "_500 000_ _$_"
rightside_align = re.sub(r"([\d]) 0 0000_(_[^\d])", r"\g<1>00 000_ \g<2>", rightside_align)
leftside_align = re.sub(r"([\d]) 0 0000_(_[^\d])", r"\g<1>00 000_ \g<2>", leftside_align)
# "_1 000__руб._" => "_1000_ _руб._"
rightside_align = re.sub(r"_1 000_(_[^\d])", r"_1000_ \g<1>", rightside_align)
leftside_align = re.sub(r"_1 000_(_[^\d])", r"_1000_ \g<1>", leftside_align)
# replace cases like "2 0__января" with "20_ _января"
leftside_align = re.sub(r"([\d]) (00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) (00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", rightside_align)
# "_3 <DELETE> 0__september_ _2 014_" => "_30_ <DELETE> _september_ _2 014_"
# "_3 <DELETE> 00__тыс.__руб._" => "_300_ <DELETE> _тыс.__руб._"
leftside_align = re.sub(
r"([\d]) <DELETE> (00?_)(_[^\d])", r"\g<1>\g<2> <DELETE> \g<3>", leftside_align
)
rightside_align = re.sub(
r"([\d]) <DELETE> (00?_)(_[^\d])", r"\g<1>\g<2> <DELETE> \g<3>", rightside_align
)
# "_october_ _2 0,2 015_" => "_october_ _20 ,2 015_"
leftside_align = re.sub(r"([\d]) (0),(\d)", r"\g<1>\g<2> ,\g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) (0),(\d)", r"\g<1>\g<2> ,\g<3>", rightside_align)
# "_3 0_.10. _1 9 4 3_" => "_30_ .10. _1 9 4 3_"
leftside_align = re.sub(r"([\d]) (0_)(\.[\d])", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) (0_)(\.[\d])", r"\g<1>\g<2> \g<3>", rightside_align)
# replace cases like "_1 0000_" with "_10 000_"
# replace cases like "_5 00000_" with "_500 000_"
rightside_align = re.sub(r"([\d]) ([0][0]?)(000000000_)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?)(000000000_)", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) ([0][0]?)(000000_)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?)(000000_)", r"\g<1>\g<2> \g<3>", leftside_align)
rightside_align = re.sub(r"([\d]) ([0][0]?)(000_)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?)(000_)", r"\g<1>\g<2> \g<3>", leftside_align)
# "_4 00,000_" -> "_400 ,000_"
rightside_align = re.sub(r"([\d]) ([0][0]?),(000_)", r"\g<1>\g<2> ,\g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([0][0]?),(000_)", r"\g<1>\g<2> ,\g<3>", leftside_align)
# "_9 3 ,0__²_> _км_" => "_9 3 ,0__²_> _км_"
rightside_align = re.sub(r"([\d]) (,00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) (,00?_)(_[^\d])", r"\g<1>\g<2> \g<3>", leftside_align)
# "_0 <DELETE> , <DELETE> <DELETE> 01__г_" => "_0 <DELETE> , 01 <DELETE> _г_"
rightside_align = re.sub(
r"(,) <DELETE> <DELETE> 01_(_[^\d])", r"\g<1> 01_ <DELETE> \g<2>", rightside_align
)
leftside_align = re.sub(
r"(,) <DELETE> <DELETE> 01_(_[^\d])", r"\g<1> 01_ <DELETE> \g<2>", leftside_align
)
# "_0 <DELETE> , 7 6 <DELETE> <DELETE> 1__км_" => "_0 <DELETE> , 7 6 1_ <DELETE> _км_"
rightside_align = re.sub(
r"(,) (\d) (\d) <DELETE> <DELETE> 1_(_[^\d])",
r"\g<1> \g<2> \g<3> 1_ <DELETE> \g<4>",
rightside_align,
)
leftside_align = re.sub(
r"(,) (\d) (\d) <DELETE> <DELETE> 1_(_[^\d])",
r"\g<1> \g<2> \g<3> 1_ <DELETE> \g<4>",
leftside_align,
)
# "_5 <DELETE> 0000__рублей_" => "_50 000_ рублей"
rightside_align = re.sub(
r"([\d]) <DELETE> ([0][0]?)(000_)(_)", r"\g<1>\g<2> \g<3> \g<4>", rightside_align
)
leftside_align = re.sub(
r"([\d]) <DELETE> ([0][0]?)(000_)(_)", r"\g<1>\g<2> \g<3> \g<4>", leftside_align
)
# "_1 <DELETE> 115_" -> "_1 1 15_"
rightside_align = re.sub(r"<DELETE> ([1])([1][\d])", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> ([1])([1][\d])", r"\g<1> \g<2>", leftside_align)
# "_1 <DELETE> 990-х_" -> "_1 9 90-х_"
rightside_align = re.sub(r"<DELETE> (9)(90)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (9)(90)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (8)(80)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (8)(80)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (7)(70)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (7)(70)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (6)(60)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (6)(60)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (5)(50)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (5)(50)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (4)(40)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (4)(40)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (3)(30)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (3)(30)", r"\g<1> \g<2>", leftside_align)
rightside_align = re.sub(r"<DELETE> (2)(20)", r"\g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> (2)(20)", r"\g<1> \g<2>", leftside_align)
                # "восемь ноль ноль ноль ноль ноль ноль ноль" ("eight zero zero zero zero zero zero zero") _8 0 0 0 0 0 0 0_
# _8 <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 0000000_
rightside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 0000000_",
r"0 0 0 0 0 0 0_",
rightside_align,
)
leftside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 0000000_",
r"0 0 0 0 0 0 0_",
leftside_align,
)
# _8 <DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 000000_
rightside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 000000_", r"0 0 0 0 0 0_", rightside_align
)
leftside_align = re.sub(
r"<DELETE> <DELETE> <DELETE> <DELETE> <DELETE> 000000_", r"0 0 0 0 0 0_", leftside_align
)
# _8 <DELETE> <DELETE> <DELETE> <DELETE> 00000_
rightside_align = re.sub(r"<DELETE> <DELETE> <DELETE> <DELETE> 00000_", r"0 0 0 0 0_", rightside_align)
leftside_align = re.sub(r"<DELETE> <DELETE> <DELETE> <DELETE> 00000_", r"0 0 0 0 0_", leftside_align)
# _8 <DELETE> <DELETE> <DELETE> 0000_
rightside_align = re.sub(r"<DELETE> <DELETE> <DELETE> 0000_", r"0 0 0 0_", rightside_align)
leftside_align = re.sub(r"<DELETE> <DELETE> <DELETE> 0000_", r"0 0 0 0_", leftside_align)
# _8 <DELETE> <DELETE> 000_
rightside_align = re.sub(r"<DELETE> <DELETE> 000_", r"0 0 0_", rightside_align)
leftside_align = re.sub(r"<DELETE> <DELETE> 000_", r"0 0 0_", leftside_align)
# "_2 <DELETE> <DELETE> 010/11" => "_2 0 10 /11"
rightside_align = re.sub(
r"<DELETE> <DELETE> (0)([1][\d])/([\d])", r"\g<1> \g<2> /\g<3>", rightside_align
)
leftside_align = re.sub(
r"<DELETE> <DELETE> (0)([1][\d])/([\d])", r"\g<1> \g<2> /\g<3>", leftside_align
)
# "_2 0 <DELETE> 11/12_" => "_2 0 11 /12_"
rightside_align = re.sub(r"<DELETE> ([\d]+)/([\d])", r"\g<1> /\g<2>", rightside_align)
leftside_align = re.sub(r"<DELETE> ([\d]+)/([\d])", r"\g<1> /\g<2>", leftside_align)
# "_2 0 1 0/2 0 11_" => "_2 0 10 /2 0 11_"
rightside_align = re.sub(r"([\d]) ([\d]+)/([\d])", r"\g<1>\g<2> /\g<3>", rightside_align)
leftside_align = re.sub(r"([\d]) ([\d]+)/([\d])", r"\g<1>\g<2> /\g<3>", leftside_align)
# "_5 0%_" => "_50 %_"
# "_1 00%_" => "_100 %_"
# "_1 00,00%_" => "_100,00 %_"
rightside_align = re.sub(r"([\d]) ([0,]+)%", r"\g<1>\g<2> %", rightside_align)
leftside_align = re.sub(r"([\d]) ([0,]+)%", r"\g<1>\g<2> %", leftside_align)
# ATTENTION: keep the order of next two rules
# "_2 0½_" => "_20 ½_"
rightside_align = re.sub(r"([\d]) ([\d]+)½", r"\g<1>\g<2> ½", rightside_align)
leftside_align = re.sub(r"([\d]) ([\d]+)½", r"\g<1>\g<2> ½", leftside_align)
# "_1 ½_ <DELETE> <DELETE> <DELETE>" => "_1 <DELETE> <DELETE> <DELETE> ½_" #одна целая и одна вторая
rightside_align = re.sub(
r"([\d]) (_?½_)? <DELETE> <DELETE> <DELETE>",
r"\g<1> <DELETE> <DELETE> <DELETE> \g<2>",
rightside_align,
)
leftside_align = re.sub(
r"([\d]) (_?½_)? <DELETE> <DELETE> <DELETE>",
r"\g<1> <DELETE> <DELETE> <DELETE> \g<2>",
leftside_align,
)
if args.lang == "en" and srctokens[-1] == "half":
# _2 <DELETE> 1/ 2_ => _2 <DELETE> <DELETE> ½_
rightside_align = re.sub(r"(\d) <DELETE> 1/ 2_$", r"\g<1> <DELETE> <DELETE> ½_", rightside_align)
leftside_align = re.sub(r"(\d) <DELETE> 1/ 2_$", r"\g<1> <DELETE> <DELETE> ½_", leftside_align)
# "_1 50_ <DELETE> _тыс.__руб._" => "_1 50_ _тыс._ _руб._"
rightside_align = re.sub(r"_ <DELETE> (_[^\d]+_)(_[^\d]+_)", r"_ \g<1> \g<2>", rightside_align)
leftside_align = re.sub(r"_ <DELETE> (_[^\d]+_)(_[^\d]+_)", r"_ \g<1> \g<2>", leftside_align)
# _1000 000__$_ => "_1000000_ _$_"
rightside_align = re.sub(r"_1000 000_(_[^\d])", r"_1000000_ \g<1>", rightside_align)
leftside_align = re.sub(r"_1000 000_(_[^\d])", r"_1000000_ \g<1>", leftside_align)
if args.giza_dir.endswith("date") and args.lang == "en":
# "_1 2_ <DELETE> _november_ _2 014_" => " <DELETE> _12_ <DELETE> _november_ _2 014_"
if srctokens[0] == "the":
leftside_align = re.sub(r"^_1 (\d_)", r"<DELETE> _1\g<1>", leftside_align)
rightside_align = re.sub(r"^_1 (\d_)", r"<DELETE> _1\g<1>", rightside_align)
# "<DELETE> <DELETE> _12,2012_" => "_12_ ,20 12_"
leftside_align = re.sub(r"^<DELETE> <DELETE> _12,2012_", r"_12_ ,20 12_", leftside_align)
rightside_align = re.sub(r"^<DELETE> <DELETE> _12,2012_", r"_12_ ,20 12_", rightside_align)
# "<DELETE> _1,20 14_" => "_1 ,20 14_"
leftside_align = re.sub(r"^<DELETE> _1,(\d)", r"_1 ,\g<1>", leftside_align)
rightside_align = re.sub(r"^<DELETE> _1,(\d)", r"_1 ,\g<1>", rightside_align)
# "_2 <DELETE> 1,20 14_" => "_2 1 ,20 14_"
leftside_align = re.sub(r"<DELETE> 1,(\d)", r"1 ,\g<1>", leftside_align)
rightside_align = re.sub(r"<DELETE> 1,(\d)", r"1 ,\g<1>", rightside_align)
# <DELETE> _11,19 9 7_ => _11 ,19 9 7_
leftside_align = re.sub(r"<DELETE> _11,(\d)", r"_11 ,\g<1>", leftside_align)
rightside_align = re.sub(r"<DELETE> _11,(\d)", r"_11 ,\g<1>", rightside_align)
if len(srctokens) >= 2 and srctokens[-2] == "twenty":
# "<DELETE> <DELETE> _12,200 9_" => "_12 ,20 09_"
leftside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,20 0\g<1>", leftside_align
)
rightside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,20 0\g<1>", rightside_align
)
# "_april_ _2 015_" => "_april_ _20 15_"
leftside_align = re.sub(r"2 0(\d\d_)$", r"20 \g<1>", leftside_align)
rightside_align = re.sub(r"2 0(\d\d_)$", r"20 \g<1>", rightside_align)
elif len(srctokens) >= 2 and srctokens[-2] == "thousand":
# "<DELETE> <DELETE> _12,200 9_" => "_12 ,2 00 9_"
leftside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,2 00 \g<1>", leftside_align
)
rightside_align = re.sub(
r"^<DELETE> <DELETE> _12,200 (\d_)", r"_12_ ,2 00 \g<1>", rightside_align
)
# thirtieth twenty fifteen _3 0th__,20 15_ => _30th_ _,20 15_
leftside_align = re.sub(r"(\d) 0th_(_,\d)", r"\g<1>0th_ \g<2>", leftside_align)
rightside_align = re.sub(r"(\d) 0th_(_,\d)", r"\g<1>0th_ \g<2>", rightside_align)
if args.giza_dir.endswith("date") and args.lang == "ru":
                    # "тысяча девятьсот шестидесятого года" ("of the year nineteen sixty"): _1 9 6 0_ => _1 9 60_ <DELETE>
if srctokens[-1] == "года":
leftside_align = re.sub(r"(\d) 0_", r"\g<1>0_ <DELETE>", leftside_align)
rightside_align = re.sub(r"(\d) 0_", r"\g<1>0_ <DELETE>", rightside_align)
if args.giza_dir.endswith("time"):
if srctokens[-1] == "hundred":
# fifteen hundred <DELETE> _15:00_
rightside_align = re.sub(r"<DELETE> (_\d\d:)00_", r"\g<1> 00_", rightside_align)
leftside_align = re.sub(r"<DELETE> (_\d\d:)00_", r"\g<1> 00_", leftside_align)
# !! Do not change the order of next two rules
# twenty one hundred _2 1:00_ <DELETE>
rightside_align = re.sub(r"(_\d) (\d:)00_ <DELETE>", r"\g<1> \g<2> 00_", rightside_align)
leftside_align = re.sub(r"(_\d) (\d:)00_ <DELETE>", r"\g<1> \g<2> 00_", leftside_align)
# twenty hundred _2 0:00_
rightside_align = re.sub(r"(_\d) (\d:)00_", r"\g<1>\g<2> 00_", rightside_align)
leftside_align = re.sub(r"(_\d) (\d:)00_", r"\g<1>\g<2> 00_", leftside_align)
if srctokens[-1] == "o'clock":
# nine o'clock <DELETE> _09:00_ => "_09:00_ <DELETE>"
rightside_align = re.sub(r"^<DELETE> ([^ ])$", r"\g<1> <DELETE>", rightside_align)
leftside_align = re.sub(r"^<DELETE> ([^ ])$", r"\g<1> <DELETE>", leftside_align)
# "_1 1:3 3_" => "_11: 3 3_"
rightside_align = re.sub(r"_(\d) (\d:)(\d)", r"\g<1>\g<2> \g<3>", rightside_align)
leftside_align = re.sub(r"_(\d) (\d:)(\d)", r"\g<1>\g<2> \g<3>", leftside_align)
ban = False
if args.giza_dir.endswith("ordinal"):
                    if dsttokens[0] == "_—":  # e.g. "тысяча девятьсот сорок пятом" ("nineteen forty-fifth"): _— 1 9 4 5_
ban = True
# ban roman numbers with at least two symbols, because we do not split them to parts
for t in rightside_align.split():
if re.match(r"^_?[ivxl][ivxl]+_?$", t):
ban = True
# ban cases like "_11/05/2013_", "_2005-11-25_", because they are source of incorrect alignments
if args.giza_dir.endswith("date") and args.lang == "en":
if "/" in rightside_align or "-" in rightside_align:
ban = True
# ban brackets
if "(" in rightside_align or ")" in rightside_align:
ban = True
if ban:
out_str = (
"ban:\t"
+ " ".join(srctokens)
+ "\t"
+ " ".join(dsttokens)
+ "\t"
+ leftside_align
+ "\t"
+ rightside_align
)
else:
out_str = (
"good:\t"
+ " ".join(srctokens)
+ "\t"
+ " ".join(dsttokens)
+ "\t"
+ leftside_align
+ "\t"
+ rightside_align
)
out.write(out_str + "\n")
cache[cache_key] = out_str
else:
out_str = "-mon:\t" + " ".join(srctokens) + "\t" + " ".join(dsttokens)
out.write(out_str + "\n")
cache[cache_key] = out_str
not_mono_count += 1
f.close()
g.close()
out.close()
# Main code
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/text_normalization_as_tagging/dataset_preparation/extract_giza_alignments.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to construct a vocabulary of multiple references
"""
from argparse import ArgumentParser
from collections import Counter
from os import listdir
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing
parser = ArgumentParser(description="Get reference vocabulary from corpus (it will be used in testing)")
parser.add_argument("--data_dir", type=str, required=True, help="Path to folder with data")
parser.add_argument("--out_filename", type=str, required=True, help="Path to output file")
args = parser.parse_args()
if __name__ == "__main__":
vcb = {}
filenames = []
for fn in listdir(args.data_dir + "/train"):
filenames.append(args.data_dir + "/train/" + fn)
for fn in listdir(args.data_dir + "/dev"):
filenames.append(args.data_dir + "/dev/" + fn)
for fn in filenames:
print("Processing ", fn)
with open(fn, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) < 3:
continue
if len(parts) != 3:
raise ValueError("Expect 3 parts, got " + str(len(parts)))
semiotic_class, written, spoken = parts[0], parts[1].strip().casefold(), parts[2].strip().casefold()
spoken = spoken_preprocessing(spoken)
if spoken == "<self>":
continue
if spoken == "" or written == "":
continue
if len(spoken.split(" ")) >= 100:
continue
k = (semiotic_class, spoken)
if k not in vcb:
vcb[k] = Counter()
vcb[k][written] += 1
with open(args.out_filename, "w", encoding="utf-8") as out:
for sem, spoken in vcb:
for written in vcb[(sem, spoken)]:
out.write(sem + "\t" + spoken + "\t" + written + "\t" + str(vcb[(sem, spoken)][written]) + "\n")
out.write(sem + "\t" + spoken + "\t" + spoken + "\t1\n")
| NeMo-main | examples/nlp/text_normalization_as_tagging/evaluation/get_multi_reference_vocab.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to prepare test corpus for the ThutmoseTaggerModel from Google Text Normalization dataset.
"""
import os
import re
from argparse import ArgumentParser
from collections import Counter
from typing import Dict, TextIO, Tuple
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import spoken_preprocessing
parser = ArgumentParser(description="Text Normalization Data Preprocessing for English")
parser.add_argument(
"--data_dir", required=True, type=str, help="Path to data directory with files like output-00000-of-00100.tsv"
)
parser.add_argument("--reference_vocab", required=True, type=str, help="Multi Reference vocabulary")
parser.add_argument("--output_file", required=True, type=str, help="Output file")
parser.add_argument(
"--sampling_count", required=True, type=int, help="Number of examples per class, you want, use -1 for all examples"
)
args = parser.parse_args()
def process_file(
inputname: str,
out: TextIO,
out_raw: TextIO,
reference_vcb: Dict[Tuple[str, str], Dict[str, int]],
sampling_vcb: Dict[str, int],
) -> None:
words = []
reference_words = [] # size may be different
semiotic_info = []
raw_lines = []
    sent_ok = args.sampling_count == -1
with open(inputname, "r", encoding="utf-8") as f:
for line in f:
if line.startswith("<eos>"):
if len(words) > 0 and sent_ok:
out.write(
" ".join(words) + "\t" + " ".join(reference_words) + "\t" + ";".join(semiotic_info) + "\n"
)
out_raw.write("\n".join(raw_lines) + "\n" + line)
words = []
reference_words = []
semiotic_info = []
raw_lines = []
                sent_ok = args.sampling_count == -1
else:
raw_lines.append(line.strip())
cls, written, spoken = line.strip().split("\t")
spoken = spoken_preprocessing(spoken)
written = written.casefold()
references = set()
if spoken == "sil":
continue
if spoken == "<self>":
words.append(written)
reference_words.append(written)
                    # if the reference is <self>, but the word has itn conversions in our dictionary, add them
                    # (a separate loop variable avoids shadowing this line's own class)
                    for itn_cls in ["CARDINAL", "ORDINAL", "DATE"]:  # date, e.g. sixties -> 60s
                        k = (itn_cls, written)
                        if k in reference_vcb:
                            for tr_variant in reference_vcb[k]:
                                references.add(tr_variant)
                            semiotic_info.append(
                                itn_cls
                                + " "
                                + str(len(words) - 1)
                                + " "
                                + str(len(words))
                                + " | "
                                + " | ".join(references)
                            )
                            break
continue
spoken_words = spoken.split()
words.extend(spoken_words)
k = (cls, spoken)
if k in reference_vcb:
for tr_variant in reference_vcb[k]:
references.add(tr_variant)
references.add(spoken)
references.add(written)
for tr_variant in list(references):
# 6,51 km² => 6,51 km 2
(tr_variant2, n2) = re.subn(r"²", " 2", tr_variant)
(tr_variant3, n3) = re.subn(r"³", " 3", tr_variant)
if n2 > 0:
references.add(tr_variant2)
if n3 > 0:
references.add(tr_variant3)
semiotic_info.append(
cls
+ " "
+ str(len(words) - len(spoken_words))
+ " "
+ str(len(words))
+ " | "
+ " | ".join(list(references))
)
                reference_words.append(written)  # written is already casefolded above
if cls not in sampling_vcb:
sampling_vcb[cls] = 0
if sampling_vcb[cls] < args.sampling_count:
sent_ok = True
sampling_vcb[cls] += 1
def main() -> None:
if not os.path.exists(args.data_dir):
raise ValueError(f"Data dir {args.data_dir} does not exist")
reference_vcb = {}
with open(args.reference_vocab, "r", encoding="utf-8") as f:
for line in f:
sem, spoken, written, freq = line.strip().split("\t")
k = (sem, spoken)
if k not in reference_vcb:
reference_vcb[k] = {}
reference_vcb[k][written] = int(freq)
sampling_vcb = Counter()
out = open(args.output_file, "w", encoding="utf-8")
out_raw = open(args.output_file + ".raw", "w", encoding="utf-8")
input_paths = sorted([os.path.join(args.data_dir, f) for f in os.listdir(args.data_dir)])
for inputname in input_paths:
process_file(inputname, out, out_raw, reference_vcb, sampling_vcb)
out.close()
out_raw.close()
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/text_normalization_as_tagging/evaluation/prepare_corpora_for_testing.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to compare the inference output of Thutmose tagger with multi_reference file.
The additional report is stored to a separate file for each semiotic class.
USAGE Example:
python eval_per_class.py \
--inference_file= \
--reference_file= \
--output_file=
The inference file is a tsv file in which the first column contains the predicted sentence text.
The reference file is a tsv file in which
the first column contains the input sentence text,
the second column contains the reference sentence text (taken from Google TN dataset)
the third column (optional) contains additional acceptable references for semiotic spans in this sentence.
E.g.
mizoguchi akiko september twenty ten<TAB>mizoguchi akiko september 2010<TAB>DATE 2 5 | sept 2010 | sep. 2010 ...
The script generates:
    a file with a report on accuracy per semiotic class (output_file).
    files (<output_file>.<semiotic_class>) with sentences containing errors in the corresponding semiotic span.
"""
import glob
import os
from argparse import ArgumentParser
from collections import Counter
parser = ArgumentParser(description="Compare inference output with multi-reference, print report per class")
parser.add_argument("--inference_file", type=str, required=True, help="Path to inference file 1")
parser.add_argument("--reference_file", type=str, required=True, help="Path to reference file")
parser.add_argument("--output_file", type=str, required=True, help="Path to output file")
args = parser.parse_args()
# Main code
if __name__ == '__main__':
# delete all class-specific reports, as they are created in the append mode
for f in glob.glob(args.output_file + ".*"):
os.remove(f)
total_count = Counter()
correct_count = Counter()
f_ref = open(args.reference_file, "r", encoding="utf-8")
f_infer = open(args.inference_file, "r", encoding="utf-8")
f_out = open(args.output_file, "w", encoding="utf-8")
lines_ref = f_ref.readlines()
lines_infer = f_infer.readlines()
f_ref.close()
f_infer.close()
if len(lines_ref) != len(lines_infer):
raise ValueError(
"Number of lines doesn't match: len(lines_ref)="
+ str(len(lines_ref))
+ "; len(lines_infer)="
+ str(len(lines_infer))
)
for i in range(len(lines_infer)):
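        # Each inference line carries 5 tab-separated columns: prediction, input
        # text, tags, tags after swaps, and semiotic span info; only the input,
        # the swapped tags and the semiotic info are used here.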
_, inp_str, _, tag_with_swap_str, semiotic = lines_infer[i].strip().split("\t")
input_words = inp_str.split(" ")
predicted_tags = tag_with_swap_str.split(" ")
predicted_words = predicted_tags[:]
for k in range(len(predicted_tags)):
t = predicted_tags[k]
if t == "<SELF>":
predicted_words[k] = input_words[k]
elif t == "<DELETE>":
predicted_words[k] = ""
else:
predicted_words[k] = predicted_words[k].replace(">", "").replace("<", "")
parts = lines_ref[i].strip().split("\t")
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Bad format: " + lines_ref[i])
if len(parts) == 3: # there are non-trivial semiotic spans
spans = parts[2].split(";")
for span in spans:
span_parts = span.split(" | ")
try:
sem, begin, end = span_parts[0].split(" ")
except Exception:
print("error: ", lines_ref[i])
continue
begin = int(begin)
end = int(end)
ok = False
predicted_span = " ".join(predicted_words[begin:end]).replace("_", " ").replace(" ", "").casefold()
input_span = " ".join(input_words[begin:end])
total_count[sem] += 1
for tr_variant in span_parts[1:]:
ref_span = tr_variant.replace("_", " ").replace(" ", "").casefold()
if ref_span == predicted_span:
ok = True
correct_count[sem] += 1
break
if not ok:
out_sem = open(args.output_file + "." + sem, "a", encoding="utf-8")
out_sem.write(
"error: pred="
+ " ".join(predicted_words[begin:end])
+ "; inp="
+ input_span
+ "; ref="
+ span
+ "\n"
)
out_sem.write("\tinput=" + " ".join(input_words) + "\n")
out_sem.write("\ttags=" + " ".join(predicted_tags) + "\n")
out_sem.write("\tpred=" + " ".join(predicted_words) + "\n")
out_sem.write("\tsemiotic=" + semiotic + "\n")
out_sem.write("\tref=" + parts[1] + "\n")
out_sem.close()
f_out.write("class\ttotal\tcorrect\terrors\taccuracy\n")
for sem in total_count:
f_out.write(
sem
+ "\t"
+ str(total_count[sem])
+ "\t"
+ str(correct_count[sem])
+ "\t"
+ str(total_count[sem] - correct_count[sem])
+ "\t"
+ str(correct_count[sem] / total_count[sem])
+ "\n"
)
f_out.close()
| NeMo-main | examples/nlp/text_normalization_as_tagging/evaluation/eval_per_class.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to compare the inference output of Thutmose tagger with multi_reference file
USAGE Example:
python eval.py \
--inference_file= \
--reference_file= \
--print_other_errors
The inference file is a tsv file in which the first column contains the predicted sentence text.
The reference file is a tsv file in which
the first column contains the input sentence text,
the second column contains the reference sentence text (taken from Google TN dataset)
the third column (optional) contains additional acceptable references for semiotic spans in this sentence.
E.g.
mizoguchi akiko september twenty ten<TAB>mizoguchi akiko september 2010<TAB>DATE 2 5 | sept 2010 | sep. 2010 ...
(to get a reference file see the last steps in examples/nlp/text_normalization_as_tagging/prepare_dataset_en.sh,
starting from ".../examples/nlp/text_normalization_as_tagging/evaluation/get_multi_reference_vocab.py"
)
The script outputs the following metrics:
Word Error Rate (WER) - an automatic metric commonly used in ASR.
It does not take into account additional references.
Sentence accuracy:
    The sentence is regarded as correct if its characters (without spaces) match the reference.
    It takes additional references into account.
    If at least one digit character doesn't match, the sentence is regarded as containing a Digit Error.
    If all digit characters match, but at least one non-digit character doesn't match,
    the sentence is regarded as containing an Other Error.
"""
import re
from argparse import ArgumentParser
from nemo.collections.asr.metrics.wer import word_error_rate
parser = ArgumentParser(description="Compare inference output with multi-reference")
parser.add_argument("--inference_file", type=str, required=True, help="Path to inference file")
parser.add_argument(
"--print_other_errors",
action='store_true',
help="Whether to print other errors, if false only digit errors will be printed",
)
parser.add_argument("--reference_file", type=str, required=True, help="Path to reference file")
args = parser.parse_args()
# Main code
if __name__ == "__main__":
inputs = []
references = [] # list(size=len(inputs)) of lists
    skip_ids = set()  # ids of sentences to be skipped during evaluation
with open(args.reference_file, "r", encoding="utf-8") as f:
for line in f:
multi_references = []
parts = line.strip().split("\t")
if len(parts) < 2 or len(parts) > 3:
raise ValueError("Bad format: " + line)
words = parts[0].split()
inputs.append(words)
if len(parts) == 3: # there are non-trivial semiotic spans
multi_references.append("")
input_position = 0
if "TELEPHONE" in parts[2] or "ELECTRONIC" in parts[2]:
skip_ids.add(len(references))
spans = parts[2].split(";")
multi_references_updated = []
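                # Expand the cross product of acceptable variants over all semiotic
                # spans: each partial reference collected so far is extended with the
                # input words preceding the span and then with each variant.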
for span in spans:
span_parts = span.split(" | ")
try:
sem, begin, end = span_parts[0].split(" ")
except Exception:
print("error: ", line)
continue
begin = int(begin)
end = int(end)
for ref in multi_references:
if len(span_parts) > 20 or len(multi_references_updated) > 20000:
print("warning: too many references: ", inputs[-1])
break
for tr_variant in span_parts[1:]:
multi_references_updated.append(
ref
+ " "
+ " ".join(inputs[-1][input_position:begin]) # copy needed words from input
+ " "
+ tr_variant
)
multi_references = multi_references_updated[:] # copy
multi_references_updated = []
input_position = end
for i in range(len(multi_references)): # copy needed words from the input end
multi_references[i] += " " + " ".join(inputs[-1][input_position : len(inputs[-1])])
# the last reference added is the actual one
multi_references.append(parts[1])
references.append(multi_references)
predictions = []
predicted_tags = []
predicted_semiotic = []
# load predictions
with open(args.inference_file, "r", encoding="utf-8") as f:
for line in f:
parts = line.strip().split("\t")
if len(parts) == 1:
predictions.append(parts[0].casefold())
predicted_tags.append([])
continue
if len(parts) != 5:
raise ValueError("Bad format: " + line)
prediction, inp_str, tag_str, tags_with_swap_str, semiotic = parts
predictions.append(prediction.casefold())
tags = tag_str.split(" ")
predicted_tags.append(tags)
predicted_semiotic.append(semiotic)
sentences_with_errors_on_digits = 0
correct_sentences_disregarding_space = 0
if len(inputs) != len(predictions) or len(inputs) != len(references):
raise ValueError(
"Length mismatch: len(inputs)="
+ str(len(inputs))
+ "; len(predictions)="
+ str(len(predictions))
+ "; len(references)="
+ str(len(references))
)
refs_for_wer = []
preds_for_wer = []
for i in range(len(inputs)):
ok_digit = False
ok_all = False
if i in skip_ids:
continue
refs_for_wer.append(references[i][-1])
preds_for_wer.append(predictions[i])
for ref in references[i]:
ref_digit_fragments = re.findall(r"\d+", ref)
pred_digit_fragments = re.findall(r"\d+", predictions[i])
if "".join(pred_digit_fragments) == "".join(ref_digit_fragments):
ok_digit = True
if predictions[i].replace("_", "").replace(" ", "") == ref.replace("_", "").replace(" ", ""):
ok_all = True
if not ok_digit:
print("digit error:")
print("\tinput=", " ".join(inputs[i]))
print("\ttags=", " ".join(predicted_tags[i]))
print("\tpred=", predictions[i])
print("\tsemiotic=", predicted_semiotic[i])
print("\tref=", references[i][-1]) # last reference is actual reference
sentences_with_errors_on_digits += 1
elif ok_all:
correct_sentences_disregarding_space += 1
elif args.print_other_errors:
print("other error:")
print("\tinput=", " ".join(inputs[i]))
print("\ttags=", " ".join(predicted_tags[i]))
print("\tpred=", predictions[i])
print("\tsemiotic=", predicted_semiotic[i])
print("\tref=", references[i][-1]) # last reference is actual reference
wer = word_error_rate(refs_for_wer, preds_for_wer)
print("WER: ", wer)
print(
"Sentence accuracy: ",
correct_sentences_disregarding_space / (len(inputs) - len(skip_ids)),
correct_sentences_disregarding_space,
)
print(
"digit errors: ",
sentences_with_errors_on_digits / (len(inputs) - len(skip_ids)),
sentences_with_errors_on_digits,
)
print(
"other errors: ",
(len(inputs) - len(skip_ids) - correct_sentences_disregarding_space - sentences_with_errors_on_digits)
/ (len(inputs) - len(skip_ids)),
len(inputs) - len(skip_ids) - correct_sentences_disregarding_space - sentences_with_errors_on_digits,
)
| NeMo-main | examples/nlp/text_normalization_as_tagging/evaluation/eval.py |
# Copyright (c) 2020, MeetKai Inc. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to evaluate a Text2SparqlModel.
To load the example Text2Sparql dataset, please refer to ./data/import_datasets.py.
To train a model, please refer to text2sparql.py.
***Setting the configs***
This script uses the `/examples/nlp/text2sparql/conf/text2sparql_config.yaml` config file by default.
You may update the config file directly, or override its values using command line arguments.
Another option is to use a different config file via the command line argument `--config-name=CONFIG_FILE_PATH`.
Please refer to text2sparql.py for detailed instructions on setting the configuration.
***How to run the script?***
- To reload and evaluate the model, run:
python evaluate_text2sparql.py \
model.test_ds.filepath="$TGT_DATA_DIR"/test_easy.tsv \
model.batch_size=16 \
model.nemo_path=./NeMo_logs/bart.nemo \
exp_manager.exp_dir=./NeMo_logs
"""
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.text2sparql import Text2SparqlModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="text2sparql_config")
def main(cfg: DictConfig) -> None:
logging.info(f"Config:\n {OmegaConf.to_yaml(cfg)}")
trainer = pl.Trainer(devices=cfg.trainer.devices, accelerator=cfg.trainer.accelerator)
nmt_model = Text2SparqlModel.restore_from(restore_path=cfg.model.nemo_path)
nmt_model.setup_test_data(cfg.model.test_ds)
results = trainer.test(nmt_model)
with open(cfg.model.test_ds.filepath, "r", encoding='utf-8') as f:
lines = f.readlines()
    lines[0] = lines[0].strip() + "\tpredictions\n"
for i, res in enumerate(results[0]["texts"]):
lines[i + 1] = lines[i + 1].strip() + f"\t{res}\n"
savepath = os.path.join(cfg.exp_manager.exp_dir, os.path.basename(cfg.model.test_ds.filepath))
with open(savepath, "w", encoding='utf-8') as f:
f.writelines(lines)
logging.info(f"Predictions saved to {savepath}")
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/text2sparql/evaluate_text2sparql.py |
# Copyright (c) 2020, MeetKai Inc. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to train and save a Text2SparqlModel.
Text2SparqlModel in NeMo supports sequence to sequence problems such as language translation
and text summarization, provided the data follows the format specified below.
***Data format***
Text2SparqlModel requires the data to be stored in TAB separated files (.tsv) with two columns of
sentence and label, where the first line is a header of format:
sentence[TAB]label
And each line is of the format:
[SENTENCE][TAB][LABEL]
If your dataset is stored in another format, you need to convert it to this format to use a
Text2SparqlModel.
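For example, a valid data file could look like this (illustrative rows only, with [TAB] denoting a tab character):
    sentence[TAB]label
    who is the author of hamlet ?[TAB]select ?x where { dbr:Hamlet dbo:author ?x }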
***Setting the configs***
This script uses the `/examples/nlp/text2sparql/conf/text2sparql_config.yaml` config file by default.
You may update the config file directly or by using the command line arguments.
Another option is to set a different config file via the command line argument `--config-name=CONFIG_FILE_PATH`.
A Text2SparqlModel's config file declares multiple important sections. They are:
- trainer: Arguments to be passed to PyTorch Lightning.
- model: All arguments that relate to the Model - language_model, tokenizers, datasets, optimizer, generate.
- exp_manager: Arguments to be passed to NeMo's experiment manager.
- hydra: Arguments to be passed to Hydra.
If using text2sparql_config.yaml, you must first update the following fields in the config:
- model.nemo_path: Model save path. Eg. [PATH]/bart.nemo
- model.data_dir: Path to data directory. Alternatively, you can adjust the file paths directly:
- model.train_ds.filepath
- model.validation_ds.filepath
- model.test_ds.filepath
- exp_manager.exp_dir: Directory to log results from the experiment.
It is highly recommended to also adjust these parameters as necessary:
- trainer.devices: Set to 0 to use CPU. Otherwise the number denotes the number of GPUs.
- trainer.max_epochs: Maximum number of epochs to train for.
- model.batch_size: 8 is sufficient to train a decent Bart model for Text2Sparql.
- model.max_seq_length: Maximum (tokenized) sequence length. 150 is sufficient for Text2Sparql.
- model.language_model.pretrained_model_name: End2end pretrained model name from huggingface.
- model.encoder_tokenizer.tokenizer_name: Pretrained tokenizer name from huggingface.
- model.decoder_tokenizer.tokenizer_name: The same as above, as the tokenizer will handle encoding and decoding.
- model.optim.lr: Learning rate.
You can also specify an encoder and decoder rather than using an end2end model like Bart by defining these parameters:
- model.language_model.pretrained_encoder_model_name: Pretrained huggingface encoder model name.
- model.encoder_tokenizer.tokenizer_name: Pretrained huggingface encoder tokenizer name.
- model.language_model.pretrained_decoder_model_name: Pretrained huggingface decoder model name.
- model.decoder_tokenizer.tokenizer_name: Pretrained huggingface decoder tokenizer name.
- model.language_model.pretrained_model_name: Set this to null.
***How to run the script?***
- First, download the data to TGT_DATA_DIR (see ./data/import_datasets.py):
SRC_DATA_DIR=./data/text2sparql_src
TGT_DATA_DIR=./data/text2sparql_tgt
python ./data/import_datasets.py \
--source_data_dir $SRC_DATA_DIR \
--target_data_dir $TGT_DATA_DIR
- And run the following to train and save the model:
python text2sparql.py \
model.train_ds.filepath="$TGT_DATA_DIR"/train.tsv \
model.validation_ds.filepath="$TGT_DATA_DIR"/test_easy.tsv \
model.test_ds.filepath="$TGT_DATA_DIR"/test_hard.tsv \
model.batch_size=16 \
model.nemo_path=./NeMo_logs/bart.nemo \
exp_manager.exp_dir=./NeMo_logs
"""
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.text2sparql import Text2SparqlModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="text2sparql_config")
def main(cfg: DictConfig) -> None:
logging.info(f"Config:\n {OmegaConf.to_yaml(cfg)}")
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
nmt_model = Text2SparqlModel(cfg.model, trainer=trainer)
trainer.fit(nmt_model)
if cfg.model.nemo_path:
nmt_model.save_to(cfg.model.nemo_path)
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/text2sparql/text2sparql.py |
# Copyright (c) 2020, MeetKai Inc. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script downloads Text2Sparql data and processes it into NeMo's neural machine translation dataset format.
Text2Sparql data consists of 3 files which are saved to source_data_dir:
- train_queries_v3.tsv
- test_easy_queries_v3.tsv
- test_hard_queries_v3.tsv
After processing, the script saves them to the target_data_dir as:
- train.tsv
- test_easy.tsv
- test_hard.tsv
You may run it with:
python import_datasets.py \
--source_data_dir ./text2sparql_src \
--target_data_dir ./text2sparql_tgt
"""
import argparse
import csv
import os
from urllib.request import Request, urlopen
from nemo.collections.nlp.data.data_utils.data_preprocessing import MODE_EXISTS_TMP, if_exist
from nemo.utils import logging
base_url = "https://m.meetkai.com/public_datasets/knowledge/"
prefix_map = {
"train_queries_v3.tsv": "train.tsv",
"test_easy_queries_v3.tsv": "test_easy.tsv",
"test_hard_queries_v3.tsv": "test_hard.tsv",
}
def download_text2sparql(infold: str):
"""Downloads text2sparql train, test_easy, and test_hard data
Args:
infold: save directory path
"""
os.makedirs(infold, exist_ok=True)
for prefix in prefix_map:
url = base_url + prefix
logging.info(f"Downloading: {url}")
if if_exist(infold, [prefix]):
logging.info("** Download file already exists, skipping download")
else:
req = Request(url, headers={"User-Agent": "Mozilla/5.0"})
with open(os.path.join(infold, prefix), "wb") as handle:
handle.write(urlopen(req, timeout=20).read())
def process_text2sparql(infold: str, outfold: str, do_lower_case: bool):
""" Process and convert MeetKai's text2sparql datasets to NeMo's neural machine translation format.
Args:
infold: directory path to raw text2sparql data containing
train.tsv, test_easy.tsv, test_hard.tsv
outfold: output directory path to save formatted data for NeuralMachineTranslationDataset
the first line is header (sentence [tab] label)
each line should be [sentence][tab][label]
do_lower_case: if true, convert all sentences and labels to lower
"""
logging.info(f"Processing Text2Sparql dataset and storing at: {outfold}")
os.makedirs(outfold, exist_ok=True)
dataset_name = "Text2Sparql"
for prefix in prefix_map:
input_file = os.path.join(infold, prefix)
output_file = os.path.join(outfold, prefix_map[prefix])
if if_exist(outfold, [prefix_map[prefix]]):
logging.info(f"** {MODE_EXISTS_TMP.format(prefix_map[prefix], dataset_name, output_file)}")
continue
if not if_exist(infold, [prefix]):
logging.info(f"** {prefix} of {dataset_name}" f" is skipped as it was not found")
continue
assert input_file != output_file, "input file cannot equal output file"
with open(input_file, "r") as in_file:
with open(output_file, "w") as out_file:
reader = csv.reader(in_file, delimiter="\t")
# replace headers
out_file.write("sentence\tlabel\n")
next(reader)
for line in reader:
sentence = line[0]
label = line[1]
if do_lower_case:
sentence = sentence.lower()
label = label.lower()
out_file.write(f"{sentence}\t{label}\n")
if __name__ == "__main__":
# Parse the command-line arguments.
parser = argparse.ArgumentParser(description="Process and convert datasets into NeMo's format")
parser.add_argument(
"--source_data_dir", required=True, type=str, help="Path to the folder containing the dataset files"
)
parser.add_argument("--target_data_dir", required=True, type=str, help="Path to save the processed dataset")
parser.add_argument("--do_lower_case", action="store_true")
args = parser.parse_args()
source_dir = args.source_data_dir
target_dir = args.target_data_dir
do_lower_case = args.do_lower_case
download_text2sparql(infold=source_dir)
process_text2sparql(infold=source_dir, outfold=target_dir, do_lower_case=do_lower_case)
| NeMo-main | examples/nlp/text2sparql/data/import_datasets.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
## Tasks
This script works with all GLUE Benchmark tasks; more details about the GLUE Benchmark can be found at
https://gluebenchmark.com/
More details on how to use this script could be found in tutorials/nlp/GLUE_Benchmark.ipynb
## Model Training
To train GLUEModel with the default config file, run:
python glue_benchmark.py \
model.dataset.data_dir=<PATH_TO_DATA_DIR> \
model.task_name=TASK_NAME \
trainer.max_epochs=<NUM_EPOCHS> \
trainer.devices="[<CHANGE_TO_GPU_YOU_WANT_TO_USE>]
Supported task names:
["cola", "sst-2", "mrpc", "sts-b", "qqp", "mnli", "qnli", "rte", "wnli"]
Note: the MNLI task includes both matched and mismatched dev sets
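## Load a saved model
A minimal sketch (assuming `model.nemo_path` points to an existing `.nemo` checkpoint saved by this script):
    from nemo.collections.nlp.models import GLUEModel
    model = GLUEModel.restore_from(restore_path="<PATH_TO_NEMO_FILE>")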
"""
import os
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import GLUEModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_name="glue_benchmark_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(**cfg.trainer)
exp_manager_cfg = cfg.get("exp_manager", None)
if exp_manager_cfg:
exp_manager_cfg.name = cfg.model.task_name
logging.info(f'Setting task_name to {exp_manager_cfg.name} in exp_manager')
exp_manager(trainer, exp_manager_cfg)
if cfg.model.nemo_path and os.path.exists(cfg.model.nemo_path):
model = GLUEModel.restore_from(cfg.model.nemo_path)
logging.info(f'Restoring model from {cfg.model.nemo_path}')
model.update_data_dir(data_dir=cfg.model.dataset.data_dir)
model.setup_training_data()
model.setup_multiple_validation_data()
trainer.fit(model)
else:
model = GLUEModel(cfg.model, trainer=trainer)
trainer.fit(model)
if cfg.model.nemo_path:
model.save_to(cfg.model.nemo_path)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/glue_benchmark/glue_benchmark.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to train, evaluate and perform inference with the TextClassificationModel.
TextClassificationModel in NeMo supports text classification problems such as sentiment analysis or
domain/intent detection for dialogue systems, as long as the data follows the format specified below.
***Data format***
TextClassificationModel requires the data to be stored in TAB separated files (.tsv) with two columns of sentence and
label. Each line of the data file contains a text sequence, where words are separated with spaces and the label is
separated with [TAB], i.e.:
[WORD][SPACE][WORD][SPACE][WORD][TAB][LABEL]
For example:
hide new secretions from the parental units[TAB]0
that loves its characters and communicates something rather beautiful about human nature[TAB]1
...
If your dataset is stored in another format, you need to convert it to this format to use the TextClassificationModel.
***Setting the configs***
The model and the PT trainer are defined in a config file which declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - language model, tokenizer, head classifier, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `/examples/nlp/text_classification/conf/text_classification_config.yaml` config file
by default. You may update the config file directly or by using the command line arguments.
Another option is to set a different config file via the command line argument `--config-name=CONFIG_FILE_PATH`.
You first need to set `model.dataset.num_classes` in the config file, which specifies the number of classes in the dataset.
Notice that some config lines, including `model.dataset.num_classes`, have `???` as their value; this means that values
for these fields are required to be specified by the user. We need to set `model.train_ds.file_path`,
`model.validation_ds.file_path`, and `model.test_ds.file_path` in the config file to the paths of the train, validation,
and test files if they exist. We may do it by updating the config file or by setting them from the command line.
***How to run the script?***
For example, the following would train a model for 50 epochs on 2 GPUs on a classification task with 2 classes:
# python text_classification_with_bert.py \
model.dataset.num_classes=2 \
model.train_ds.file_path=PATH_TO_TRAIN_FILE \
model.validation_ds.file_path=PATH_TO_VAL_FILE \
trainer.max_epochs=50 \
trainer.devices=2
This script would also reload the last checkpoint after the training is done, evaluate it on the dev set,
and then perform inference on some sample queries.
By default, this script uses the examples/nlp/text_classification/conf/text_classification_config.yaml config file, and
you may update all the params in the config file from the command line. You may also use another config file like this:
# python text_classification_with_bert.py --config-name=PATH_TO_CONFIG_FILE \
model.dataset.num_classes=2 \
model.train_ds.file_path=PATH_TO_TRAIN_FILE \
model.validation_ds.file_path=PATH_TO_VAL_FILE \
trainer.max_epochs=50 \
trainer.devices=2
***Load a saved model***
This script saves the model after training into the '.nemo' checkpoint file specified by `nemo_path` in the model config.
You may restore the saved model like this:
model = TextClassificationModel.restore_from(restore_path=NEMO_FILE_PATH)
***Evaluating a saved model on another dataset***
# If you want to evaluate the saved model on another dataset, you may restore the model and create a new data loader:
eval_model = TextClassificationModel.restore_from(restore_path=checkpoint_path)
# Then, you may create a dataloader config for evaluation:
eval_config = OmegaConf.create(
{'file_path': cfg.model.test_ds.file_path, 'batch_size': 64, 'shuffle': False, 'num_workers': 3}
)
eval_model.setup_test_data(test_data_config=eval_config)
# You need to create a new trainer:
eval_trainer = pl.Trainer(devices=1)
eval_model.set_trainer(eval_trainer)
eval_trainer.test(model=eval_model, verbose=False)
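***Inference with a restored model***
# A minimal sketch (assuming a trained checkpoint; `classifytext` is used the same way later in this script):
results = eval_model.classifytext(queries=["a sample query"], batch_size=1, max_seq_length=512)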
"""
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.text_classification import TextClassificationModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="text_classification_config")
def main(cfg: DictConfig) -> None:
logging.info(f'\nConfig Params:\n{OmegaConf.to_yaml(cfg)}')
try:
strategy = NLPDDPStrategy()
except (ImportError, ModuleNotFoundError):
strategy = None
trainer = pl.Trainer(strategy=strategy, **cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if not cfg.model.train_ds.file_path:
raise ValueError("'train_ds.file_path' need to be set for the training!")
model = TextClassificationModel(cfg.model, trainer=trainer)
logging.info("===========================================================================================")
logging.info('Starting training...')
trainer.fit(model)
logging.info('Training finished!')
logging.info("===========================================================================================")
if cfg.model.nemo_path:
# '.nemo' file contains the last checkpoint and the params to initialize the model
model.save_to(cfg.model.nemo_path)
logging.info(f'Model is saved into `.nemo` file: {cfg.model.nemo_path}')
# We evaluate the trained model on the test set if test_ds is set in the config file
if cfg.model.test_ds.file_path:
logging.info("===========================================================================================")
logging.info("Starting the testing of the trained model on test set...")
trainer.test(model=model, ckpt_path=None, verbose=False)
logging.info("Testing finished!")
logging.info("===========================================================================================")
# perform inference on a list of queries.
if "infer_samples" in cfg.model and cfg.model.infer_samples:
logging.info("===========================================================================================")
logging.info("Starting the inference on some sample queries...")
# max_seq_length=512 is the maximum length BERT supports.
results = model.classifytext(queries=cfg.model.infer_samples, batch_size=16, max_seq_length=512)
logging.info('The prediction results of some sample queries with the trained model:')
for query, result in zip(cfg.model.infer_samples, results):
logging.info(f'Query : {query}')
logging.info(f'Predicted label: {result}')
logging.info("Inference finished!")
logging.info("===========================================================================================")
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/text_classification/text_classification_with_bert.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script runs model parallel text classification evaluation.
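Example usage (a sketch; assumes a trained `.nemo` checkpoint and a test set configured in the yaml):
    python model_parallel_text_classification_evaluation.py \
        model.nemo_path=PATH_TO_NEMO_FILE \
        model.test_ds.file_path=PATH_TO_TEST_FILE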
"""
import pytorch_lightning as pl
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.text_classification import TextClassificationModel
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="text_classification_config")
def main(cfg: DictConfig) -> None:
logging.info(f'\nConfig Params:\n{OmegaConf.to_yaml(cfg)}')
trainer = pl.Trainer(strategy=NLPDDPStrategy(), **cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
# TODO: can we drop strict=False
model = TextClassificationModel.restore_from(cfg.model.nemo_path, trainer=trainer, strict=False)
model.setup_test_data(test_data_config=cfg.model.test_ds)
trainer.test(model=model, ckpt_path=None)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/text_classification/model_parallel_text_classification_evaluation.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to process and import IMDB, ChemProt, SST-2, and THUCnews datasets into NeMo's format.
You may run it as the following:
python import_datasets.py \
--dataset_name DATASET_NAME \
--target_data_dir TARGET_PATH \
--source_data_dir SOURCE_PATH
The dataset should be specified by "DATASET_NAME" which can be from ["sst-2", "chemprot", "imdb", "thucnews"].
It reads the data from "SOURCE_PATH" folder, processes and converts the data into NeMo's format.
Then writes the results into "TARGET_PATH" folder.
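Each line of the resulting files follows NeMo's text classification format, e.g. (illustrative row, with [TAB]
denoting a tab character):
    this movie was surprisingly good[TAB]1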
"""
import argparse
import csv
import glob
import os
from os.path import exists
import tqdm
from nemo.utils import logging
def process_imdb(infold, outfold, uncased, modes=['train', 'test']):
if not os.path.exists(infold):
link = 'https://ai.stanford.edu/~amaas/data/sentiment/'
raise ValueError(
f'Data not found at {infold}. '
f'Please download IMDB reviews dataset from {link} and '
f'extract it into the folder specified by source_data_dir argument.'
)
logging.info(f'Processing IMDB dataset and store at {outfold}')
os.makedirs(outfold, exist_ok=True)
outfiles = {}
for mode in modes:
outfiles[mode] = open(os.path.join(outfold, mode + '.tsv'), 'w')
for sent in ['neg', 'pos']:
if sent == 'neg':
label = 0
else:
label = 1
files = glob.glob(f'{infold}/{mode}/{sent}/*.txt')
for file in files:
with open(file, 'r') as f:
review = f.read().strip()
if uncased:
review = review.lower()
review = review.replace("<br />", "")
outfiles[mode].write(f'{review}\t{label}\n')
for mode in modes:
outfiles[mode].close()
class_labels_file = open(os.path.join(outfold, 'label_ids.tsv'), 'w')
class_labels_file.write('negative\npositive\n')
class_labels_file.close()
def process_sst2(infold, outfold, uncased, splits=['train', 'dev']):
"""Process sst2 dataset."""
# "test" split doesn't have labels, so it is skipped
if not os.path.exists(infold):
link = 'https://dl.fbaipublicfiles.com/glue/data/SST-2.zip'
raise ValueError(
f'Data not found at {infold}. Please download SST-2 dataset from `{link}` and '
f'extract it into the folder specified by `source_data_dir` argument.'
)
logging.info(f'Processing SST-2 dataset')
os.makedirs(outfold, exist_ok=True)
def _read_tsv(input_file, quotechar=None):
"""Read a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
for split in splits:
# Load input file.
input_file = os.path.join(infold, split + '.tsv')
lines = _read_tsv(input_file)
# Create output.
outfile = open(os.path.join(outfold, split + '.tsv'), 'w')
# Copy lines, skip the header (line 0).
for line in lines[1:]:
text = line[0]
label = line[1]
# Lowercase when required.
if uncased:
text = text.lower()
# Write output.
outfile.write(f'{text}\t{label}\n')
# Close file.
outfile.close()
class_labels_file = open(os.path.join(outfold, 'label_ids.tsv'), 'w')
class_labels_file.write('negative\npositive\n')
class_labels_file.close()
logging.info(f'Result stored at {outfold}')
def process_chemprot(source_dir, target_dir, uncased, modes=['train', 'test', 'dev']):
if not os.path.exists(source_dir):
link = 'https://github.com/arwhirang/recursive_chemprot/tree/master/Demo/tree_LSTM/data'
raise ValueError(f'Data not found at {source_dir}. ' f'Please download ChemProt from {link}.')
logging.info(f'Processing Chemprot dataset and store at {target_dir}')
os.makedirs(target_dir, exist_ok=True)
naming_map = {'train': 'trainingPosit_chem', 'test': 'testPosit_chem', 'dev': 'developPosit_chem'}
def _read_tsv(input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
outfiles = {}
label_mapping = {}
out_label_mapping = open(os.path.join(target_dir, 'label_mapping.tsv'), 'w')
for mode in modes:
outfiles[mode] = open(os.path.join(target_dir, mode + '.tsv'), 'w')
input_file = os.path.join(source_dir, naming_map[mode])
lines = _read_tsv(input_file)
for line in lines:
text = line[1]
label = line[2]
if label == "True":
label = line[3]
if uncased:
text = text.lower()
if label not in label_mapping:
out_label_mapping.write(f'{label}\t{len(label_mapping)}\n')
label_mapping[label] = len(label_mapping)
label = label_mapping[label]
outfiles[mode].write(f'{text}\t{label}\n')
for mode in modes:
outfiles[mode].close()
out_label_mapping.close()
def process_thucnews(infold, outfold):
modes = ['train', 'test']
train_size = 0.8
if not os.path.exists(infold):
        link = 'http://thuctc.thunlp.org/'
raise ValueError(f'Data not found at {infold}. ' f'Please download THUCNews from {link}.')
logging.info(f'Processing THUCNews dataset and store at {outfold}')
os.makedirs(outfold, exist_ok=True)
outfiles = {}
for mode in modes:
outfiles[mode] = open(os.path.join(outfold, mode + '.tsv'), 'a+', encoding='utf-8')
categories = ['体育', '娱乐', '家居', '彩票', '房产', '教育', '时尚', '时政', '星座', '游戏', '社会', '科技', '股票', '财经']
for category in categories:
label = categories.index(category)
category_files = glob.glob(f'{infold}/{category}/*.txt')
test_num = int(len(category_files) * (1 - train_size))
test_files = category_files[:test_num]
train_files = category_files[test_num:]
for mode in modes:
logging.info(f'Processing {mode} data of the category {category}')
if mode == 'test':
files = test_files
else:
files = train_files
if len(files) == 0:
logging.info(f'Skipping category {category} for {mode} mode')
continue
for file in tqdm.tqdm(files):
with open(file, 'r', encoding='utf-8') as f:
news = f.read().strip().replace('\r', '')
news = news.replace('\n', '').replace('\t', ' ')
outfiles[mode].write(f'{news}\t{label}\n')
for mode in modes:
outfiles[mode].close()
if __name__ == "__main__":
# Parse the command-line arguments.
parser = argparse.ArgumentParser(description="Process and convert datasets into NeMo\'s format.")
parser.add_argument("--dataset_name", required=True, type=str, choices=['imdb', 'thucnews', 'chemprot'])
parser.add_argument(
"--source_data_dir", required=True, type=str, help='The path to the folder containing the dataset files.'
)
parser.add_argument("--target_data_dir", required=True, type=str)
parser.add_argument("--do_lower_case", action='store_true')
args = parser.parse_args()
dataset_name = args.dataset_name
do_lower_case = args.do_lower_case
source_dir = args.source_data_dir
target_dir = args.target_data_dir
if not exists(source_dir):
raise FileNotFoundError(f"{source_dir} does not exist.")
if dataset_name == 'imdb':
process_imdb(source_dir, target_dir, do_lower_case)
elif dataset_name == 'thucnews':
process_thucnews(source_dir, target_dir)
elif dataset_name == "chemprot":
process_chemprot(source_dir, target_dir, do_lower_case)
elif dataset_name == "sst-2":
process_sst2(source_dir, target_dir, do_lower_case)
else:
raise ValueError(
            f'Dataset {dataset_name} is not supported. '
+ "Please make sure that you build the preprocessing process for it. "
+ "NeMo's format assumes that a data file has a header and each line of the file follows "
+ "the format: text [TAB] label. Label is assumed to be an integer."
)
| NeMo-main | examples/nlp/text_classification/data/import_datasets.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to visualize the errors made by a (duplex) TN system.
More specifically, after running the evaluation script `duplex_text_normalization_test.py`,
a log file containing info about the errors will be generated. The location of this file
is determined by the argument `inference.errors_log_fp`. After that, we can use this
script to generate a HTML visualization.
USAGE Example:
# python analyze_errors.py \
--errors_log_fp=PATH_TO_ERRORS_LOG_FILE_PATH \
--visualization_fp=PATH_TO_VISUALIZATION_FILE_PATH
"""
from argparse import ArgumentParser
from typing import List
from nemo.collections.nlp.data.text_normalization import constants
# Longest Common Subsequence
def lcs(X, Y):
""" Function for finding the longest common subsequence between two lists.
In this script, this function is particular used for aligning between the
ground-truth output string and the predicted string (for visualization purpose).
Args:
X: a list
Y: a list
Returns: a list which is the longest common subsequence between X and Y
"""
m, n = len(X), len(Y)
L = [[0 for x in range(n + 1)] for x in range(m + 1)]
# Following steps build L[m+1][n+1] in bottom up fashion. Note
# that L[i][j] contains length of LCS of X[0..i-1] and Y[0..j-1]
for i in range(m + 1):
for j in range(n + 1):
if i == 0 or j == 0:
L[i][j] = 0
elif X[i - 1] == Y[j - 1]:
L[i][j] = L[i - 1][j - 1] + 1
else:
L[i][j] = max(L[i - 1][j], L[i][j - 1])
    # Following code is used to reconstruct the LCS
index = L[m][n]
    # Create an array to store the LCS tokens
lcs = [''] * (index + 1)
lcs[index] = ''
# Start from the right-most-bottom-most corner and
# one by one store characters in lcs[]
i = m
j = n
while i > 0 and j > 0:
        # If the current characters in X and Y are the same, then
        # the current character is part of the LCS
if X[i - 1] == Y[j - 1]:
lcs[index - 1] = X[i - 1]
i -= 1
j -= 1
index -= 1
        # If not the same, then move in the direction of
        # the larger of the two neighboring values
elif L[i - 1][j] > L[i][j - 1]:
i -= 1
else:
j -= 1
return lcs[:-1]
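# Illustrative usage (not from the original source):
#     lcs(['a', 'b', 'c', 'd'], ['a', 'c', 'd']) returns ['a', 'c', 'd']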
# Classes
class ErrorCase:
"""
This class represents an error case
Args:
        _input: Original input string
        target: Ground-truth target string
        pred: Predicted string
        classes: A string with the ground-truth semiotic classes of the input spans
        mode: A string that indicates the mode (i.e., constants.ITN_MODE or constants.TN_MODE)
"""
def __init__(self, _input: str, target: str, pred: str, classes: str, mode: str):
self._input = _input
self.target = target
self.pred = pred
self.mode = mode
self.classes = classes
# Tokens
self.target_tokens = self.target.split(' ')
self.pred_tokens = self.pred.split(' ')
# LCS
        lcs_tokens = lcs(self.target_tokens, self.pred_tokens)
        target_tokens_highlight = [False] * len(self.target_tokens)
        pred_tokens_highlight = [False] * len(self.pred_tokens)
        target_idx, pred_idx = 0, 0
        for token in lcs_tokens:
            while self.target_tokens[target_idx] != token:
                target_idx += 1
            while self.pred_tokens[pred_idx] != token:
                pred_idx += 1
            target_tokens_highlight[target_idx] = True
            pred_tokens_highlight[pred_idx] = True
            target_idx += 1
            pred_idx += 1
        # Spans
        self.target_spans = self.get_spans(target_tokens_highlight)
        self.pred_spans = self.get_spans(pred_tokens_highlight)
# Determine unhighlighted target spans
unhighlighted_target_spans = []
for ix, t in enumerate(self.target_spans):
if not t[-1]:
unhighlighted_target_spans.append((ix, t))
# Determine unhighlighted pred spans
unhighlighted_pred_spans = []
for ix, t in enumerate(self.pred_spans):
if not t[-1]:
unhighlighted_pred_spans.append((ix, t))
@classmethod
def from_lines(cls, lines: List[str], mode: str):
"""
This method returns an instance of ErrorCase from raw string lines.
Args:
lines: A list of raw string lines for the error case.
mode: A string indicates the mode (i.e., constants.ITN_MODE or constants.TN_MODE)
Returns: an instance of ErrorCase.
"""
for line in lines:
if line.startswith('Original Input'):
_input = line[line.find(':') + 1 :].strip()
elif line.startswith('Predicted Str'):
pred = line[line.find(':') + 1 :].strip()
elif line.startswith('Ground-Truth'):
target = line[line.find(':') + 1 :].strip()
elif line.startswith('Ground Classes'):
classes = line[line.find(':') + 1 :].strip()
return cls(_input, target, pred, classes, mode)
def get_html(self):
"""
This method returns a HTML string representing this error case instance.
Returns: a string contains the HTML representing this error case instance.
"""
html_str = ''
# Input
input_form = 'Written' if self.mode == constants.TN_MODE else 'Spoken'
padding_multiplier = 1 if self.mode == constants.TN_MODE else 2
        # Use non-breaking spaces so the padding survives HTML rendering
        padding_spaces = ''.join(['&nbsp;'] * padding_multiplier)
input_str = f'<b>[Input ({input_form})]{padding_spaces}</b>: {self._input}</br>\n'
html_str += input_str + ' '
# Target
target_html = self.get_spans_html(self.target_spans, self.target_tokens)
target_form = 'Spoken' if self.mode == constants.TN_MODE else 'Written'
target_str = f'<b>[Target ({target_form})]</b>: {target_html}</br>\n'
html_str += target_str + ' '
# Pred
pred_html = self.get_spans_html(self.pred_spans, self.pred_tokens)
padding_multiplier = 10 if self.mode == constants.TN_MODE else 11
        padding_spaces = ''.join(['&nbsp;'] * padding_multiplier)
pred_str = f'<b>[Prediction]{padding_spaces}</b>: {pred_html}</br>\n'
html_str += pred_str + ' '
# Classes
padding_multiplier = 15 if self.mode == constants.TN_MODE else 16
        padding_spaces = ''.join(['&nbsp;'] * padding_multiplier)
class_str = f'<b>[Classes]{padding_spaces}</b>: {self.classes}</br>\n'
html_str += class_str + ' '
# Space
html_str += '</br>\n'
return html_str
    def get_spans(self, tokens_highlight):
        """
        This method extracts the list of spans.
        Args:
            tokens_highlight: A list of boolean values where each value indicates whether a token needs to be highlighted.
        Returns:
            spans: A list of spans. Each span is represented by a tuple of 3 elements: (1) Start Index (2) End Index (3) A boolean value indicating whether the span needs to be highlighted.
        """
        spans, nb_tokens = [], len(tokens_highlight)
        cur_start_idx, cur_bool_val = 0, tokens_highlight[0]
        for idx in range(nb_tokens):
            if idx == nb_tokens - 1:
                if tokens_highlight[idx] != cur_bool_val:
                    spans.append((cur_start_idx, nb_tokens - 2, cur_bool_val))
                    spans.append((nb_tokens - 1, nb_tokens - 1, tokens_highlight[idx]))
                else:
                    spans.append((cur_start_idx, nb_tokens - 1, cur_bool_val))
            else:
                if tokens_highlight[idx] != cur_bool_val:
                    spans.append((cur_start_idx, idx - 1, cur_bool_val))
                    cur_start_idx, cur_bool_val = idx, tokens_highlight[idx]
        return spans
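    # Illustrative example (not from the original source):
    #     get_spans([True, True, False]) returns [(0, 1, True), (2, 2, False)]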
def get_spans_html(self, spans, tokens):
"""
This method generates a HTML string for a string sequence from its spans.
Args:
            spans: A list of contiguous spans in a sequence. Each span is represented by a tuple of 3 elements: (1) Start Index (2) End Index (3) A boolean value indicating whether the span needs to be highlighted.
tokens: All tokens in the sequence
Returns:
html_str: A HTML string for the string sequence.
"""
html_str = ''
        for start, end, highlighted in spans:
            color = 'red' if highlighted else 'black'
span_tokens = tokens[start : end + 1]
span_str = '<span style="color:{}">{}</span> '.format(color, ' '.join(span_tokens))
html_str += span_str
return html_str
# Main function for analysis
def analyze(errors_log_fp: str, visualization_fp: str):
"""
This method generates a HTML visualization of the error cases logged in a log file.
Args:
errors_log_fp: Path to the error log file
visualization_fp: Path to the output visualization file
"""
# Read lines from errors log
with open(errors_log_fp, 'r', encoding='utf-8') as f:
lines = f.readlines()
# Process lines
tn_error_cases, itn_error_cases = [], []
for ix in range(0, len(lines), 8):
mode_line = lines[ix]
info_lines = lines[ix + 1 : ix + 7]
# Append new error case
if mode_line.startswith('Forward Problem'):
mode = constants.TN_MODE
tn_error_cases.append(ErrorCase.from_lines(info_lines, mode))
elif mode_line.startswith('Backward Problem'):
mode = constants.ITN_MODE
itn_error_cases.append(ErrorCase.from_lines(info_lines, mode))
# Basic stats
print('---- Text Normalization ----')
print('Number of TN errors: {}'.format(len(tn_error_cases)))
print('---- Inverse Text Normalization ---- ')
print('Number of ITN errors: {}'.format(len(itn_error_cases)))
# Produce a visualization
with open(visualization_fp, 'w+', encoding='utf-8') as f:
# Appendix
f.write('Appendix</br>')
f.write('<a href="#tn_section">Text Normalization Analysis.</a></br>')
f.write('<a href="#itn_section">Inverse Text Normalization Analysis.</a>')
# TN Section
f.write('<h2 id="tn_section">Text Normalization</h2>\n')
for errorcase in tn_error_cases:
f.write(errorcase.get_html())
# ITN Section
f.write('<h2 id="itn_section">Inverse Text Normalization</h2>\n')
for errorcase in itn_error_cases:
f.write(errorcase.get_html())
if __name__ == '__main__':
# Parse argument
parser = ArgumentParser()
parser.add_argument('--errors_log_fp', help='Path to the error log file', required=True)
parser.add_argument('--visualization_fp', help='Path to the output visualization file', required=True)
args = parser.parse_args()
analyze(args.errors_log_fp, args.visualization_fp)
| NeMo-main | examples/nlp/duplex_text_normalization/analyze_errors.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to run inference with the DuplexTextNormalizationModel.
DuplexTextNormalizationModel is essentially a wrapper class around DuplexTaggerModel and DuplexDecoderModel.
Therefore, two trained NeMo models should be specified to run the joint evaluation
(one is a trained DuplexTaggerModel and the other is a trained DuplexDecoderModel).
This script can perform inference for 2 settings:
1. inference from a raw file (no labels required). Each line of the file represents a single example for inference.
Specify the input file and batch size with the inference.from_file and inference.batch_size parameters.
python duplex_text_normalization_infer.py \
tagger_pretrained_model=PATH_TO_TRAINED_TAGGER \
decoder_pretrained_model=PATH_TO_TRAINED_DECODER \
mode={tn,itn,joint} \
lang={en,ru,de} \
inference.from_file=PATH_TO_RAW_TEXT_FILE
The predictions will be saved to files with "_tn" and "_itn" suffixes.
2. Interactive inference (one query at a time), set inference.interactive to True to enter the interactive mode
python duplex_text_normalization_infer.py \
tagger_pretrained_model=PATH_TO_TRAINED_TAGGER \
decoder_pretrained_model=PATH_TO_TRAINED_DECODER \
mode={tn,itn,joint} \
lang={en,ru,de} \
inference.interactive=true
This script uses the `/examples/nlp/duplex_text_normalization/conf/duplex_tn_config.yaml`
config file by default. Another option is to set a different config file via the command
line argument `--config-name=CONFIG_FILE_PATH`.
"""
import os
from typing import List
from helpers import DECODER_MODEL, TAGGER_MODEL, instantiate_model_and_trainer
from nemo_text_processing.text_normalization.data_loader_utils import post_process_punct
from nn_wfst.en.electronic.normalize import ElectronicNormalizer
from nn_wfst.en.whitelist.normalize import WhitelistNormalizer
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.data.text_normalization import constants
from nemo.collections.nlp.models import DuplexTextNormalizationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="duplex_tn_config")
def main(cfg: DictConfig) -> None:
logging.debug(f'Config Params: {OmegaConf.to_yaml(cfg)}')
lang = cfg.lang
if cfg.decoder_pretrained_model is None or cfg.tagger_pretrained_model is None:
raise ValueError("Both pre-trained models (DuplexTaggerModel and DuplexDecoderModel) should be provided.")
tagger_trainer, tagger_model = instantiate_model_and_trainer(cfg, TAGGER_MODEL, False)
decoder_trainer, decoder_model = instantiate_model_and_trainer(cfg, DECODER_MODEL, False)
decoder_model.max_sequence_len = 512
tagger_model.max_sequence_len = 512
tn_model = DuplexTextNormalizationModel(tagger_model, decoder_model, lang)
if lang == constants.ENGLISH:
normalizer_electronic = ElectronicNormalizer(input_case="cased", lang=lang, deterministic=True)
normalizer_whitelist = WhitelistNormalizer(input_case="cased", lang=lang, deterministic=True)
if cfg.inference.get("from_file", False):
text_file = cfg.inference.from_file
logging.info(f'Running inference on {text_file}...')
if not os.path.exists(text_file):
raise ValueError(f'{text_file} not found.')
with open(text_file, 'r') as f:
lines = f.readlines()
if lang == constants.ENGLISH:
new_lines = normalizer_electronic.normalize_list(lines)
lines = [
post_process_punct(input=input_, normalized_text=norm_) for input_, norm_ in zip(lines, new_lines)
]
new_lines = normalizer_whitelist.normalize_list(lines)
lines = [
post_process_punct(input=input_, normalized_text=norm_) for input_, norm_ in zip(lines, new_lines)
]
def _get_predictions(lines: List[str], mode: str, batch_size: int, text_file: str):
""" Runs inference on a batch data without labels and saved predictions to a file. """
assert mode in ['tn', 'itn']
file_name, extension = os.path.splitext(text_file)
batch, all_preds = [], []
for i, line in enumerate(lines):
batch.append(line.strip())
if len(batch) == batch_size or i == len(lines) - 1:
                    outputs = tn_model._infer(batch, [constants.DIRECTIONS_TO_MODE[mode]] * len(batch))
all_preds.extend([x for x in outputs[-1]])
batch = []
assert len(all_preds) == len(lines)
            out_file = f'{file_name}_{mode}{extension}'
            with open(out_file, 'w') as f_out:
                f_out.write("\n".join(all_preds))
            logging.info(f'Predictions for {mode} saved to {out_file}.')
batch_size = cfg.inference.get("batch_size", 8)
if cfg.mode in ['tn', 'joint']:
# TN mode
_get_predictions(lines, 'tn', batch_size, text_file)
if cfg.mode in ['itn', 'joint']:
# ITN mode
_get_predictions(lines, 'itn', batch_size, text_file)
else:
print('Entering interactive mode.')
done = False
while not done:
print('Type "STOP" to exit.')
test_input = input('Input a test input:')
if test_input == "STOP":
done = True
if not done:
if lang == constants.ENGLISH:
new_input = normalizer_electronic.normalize(test_input, verbose=False)
test_input = post_process_punct(input=test_input, normalized_text=new_input)
new_input = normalizer_whitelist.normalize(test_input, verbose=False)
test_input = post_process_punct(input=test_input, normalized_text=new_input)
directions = []
inputs = []
if cfg.mode in ['itn', 'joint']:
directions.append(constants.DIRECTIONS_TO_MODE[constants.ITN_MODE])
inputs.append(test_input)
if cfg.mode in ['tn', 'joint']:
directions.append(constants.DIRECTIONS_TO_MODE[constants.TN_MODE])
inputs.append(test_input)
outputs = tn_model._infer(inputs, directions)[-1]
if cfg.mode in ['joint', 'itn']:
print(f'Prediction (ITN): {outputs[0]}')
if cfg.mode in ['joint', 'tn']:
print(f'Prediction (TN): {outputs[-1]}')
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/duplex_text_normalization/duplex_text_normalization_infer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script contains an example on how to train a DuplexTextNormalizationModel.
Note that DuplexTextNormalizationModel is essentially a wrapper class around
two other classes:
(1) DuplexTaggerModel is a model for identifying spans in the input that need to
be normalized. Usually, such spans belong to semiotic classes (e.g., DATE, NUMBERS, ...).
(2) DuplexDecoderModel is a model for normalizing the spans identified by the tagger.
For example, in the text normalization (TN) problem, each span will be converted to its
spoken form. In the inverse text normalization (ITN) problem, each span will be converted
to its written form.
Therefore, this script consists of two parts, one is for training the tagger model
and the other is for training the decoder.
This script uses the `/examples/nlp/duplex_text_normalization/conf/duplex_tn_config.yaml`
config file by default. Another option is to set a different config file via the command
line argument `--config-name=CONFIG_FILE_PATH`. It is worth looking
at the example config file to see the list of parameters used for training.
USAGE Example:
1. Obtain a processed dataset (refer to the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization/nn_text_normalization.rst>`)
2. Run:
# python duplex_text_normalization_train.py \
data.validation_ds.data_path=PATH_TO_VALIDATION_FILE \
data.train_ds.data_path=PATH_TO_TRAIN_FILE \
mode={tn,itn,joint} \
lang={en,ru,de}
There are 3 different modes. `tn` mode is for training a system for TN only.
`itn` mode is for training a system for ITN. `joint` is for training a system
that can do both TN and ITN at the same time. Note that the above command will
first train a tagger and then train a decoder sequentially.
You can also train only a tagger (without training a decoder) by running the
following command:
# python duplex_text_normalization_train.py \
data.validation_ds.data_path=PATH_TO_VALIDATION_FILE \
data.train_ds.data_path=PATH_TO_TRAIN_FILE \
data.test_ds.data_path=PATH_TO_TEST_FILE \
mode={tn,itn,joint} \
lang={en,ru,de} \
decoder_model.do_training=false
Or you can also train only a decoder (without training a tagger):
# python duplex_text_normalization_train.py \
data.validation_ds.data_path=PATH_TO_VALIDATION_FILE \
data.train_ds.data_path=PATH_TO_TRAIN_FILE \
data.test_ds.data_path=PATH_TO_TEST_FILE \
mode={tn,itn,joint} \
lang={en,ru,de} \
tagger_model.do_training=false
To use tarred dataset for decoder training set:
data.train_ds.use_tarred_dataset=True \
data.train_ds.tar_metadata_file=PATH_TO/<TARRED_DATA_OUTPUT_DIR>/metadata.json
Information on the arguments:
Most arguments in the example config file are quite self-explanatory (e.g.,
`decoder_model.optim.lr` refers to the learning rate for training the decoder).
Some arguments we want to mention are:
+ lang: The language of the dataset.
+ tagger_model.nemo_path: This is the path where the final trained tagger model
will be saved to.
+ decoder_model.nemo_path: This is the path where the final trained decoder model
will be saved to.
"""
from helpers import DECODER_MODEL, TAGGER_MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.data.text_normalization import TextNormalizationTestDataset
from nemo.collections.nlp.models import DuplexTextNormalizationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
@hydra_runner(config_path="conf", config_name="duplex_tn_config")
def main(cfg: DictConfig) -> None:
logging.info(f'Config Params: {OmegaConf.to_yaml(cfg)}')
# Train the tagger
if cfg.tagger_model.do_training:
logging.info(
"================================================================================================"
)
logging.info('Starting training tagger...')
tagger_trainer, tagger_model = instantiate_model_and_trainer(cfg, TAGGER_MODEL, True)
tagger_exp_manager = cfg.get('tagger_exp_manager', None)
exp_manager(tagger_trainer, tagger_exp_manager)
tagger_trainer.fit(tagger_model)
logging.info('Training finished!')
# Train the decoder
if cfg.decoder_model.do_training:
logging.info(
"================================================================================================"
)
logging.info('Starting training decoder...')
decoder_trainer, decoder_model = instantiate_model_and_trainer(cfg, DECODER_MODEL, True)
decoder_exp_manager = cfg.get('decoder_exp_manager', None)
exp_manager(decoder_trainer, decoder_exp_manager)
decoder_trainer.fit(decoder_model)
logging.info('Training finished!')
# Evaluation after training
if (
hasattr(cfg.data, 'test_ds')
and cfg.data.test_ds.data_path is not None
and cfg.tagger_model.do_training
and cfg.decoder_model.do_training
):
tn_model = DuplexTextNormalizationModel(tagger_model, decoder_model, cfg.lang)
test_dataset = TextNormalizationTestDataset(cfg.data.test_ds.data_path, cfg.mode, cfg.lang)
results = tn_model.evaluate(test_dataset, cfg.data.test_ds.batch_size, cfg.data.test_ds.errors_log_fp)
print(f'\nTest results: {results}')
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/duplex_text_normalization/duplex_text_normalization_train.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script runs evaluation on the test data. For more details on the data format refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`
1. To evaluate the tagger model:
python duplex_text_normalization_test.py \
tagger_pretrained_model=PATH_TO_TRAINED_TAGGER \
mode={tn,itn,joint} \
lang={en,ru,de}
2. To evaluate the decoder model:
python duplex_text_normalization_test.py \
decoder_pretrained_model=PATH_TO_TRAINED_DECODER \
mode={tn,itn,joint} \
lang={en,ru,de}
3. To jointly evaluate "tagger -> decoder" pipeline the DuplexTextNormalizationModel will be used.
DuplexTextNormalizationModel is essentially a wrapper class around DuplexTaggerModel and DuplexDecoderModel.
Therefore, two trained NeMo models should be specified to run the joint evaluation
(one is a trained DuplexTaggerModel and the other is a trained DuplexDecoderModel).
Additionally, an error log will be saved in a file specified with data.test_ds.errors_log_fp (this file can be
later used with analyze_errors.py)
python duplex_text_normalization_test.py \
tagger_pretrained_model=PATH_TO_TRAINED_TAGGER \
decoder_pretrained_model=PATH_TO_TRAINED_DECODER \
mode={tn,itn,joint} \
lang={en,ru,de} \
data.test_ds.errors_log_fp=PATH_TO_FILE_TO_SAVE_ERROR_LOG \
data.test_ds.use_cache=true \
data.test_ds.batch_size=256
"""
from helpers import DECODER_MODEL, TAGGER_MODEL, instantiate_model_and_trainer
from omegaconf import DictConfig
from nemo.collections.nlp.data.text_normalization import TextNormalizationTestDataset
from nemo.collections.nlp.models import DuplexTextNormalizationModel
from nemo.core.config import hydra_runner
from nemo.utils import logging
@hydra_runner(config_path="conf", config_name="duplex_tn_config")
def main(cfg: DictConfig) -> None:
lang = cfg.lang
if cfg.tagger_pretrained_model:
tagger_trainer, tagger_model = instantiate_model_and_trainer(cfg, TAGGER_MODEL, False)
tagger_model.max_sequence_len = 512
tagger_model.setup_test_data(cfg.data.test_ds)
logging.info('Evaluating the tagger...')
tagger_trainer.test(model=tagger_model, verbose=False)
else:
logging.info('Tagger checkpoint is not provided, skipping tagger evaluation')
if cfg.decoder_pretrained_model:
decoder_trainer, decoder_model = instantiate_model_and_trainer(cfg, DECODER_MODEL, False)
decoder_model.max_sequence_len = 512
decoder_model.setup_multiple_test_data(cfg.data.test_ds)
logging.info('Evaluating the decoder...')
decoder_trainer.test(decoder_model)
else:
logging.info('Decoder checkpoint is not provided, skipping decoder evaluation')
if cfg.tagger_pretrained_model and cfg.decoder_pretrained_model:
logging.info('Running evaluation of the duplex model (tagger + decoder) on the test set.')
tn_model = DuplexTextNormalizationModel(tagger_model, decoder_model, lang)
test_dataset = TextNormalizationTestDataset(cfg.data.test_ds.data_path, cfg.mode, lang)
results = tn_model.evaluate(test_dataset, cfg.data.test_ds.batch_size, cfg.data.test_ds.errors_log_fp)
print(f'\nTest results: {results}')
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/duplex_text_normalization/duplex_text_normalization_test.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
from omegaconf import DictConfig
from nemo.collections.nlp.data.text_normalization import constants
from nemo.collections.nlp.models import DuplexDecoderModel, DuplexTaggerModel
from nemo.utils import logging
__all__ = ['TAGGER_MODEL', 'DECODER_MODEL', 'MODEL_NAMES', 'instantiate_model_and_trainer']
TAGGER_MODEL = 'tagger'
DECODER_MODEL = 'decoder'
MODEL_NAMES = [TAGGER_MODEL, DECODER_MODEL]
def instantiate_model_and_trainer(cfg: DictConfig, model_name: str, do_training: bool):
""" Function for instantiating a model and a trainer
Args:
cfg: The config used to instantiate the model and the trainer.
        model_name: A str that indicates whether the model to be instantiated is a tagger or a decoder (i.e., model_name should be either TAGGER_MODEL or DECODER_MODEL).
        do_training: A boolean flag that indicates whether the model will be trained or evaluated.
Returns:
trainer: A PyTorch Lightning trainer
model: A NLPModel that can either be a DuplexTaggerModel or a DuplexDecoderModel
"""
assert model_name in MODEL_NAMES
# Get configs for the corresponding models
trainer_cfg = cfg.get(f'{model_name}_trainer')
model_cfg = cfg.get(f'{model_name}_model')
pretrained_cfg = cfg.get(f'{model_name}_pretrained_model', None)
trainer = pl.Trainer(**trainer_cfg)
if not pretrained_cfg:
logging.info(f'Initializing {model_name} model')
if model_name == TAGGER_MODEL:
model = DuplexTaggerModel(model_cfg, trainer=trainer)
if model_name == DECODER_MODEL:
model = DuplexDecoderModel(model_cfg, trainer=trainer)
elif os.path.exists(pretrained_cfg):
logging.info(f'Restoring pretrained {model_name} model from {pretrained_cfg}')
if model_name == TAGGER_MODEL:
model = DuplexTaggerModel.restore_from(pretrained_cfg)
if model_name == DECODER_MODEL:
model = DuplexDecoderModel.restore_from(pretrained_cfg)
else:
logging.info(f'Loading pretrained model {pretrained_cfg}')
if model_name == TAGGER_MODEL:
if pretrained_cfg not in DuplexTaggerModel.get_available_model_names():
                raise ValueError(
                    f'{pretrained_cfg} not in the list of available Tagger models. Select from {DuplexTaggerModel.list_available_models()}'
                )
model = DuplexTaggerModel.from_pretrained(pretrained_cfg)
if model_name == DECODER_MODEL:
if pretrained_cfg not in DuplexDecoderModel.get_available_model_names():
                raise ValueError(
                    f'{pretrained_cfg} not in the list of available Decoder models. Select from {DuplexDecoderModel.list_available_models()}'
                )
model = DuplexDecoderModel.from_pretrained(pretrained_cfg)
# Set model.lang (if it is still None)
if model.lang is None:
model.lang = cfg.lang
assert model.lang in constants.SUPPORTED_LANGS
# Setup covering grammars (if enabled)
# We only support integrating with English TN covering grammars at the moment
if model_name == DECODER_MODEL and model_cfg.use_cg and cfg.lang == constants.ENGLISH:
if model.cg_normalizer is None:
model.setup_cgs(model_cfg)
# Setup train and validation data
if do_training:
model.setup_training_data(train_data_config=cfg.data.train_ds)
if model_name == DECODER_MODEL:
model.setup_multiple_validation_data(val_data_config=cfg.data.validation_ds)
else:
model.setup_validation_data(val_data_config=cfg.data.validation_ds)
logging.info(f'Model {model_name} -- Device {model.device}')
return trainer, model
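# A minimal usage sketch (hypothetical; assumes `cfg` is the DictConfig loaded
# by hydra from conf/duplex_tn_config.yaml):
#
#     trainer, tagger = instantiate_model_and_trainer(cfg, TAGGER_MODEL, do_training=False)
#     trainer.test(model=tagger, verbose=False)
#
# Passing DECODER_MODEL and do_training=True instead returns a decoder wired
# up with the train/validation dataloaders from cfg.data.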
| NeMo-main | examples/nlp/duplex_text_normalization/helpers.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.whitelist import WhiteListFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details on deployment at NeMo/tools/text_processing_deployment.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
whitelist_graph = WhiteListFst(deterministic=deterministic).fst
self.fst = whitelist_graph
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.token_parser import TokenParser
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
class WhitelistNormalizer(Normalizer):
"""
Normalizer for WHITELIST.
Args:
input_case: accepting either "lower_cased" or "cased" input.
lang: language
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str,
lang: str = 'en',
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = False,
whitelist: str = None,
):
from nn_wfst.en.whitelist.tokenize_and_classify import ClassifyFst
from nn_wfst.en.whitelist.verbalize_final import VerbalizeFinalFst
self.tagger = ClassifyFst(
input_case=input_case,
deterministic=deterministic,
cache_dir=cache_dir,
overwrite_cache=overwrite_cache,
whitelist=whitelist,
)
self.verbalizer = VerbalizeFinalFst(deterministic=deterministic)
self.post_processor = None
self.parser = TokenParser()
self.lang = lang
self.processor = MosesProcessor(lang_id=lang)
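# A minimal usage sketch (hypothetical whitelist path; `normalize` is inherited
# from the nemo_text_processing Normalizer base class):
#
#     normalizer = WhitelistNormalizer(input_case='cased', whitelist='whitelist.tsv')
#     print(normalizer.normalize('Dr. Smith'))
#
# The whitelist file is expected to hold tab-separated written/spoken pairs
# that the grammar replaces verbatim.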
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/normalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.en.taggers.whitelist import WhiteListFst
from nemo_text_processing.text_normalization.en.taggers.word import WordFst
from pynini.lib import pynutil
from nemo.utils import logging
class ClassifyFst(GraphFst):
"""
Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details on deployment at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
whitelist: path to a file with whitelist replacements
"""
def __init__(
self,
input_case: str,
cache_dir: str = None,
overwrite_cache: bool = False,
deterministic: bool = True,
whitelist: str = None,
):
super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
whitelist_file = os.path.basename(whitelist) if whitelist else ""
far_file = os.path.join(
cache_dir, f"_{input_case}_en_tn_{deterministic}_deterministic{whitelist_file}.far"
)
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
logging.info(f'ClassifyFst.fst was restored from {far_file}.')
else:
logging.info(f"Creating ClassifyFst grammars.")
punctuation = PunctuationFst(deterministic=deterministic)
punct_graph = punctuation.fst
word_graph = WordFst(deterministic=deterministic, punctuation=punctuation).fst
whitelist_graph = WhiteListFst(input_case=input_case, deterministic=deterministic).fst
classify = pynutil.add_weight(whitelist_graph, 1) | pynutil.add_weight(word_graph, 100)
punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=2.1) + pynutil.insert(" }")
punct = pynini.closure(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct),
1,
)
token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
)
graph = (
token_plus_punct
+ pynini.closure(
(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct + pynutil.insert(" "))
)
+ token_plus_punct
).optimize()
)
graph = delete_space + graph + delete_space
graph |= punct
self.fst = graph.optimize()
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from nn_wfst.en.electronic.verbalize import VerbalizeFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
"""
Finite state transducer that verbalizes an entire sentence.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)
verbalize = VerbalizeFst(deterministic=deterministic).fst
word = WordFst(deterministic=deterministic).fst
types = verbalize | word
if deterministic:
graph = (
pynutil.delete("tokens")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ types
+ delete_space
+ pynutil.delete("}")
)
else:
graph = delete_space + types + delete_space
graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space
self.fst = graph
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/whitelist/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst
from nemo_text_processing.text_normalization.en.verbalizers.electronic import ElectronicFst
class VerbalizeFst(GraphFst):
"""
Composes other verbalizer grammars.
    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details on deployment at NeMo/tools/text_processing_deployment.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize", kind="verbalize", deterministic=deterministic)
electronic_graph = ElectronicFst(deterministic=deterministic).fst
self.fst = electronic_graph
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.normalize import Normalizer
from nemo_text_processing.text_normalization.token_parser import TokenParser
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
class ElectronicNormalizer(Normalizer):
"""
Normalizer for ELECTRONIC.
Args:
input_case: accepting either "lower_cased" or "cased" input.
lang: language
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
"""
def __init__(
self,
input_case: str = 'cased',
lang: str = 'en',
deterministic: bool = True,
cache_dir: str = None,
overwrite_cache: bool = False,
):
from nn_wfst.en.electronic.tokenize_and_classify import ClassifyFst
from nn_wfst.en.electronic.verbalize_final import VerbalizeFinalFst
self.tagger = ClassifyFst(
input_case=input_case, deterministic=deterministic, cache_dir=cache_dir, overwrite_cache=overwrite_cache
)
self.verbalizer = VerbalizeFinalFst(deterministic=deterministic)
self.post_processor = None
self.parser = TokenParser()
self.lang = lang
self.processor = MosesProcessor(lang_id=lang)
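# A minimal usage sketch (illustrative input; `normalize` is inherited from the
# nemo_text_processing Normalizer base class):
#
#     normalizer = ElectronicNormalizer(input_case='cased')
#     print(normalizer.normalize('nvidia.com'))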
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/normalize.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_WHITE_SPACE,
GraphFst,
delete_extra_space,
delete_space,
generator_main,
)
from nemo_text_processing.text_normalization.en.taggers.electronic import ElectronicFst
from nemo_text_processing.text_normalization.en.taggers.punctuation import PunctuationFst
from nemo_text_processing.text_normalization.en.taggers.word import WordFst
from pynini.lib import pynutil
from nemo.utils import logging
class ClassifyFst(GraphFst):
"""
Final class that composes all other classification grammars. This class can process an entire sentence including punctuation.
    For deployment, this grammar will be compiled and exported to an OpenFst Finite State Archive (FAR) file.
    More details on deployment at NeMo/tools/text_processing_deployment.
Args:
input_case: accepting either "lower_cased" or "cased" input.
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
cache_dir: path to a dir with .far grammar file. Set to None to avoid using cache.
overwrite_cache: set to True to overwrite .far files
"""
def __init__(
self, input_case: str, cache_dir: str = None, overwrite_cache: bool = False, deterministic: bool = True
):
super().__init__(name="tokenize_and_classify", kind="classify", deterministic=deterministic)
far_file = None
if cache_dir is not None and cache_dir != "None":
os.makedirs(cache_dir, exist_ok=True)
far_file = os.path.join(cache_dir, f"_{input_case}_en_tn_{deterministic}_deterministic.far")
if not overwrite_cache and far_file and os.path.exists(far_file):
self.fst = pynini.Far(far_file, mode="r")["tokenize_and_classify"]
logging.info(f'ClassifyFst.fst was restored from {far_file}.')
else:
logging.info(f"Creating ClassifyFst grammars.")
punctuation = PunctuationFst(deterministic=deterministic)
punct_graph = punctuation.fst
word_graph = WordFst(deterministic=deterministic, punctuation=punctuation).fst
            electronic_graph = ElectronicFst(deterministic=deterministic).fst
            classify = pynutil.add_weight(electronic_graph, 1.1) | pynutil.add_weight(word_graph, 100)
punct = pynutil.insert("tokens { ") + pynutil.add_weight(punct_graph, weight=2.1) + pynutil.insert(" }")
punct = pynini.closure(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct),
1,
)
token = pynutil.insert("tokens { ") + classify + pynutil.insert(" }")
token_plus_punct = (
pynini.closure(punct + pynutil.insert(" ")) + token + pynini.closure(pynutil.insert(" ") + punct)
)
graph = (
token_plus_punct
+ pynini.closure(
(
pynini.compose(pynini.closure(NEMO_WHITE_SPACE, 1), delete_extra_space)
| (pynutil.insert(" ") + punct + pynutil.insert(" "))
)
+ token_plus_punct
).optimize()
)
graph = delete_space + graph + delete_space
graph |= punct
self.fst = graph.optimize()
if far_file:
generator_main(far_file, {"tokenize_and_classify": self.fst})
logging.info(f"ClassifyFst grammars are saved to {far_file}.")
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/tokenize_and_classify.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pynini
from nemo_text_processing.text_normalization.en.graph_utils import GraphFst, delete_extra_space, delete_space
from nemo_text_processing.text_normalization.en.verbalizers.word import WordFst
from nn_wfst.en.electronic.verbalize import VerbalizeFst
from pynini.lib import pynutil
class VerbalizeFinalFst(GraphFst):
"""
Finite state transducer that verbalizes an entire sentence.
Args:
deterministic: if True will provide a single transduction option,
for False multiple options (used for audio-based normalization)
"""
def __init__(self, deterministic: bool = True):
super().__init__(name="verbalize_final", kind="verbalize", deterministic=deterministic)
verbalize = VerbalizeFst(deterministic=deterministic).fst
word = WordFst(deterministic=deterministic).fst
types = verbalize | word
if deterministic:
graph = (
pynutil.delete("tokens")
+ delete_space
+ pynutil.delete("{")
+ delete_space
+ types
+ delete_space
+ pynutil.delete("}")
)
else:
graph = delete_space + types + delete_space
graph = delete_space + pynini.closure(graph + delete_extra_space) + graph + delete_space
self.fst = graph
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/verbalize_final.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | examples/nlp/duplex_text_normalization/nn_wfst/en/electronic/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script creates data splits of the Google Text Normalization dataset
of the format mentioned in the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`). Then there will be a folder named `en_with_types`.
3. Run this script
# python data_split.py \
--data_dir=en_with_types/ \
--output_dir=data_split/ \
--lang=en
In this example, the split files will be stored in the `data_split` folder.
The folder will contain three subfolders `train`, `dev`, and `test` with `.tsv` files.
"""
from argparse import ArgumentParser
from os import listdir, mkdir
from os.path import isdir, isfile, join
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import constants
# Local Constants
TEST_SIZE_EN = 100002
TEST_SIZE_RUS = 100007
def read_google_data(data_file: str, lang: str, split: str, add_test_full=False):
"""
The function can be used to read the raw data files of the Google Text Normalization
dataset (which can be downloaded from https://www.kaggle.com/google-nlu/text-normalization)
Args:
data_file: Path to the data file. Should be of the form output-xxxxx-of-00100
lang: Selected language.
split: data split
        add_test_full: if True, do not truncate the test data, i.e., take the whole test file instead of only the first TEST_SIZE_EN/TEST_SIZE_RUS lines
Return:
data: list of examples
"""
data = []
cur_classes, cur_tokens, cur_outputs = [], [], []
with open(data_file, 'r', encoding='utf-8') as f:
for linectx, line in tqdm(enumerate(f)):
es = line.strip().split('\t')
if split == "test" and not add_test_full:
# For the results reported in the paper "RNN Approaches to Text Normalization: A Challenge":
# + For English, the first 100,002 lines of output-00099-of-00100 are used for the test set
# + For Russian, the first 100,007 lines of output-00099-of-00100 are used for the test set
if lang == constants.ENGLISH and linectx == TEST_SIZE_EN:
break
if lang == constants.RUSSIAN and linectx == TEST_SIZE_RUS:
break
if len(es) == 2 and es[0] == '<eos>':
data.append((cur_classes, cur_tokens, cur_outputs))
# Reset
cur_classes, cur_tokens, cur_outputs = [], [], []
continue
# Remove _trans (for Russian)
if lang == constants.RUSSIAN:
es[2] = es[2].replace('_trans', '')
# Update the current example
assert len(es) == 3
cur_classes.append(es[0])
cur_tokens.append(es[1])
cur_outputs.append(es[2])
return data
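# For reference, each non-<eos> line of a raw Google TN file carries three
# tab-separated fields -- semiotic class, written form, spoken form -- e.g.
# (illustrative):
#
#     DATE<TAB>2006<TAB>two thousand six
#     PLAIN<TAB>by<TAB><self>
#
# read_google_data groups such lines into one (classes, tokens, outputs) tuple
# per sentence, using the '<eos>' marker as the sentence boundary.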
if __name__ == '__main__':
parser = ArgumentParser(description='Preprocess Google text normalization dataset')
parser.add_argument('--data_dir', type=str, required=True, help='Path to folder with data')
parser.add_argument('--output_dir', type=str, default='preprocessed', help='Path to folder with preprocessed data')
parser.add_argument(
'--lang', type=str, default=constants.ENGLISH, choices=constants.SUPPORTED_LANGS, help='Language'
)
parser.add_argument(
'--add_test_full',
action='store_true',
help='If True, additional folder test_full will be created without truncation of files',
)
args = parser.parse_args()
# Create the output dir (if not exist)
if not isdir(args.output_dir):
mkdir(args.output_dir)
mkdir(args.output_dir + '/train')
mkdir(args.output_dir + '/dev')
mkdir(args.output_dir + '/test')
if args.add_test_full:
mkdir(args.output_dir + '/test_full')
for fn in sorted(listdir(args.data_dir))[::-1]:
fp = join(args.data_dir, fn)
if not isfile(fp):
continue
if not fn.startswith('output'):
continue
        # Determine the current split
        split_nb = int(fn.split('-')[1])
        if split_nb < 90:
            cur_split = "train"
        elif split_nb < 95:
            cur_split = "dev"
        elif split_nb == 99:
            cur_split = "test"
        else:
            # Files 95-98 are not assigned to any split, so skip them
            continue
data = read_google_data(data_file=fp, lang=args.lang, split=cur_split)
# write out
output_file = join(args.output_dir, f'{cur_split}', f'{fn}.tsv')
print(fp)
print(output_file)
        with open(output_file, 'w', encoding='utf-8') as output_f:
            for inst in data:
                cur_classes, cur_tokens, cur_outputs = inst
                for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
                    output_f.write(f'{c}\t{t}\t{o}\n')
                output_f.write('<eos>\t<eos>\n')
print(f'{cur_split}_sentences: {len(data)}')
# additionally generate full test files if needed
if cur_split == "test" and args.add_test_full:
data = read_google_data(data_file=fp, lang=args.lang, split=cur_split, add_test_full=True)
# write out
output_file = join(args.output_dir, 'test_full', f'{fn}.tsv')
            with open(output_file, 'w', encoding='utf-8') as output_f:
                for inst in data:
                    cur_classes, cur_tokens, cur_outputs = inst
                    for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
                        output_f.write(f'{c}\t{t}\t{o}\n')
                    output_f.write('<eos>\t<eos>\n')
print(f'{cur_split}_sentences: {len(data)}')
| NeMo-main | examples/nlp/duplex_text_normalization/data/data_split.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pickle
import random
import tarfile
from glob import glob
from typing import List, Tuple
from joblib import Parallel, delayed
from tqdm import tqdm
from transformers import AutoTokenizer
import nemo.collections.nlp.data.text_normalization.constants as constants
from nemo.collections.nlp.data.text_normalization.decoder_dataset import TextNormalizationDecoderDataset
from nemo.utils import logging
"""
The script builds tar files for Tarred TextNormalizationDecoderDataset
See `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization/nn_text_normalization.rst>`
for more details on the data format, and en/data_preprocessing.py on how to pre-process the data before tarring.
To run the script, use:
python create_tarred_dataset.py \
    --input_files="train_processed/output-00099-of-00100" \
    --input_files="train_processed/output-00098-of-00100" \
    --lang="en" \
    --out_dir="TARRED_DATA_OUTPUT_DIR"
See the argparse help for more arguments.
"""
def _preprocess_file(input_file: str) -> List[Tuple[List[str]]]:
"""
    Performs initial preprocessing, i.e., URL formatting and removal of "_trans" from the Russian set
Args:
input_file: path to a file in google TN format
Returns:
Processed data. Each element is a Tuple(List[semiotic classes], List[written words], List[spoken words])
"""
print(f"Reading and running initial pre-processing of {input_file}...")
cur_split = []
with open(input_file, 'r', encoding='utf-8') as f:
# Loop through each line of the file
cur_classes, cur_tokens, cur_outputs = [], [], []
for linectx, line in tqdm(enumerate(f)):
es = line.strip().split('\t')
if len(es) == 2 and es[0] == '<eos>':
cur_split.append((cur_classes, cur_tokens, cur_outputs))
# Reset
cur_classes, cur_tokens, cur_outputs = [], [], []
continue
assert len(es) == 3
cur_classes.append(es[0])
cur_tokens.append(es[1])
cur_outputs.append(es[2])
return cur_split
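# Sketch of the structure returned by _preprocess_file for a two-token
# sentence (illustrative values):
#
#     [(['PLAIN', 'CARDINAL'], ['about', '12'], ['<self>', 'twelve'])]
#
# i.e., one tuple of parallel lists per sentence -- the `raw_instances` format
# consumed by TextNormalizationDecoderDataset below.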
def _write_batches_to_tarfiles(
input_file: str,
tokenizer: AutoTokenizer,
tokenizer_name: str,
mode: str,
lang: str,
max_seq_len: int,
batch_size: int,
out_dir: str,
num_batches_per_tarfile: int,
decoder_data_augmentation: bool = False,
):
"""
Creates tar files for the input file, i.e.:
1. Creates a TextNormalizationDecoderDataset from the input file
2. Constructs batches of size `batch_size`
3. Saves each created batch to a pickle file and then adds `num_batches_per_tarfile`
of the pickle files to a tarfile.
Args:
input_file: path to cleaned data file. See en/data_processing.py for cleaning.
tokenizer: tokenizer
tokenizer_name: the name of the tokenizer, usually corresponds to the pre-trained LM
mode: model training mode
max_seq_len: maximum length of the sequence (examples that are longer will be discarded)
batch_size: batch size
out_dir: path to output directory
num_batches_per_tarfile: number of batches saved in each tar file
decoder_data_augmentation: Set to True to enable data augmentation for the decoder model
lang: data language
"""
dataset = TextNormalizationDecoderDataset(
input_file=input_file,
raw_instances=_preprocess_file(input_file=input_file),
tokenizer=tokenizer,
tokenizer_name=tokenizer_name,
mode=mode,
max_len=max_seq_len,
decoder_data_augmentation=decoder_data_augmentation,
lang=lang,
use_cache=False,
max_insts=-1,
do_tokenize=False,
initial_shuffle=True,
)
dataset.batchify(batch_size)
file_name = os.path.basename(input_file)
tar_file_ctr = 0
tar_file_path = os.path.join(
out_dir, '%s-batches.%d.%d.%d.tar' % (file_name, batch_size, max_seq_len, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w')
total_batch_ctr = 0
batch_ctr = 0
for batch in dataset.batches:
total_batch_ctr += 1
batch_ctr += 1
pickle_file = os.path.join(out_dir, '%s-batch-%d.pkl' % (file_name, total_batch_ctr))
pickle.dump(batch, open(pickle_file, 'wb'))
tar_file_ptr.add(pickle_file)
os.remove(pickle_file)
if batch_ctr == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_path = os.path.join(
out_dir, f'%s-batches.%d.%d.%d.tar' % (file_name, batch_size, max_seq_len, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w',)
batch_ctr = 0
    # return the path of the tar file that still contains the remaining (unfilled) batches
remainder_tar_file_path = tar_file_ptr.name
tar_file_ptr.close()
return total_batch_ctr, remainder_tar_file_path
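# Each completed tar file produced above holds num_batches_per_tarfile pickled
# batches and is named following the pattern used in tar_file_path, e.g.
# (illustrative): output-00099-of-00100-batches.16.80.0.tar, where 16 is the
# batch size, 80 the maximum sequence length, and 0 the tar file counter.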
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='(Inverse) Text Normalization tarred dataset creation')
parser.add_argument('--transformer_name', type=str, default="t5-small", help='Name of the pretrained LM.')
parser.add_argument('--mode', type=str, default='tn', choices=constants.MODES, help='(I)TN model training mode.')
parser.add_argument('--lang', type=str, default='en', choices=constants.SUPPORTED_LANGS, help='language.')
parser.add_argument(
'--decoder_data_augmentation',
action="store_true",
help='Set to True to use data augmentation for the decoder model.',
)
parser.add_argument(
'-in',
'--input_files',
action='append',
required=True,
help="Example: -in train_processed/output-00099-of-00100 -in train_processed/output-00098-of-00100",
)
parser.add_argument('--out_dir', type=str, required=True, help='Path to store dataloader and tokenizer models.')
parser.add_argument(
'--max_seq_length', type=int, default=80, help='Maximum sequence length, longer examples will be discarded.'
)
parser.add_argument('--min_seq_length', type=int, default=1, help='Minimum sequence length.')
parser.add_argument(
'--num_batches_per_tarfile',
type=int,
default=2,
help='Number batches, i.e., pickle files, included in a single .tar file.',
)
parser.add_argument('--n_jobs', type=int, default=-2, help='The maximum number of concurrently running jobs.')
parser.add_argument(
'--batch_size',
type=int,
default=16,
        help='Batch size, i.e., number of examples in a single pickle file. This batch size will override the batch size set in the training config.',
)
parser.add_argument(
'--factor', default=8, type=int, help='The final number of tar files will be divisible by the "factor" value'
)
args = parser.parse_args()
# check if tar files exist
if os.path.exists(args.out_dir):
tar_files_in_out_dir = glob(f'{args.out_dir}/*.tar')
if tar_files_in_out_dir:
raise ValueError(
f'Tar files detected in {args.out_dir}. Delete the files to re-construct the dataset in the same directory.'
)
else:
os.makedirs(args.out_dir)
world_size = 1
tokenizer = AutoTokenizer.from_pretrained(args.transformer_name)
results_list = Parallel(n_jobs=args.n_jobs)(
delayed(_write_batches_to_tarfiles)(
input_file=input_file,
tokenizer=tokenizer,
tokenizer_name=args.transformer_name,
mode=args.mode,
lang=args.lang,
batch_size=args.batch_size,
max_seq_len=args.max_seq_length,
decoder_data_augmentation=args.decoder_data_augmentation,
out_dir=args.out_dir,
num_batches_per_tarfile=args.num_batches_per_tarfile,
)
for input_file in args.input_files
)
total_batches = sum([batch_count for batch_count, _ in results_list])
# save batches from tar files containing the left over batches (if there's enough batches)
remainder_tar_file_ctr = 0
remainder_tar_file_path = os.path.join(
args.out_dir, f'remainder-batches.tokens.{args.batch_size}.tar_file_{remainder_tar_file_ctr}.tar'
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w')
batch_in_tar_ctr = 0
for _, tar_file_path in results_list:
tar_file_ptr = tarfile.open(tar_file_path, 'r')
for member in tar_file_ptr.getmembers():
remainder_tar_file_ptr.addfile(member, tar_file_ptr.extractfile(member.name))
batch_in_tar_ctr += 1
if batch_in_tar_ctr == args.num_batches_per_tarfile:
remainder_tar_file_ctr += 1
remainder_tar_file_ptr.close()
remainder_tar_file_path = os.path.join(
args.out_dir, f'remainder-batches.tokens.{args.batch_size}.tar_file_{remainder_tar_file_ctr}.tar',
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w',)
batch_in_tar_ctr = 0
tar_file_ptr.close()
os.remove(tar_file_path)
# log the number of batches remaining as they will be discarded
num_batches_discarded = len(remainder_tar_file_ptr.getmembers())
remainder_tar_file_ptr.close()
os.remove(remainder_tar_file_path)
tar_file_paths = glob(f'{args.out_dir}/*.tar')
if args.factor != 1:
num_tar_files = len(tar_file_paths)
num_tars_to_drop = num_tar_files % args.factor
num_batches_discarded += num_tars_to_drop * args.num_batches_per_tarfile
random.shuffle(tar_file_paths)
for _ in range(num_tars_to_drop):
os.remove(tar_file_paths.pop(-1))
total_batches -= num_batches_discarded
logging.info(f'Number of batches discarded: {num_batches_discarded}, total batches kept: {total_batches}')
# dump metadata to json
metadata = {}
metadata['num_batches'] = total_batches
# rename tar files so they can be more easily used with CLI and YAML
file_name = f'{args.mode}.{args.batch_size}_bs.{args.num_batches_per_tarfile}_b_per_tar.{args.max_seq_length}_len'
for index, path in enumerate(tar_file_paths):
os.rename(path, os.path.join(args.out_dir, f'{file_name}.{index}.tar'))
text_tar_filepaths = f'{file_name}._OP_0..{index}_CL_.tar'
logging.info(f'Files for brace expansion: "{text_tar_filepaths}"')
metadata['text_tar_filepaths'] = text_tar_filepaths
# add tar files to metadata
tar_file_paths = glob(f'{args.out_dir}/*.tar')
metadata['tar_files'] = tar_file_paths
metadata_path = os.path.join(args.out_dir, 'metadata.json')
json.dump(metadata, open(metadata_path, 'w'))
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size. '
            f'Decrease num_batches_per_tarfile or batch_size to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
| NeMo-main | examples/nlp/duplex_text_normalization/data/create_tarred_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to clean the splits of English Google Text Normalization dataset
for better training performance. Without these processing steps we noticed that the model would have a hard time learning certain input cases, and instead starts to either make unrecoverable errors
or hallucinate. For example, the model struggles to learn numbers with five or more digits due to limited examples in the training data, so we simplified the task for the model by letting it verbalize those cases
digit by digit. This makes the model more robust to errors.
The operations include:
- numbers that are longer than `max_integer_length` will be verbalized digit by digit, e.g. the mapping "10001" -> "ten thousand and one" in the data
will be changed to "10001" -> "one zero zero zero one"
- denominators of fractions that are longer than `max_denominator_length` will be verbalized digit by digit
- sentences with non-English characters will be removed
- some class formats are converted to a standardized format, e.g. for `FRACTION`, "½" becomes "1/2"
- URLs that have a spoken form of "*_letter", e.g. "dot h_letter _letter t_letter _letter m_letter _letter l_letter", are converted to "dot h t m l"
- for class types "PLAIN", "LETTERS", "ELECTRONIC", "VERBATIM", "PUNCT" the spoken form is changed to "<self>" which means this class should be left unchanged
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`). Then there will be a folder named `en_with_types`.
3. Run the data_split.py scripts to obtain the data splits
4. Run this script on the different splits
# python data_preprocessing.py \
--input_path=data_split/train \
--output_dir=train_processed \
--max_integer_length=4 \
--max_denominator_length=3
In this example, the cleaned files will be saved in train_processed/.
After this script, you can use upsample.py to create a more class balanced training dataset for better performance.
"""
import os
from argparse import ArgumentParser
import inflect
import regex as re
from tqdm import tqdm
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.nlp.data.text_normalization.constants import EN_GREEK_TO_SPOKEN
from nemo.collections.nlp.data.text_normalization.utils import (
add_space_around_dash,
convert_fraction,
convert_superscript,
)
from nemo.utils import logging
engine = inflect.engine()
# these are all words that can appear in a verbalized number, this list will be used later as a filter to detect numbers in verbalizations
number_verbalizations = list(range(0, 20)) + list(range(20, 100, 10))
number_verbalizations = (
[engine.number_to_words(x, zero="zero").replace("-", " ").replace(",", "") for x in number_verbalizations]
+ ["hundred", "thousand", "million", "billion", "trillion"]
+ ["point"]
)
digit = "0123456789"
processor = MosesProcessor(lang_id="en")
def process_url(o):
"""
The function is used to process the spoken form of every URL in an example.
E.g., "dot h_letter _letter t_letter _letter m_letter _letter l_letter" ->
"dot h t m l"
Args:
o: The expected outputs for the spoken form
Return:
o: The outputs for the spoken form with preprocessed URLs.
"""
def flatten(l):
""" flatten a list of lists """
return [item for sublist in l for item in sublist]
if o != '<self>' and '_letter' in o:
o_tokens = o.split(' ')
all_spans, cur_span = [], []
for j in range(len(o_tokens)):
if len(o_tokens[j]) == 0:
continue
if o_tokens[j] == '_letter':
all_spans.append(cur_span)
all_spans.append([' '])
cur_span = []
else:
o_tokens[j] = o_tokens[j].replace('_letter', '')
cur_span.append(o_tokens[j])
if len(cur_span) > 0:
all_spans.append(cur_span)
o_tokens = flatten(all_spans)
o = ''
for o_token in o_tokens:
if len(o_token) > 1:
o += ' ' + o_token + ' '
else:
o += o_token
o = o.strip()
o_tokens = processor.tokenize(o).split()
o = ' '.join(o_tokens)
return o
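# Illustrative behavior of process_url (example made up for clarity):
#
#     process_url("dot h_letter _letter t_letter _letter m_letter _letter l_letter")
#     # -> 'dot h t m l'
#
# Spoken forms without the '_letter' marker, including '<self>', are returned
# unchanged.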
def convert2digits(digits: str):
"""
    Verbalizes an integer digit by digit, e.g. "12,000.12" -> "one two zero zero zero point one two".
    It also accepts a string that has an integer as a prefix, in which case only that prefix is verbalized,
    e.g. "12 kg" -> "one two".
    Args:
        digits: integer in string format
    Return:
        res: number verbalization of the integer prefix of the input
        i: index at which the scan stopped (the first non-numeric character, or the last index if the whole string was consumed)
"""
res = []
for i, x in enumerate(digits):
if x in digit:
res.append(engine.number_to_words(str(x), zero="zero").replace("-", " ").replace(",", ""))
elif x == ".":
res.append("point")
elif x in [" ", ","]:
continue
else:
# logging.warning(f"remove {digits[:i]} from {digits[i:]}")
break
res = " ".join(res)
return res, i
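# Illustrative calls (examples made up for clarity):
#
#     convert2digits("12,000.12")  # -> ("one two zero zero zero point one two", 8)
#     convert2digits("12 kg")      # -> ("one two", 3)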
def convert(example):
cls, written, spoken = example
written = convert_fraction(written)
written = re.sub("é", "e", written)
written = convert_superscript(written)
if cls == "TIME":
written = re.sub("([0-9]): ([0-9])", "\\1:\\2", written)
if cls == "MEASURE":
written = re.sub("([0-9])\s?''", '\\1"', written)
spoken = process_url(spoken)
if cls in ["TELEPHONE", "DIGIT", "MEASURE", "DECIMAL", "MONEY", "ADDRESS"]:
spoken = re.sub(" o ", " zero ", spoken)
spoken = re.sub(" o ", " zero ", spoken)
spoken = re.sub("^o ", "zero ", spoken)
spoken = re.sub(" o$", " zero", spoken)
spoken = re.sub("^sil ", "", spoken)
spoken = re.sub(" sil ", " ", spoken)
spoken = re.sub(" sil ", " ", spoken)
spoken = re.sub(" sil$", "", spoken)
if cls != "ELECTRONIC":
written = add_space_around_dash(written)
example[1] = written
example[2] = spoken
l = args.max_integer_length - 2
    # if the written form does not match this format, return without changes
if not re.search("[0-9]{%s}[,\s]?[0-9]{3}" % l, written):
if cls != "FRACTION":
return
idx = written.index("/")
denominator = written[idx + 1 :].strip()
if not re.search(r"[0-9]{%s}" % (args.max_denominator_length + 1), denominator):
return
# convert spoken forms for different classes
if cls == "CARDINAL":
if written[0] == "-":
digits = "minus " + convert2digits(written[1:])[0]
else:
digits = convert2digits(written)[0]
spoken = digits
elif cls == "ADDRESS":
idx = re.search("[0-9]", written).start()
number = convert2digits(written[idx:].strip())[0]
s_words = spoken.split()
for i, x in enumerate(s_words):
if x in number_verbalizations:
break
spoken = " ".join(s_words[:i]) + " " + number
elif cls == "DECIMAL":
res = []
for i, x in enumerate(written):
if i == 0 and x == "-":
res.append("minus")
elif x in digit:
res.append(engine.number_to_words(str(x), zero="zero").replace("-", " ").replace(",", ""))
elif x == ".":
res.append("point")
spoken = " ".join(res)
m = re.search("([a-z]+)", written)
if m:
spoken += " " + m.group(1)
elif cls == "FRACTION":
res = []
if written[0] == "-":
res.append("minus")
written = written[1:]
idx = written.index("/")
numerator = written[:idx].strip()
denominator = written[idx + 1 :].strip()
if len(numerator) > args.max_integer_length:
numerator = convert2digits(numerator)[0]
else:
numerator = engine.number_to_words(str(numerator), zero="zero").replace("-", " ").replace(",", "")
if len(denominator) > args.max_denominator_length:
denominator = convert2digits(denominator)[0]
else:
denominator = engine.number_to_words(str(denominator), zero="zero").replace("-", " ").replace(",", "")
spoken = numerator + " slash " + denominator
if res:
spoken = "minus " + spoken
elif cls == "MEASURE":
res = []
if written[0] == "-":
res.append("minus")
written = written[1:]
idx = re.search("(?s:.*)([0-9]\s?[a-zA-Zµμ\/%Ω'])", written).end()
number, unit_idx = convert2digits(written[:idx].strip())
s_words = spoken.split()
for i, x in enumerate(s_words):
if x not in number_verbalizations:
break
spoken = number + " " + " ".join(s_words[i:])
if res:
spoken = "minus " + spoken
elif cls == "MONEY":
res = []
if written[0] == "-":
res.append("minus")
written = written[1:]
idx = re.search("[0-9]", written).start()
m = re.search("\.", written[idx:])
idx_end = len(written)
if m:
idx_end = m.start() + idx
number, unit_idx = convert2digits(written[idx:idx_end].strip())
s_words = spoken.split()
for i, x in enumerate(s_words):
if x not in number_verbalizations:
break
spoken = number + " " + " ".join(s_words[i:])
if res:
spoken = "minus " + spoken
elif cls == "ORDINAL":
res = []
if written[0] == "-":
res.append("minus")
written = written[1:]
if "th" in written.lower():
idx = written.lower().index("th")
elif "rd" in written.lower():
idx = written.lower().index("rd")
elif "nd" in written.lower():
idx = written.lower().index("nd")
elif "st" in written.lower():
idx = written.lower().index("st")
if re.search(r"[¿¡ºª]", written) is None:
spoken = convert2digits(written[:idx].strip())[0] + " " + written[idx:].lower()
if res:
spoken = "minus " + spoken
example[2] = spoken
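# Illustrative effect of convert (assuming the default --max_integer_length=4):
# a long CARDINAL example ['CARDINAL', '12000', 'twelve thousand'] is rewritten
# in place so that its spoken form becomes 'one two zero zero zero', matching
# the digit-by-digit policy described in the module docstring.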
def ignore(example):
"""
This function makes sure specific class types like 'PLAIN', 'ELECTRONIC' etc. are left unchanged.
Args:
example: data example
"""
cls, _, _ = example
if cls in ["PLAIN", "LETTERS", "ELECTRONIC", "VERBATIM", "PUNCT"]:
example[2] = "<self>"
if example[1] == 'I' and re.search("(first|one)", example[2]):
example[2] = "<self>"
def process_file(fp):
""" Reading the raw data from a file of NeMo format and preprocesses it. Write is out to the output directory.
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
Args:
fp: file path
"""
file_name = fp.split("/")[-1]
output_path = f"{args.output_dir}/{file_name}"
logging.info(f"-----input_file--------\n{fp}")
logging.info(f"-----output_file--------\n{output_path}")
insts, w_words, s_words, classes = [], [], [], []
delete_sentence = False
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in line.strip().split('\t')]
if es[0] == '<eos>':
if not delete_sentence:
inst = (classes, w_words, s_words)
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
delete_sentence = False
else:
# convert data sample
convert(es)
# decide if this data sample's spoken form should be same as written form
ignore(es)
characters_ignore = "¿¡ºª" + "".join(EN_GREEK_TO_SPOKEN.keys())
# delete sentence with greek symbols, etc.
if re.search(rf"[{characters_ignore}]", es[1]) is not None:
delete_sentence = True
# delete characters from chinese, japanese, korean
if re.search(r'[\u4e00-\u9fff]+', es[1]) is not None:
delete_sentence = True
if es[0] == 'MONEY' and re.search("\s?DM$", es[1]):
delete_sentence = True
if es[0] == 'MEASURE' and re.search("\s?Da$", es[1]):
delete_sentence = True
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
inst = (classes, w_words, s_words)
insts.append(inst)
    with open(output_path, 'w+', encoding='utf-8') as output_f:
        for inst in insts:
            cur_classes, cur_tokens, cur_outputs = inst
            for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
                output_f.write(f'{c}\t{t}\t{o}\n')
            output_f.write('<eos>\t<eos>\n')
def main():
if not os.path.exists(args.input_path):
raise ValueError(f"Input path {args.input_path} does not exist")
if os.path.exists(args.output_dir):
logging.info(
f"Output directory {args.output_dir} exists already. Existing files could be potentially overwritten."
)
else:
logging.info(f"Creating output directory {args.output_dir}.")
os.makedirs(args.output_dir, exist_ok=True)
if os.path.isdir(args.input_path):
input_paths = sorted([os.path.join(args.input_path, f) for f in os.listdir(args.input_path)])
else:
input_paths = [args.input_path]
for input_file in input_paths:
process_file(input_file)
if __name__ == "__main__":
parser = ArgumentParser(description="Text Normalization Data Preprocessing for English")
parser.add_argument("--output_dir", required=True, type=str, help='Path to output directory.')
parser.add_argument("--input_path", required=True, type=str, help='Path to input file or input directory.')
parser.add_argument(
"--max_integer_length",
default=4,
type=int,
help='Maximum number of digits for integers that are allowed. Beyond this, the integers are verbalized digit by digit.',
)
parser.add_argument(
"--max_denominator_length",
default=3,
type=int,
help='Maximum number of digits for denominators that are allowed. Beyond this, the denominator is verbalized digit by digit.',
)
args = parser.parse_args()
main()
| NeMo-main | examples/nlp/duplex_text_normalization/data/en/data_preprocessing.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to create a more class balanced file from a set of the data files of the English Google Text Normalization dataset
for better training performance. Currently this script upsamples the class types "MONEY", "MEASURE", "TIME", "FRACTION" since these are underrepresented in the Google Text Normalization dataset, but still diverse in their representations.
Of all the input files in `input_dir`, this script takes the first file and computes the class patterns that occur in it.
For those that are underrepresented, quantitatively defined as occurring fewer than `min_number` times, the other files are scanned for sentences that contain the missing patterns.
Those sentences are appended to the data of the first file and written to the output file.
USAGE Example:
1. Download the Google TN dataset from https://www.kaggle.com/google-nlu/text-normalization
2. Unzip the English subset (e.g., by running `tar zxvf en_with_types.tgz`). Then there will be a folder named `en_with_types`.
3. Run the data_split.py, data_preprocessing.py scripts to obtain cleaned data files
4. Run this script on the training data portion
# python upsample.py \
--input_dir=train_processed/ \
    --output_file=train_upsampled.tsv \
--min_number=2000
In this example, the final file will be train_upsampled.tsv.
"""
import glob
from argparse import ArgumentParser
from collections import defaultdict
from typing import List
import numpy as np
import regex as re
parser = ArgumentParser(description="English Text Normalization upsampling")
parser.add_argument("--input_dir", required=True, type=str, help='Path to input directory with preprocessed data')
parser.add_argument("--output_file", required=True, type=str, help='Path to output file')
parser.add_argument("--min_number", default=2000, type=int, help='minimum number per pattern')
parser.add_argument("--pretty", action="store_true", help='Pretty print')
args = parser.parse_args()
# global pattern tables
MONEY_PATTERNS = defaultdict(int)
MEASURE_PATTERNS = defaultdict(int)
TIME_PATTERNS = defaultdict(int)
FRACTION_PATTERNS = defaultdict(int)
# global templates/stencils for creating patterns
money_templates = ["([0-9]|\.|,)+"]
measure_templates = ["^-?([0-9]|\.|,|/|\s)+"]
time_templates = [
"^[0-9]+:[0-9][0-9]$",
"^[0-9]+:[0-9][0-9]\s?[a-zA-Z]+$",
"^[0-9]+\s(p|P|A|a)\.?(m|M)\.?",
"^[0-9]+(p|P|A|a)\.?(m|M)\.?",
"^[0-9]:[0-9][0-9]\s(p|P|A|a)\.?(m|M)\.?",
"^[0-9][0-9]:[0-9][0-9]\s(p|P|A|a)\.?(m|M)\.?",
"^[0-9]:[0-9][0-9](p|P|A|a)\.?(m|M)\.?",
"^[0-9][0-9]:[0-9][0-9](p|P|A|a)\.?(m|M)\.?",
"^[0-9]+.[0-9][0-9]\s?(p|P|A|a)\.?(m|M)\.?",
"^[0-9]+:[0-9]+:[0-9]+",
"^[0-9]+:[0-9]+.[0-9]+",
"^[0-9]+.[0-9]+$",
"^[0-9]+.[0-9]+\s?[a-zA-Z]+$",
]
fraction_templates = [
"^-?[0-9]+\s?\/\s?[0-9]{3}$",
"^-?[0-9]{3}\s?\/\s?[0-9]+$",
"^[0-9]+\s[0-9]+\/[0-9]+$",
"^[0-9]+\s[0-9]+\/[0-9]+$",
"^[0-9]+\s[0-9]+\s\/\s[0-9]+$",
"^-?[0-9]+\s\/\s[0-9]+$",
"^-?[0-9]+\/[0-9]+$",
]
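# Illustrative template match (example made up for clarity): a written TIME
# token such as "9:30 pm" is matched by the stencil
# "^[0-9]+:[0-9][0-9]\s?[a-zA-Z]+$" above, so the sentence containing it is
# counted under the corresponding TIME pattern.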
# classes that still need to be upsampled, mapped to the number of their patterns still below `min_number`
classes_to_upsample = defaultdict(int)
def include_sentence(sentence_patterns) -> bool:
"""
    Determines whether a sentence, whose patterns are provided as input, should be used for upsampling. This checks the global
    pattern tables to see whether the sentence includes any patterns that are still needed.
Args:
sentence_patterns: dictionary of patterns for a sentence grouped by class
Returns:
        include: whether or not to use the sentence for upsampling
"""
    pattern_tables = {
        "MONEY": MONEY_PATTERNS,
        "MEASURE": MEASURE_PATTERNS,
        "TIME": TIME_PATTERNS,
        "FRACTION": FRACTION_PATTERNS,
    }
    include = any(
        v > 0 and k in table and table[k] < args.min_number
        for cls, table in pattern_tables.items()
        for k, v in sentence_patterns[cls].items()
    )
    if include:
        for cls, table in pattern_tables.items():
            for k, v in sentence_patterns[cls].items():
                if v > 0 and k in table:
                    table[k] += v
                    # If this addition pushed the pattern over the threshold,
                    # one fewer pattern of this class needs upsampling.
                    if table[k] - v < args.min_number and table[k] >= args.min_number:
                        classes_to_upsample[cls] -= 1
                        if classes_to_upsample[cls] <= 0:
                            classes_to_upsample.pop(cls)
    return include
def read_data_file(fp: str, upsample_file: bool = False):
""" Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
Args:
        fp: file path
        upsample_file: whether this input file should be used in full or only for upsampling, i.e. only a subset
            of its sentences is kept
    Returns:
        insts: list of sentences, each parsed as a tuple of (classes, written words, spoken words)
"""
insts, w_words, s_words, classes = [], [], [], []
with open(fp, 'r', encoding='utf-8') as f:
sentence_patterns = {
"FRACTION": defaultdict(int),
"MEASURE": defaultdict(int),
"TIME": defaultdict(int),
"MONEY": defaultdict(int),
}
for line in f:
es = [e.strip() for e in line.strip().split('\t')]
if es[0] == '<eos>':
if not upsample_file:
inst = (classes, w_words, s_words)
insts.append(inst)
else:
ok = include_sentence(sentence_patterns)
if ok:
inst = (classes, w_words, s_words)
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
sentence_patterns = {
"FRACTION": defaultdict(int),
"MEASURE": defaultdict(int),
"TIME": defaultdict(int),
"MONEY": defaultdict(int),
}
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
if not upsample_file:
register_patterns(cls=es[0], input_str=es[1], pretty=args.pretty)
else:
if es[0] in classes_to_upsample:
patterns = lookup_patterns(cls=es[0], input_str=es[1])
update_patterns(sentence_patterns[es[0]], patterns)
if not upsample_file:
inst = (classes, w_words, s_words)
insts.append(inst)
return insts
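# For reference, a sketch of the expected input format (illustrative example; the
# actual tokens come from the preprocessed Google TN data): one token per line as
# "<class>\t<written>\t<spoken>", with an "<eos>" line closing each sentence.
#
#   PLAIN    the     <self>
#   MONEY    $5      five dollars
#   PUNCT    .       sil
#   <eos>    <eos>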
def update_patterns(patterns: dict, new_patterns: dict):
"""
    Updates a given pattern table in place by adding the counts from another table.
Args:
patterns: main table
new_patterns: new table to update the main table with
"""
for k, v in new_patterns.items():
patterns[k] += v
def register_patterns(cls: str, input_str: str, pretty: bool = False):
"""
    Creates all patterns for the input string from the global templates/stencils and adds them to the corresponding global pattern table.
Args:
cls: class type of input_str
input_str: input string
pretty: used to pretty print patterns
"""
if cls == "MONEY":
new_dict = create_pattern(money_templates, input_str, pretty=pretty)
update_patterns(MONEY_PATTERNS, new_dict)
if cls == "MEASURE":
new_dict = create_pattern(measure_templates, input_str, pretty=pretty)
update_patterns(MEASURE_PATTERNS, new_dict)
if cls == "TIME":
new_dict = create_pattern(time_templates, input_str, pretty=pretty)
update_patterns(TIME_PATTERNS, new_dict)
if cls == "FRACTION":
new_dict = create_pattern(fraction_templates, input_str, pretty=pretty)
update_patterns(FRACTION_PATTERNS, new_dict)
def lookup_patterns(cls: str, input_str: str) -> dict:
"""
Look up all patterns that match an input string from global pattern table
Args:
cls: class type of input_str
input_str: input string
"""
if cls == "MONEY":
new_dict = create_pattern(MONEY_PATTERNS.keys(), input_str)
if cls == "MEASURE":
new_dict = create_pattern(MEASURE_PATTERNS.keys(), input_str)
if cls == "TIME":
new_dict = create_pattern(TIME_PATTERNS.keys(), input_str)
if cls == "FRACTION":
new_dict = create_pattern(FRACTION_PATTERNS.keys(), input_str)
return new_dict
def create_pattern(templates: List[str], input_str: str, pretty: bool = False):
"""
    Creates all patterns for the input string based on a list of input templates.
Args:
templates: list of templates/stencils
input_str: string to apply templates on to create patterns
pretty: used to pretty print patterns
"""
res = defaultdict(int)
for template in templates:
if re.search(template, input_str) is None:
continue
        if not pretty:
            # Replace the matched span with the template text itself, so that
            # different concrete values collapse into the same pattern key.
            res[re.sub(template, template, input_str)] += 1
        else:
            # Pretty form: replace the matched span with "@" for compact printing.
            res[re.sub(template, "@", input_str)] += 1
return res
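# Illustrative sketch (not part of the original script): applying the money template
# collapses the concrete digits into the template text itself, so different amounts
# share a single pattern key; the pretty form replaces the match with "@" instead.
#
#   create_pattern([r"([0-9]|\.|,)+"], "$1,234.56")               # -> {"$([0-9]|\.|,)+": 1}
#   create_pattern([r"([0-9]|\.|,)+"], "$1,234.56", pretty=True)  # -> {"$@": 1}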
def print_stats():
"""
print statistics on class patterns to be upsampled
"""
print("MONEY")
for k, v in MONEY_PATTERNS.items():
print(f"\t{k}\t{v}")
print("no. patterns to upsample", classes_to_upsample["MONEY"])
print("MEASURE")
for k, v in MEASURE_PATTERNS.items():
print(f"\t{k}\t{v}")
print("no. patterns to upsample", classes_to_upsample["MEASURE"])
print("TIME")
for k, v in TIME_PATTERNS.items():
print(f"\t{k}\t{v}")
print("no. patterns to upsample", classes_to_upsample["TIME"])
print("FRACTION")
for k, v in FRACTION_PATTERNS.items():
print(f"\t{k}\t{v}")
print("no. patterns to upsample", classes_to_upsample["FRACTION"])
def main():
input_files = sorted(glob.glob(f"{args.input_dir}/output-*"))
print("Taking in full: ", input_files[0])
inst_first_file = read_data_file(input_files[0])
measure_keys = list(MEASURE_PATTERNS.keys())
for k in measure_keys:
if re.search("\s?st$", k) is not None or re.search("\s?Da$", k) is not None:
MEASURE_PATTERNS.pop(k)
money_keys = list(MONEY_PATTERNS.keys())
for k in money_keys:
if re.search("(DM|SHP|BMD|SCR|SHP|ARS|BWP|SBD)$", k) is not None:
MONEY_PATTERNS.pop(k)
classes_to_upsample["FRACTION"] = sum(np.asarray(list(FRACTION_PATTERNS.values())) < args.min_number)
classes_to_upsample["MEASURE"] = sum(np.asarray(list(MEASURE_PATTERNS.values())) < args.min_number)
classes_to_upsample["TIME"] = sum(np.asarray(list(TIME_PATTERNS.values())) < args.min_number)
classes_to_upsample["MONEY"] = sum(np.asarray(list(MONEY_PATTERNS.values())) < args.min_number)
print_stats()
for fp in input_files[1:]:
print("Upsamling: ", fp)
instances = read_data_file(fp, upsample_file=True)
inst_first_file.extend(instances)
print_stats()
    with open(args.output_file, 'w', encoding='utf-8') as output_f:
        for inst in inst_first_file:
            cur_classes, cur_tokens, cur_outputs = inst
            for c, t, o in zip(cur_classes, cur_tokens, cur_outputs):
                output_f.write(f'{c}\t{t}\t{o}\n')
            output_f.write('<eos>\t<eos>\n')
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/duplex_text_normalization/data/en/upsample.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import (
PunctuationCapitalizationLexicalAudioConfig,
)
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_lexical_audio_model import (
PunctuationCapitalizationLexicalAudioModel,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
This script shows how to train a Punctuation and Capitalization Model with lexical and acoustic features.
More details on the task and data format can be found in tutorials/nlp/Punctuation_and_Capitalization.ipynb
*** Setting the configs ***
The model and the PT trainer are defined in a config file which declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - language model, audio encoder, tokenizer, token classifier, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `/examples/nlp/token_classification/conf/punctuation_capitalization_lexical_audio_config.yaml` config file
by default. You may update the config file directly.
The other option is to set another config file via the command line argument `--config-name=CONFIG_FILE_PATH`.
*** Model training ***
To run this script and train the model from scratch, use:
python punctuation_capitalization_lexical_audio_train_evaluate.py \
model.train_ds.ds_item=<PATH/TO/TRAIN/DATA> \
model.train_ds.text_file=<NAME_OF_TRAIN_INPUT_TEXT_FILE> \
model.train_ds.labels_file=<NAME_OF_TRAIN_LABELS_FILE> \
model.train_ds.audio_file=<NAME_OF_TRAIN_AUDIO_FILE> \
model.validation_ds.ds_item=<PATH/TO/DEV/DATA> \
model.validation_ds.text_file=<NAME_OF_DEV_INPUT_TEXT_FILE> \
model.validation_ds.labels_file=<NAME_OF_DEV_LABELS_FILE> \
model.validation_ds.audio_file=<NAME_OF_DEV_AUDIO_FILE>
To use a BERT-like pretrained P&C model's weights to initialize the lexical encoder, use:
python punctuation_capitalization_lexical_audio_train_evaluate.py \
model.train_ds.ds_item=<PATH/TO/TRAIN/DATA> \
model.train_ds.text_file=<NAME_OF_TRAIN_INPUT_TEXT_FILE> \
model.train_ds.labels_file=<NAME_OF_TRAIN_LABELS_FILE> \
model.train_ds.audio_file=<NAME_OF_TRAIN_AUDIO_FILE> \
model.validation_ds.ds_item=<PATH/TO/DEV/DATA> \
model.validation_ds.text_file=<NAME_OF_DEV_INPUT_TEXT_FILE> \
model.validation_ds.labels_file=<NAME_OF_DEV_LABELS_FILE> \
model.validation_ds.audio_file=<NAME_OF_DEV_AUDIO_FILE> \
model.restore_lexical_encoder_from=<PATH/TO/CHECKPOINT.nemo>
If you wish to perform testing after training, set `do_testing` to `true`:
python punctuation_capitalization_lexical_audio_train_evaluate.py \
+do_testing=true \
pretrained_model=<PATH/TO/CHECKPOINT.nemo> \
model.train_ds.ds_item=<PATH/TO/TRAIN/DATA> \
model.train_ds.text_file=<NAME_OF_TRAIN_INPUT_TEXT_FILE> \
model.train_ds.labels_file=<NAME_OF_TRAIN_LABELS_FILE> \
model.train_ds.audio_file=<NAME_OF_TRAIN_AUDIO_FILE> \
model.validation_ds.ds_item=<PATH/TO/DEV/DATA> \
model.validation_ds.text_file=<NAME_OF_DEV_INPUT_TEXT_FILE> \
model.validation_ds.labels_file=<NAME_OF_DEV_LABELS_FILE> \
model.validation_ds.audio_file=<NAME_OF_DEV_AUDIO_FILE> \
model.test_ds.ds_item=<PATH/TO/TEST_DATA> \
model.test_ds.text_file=<NAME_OF_TEST_INPUT_TEXT_FILE> \
model.test_ds.labels_file=<NAME_OF_TEST_LABELS_FILE> \
model.test_ds.audio_file=<NAME_OF_TEST_AUDIO_FILE>
Set `do_training` to `false` and `do_testing` to `true` to perform evaluation without training:
python punctuation_capitalization_lexical_audio_train_evaluate.py \
+do_testing=true \
+do_training=false \
    pretrained_model=<PATH/TO/CHECKPOINT.nemo> \
    model.test_ds.ds_item=<PATH/TO/TEST/DATA> \
model.test_ds.text_file=<NAME_OF_TEST_INPUT_TEXT_FILE> \
model.test_ds.labels_file=<NAME_OF_TEST_LABELS_FILE> \
model.test_ds.audio_file=<NAME_OF_TEST_AUDIO_FILE>
"""
@hydra_runner(config_path="conf", config_name="punctuation_capitalization_lexical_audio_config")
def main(cfg: DictConfig) -> None:
torch.manual_seed(42)
cfg = OmegaConf.merge(OmegaConf.structured(PunctuationCapitalizationLexicalAudioConfig()), cfg)
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if not cfg.do_training and not cfg.do_testing:
raise ValueError("At least one of config parameters `do_training` and `do_testing` has to be `true`.")
if cfg.do_training:
if cfg.model.get('train_ds') is None:
raise ValueError('`model.train_ds` config section is required if `do_training` config item is `True`.')
if cfg.do_testing:
if cfg.model.get('test_ds') is None:
raise ValueError('`model.test_ds` config section is required if `do_testing` config item is `True`.')
if not cfg.pretrained_model:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = PunctuationCapitalizationLexicalAudioModel(cfg.model, trainer=trainer)
else:
if os.path.exists(cfg.pretrained_model):
model = PunctuationCapitalizationLexicalAudioModel.restore_from(cfg.pretrained_model)
elif cfg.pretrained_model in PunctuationCapitalizationLexicalAudioModel.get_available_model_names():
model = PunctuationCapitalizationLexicalAudioModel.from_pretrained(cfg.pretrained_model)
else:
raise ValueError(
f'Provide path to the pre-trained .nemo file or choose from '
f'{PunctuationCapitalizationLexicalAudioModel.list_available_models()}'
)
model.update_config_after_restoring_from_checkpoint(
class_labels=cfg.model.class_labels,
common_dataset_parameters=cfg.model.common_dataset_parameters,
train_ds=cfg.model.get('train_ds') if cfg.do_training else None,
validation_ds=cfg.model.get('validation_ds') if cfg.do_training else None,
test_ds=cfg.model.get('test_ds') if cfg.do_testing else None,
optim=cfg.model.get('optim') if cfg.do_training else None,
)
model.set_trainer(trainer)
if cfg.do_training:
model.setup_training_data()
model.setup_multiple_validation_data(cfg.model.validation_ds)
model.setup_optimization()
else:
model.setup_multiple_test_data(cfg.model.test_ds)
if cfg.do_training:
trainer.fit(model)
if cfg.do_testing:
trainer.test(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/token_classification/punctuation_capitalization_lexical_audio_train_evaluate.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from pathlib import Path
from typing import Dict, List, Union
import torch.cuda
from nemo.collections.nlp.models import PunctuationCapitalizationLexicalAudioModel, PunctuationCapitalizationModel
"""
This script is for restoring punctuation and capitalization.
Usage example:
python punctuate_capitalize.py \
--input_manifest <PATH/TO/INPUT/MANIFEST> \
--output_manifest <PATH/TO/OUTPUT/MANIFEST>
Usage example for lexical audio model:
python punctuate_capitalize.py \
--input_manifest <PATH/TO/INPUT/MANIFEST> \
--output_manifest <PATH/TO/OUTPUT/MANIFEST> \
--use_audio
<PATH/TO/INPUT/MANIFEST> is a path to a NeMo ASR manifest. Usually it is an output of
    NeMo/examples/asr/transcribe_speech.py, but it can be any manifest with a 'text' key. Alternatively, you can use
    the --input_text parameter to pass text for inference.
<PATH/TO/OUTPUT/MANIFEST> is a path to the NeMo ASR manifest into which the script output will be written. Alternatively,
    you can use the --output_text parameter.
For more details on script usage, see the argparse help.
"""
def get_args() -> argparse.Namespace:
default_model_parameter = "pretrained_name"
default_model = "punctuation_en_bert"
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="The script is for restoring punctuation and capitalization in text or text and audio. To use text and audio use '--use_audio'. Long strings are split into "
"segments of length `--max_seq_length`. `--max_seq_length` is the length which includes [CLS] and [SEP] "
"tokens. If `--use_audio` is set, samples with texts longer than `--max_seq_length` will be ignored. Parameter `--step` controls segments overlapping. `--step` is a distance between beginnings of "
"consequent segments. Model outputs for tokens near the borders of tensors are less accurate and can be "
"discarded before final predictions computation. Parameter `--margin` is number of discarded outputs near "
"segments borders. Probabilities of tokens in overlapping parts of segments multiplied before selecting the "
"best prediction. Default values of parameters `--max_seq_length`, `--step`, and `--margin` are optimal for "
"IWSLT 2019 test dataset.",
)
parser.add_argument(
'--use_audio',
required=False,
action="store_true",
help="If set `PunctuationCapitalizationLexicalAudioModel` will be used for inference",
)
input_ = parser.add_mutually_exclusive_group(required=True)
input_.add_argument(
"--input_manifest",
"-m",
type=Path,
help="Path to the file with NeMo manifest which needs punctuation and capitalization. If the first element "
"of manifest contains key 'pred_text', 'pred_text' values are passed for tokenization. Otherwise 'text' "
"values are passed for punctuation and capitalization. Exactly one parameter of `--input_manifest` and "
"`--input_text` should be provided.",
)
input_.add_argument(
"--input_text",
"-t",
type=Path,
help="Path to file with text which needs punctuation and capitalization. Exactly one parameter of "
"`--input_manifest` and `--input_text` should be provided.",
)
parser.add_argument(
'--audio_file',
required=False,
type=Path,
help="Path to file with paths to audio. One path per row. Required if '--input_text' provided. Else 'audio_filepath' from manifest will be used.",
)
output = parser.add_mutually_exclusive_group(required=True)
output.add_argument(
"--output_manifest",
"-M",
type=Path,
help="Path to output NeMo manifest. Text with restored punctuation and capitalization will be saved in "
"'pred_text' elements if 'pred_text' key is present in the input manifest. Otherwise text with restored "
"punctuation and capitalization will be saved in 'text' elements. Exactly one parameter of `--output_manifest` "
"and `--output_text` should be provided.",
)
output.add_argument(
"--output_text",
"-T",
type=Path,
help="Path to file with text with restored punctuation and capitalization. Exactly one parameter of "
"`--output_manifest` and `--output_text` should be provided.",
)
model = parser.add_mutually_exclusive_group(required=False)
model.add_argument(
"--pretrained_name",
"-p",
help=f"The name of NGC pretrained model. No more than one of parameters `--pretrained_name`, `--model_path`"
f"should be provided. If neither of parameters `--pretrained_name` and `--model_path` are provided, then the "
f"script is run with `--{default_model_parameter}={default_model}`.",
choices=[m.pretrained_model_name for m in PunctuationCapitalizationModel.list_available_models()]
+ [m.pretrained_model_name for m in PunctuationCapitalizationLexicalAudioModel.list_available_models()],
)
model.add_argument(
"--model_path",
"-P",
type=Path,
help=f"Path to .nemo checkpoint of punctuation and capitalization model. No more than one of parameters "
f"`--pretrained_name` and `--model_path` should be provided. If neither of parameters `--pretrained_name` and "
f"`--model_path` are provided, then the script is run with `--{default_model_parameter}={default_model}`.",
)
parser.add_argument(
"--max_seq_length",
"-L",
type=int,
default=64,
help="Length of segments into which queries are split. `--max_seq_length` includes [CLS] and [SEP] tokens.",
)
parser.add_argument(
"--step",
"-s",
type=int,
default=8,
help="Relative shift of consequent segments into which long queries are split. Long queries are split into "
"segments which can overlap. Parameter `step` controls such overlapping. Imagine that queries are "
"tokenized into characters, `max_seq_length=5`, and `step=2`. In such a case query 'hello' is tokenized "
"into segments `[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]`.",
)
parser.add_argument(
"--margin",
"-g",
type=int,
default=16,
help="A number of subtokens in the beginning and the end of segments which output probabilities are not used "
"for prediction computation. The first segment does not have left margin and the last segment does not have "
"right margin. For example, if input sequence is tokenized into characters, `max_seq_length=5`, `step=1`, "
"and `margin=1`, then query 'hello' will be tokenized into segments `[['[CLS]', 'h', 'e', 'l', '[SEP]'], "
"['[CLS]', 'e', 'l', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]`. These segments are passed to the "
"model. Before final predictions computation, margins are removed. In the next list, subtokens which logits "
"are not used for final predictions computation are marked with asterisk: `[['[CLS]'*, 'h', 'e', 'l'*, "
"'[SEP]'*], ['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]`.",
)
parser.add_argument(
"--batch_size", "-b", type=int, default=128, help="Number of segments which are processed simultaneously.",
)
parser.add_argument(
"--save_labels_instead_of_text",
"-B",
action="store_true",
help="If this option is set, then punctuation and capitalization labels are saved instead text with restored "
"punctuation and capitalization. Labels are saved in format described here "
"https://docs.nvidia.com/deeplearning/nemo/"
"user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format",
)
parser.add_argument(
"--device",
"-d",
choices=['cpu', 'cuda'],
help="Which device to use. If device is not set and CUDA is available, then GPU will be used. If device is "
"not set and CUDA is not available, then CPU is used.",
)
parser.add_argument(
"--sample_rate",
type=int,
default=16000,
help="Target sample rate for audios if `--use_audio` was passed",
required=False,
)
args = parser.parse_args()
if args.input_manifest is None and args.output_manifest is not None:
parser.error("--output_manifest requires --input_manifest")
if args.use_audio and (args.input_manifest is None and args.audio_file is None):
parser.error("--use_audio and --input_text require --audio_file")
if args.pretrained_name is None and args.model_path is None:
setattr(args, default_model_parameter, default_model)
for name in ["input_manifest", "input_text", "output_manifest", "output_text", "model_path", "audio_file"]:
if getattr(args, name) is not None:
setattr(args, name, getattr(args, name).expanduser())
return args
def load_manifest(manifest: Path) -> List[Dict[str, Union[str, float]]]:
result = []
with manifest.open() as f:
        for line in f:
data = json.loads(line)
result.append(data)
return result
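# Example manifest line (illustrative; only 'text'/'pred_text' and, with
# --use_audio, 'audio_filepath' are consumed by this script):
#   {"audio_filepath": "/data/utt1.wav", "duration": 3.2, "pred_text": "hello world how are you"}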
def main() -> None:
args = get_args()
if args.pretrained_name is None:
model = (
PunctuationCapitalizationModel.restore_from(args.model_path)
if not args.use_audio
else PunctuationCapitalizationLexicalAudioModel.restore_from(args.model_path)
)
else:
model = (
PunctuationCapitalizationModel.from_pretrained(args.pretrained_name)
if not args.use_audio
            else PunctuationCapitalizationLexicalAudioModel.from_pretrained(args.pretrained_name)
)
if args.device is None:
if torch.cuda.is_available():
model = model.cuda()
else:
model = model.cpu()
else:
model = model.to(args.device)
if args.input_manifest is None:
texts = []
audios = []
with args.input_text.open() as f:
for line in f:
texts.append(line.strip())
if args.use_audio:
with args.audio_file.open() as f:
for line in f:
audios.append(line.strip())
else:
manifest = load_manifest(args.input_manifest)
text_key = "pred_text" if "pred_text" in manifest[0] else "text"
texts = []
audios = []
for item in manifest:
texts.append(item[text_key])
if args.use_audio:
audios.append(item["audio_filepath"])
if args.use_audio:
processed_texts = model.add_punctuation_capitalization(
texts,
batch_size=args.batch_size,
max_seq_length=args.max_seq_length,
step=args.step,
margin=args.margin,
return_labels=args.save_labels_instead_of_text,
audio_queries=audios,
target_sr=args.sample_rate,
)
else:
processed_texts = model.add_punctuation_capitalization(
texts,
batch_size=args.batch_size,
max_seq_length=args.max_seq_length,
step=args.step,
margin=args.margin,
return_labels=args.save_labels_instead_of_text,
)
if args.output_manifest is None:
args.output_text.parent.mkdir(exist_ok=True, parents=True)
with args.output_text.open('w') as f:
for t in processed_texts:
f.write(t + '\n')
else:
args.output_manifest.parent.mkdir(exist_ok=True, parents=True)
with args.output_manifest.open('w') as f:
for item, t in zip(manifest, processed_texts):
item[text_key] = t
f.write(json.dumps(item) + '\n')
if __name__ == "__main__":
main()
| NeMo-main | examples/nlp/token_classification/punctuate_capitalize_infer.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytorch_lightning as pl
import torch
from omegaconf import DictConfig, OmegaConf
from nemo.collections.nlp.models import PunctuationCapitalizationModel
from nemo.collections.nlp.models.token_classification.punctuation_capitalization_config import (
PunctuationCapitalizationConfig,
)
from nemo.core.config import hydra_runner
from nemo.utils import logging
from nemo.utils.exp_manager import exp_manager
"""
This script shows how to train a Punctuation and Capitalization Model.
More details on the task and data format can be found in tutorials/nlp/Punctuation_and_Capitalization.ipynb
*** Setting the configs ***
The model and the PT trainer are defined in a config file which declares multiple important sections.
The most important ones are:
model: All arguments that are related to the Model - language model, tokenizer, token classifier, optimizer,
schedulers, and datasets/data loaders.
trainer: Any argument to be passed to PyTorch Lightning including number of epochs, number of GPUs,
precision level, etc.
This script uses the `/examples/nlp/token_classification/conf/punctuation_capitalization_config.yaml` config file
by default. You may update the config file directly.
The other option is to set another config file via the command line argument `--config-name=CONFIG_FILE_PATH`.
Additional default parameters could be found in PunctuationCapitalizationDataConfigBase from
/nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py,
use `+` to modify their values via command line, e.g.: `+model.train_ds.num_workers=2`
For more details about the config files and different ways of model restoration, see tutorials/00_NeMo_Primer.ipynb
*** Model training ***
To run this script and train the model from scratch, use:
python punctuation_capitalization_train_evaluate.py \
model.train_ds.ds_item=<PATH/TO/TRAIN/DATA> \
model.train_ds.text_file=<NAME_OF_TRAIN_INPUT_TEXT_FILE> \
model.train_ds.labels_file=<NAME_OF_TRAIN_LABELS_FILE> \
model.validation_ds.ds_item=<PATH/TO/DEV/DATA> \
model.validation_ds.text_file=<NAME_OF_DEV_INPUT_TEXT_FILE> \
model.validation_ds.labels_file=<NAME_OF_DEV_LABELS_FILE> \
~model.test_ds
To use one of the pretrained versions of the model and finetune it, run:
python punctuation_capitalization_train_evaluate.py \
pretrained_model=punctuation_en_bert \
model.train_ds.ds_item=<PATH/TO/TRAIN/DATA> \
model.train_ds.text_file=<NAME_OF_TRAIN_INPUT_TEXT_FILE> \
model.train_ds.labels_file=<NAME_OF_TRAIN_LABELS_FILE> \
model.validation_ds.ds_item=<PATH/TO/DEV/DATA> \
model.validation_ds.text_file=<NAME_OF_DEV_INPUT_TEXT_FILE> \
model.validation_ds.labels_file=<NAME_OF_DEV_LABELS_FILE> \
~model.test_ds
pretrained_model - pretrained PunctuationCapitalization model from list_available_models() or
path to a .nemo file, for example: punctuation_en_bert or model.nemo
If you wish to perform testing after training, set `do_testing` to `true`:
python punctuation_capitalization_train_evaluate.py \
+do_testing=true \
pretrained_model=punctuation_en_bert \
model.train_ds.ds_item=<PATH/TO/TRAIN/DATA> \
model.train_ds.text_file=<NAME_OF_TRAIN_INPUT_TEXT_FILE> \
model.train_ds.labels_file=<NAME_OF_TRAIN_LABELS_FILE> \
model.validation_ds.ds_item=<PATH/TO/DEV/DATA> \
model.validation_ds.text_file=<NAME_OF_DEV_INPUT_TEXT_FILE> \
model.validation_ds.labels_file=<NAME_OF_DEV_LABELS_FILE> \
model.test_ds.ds_item=<PATH/TO/TEST_DATA> \
model.test_ds.text_file=<NAME_OF_TEST_INPUT_TEXT_FILE> \
model.test_ds.labels_file=<NAME_OF_TEST_LABELS_FILE>
Set `do_training` to `false` and `do_testing` to `true` to perform evaluation without training:
python punctuation_capitalization_train_evaluate.py \
+do_testing=true \
+do_training=false \
pretrained_model=punctuation_en_bert \
model.test_ds.ds_item=<PATH/TO/TEST/DATA> \
model.test_ds.text_file=<NAME_OF_TEST_INPUT_TEXT_FILE> \
model.test_ds.labels_file=<NAME_OF_TEST_LABELS_FILE>
"""
@hydra_runner(config_path="conf", config_name="punctuation_capitalization_config")
def main(cfg: DictConfig) -> None:
torch.manual_seed(42)
cfg = OmegaConf.merge(OmegaConf.structured(PunctuationCapitalizationConfig()), cfg)
trainer = pl.Trainer(**cfg.trainer)
exp_manager(trainer, cfg.get("exp_manager", None))
if not cfg.do_training and not cfg.do_testing:
raise ValueError("At least one of config parameters `do_training` and `do_testing` has to `true`.")
if cfg.do_training:
if cfg.model.get('train_ds') is None:
raise ValueError('`model.train_ds` config section is required if `do_training` config item is `True`.')
if cfg.do_testing:
if cfg.model.get('test_ds') is None:
raise ValueError('`model.test_ds` config section is required if `do_testing` config item is `True`.')
if not cfg.pretrained_model:
logging.info(f'Config: {OmegaConf.to_yaml(cfg)}')
model = PunctuationCapitalizationModel(cfg.model, trainer=trainer)
else:
if os.path.exists(cfg.pretrained_model):
model = PunctuationCapitalizationModel.restore_from(cfg.pretrained_model)
elif cfg.pretrained_model in PunctuationCapitalizationModel.get_available_model_names():
model = PunctuationCapitalizationModel.from_pretrained(cfg.pretrained_model)
else:
raise ValueError(
f'Config parameter `pretrained_model` should contain a path to the pre-trained .nemo file or a model '
f'name from '
f'{[m.pretrained_model_name for m in PunctuationCapitalizationModel.list_available_models()]}. '
f'Provided `pretrained_model="{cfg.pretrained_model}"` is neither a valid path, nor a valid model '
f'name.'
)
model.update_config_after_restoring_from_checkpoint(
class_labels=cfg.model.class_labels,
common_dataset_parameters=cfg.model.common_dataset_parameters,
train_ds=cfg.model.get('train_ds') if cfg.do_training else None,
validation_ds=cfg.model.get('validation_ds') if cfg.do_training else None,
test_ds=cfg.model.get('test_ds') if cfg.do_testing else None,
optim=cfg.model.get('optim') if cfg.do_training else None,
)
model.set_trainer(trainer)
if cfg.do_training:
model.setup_training_data()
model.setup_multiple_validation_data(cfg.model.validation_ds)
model.setup_optimization()
else:
model.setup_multiple_test_data(cfg.model.test_ds)
if cfg.do_training:
trainer.fit(model)
if cfg.do_testing:
trainer.test(model)
if __name__ == '__main__':
main()
| NeMo-main | examples/nlp/token_classification/punctuation_capitalization_train_evaluate.py |