python_code | repo_name | file_path
---|---|---
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class GPTQAInputExample(object):
""" A single set of features of a QA example for GPT-like model """
unique_id: int
input_ids: List[int]
input_attn_mask: List[int]
training_mask_end: int = None
labels: List[int] = None
example_index: int = None
context_span_index: int = None
is_impossible: Optional[bool] = False
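# Illustrative usage sketch (not part of the original module): constructing a feature
# set by hand. All values below are made-up placeholders, not real tokenizer output.
_example = GPTQAInputExample(
    unique_id=0,
    input_ids=[101, 2054, 2003, 102],  # hypothetical token ids
    input_attn_mask=[1, 1, 1, 1],
    training_mask_end=3,
    labels=[101, 2054, 2003, 102],
    example_index=0,
    context_span_index=0,
    is_impossible=False,
)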
| NeMo-main | nemo/collections/nlp/data/question_answering/input_example/qa_gpt_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import List, Optional
@dataclass
class S2SQAInputExample(object):
""" A single set of features of a QA example for T5/BART-like model """
unique_id: int
input_ids: List[int]
input_attn_mask: List[int]
labels: List[int] = None
example_index: int = None
context_span_index: int = None
is_impossible: Optional[bool] = False
| NeMo-main | nemo/collections/nlp/data/question_answering/input_example/qa_s2s_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ijson
import numpy as np
from nemo.collections.nlp.data.data_utils import DataProcessor
from nemo.collections.nlp.data.question_answering.input_example.qa_input_example import QAExample
from nemo.utils import logging
TRAINING_MODE = "train"
EVALUATION_MODE = "eval"
INFERENCE_MODE = "infer"
class QAProcessor(DataProcessor):
"""
Processor for a QA dataset, expected in SQuAD format.
Args:
data_file: data file path
mode: TRAINING_MODE/EVALUATION_MODE/INFERENCE_MODE
for creating training/evaluation/inference dataset
"""
def __init__(self, data_file: str, mode: str):
self.data_file = data_file
self.mode = mode
# Memoizes documents to reduce memory use (as the same document is often used for many questions)
self.doc_id = 0
self.context_text_to_doc_id = {}
self.doc_id_to_context_text = {}
def get_examples(self):
""" Get examples from raw json file """
if self.data_file is None:
raise ValueError(f"{self.mode} data file is None.")
# TODO: temporary fix - remove the `.replace('_cache', '')` call below once cached file naming is handled
with open(self.data_file.replace('_cache', ''), "r", encoding="utf-8") as reader:
input_data = ijson.items(reader, "data.item")
examples = []
for entry in input_data:
len_docs = []
title = entry["title"]
for paragraph in entry["paragraphs"]:
context_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
if not question_text:
continue
start_position_character = None
answer_text = None
answers = []
if "is_impossible" in qa:
is_impossible = qa["is_impossible"] or len(qa["answers"]) < 1
else:
is_impossible = False
if not is_impossible:
if self.mode in [TRAINING_MODE, EVALUATION_MODE]:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position_character = answer["answer_start"]
if self.mode == EVALUATION_MODE:
answers = qa["answers"]
if context_text in self.context_text_to_doc_id:
doc_id = self.context_text_to_doc_id[context_text]
else:
doc_id = self.doc_id
self.context_text_to_doc_id[context_text] = doc_id
self.doc_id_to_context_text[doc_id] = context_text
self.doc_id += 1
len_docs.append(len(context_text))
example = QAExample(
qas_id=qas_id,
question_text=question_text,
context_text=context_text,
context_id=doc_id,
answer_text=answer_text,
start_position_character=start_position_character,
title=title,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
logging.info('mean no. of chars in doc: {}'.format(np.mean(len_docs)))
logging.info('max no. of chars in doc: {}'.format(np.max(len_docs)))
return examples
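# Illustrative usage sketch (not part of the original module). The path below is
# hypothetical; the file is expected to be in SQuAD format, i.e. a JSON object with a
# top-level "data" list of {"title", "paragraphs": [{"context", "qas": [...]}]} entries.
if __name__ == "__main__":
    processor = QAProcessor(data_file="/path/to/squad_train.json", mode=TRAINING_MODE)
    examples = processor.get_examples()
    print(f"loaded {len(examples)} examples, {len(processor.doc_id_to_context_text)} unique contexts")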
| NeMo-main | nemo/collections/nlp/data/question_answering/data_processor/qa_processing.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.question_answering.data_processor.qa_processing import QAProcessor
| NeMo-main | nemo/collections/nlp/data/question_answering/data_processor/__init__.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils import get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['IntentSlotClassificationDataset', 'IntentSlotInferenceDataset']
def get_features(
queries,
max_seq_length,
tokenizer,
pad_label=128,
raw_slots=None,
ignore_extra_tokens=False,
ignore_start_end=False,
):
all_subtokens = []
all_loss_mask = []
all_subtokens_mask = []
all_segment_ids = []
all_input_ids = []
all_input_mask = []
sent_lengths = []
all_slots = []
with_label = False
if raw_slots is not None:
with_label = True
for i, query in enumerate(queries):
words = query.strip().split()
subtokens = [tokenizer.cls_token]
loss_mask = [1 - ignore_start_end]
subtokens_mask = [0]
if with_label:
slots = [pad_label]
for j, word in enumerate(words):
word_tokens = tokenizer.text_to_tokens(word)
# to handle emojis that could be neglected during tokenization
if len(word.strip()) > 0 and len(word_tokens) == 0:
word_tokens = [tokenizer.ids_to_tokens(tokenizer.unk_id)]
subtokens.extend(word_tokens)
loss_mask.append(1)
loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1))
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_tokens) - 1))
if with_label:
slots.extend([raw_slots[i][j]] * len(word_tokens))
subtokens.append(tokenizer.sep_token)
loss_mask.append(1 - ignore_start_end)
subtokens_mask.append(0)
sent_lengths.append(len(subtokens))
all_subtokens.append(subtokens)
all_loss_mask.append(loss_mask)
all_subtokens_mask.append(subtokens_mask)
all_input_mask.append([1] * len(subtokens))
if with_label:
slots.append(pad_label)
all_slots.append(slots)
max_seq_length_data = max(sent_lengths)
max_seq_length = min(max_seq_length, max_seq_length_data) if max_seq_length > 0 else max_seq_length_data
logging.info(f'Setting max length to: {max_seq_length}')
get_stats(sent_lengths)
too_long_count = 0
for i, subtokens in enumerate(all_subtokens):
if len(subtokens) > max_seq_length:
subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :]
all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :]
all_loss_mask[i] = [1 - ignore_start_end] + all_loss_mask[i][-max_seq_length + 1 :]
all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :]
if with_label:
all_slots[i] = [pad_label] + all_slots[i][-max_seq_length + 1 :]
too_long_count += 1
all_input_ids.append([tokenizer.tokens_to_ids(t) for t in subtokens])
if len(subtokens) < max_seq_length:
extra = max_seq_length - len(subtokens)
all_input_ids[i] = all_input_ids[i] + [0] * extra
all_loss_mask[i] = all_loss_mask[i] + [0] * extra
all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra
all_input_mask[i] = all_input_mask[i] + [0] * extra
if with_label:
all_slots[i] = all_slots[i] + [pad_label] * extra
all_segment_ids.append([0] * max_seq_length)
logging.info(f'{too_long_count} queries are longer than {max_seq_length} and were truncated')
# May be useful for debugging
logging.debug("*** Some Examples of Processed Data ***")
for i in range(min(len(all_input_ids), 5)):
logging.debug("i: %s" % (i))
logging.debug("subtokens: %s" % " ".join(list(map(str, all_subtokens[i]))))
logging.debug("loss_mask: %s" % " ".join(list(map(str, all_loss_mask[i]))))
logging.debug("input_mask: %s" % " ".join(list(map(str, all_input_mask[i]))))
logging.debug("subtokens_mask: %s" % " ".join(list(map(str, all_subtokens_mask[i]))))
if with_label:
logging.debug("slots_label: %s" % " ".join(list(map(str, all_slots[i]))))
return (all_input_ids, all_segment_ids, all_input_mask, all_loss_mask, all_subtokens_mask, all_slots)
class IntentSlotClassificationDataset(Dataset):
"""
Creates dataset to use for the task of joint intent
and slot classification with pretrained model.
Converts from raw data to an instance that can be used by
NMDataLayer.
For a dataset to use during inference without labels, see
IntentSlotInferenceDataset.
Args:
input_file: file containing sentences and labels; the first line is a header (sentence [tab] label),
each subsequent line should be [sentence][tab][label]
slot_file: file containing slot labels, each line corresponding to the slot labels for a sentence in input_file. No header.
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
tokenizer: tokenizer such as NemoBertTokenizer
num_samples: number of samples to use from the dataset. If -1, use the whole dataset. Useful for testing.
pad_label: pad value used for slot labels; by default, it is the neutral label.
ignore_extra_tokens: whether to ignore extra tokens in the loss_mask.
ignore_start_end: whether to ignore bos and eos tokens in the loss_mask.
do_lower_case: whether to convert queries to lower case
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'intent_labels': NeuralType(('B'), LabelsType()),
'slot_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
input_file: str,
slot_file: str,
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
pad_label: int = 128,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = False,
do_lower_case: bool = False,
):
if num_samples == 0:
raise ValueError("num_samples has to be positive", num_samples)
with open(slot_file, 'r') as f:
slot_lines = f.readlines()
with open(input_file, 'r') as f:
input_lines = f.readlines()[1:]
assert len(slot_lines) == len(input_lines)
dataset = list(zip(slot_lines, input_lines))
if num_samples > 0:
dataset = dataset[:num_samples]
raw_slots, queries, raw_intents = [], [], []
for slot_line, input_line in dataset:
raw_slots.append([int(slot) for slot in slot_line.strip().split()])
parts = input_line.strip().split()
raw_intents.append(int(parts[-1]))
query = ' '.join(parts[:-1])
if do_lower_case:
query = query.lower()
queries.append(query)
features = get_features(
queries,
max_seq_length,
tokenizer,
pad_label=pad_label,
raw_slots=raw_slots,
ignore_extra_tokens=ignore_extra_tokens,
ignore_start_end=ignore_start_end,
)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_loss_mask = features[3]
self.all_subtokens_mask = features[4]
self.all_slots = features[5]
self.all_intents = raw_intents
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.int64),
np.array(self.all_loss_mask[idx]),
np.array(self.all_subtokens_mask[idx]),
self.all_intents[idx],
np.array(self.all_slots[idx]),
)
class IntentSlotInferenceDataset(Dataset):
"""
Creates dataset to use for the task of joint intent
and slot classification with pretrained model.
This is to be used during inference only.
It uses list of queries as the input.
Args:
queries (list): list of queries to run inference on
max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]
tokenizer (Tokenizer): such as NemoBertTokenizer
pad_label (int): pad value used for slot labels;
by default, it is the neutral label.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
}
def __init__(self, queries, max_seq_length, tokenizer, do_lower_case):
if do_lower_case:
for idx, query in enumerate(queries):
queries[idx] = queries[idx].lower()
features = get_features(queries, max_seq_length, tokenizer)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_loss_mask = features[3]
self.all_subtokens_mask = features[4]
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.int64),
np.array(self.all_loss_mask[idx]),
np.array(self.all_subtokens_mask[idx]),
)
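# Illustrative usage sketch (not part of the original module). `_ToyTokenizer` is a
# hypothetical, character-level stand-in for a real TokenizerSpec (e.g. a NeMo BERT
# tokenizer); it only implements the small interface that get_features() relies on.
if __name__ == "__main__":

    class _ToyTokenizer:
        cls_token, sep_token, unk_id = "[CLS]", "[SEP]", 0

        def text_to_tokens(self, text):
            return list(text)  # character-level split, for demonstration only

        def ids_to_tokens(self, idx):
            return "[UNK]"

        def tokens_to_ids(self, token):
            return {"[CLS]": 101, "[SEP]": 102}.get(token, 1)

    dataset = IntentSlotInferenceDataset(
        queries=["play some jazz", "what is the weather"],
        max_seq_length=32,
        tokenizer=_ToyTokenizer(),
        do_lower_case=True,
    )
    input_ids, segment_ids, input_mask, loss_mask, subtokens_mask = dataset[0]
    print(len(dataset), input_ids.shape)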
| NeMo-main | nemo/collections/nlp/data/intent_slot_classification/intent_slot_classification_dataset.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.intent_slot_classification import IntentSlotClassificationDataset
from nemo.collections.nlp.data.intent_slot_classification.intent_slot_classification_dataset import get_features
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
__all__ = ['MultiLabelIntentSlotClassificationDataset']
class MultiLabelIntentSlotClassificationDataset(IntentSlotClassificationDataset):
"""
Creates dataset to use for the task of multi-label joint intent
and slot classification with pretrained model.
Converts from raw data to an instance that can be used by
NMDataLayer.
Args:
input_file: file containing sentences + labels. The first line is header (sentence [tab] label)
each line should be [sentence][tab][label] where label can be multiple labels separated by a comma
slot_file: file containing slot labels, each line corresponding to slot labels for a sentence in input_file. No header.
num_intents: total number of intents in dict.intents file
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
tokenizer: tokenizer such as NemoBertTokenizer
num_samples: number of samples to use from the dataset. If -1, use the whole dataset. Useful for testing.
pad_label: pad value used for slot labels; by default, it is the neutral label.
ignore_extra_tokens: whether to ignore extra tokens in the loss_mask.
ignore_start_end: whether to ignore bos and eos tokens in the loss_mask.
do_lower_case: whether to convert queries to lower case
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'intent_labels': [NeuralType(('B'), LabelsType())],
'slot_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
input_file: str,
slot_file: str,
num_intents: int,
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
pad_label: int = 128,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = False,
do_lower_case: bool = False,
):
if num_samples == 0:
raise ValueError("num_samples has to be positive", num_samples)
with open(slot_file, 'r') as f:
slot_lines = f.readlines()
with open(input_file, 'r') as f:
input_lines = f.readlines()[1:]
assert len(slot_lines) == len(input_lines)
dataset = list(zip(slot_lines, input_lines))
if num_samples > 0:
dataset = dataset[:num_samples]
raw_slots, queries, raw_intents = [], [], []
for slot_line, input_line in dataset:
raw_slots.append([int(slot) for slot in slot_line.strip().split()])
parts = input_line.strip().split("\t")[1:][0]
parts = list(map(int, parts.split(",")))
parts = [1 if label in parts else 0 for label in range(num_intents)]
raw_intents.append(tuple(parts))
tokens = input_line.strip().split("\t")[0].split()
query = ' '.join(tokens)
if do_lower_case:
query = query.lower()
queries.append(query)
features = get_features(
queries,
max_seq_length,
tokenizer,
pad_label=pad_label,
raw_slots=raw_slots,
ignore_extra_tokens=ignore_extra_tokens,
ignore_start_end=ignore_start_end,
)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_loss_mask = features[3]
self.all_subtokens_mask = features[4]
self.all_slots = features[5]
self.all_intents = raw_intents
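# Illustrative only: how a multi-label intent field such as "1,3" from the input file
# is turned into a fixed-size 0/1 tuple (mirrors the parsing in __init__ above;
# the number of intents here is a made-up value).
_num_intents = 5
_parts = list(map(int, "1,3".split(",")))
_one_hot = tuple(1 if label in _parts else 0 for label in range(_num_intents))
assert _one_hot == (0, 1, 0, 1, 0)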
| NeMo-main | nemo/collections/nlp/data/intent_slot_classification/multi_label_intent_slot_classification_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.intent_slot_classification.intent_slot_classification_dataset import (
IntentSlotClassificationDataset,
IntentSlotInferenceDataset,
)
from nemo.collections.nlp.data.intent_slot_classification.intent_slot_classification_descriptor import (
IntentSlotDataDesc,
)
from nemo.collections.nlp.data.intent_slot_classification.multi_label_intent_slot_classification_dataset import (
MultiLabelIntentSlotClassificationDataset,
)
from nemo.collections.nlp.data.intent_slot_classification.multi_label_intent_slot_classification_descriptor import (
MultiLabelIntentSlotDataDesc,
)
| NeMo-main | nemo/collections/nlp/data/intent_slot_classification/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import List
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
fill_class_weights,
get_freq_weights,
get_freq_weights_bce_with_logits_loss,
get_label_stats,
get_labels_to_labels_id_mapping,
get_multi_label_stats,
if_exist,
)
from nemo.utils import logging
class MultiLabelIntentSlotDataDesc:
""" Convert the raw data to the standard format supported by
MultiLabelIntentSlotDataDesc.
By default, the None label for slots is 'O'.
MultiLabelIntentSlotDataDesc requires two files:
input_file: file containing sentences + labels.
the first line is header (sentence [tab] label)
each line should be [sentence][tab][label] where label is a string of comma separated values.
Example: 1 or 1,2 are both valid labels
slot_file: file containing slot labels, each line corresponding to
slot labels for a sentence in input_file. No header.
To keep the mapping from label index to label consistent during
training and inference, we require the following files:
dict.intents.csv: each line is an intent. The first line
corresponds to the 0 intent label, the second line
corresponds to the 1 intent label, and so on.
dict.slots.csv: each line is a slot. The first line
corresponds to the 0 slot label, the second line
corresponds to the 1 slot label, and so on.
Args:
data_dir: the directory of the dataset
modes: ['train', 'test', 'dev'],
none_slot_label: the label for slots that aren't identified; defaults to 'O'
pad_label: the int used for padding. If set to -1, it'll be set to whatever the None label is.
"""
def __init__(
self,
data_dir: str,
modes: List[str] = ["train", "test", "dev"],
none_slot_label: str = "O",
pad_label: int = -1,
):
if not if_exist(data_dir, ["dict.intents.csv", "dict.slots.csv"]):
raise FileNotFoundError(
"Make sure that your data follows the standard format "
"supported by MultiLabelIntentSlotDataset. Your data must "
"contain dict.intents.csv and dict.slots.csv."
)
self.data_dir = data_dir
self.intent_dict_file = self.data_dir + "/dict.intents.csv"
self.slot_dict_file = self.data_dir + "/dict.slots.csv"
self.intents_label_ids = get_labels_to_labels_id_mapping(self.intent_dict_file)
self.num_intents = len(self.intents_label_ids)
self.slots_label_ids = get_labels_to_labels_id_mapping(self.slot_dict_file)
self.num_slots = len(self.slots_label_ids)
infold = self.data_dir
for mode in modes:
if not if_exist(self.data_dir, [f"{mode}.tsv"]):
logging.info(f" Stats calculation for {mode} mode" f" is skipped as {mode}.tsv was not found.")
continue
logging.info(f" Stats calculating for {mode} mode...")
slot_file = f"{self.data_dir}/{mode}_slots.tsv"
with open(slot_file, "r") as f:
slot_lines = f.readlines()
input_file = f"{self.data_dir}/{mode}.tsv"
with open(input_file, "r") as f:
input_lines = f.readlines()[1:] # Skipping headers at index 0
if len(slot_lines) != len(input_lines):
raise ValueError(
"Make sure that the number of slot lines match the "
"number of intent lines. There should be a 1-1 "
"correspondence between every slot and intent lines."
)
dataset = list(zip(slot_lines, input_lines))
raw_slots, raw_intents = [], []
for slot_line, input_line in dataset:
slot_list = [int(slot) for slot in slot_line.strip().split()]
raw_slots.append(slot_list)
parts = input_line.strip().split("\t")[1:][0]
parts = list(map(int, parts.split(",")))
parts = [1 if label in parts else 0 for label in range(self.num_intents)]
raw_intents.append(tuple(parts))
logging.info(f"Three most popular intents in {mode} mode:")
total_intents, intent_label_freq, max_id = get_multi_label_stats(
raw_intents, infold + f"/{mode}_intent_stats.tsv"
)
merged_slots = itertools.chain.from_iterable(raw_slots)
logging.info(f"Three most popular slots in {mode} mode:")
slots_total, slots_label_freq, max_id = get_label_stats(merged_slots, infold + f"/{mode}_slot_stats.tsv")
logging.info(f"Total Number of Intent Labels: {total_intents}")
logging.info(f"Intent Label Frequencies: {intent_label_freq}")
logging.info(f"Total Number of Slots: {slots_total}")
logging.info(f"Slots Label Frequencies: {slots_label_freq}")
if mode == "train":
intent_weights_dict = get_freq_weights_bce_with_logits_loss(intent_label_freq)
logging.info(f"Intent Weights: {intent_weights_dict}")
slot_weights_dict = get_freq_weights(slots_label_freq)
logging.info(f"Slot Weights: {slot_weights_dict}")
self.intent_weights = fill_class_weights(intent_weights_dict, self.num_intents - 1)
self.slot_weights = fill_class_weights(slot_weights_dict, self.num_slots - 1)
if pad_label != -1:
self.pad_label = pad_label
else:
if none_slot_label not in self.slots_label_ids:
raise ValueError(f"none_slot_label {none_slot_label} not " f"found in {self.slot_dict_file}.")
self.pad_label = self.slots_label_ids[none_slot_label]
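# Illustrative only: the on-disk layout this descriptor expects inside `data_dir`
# (file names are taken from the checks above; "train"/"dev"/"test" are the default modes).
_EXPECTED_LAYOUT = [
    "dict.intents.csv",  # one intent label per line, line index == intent id
    "dict.slots.csv",    # one slot label per line, line index == slot id
    "train.tsv",         # header line, then "<sentence>\t<comma-separated intent ids>"
    "train_slots.tsv",   # space-separated slot ids per sentence, no header
    # ... plus the same pair of files for "dev" and "test" when present
]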
| NeMo-main | nemo/collections/nlp/data/intent_slot_classification/multi_label_intent_slot_classification_descriptor.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
from typing import List
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
fill_class_weights,
get_freq_weights,
get_label_stats,
if_exist,
)
from nemo.utils import logging
class IntentSlotDataDesc:
""" Convert the raw data to the standard format supported by
IntentSlotDataDesc.
By default, the None label for slots is 'O'.
IntentSlotDataDesc requires two files:
input_file: file containing sentences and labels.
The first line is a header (sentence [tab] label);
each subsequent line should be [sentence][tab][label]
slot_file: file containing slot labels, each line corresponding to
the slot labels for a sentence in input_file. No header.
To keep the mapping from label index to label consistent during
training and inference, we require the following files:
dict.intents.csv: each line is an intent. The first line
corresponds to the 0 intent label, the second line
corresponds to the 1 intent label, and so on.
dict.slots.csv: each line is a slot. The first line
corresponds to the 0 slot label, the second line
corresponds to the 1 slot label, and so on.
Args:
data_dir: the directory of the dataset
modes: ['train', 'test', 'dev'],
none_slot_label: the label for slots that aren't identified; defaults to 'O'
pad_label: the int used for padding. If set to -1, it'll be set to whatever the None label is.
"""
def __init__(
self,
data_dir: str,
modes: List[str] = ['train', 'test', 'dev'],
none_slot_label: str = 'O',
pad_label: int = -1,
):
if not if_exist(data_dir, ['dict.intents.csv', 'dict.slots.csv']):
raise FileNotFoundError(
"Make sure that your data follows the standard format "
"supported by JointIntentSlotDataset. Your data must "
"contain dict.intents.csv and dict.slots.csv."
)
self.data_dir = data_dir
self.intent_dict_file = self.data_dir + '/dict.intents.csv'
self.slot_dict_file = self.data_dir + '/dict.slots.csv'
self.intents_label_ids = IntentSlotDataDesc.label2idx(self.intent_dict_file)
self.num_intents = len(self.intents_label_ids)
self.slots_label_ids = IntentSlotDataDesc.label2idx(self.slot_dict_file)
self.num_slots = len(self.slots_label_ids)
infold = self.data_dir
for mode in modes:
if not if_exist(self.data_dir, [f'{mode}.tsv']):
logging.info(f' Stats calculation for {mode} mode' f' is skipped as {mode}.tsv was not found.')
continue
logging.info(f' Stats calculating for {mode} mode...')
slot_file = f'{self.data_dir}/{mode}_slots.tsv'
with open(slot_file, 'r') as f:
slot_lines = f.readlines()
input_file = f'{self.data_dir}/{mode}.tsv'
with open(input_file, 'r') as f:
input_lines = f.readlines()[1:] # Skipping headers at index 0
if len(slot_lines) != len(input_lines):
raise ValueError(
"Make sure that the number of slot lines match the "
"number of intent lines. There should be a 1-1 "
"correspondence between every slot and intent lines."
)
dataset = list(zip(slot_lines, input_lines))
raw_slots, raw_intents = [], []
for slot_line, input_line in dataset:
slot_list = [int(slot) for slot in slot_line.strip().split()]
raw_slots.append(slot_list)
parts = input_line.strip().split()
raw_intents.append(int(parts[-1]))
logging.info(f'Three most popular intents in {mode} mode:')
total_intents, intent_label_freq, max_id = get_label_stats(
raw_intents, infold + f'/{mode}_intent_stats.tsv'
)
merged_slots = itertools.chain.from_iterable(raw_slots)
logging.info(f'Three most popular slots in {mode} mode:')
slots_total, slots_label_freq, max_id = get_label_stats(merged_slots, infold + f'/{mode}_slot_stats.tsv')
logging.info(f'Total Number of Intents: {total_intents}')
logging.info(f'Intent Label Frequencies: {intent_label_freq}')
logging.info(f'Total Number of Slots: {slots_total}')
logging.info(f'Slots Label Frequencies: {slots_label_freq}')
if mode == 'train':
intent_weights_dict = get_freq_weights(intent_label_freq)
logging.info(f'Intent Weights: {intent_weights_dict}')
slot_weights_dict = get_freq_weights(slots_label_freq)
logging.info(f'Slot Weights: {slot_weights_dict}')
self.intent_weights = fill_class_weights(intent_weights_dict, self.num_intents - 1)
self.slot_weights = fill_class_weights(slot_weights_dict, self.num_slots - 1)
if pad_label != -1:
self.pad_label = pad_label
else:
if none_slot_label not in self.slots_label_ids:
raise ValueError(f'none_slot_label {none_slot_label} not ' f'found in {self.slot_dict_file}.')
self.pad_label = self.slots_label_ids[none_slot_label]
@staticmethod
def label2idx(file):
lines = open(file, 'r').readlines()
lines = [line.strip() for line in lines if line.strip()]
labels = {lines[i]: i for i in range(len(lines))}
return labels
@staticmethod
def intent_slot_dicts(data_dir):
'''
Return Intent and slot dictionaries
'''
intent_dict_file = data_dir + '/dict.intents.csv'
slot_dict_file = data_dir + '/dict.slots.csv'
intents_labels = open(intent_dict_file, 'r').readlines()
intents_labels = [line.strip() for line in intents_labels if line.strip()]
slots_labels = open(slot_dict_file, 'r').readlines()
slots_labels = [line.strip() for line in slots_labels if line.strip()]
return intents_labels, slots_labels
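# Illustrative usage sketch (not part of the original module): label2idx maps each
# non-empty line of a dictionary file to its 0-based index. The file contents below
# are hypothetical.
if __name__ == "__main__":
    import os
    import tempfile

    tmp = tempfile.NamedTemporaryFile("w", suffix=".csv", delete=False)
    tmp.write("weather\nmusic\nalarm\n")
    tmp.close()
    assert IntentSlotDataDesc.label2idx(tmp.name) == {"weather": 0, "music": 1, "alarm": 2}
    os.unlink(tmp.name)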
| NeMo-main | nemo/collections/nlp/data/intent_slot_classification/intent_slot_classification_descriptor.py |
# Copyright 2019 The Google Research Authors.
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections import OrderedDict
from os import path
from typing import Dict, List, Optional, Tuple, Union
from transformers import PreTrainedTokenizerBase
"""Build BERT Examples from asr hypothesis, customization candidates, target labels, span info.
"""
class BertExample(object):
"""Class for training and inference examples for BERT.
Attributes:
features: Feature dictionary.
"""
def __init__(
self,
input_ids: List[int],
input_mask: List[int],
segment_ids: List[int],
input_ids_for_subwords: List[int],
input_mask_for_subwords: List[int],
segment_ids_for_subwords: List[int],
character_pos_to_subword_pos: List[int],
fragment_indices: List[Tuple[int, int, int]],
labels_mask: List[int],
labels: List[int],
spans: List[Tuple[int, int, int]],
default_label: int,
) -> None:
"""Inputs to the example wrapper
Args:
input_ids: indices of single characters (treated as subwords)
input_mask: list of bools with 0s in place of input_ids to be masked
segment_ids: list of ints from 0 to 10 to denote the text segment type (
0 - for tokens of ASR hypothesis,
1 - for tokens of the first candidate
...
10 - for tokens of the tenth candidate
)
input_ids_for_subwords: indices of real subwords (as tokenized by bert tokenizer)
input_mask_for_subwords: list of bools with 0s in place of input_ids_for_subwords to be masked
segment_ids_for_subwords: same as segment_ids but for input_ids_for_subwords
character_pos_to_subword_pos: list of size=len(input_ids), value=(position of corresponding subword in input_ids_for_subwords)
fragment_indices: list of tuples (start_position, end_position, candidate_id), end is exclusive, candidate_id can be -1 if not set
labels_mask: bool tensor with 0s in place of label tokens to be masked
labels: indices of semiotic classes which should be predicted from each of the
corresponding input tokens
spans: list of tuples (class_id, start_position, end_position), end is exclusive, class is always 1(CUSTOM)
default_label: The default label
"""
input_len = len(input_ids)
if not (
input_len == len(input_mask)
and input_len == len(segment_ids)
and input_len == len(labels_mask)
and input_len == len(labels)
and input_len == len(character_pos_to_subword_pos)
):
raise ValueError("All feature lists should have the same length ({})".format(input_len))
input_len_for_subwords = len(input_ids_for_subwords)
if not (
input_len_for_subwords == len(input_mask_for_subwords)
and input_len_for_subwords == len(segment_ids_for_subwords)
):
raise ValueError(
"All feature lists for subwords should have the same length ({})".format(input_len_for_subwords)
)
self.features = OrderedDict(
[
("input_ids", input_ids),
("input_mask", input_mask),
("segment_ids", segment_ids),
("input_ids_for_subwords", input_ids_for_subwords),
("input_mask_for_subwords", input_mask_for_subwords),
("segment_ids_for_subwords", segment_ids_for_subwords),
("character_pos_to_subword_pos", character_pos_to_subword_pos),
("fragment_indices", fragment_indices),
("labels_mask", labels_mask),
("labels", labels),
("spans", spans),
]
)
self._default_label = default_label
class BertExampleBuilder(object):
"""Builder class for BertExample objects."""
def __init__(
self,
label_map: Dict[str, int],
semiotic_classes: Dict[str, int],
tokenizer: PreTrainedTokenizerBase,
max_seq_length: int,
) -> None:
"""Initializes an instance of BertExampleBuilder.
Args:
label_map: Mapping from tags to tag IDs.
semiotic_classes: Mapping from semiotic classes to their ids.
tokenizer: Tokenizer object.
max_seq_length: Maximum sequence length.
"""
self._label_map = label_map
self._semiotic_classes = semiotic_classes
self._tokenizer = tokenizer
self._max_seq_length = max_seq_length
# One span usually covers one or more words and only exists for custom phrases, so there are far fewer spans than characters.
self._max_spans_length = max(4, int(max_seq_length / 20))
self._pad_id = self._tokenizer.pad_token_id
self._default_label = 0
def build_bert_example(
self, hyp: str, ref: str, target: Optional[str] = None, span_info: Optional[str] = None, infer: bool = False
) -> Optional[BertExample]:
"""Constructs a BERT Example.
Args:
hyp: Hypothesis text.
ref: Candidate customization variants divided by ';'
target:
if infer==False, string of labels (each label is 1-based index of correct candidate) or 0.
if infer==True, it can be None or string of labels (each label is 1-based index of some candidate). In inference this can be used to get corresponding fragments to fragment_indices.
span_info:
string of format "CUSTOM 6 20;CUSTOM 40 51", number of parts corresponds to number of targets. Can be empty if target is 0.
If infer==False, numbers are correct start and end(exclusive) positions of the corresponding target candidate in the text.
If infer==True, numbers are EXPECTED positions in the text. In inference this can be used to get corresponding fragments to fragment_indices.
infer: inference mode
Returns:
BertExample, or None if the conversion from text to tags was infeasible
Example (infer=False):
hyp: "a s t r o n o m e r s _ d i d i e _ s o m o n _ a n d _ t r i s t i a n _ g l l o"
ref: "d i d i e r _ s a u m o n;a s t r o n o m i e;t r i s t a n _ g u i l l o t;t r i s t e s s e;m o n a d e;c h r i s t i a n;a s t r o n o m e r;s o l o m o n;d i d i d i d i d i;m e r c y"
target: "1 3"
span_info: "CUSTOM 12 23;CUSTOM 28 41"
"""
if not ref.count(";") == 9:
raise ValueError("Expect 10 candidates: " + ref)
span_info_parts = []
targets = []
if target is not None and len(target) > 0 and target != "0":
span_info_parts = span_info.split(";")
targets = list(map(int, target.split(" ")))
if len(span_info_parts) != len(targets):
raise ValueError(
"len(span_info_parts)="
+ str(len(span_info_parts))
+ " is different from len(target_parts)="
+ str(len(targets))
)
tags = [0 for _ in hyp.split()]
if not infer:
for p, t in zip(span_info_parts, targets):
c, start, end = p.split(" ")
start = int(start)
end = int(end)
tags[start:end] = [t for i in range(end - start)]
# get input features for characters
(input_ids, input_mask, segment_ids, labels_mask, labels, _, _,) = self._get_input_features(
hyp=hyp, ref=ref, tags=tags
)
# get input features for words
hyp_with_words = hyp.replace(" ", "").replace("_", " ")
ref_with_words = ref.replace(" ", "").replace("_", " ")
(
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
_,
_,
_,
_,
) = self._get_input_features(hyp=hyp_with_words, ref=ref_with_words, tags=None)
# used in forward to concatenate subword embeddings to character embeddings
character_pos_to_subword_pos = self._map_characters_to_subwords(input_ids, input_ids_for_subwords)
fragment_indices = []
if infer:
# used in inference to take argmax over whole fragments instead of separate characters to get more consistent predictions
fragment_indices = self._get_fragment_indices(hyp, targets, span_info_parts)
spans = []
if not infer:
# during training spans are used in validation step to calculate accuracy on whole custom phrases instead of separate characters
spans = self._get_spans(span_info_parts)
if len(input_ids) > self._max_seq_length or len(spans) > self._max_spans_length:
print(
"Max len exceeded: len(input_ids)=",
len(input_ids),
"; _max_seq_length=",
self._max_seq_length,
"; len(spans)=",
len(spans),
"; _max_spans_length=",
self._max_spans_length,
)
return None
example = BertExample(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
input_ids_for_subwords=input_ids_for_subwords,
input_mask_for_subwords=input_mask_for_subwords,
segment_ids_for_subwords=segment_ids_for_subwords,
character_pos_to_subword_pos=character_pos_to_subword_pos,
fragment_indices=fragment_indices,
labels_mask=labels_mask,
labels=labels,
spans=spans,
default_label=self._default_label,
)
return example
def _get_spans(self, span_info_parts: List[str]) -> List[Tuple[int, int, int]]:
""" Converts span_info string into a list of (class_id, start, end) where start, end are coordinates of starting and ending(exclusive) tokens in input_ids of BertExample
Example:
span_info_parts: ["CUSTOM 37 41", "CUSTOM 47 52", "CUSTOM 42 46", "CUSTOM 0 7"]
result: [(1, 38, 42), (1, 48, 53), (1, 43, 47), (1, 1, 8)]
"""
result_spans = []
for p in span_info_parts:
if p == "":
break
c, start, end = p.split(" ")
if c not in self._semiotic_classes:
raise KeyError("class=" + c + " not found in self._semiotic_classes")
cid = self._semiotic_classes[c]
# +1 because this should be indexing on input_ids which has [CLS] token at beginning
start = int(start) + 1
end = int(end) + 1
result_spans.append((cid, start, end))
return result_spans
def _get_fragment_indices(
self, hyp: str, targets: List[int], span_info_parts: List[str]
) -> List[Tuple[int, int, int]]:
""" Build fragment indices for real candidates.
This is used only at inference.
After external candidate retrieval we know approximately, where the candidate is located in the text (from the positions of matched n-grams).
In this function we
1) adjust start/end positions to match word borders (possibly in multiple ways).
2) generate content for fragment_indices tensor (it will be used during inference to average all predictions inside each fragment).
Args:
hyp: ASR-hypothesis where space separates single characters (real space is replaced to underscore).
targets: list of candidate ids (only for real candidates, not dummy)
span_info_parts: list of strings of format like "CUSTOM 12 25", corresponding to each of targets, with start/end coordinates in text.
Returns:
List of tuples (start, end, target) where start and end are positions in ASR-hypothesis, target is candidate_id.
Note that returned fragments can be unsorted and can overlap, it's ok.
Example:
hyp: "a s t r o n o m e r s _ d i d i e _ s o m o n _ a n d _ t r i s t i a n _ g l l o"
targets: [1 2 3 4 6 7 9]
span_info_parts: ["CUSTOM 12 25", "CUSTOM 0 10", "CUSTOM 27 42", ...], where numbers are EXPECTED start/end positions of corresponding target candidates in the text. These positions will be adjusted in this functuion.
fragment_indices: [(1, 12, 2), (13, 24, 1), (13, 28, 1), ..., (29, 42, 3)]
"""
fragment_indices = []
letters = hyp.split()
for target, p in zip(targets, span_info_parts):
_, start, end = p.split(" ")
start = int(start)
end = min(int(end), len(hyp)) # guarantee that end is not outside length
# Adjusting strategy 1: expand both sides to the nearest space.
# Adjust start by finding the nearest left space or beginning of text. If start is already some word beginning, it won't change.
k = start
while k > 0 and letters[k] != '_':
k -= 1
adjusted_start = k if k == 0 else k + 1
# Adjust end by finding the nearest right space. If end is already space or sentence end, it won't change.
k = end
while k < len(letters) and letters[k] != '_':
k += 1
adjusted_end = k
# +1 because this should be indexing on input_ids which has [CLS] token at beginning
fragment_indices.append((adjusted_start + 1, adjusted_end + 1, target))
# Adjusting strategy 2: try to shrink to the closest space (from left or right or both sides).
# For example, here the candidate "shippers" has a matching n-gram covering part of previous word
# a b o u t _ o u r _ s h i p e r s _ b u t _ y o u _ k n o w
# 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 0 0 0 0 0 0
expanded_fragment = "".join(letters[adjusted_start:adjusted_end])
left_space_position = expanded_fragment.find("_")
right_space_position = expanded_fragment.rfind("_")
is_left_shrink = False
is_right_shrink = False
if left_space_position > -1 and left_space_position < len(expanded_fragment) / 2:
# +1 because of CLS token, another +1 to put start position after found space
fragment_indices.append((adjusted_start + 1 + left_space_position + 1, adjusted_end + 1, target))
is_left_shrink = True
if right_space_position > -1 and right_space_position > len(expanded_fragment) / 2:
fragment_indices.append((adjusted_start + 1, adjusted_start + 1 + right_space_position, target))
is_right_shrink = True
if is_left_shrink and is_right_shrink:
fragment_indices.append(
(adjusted_start + 1 + left_space_position + 1, adjusted_start + 1 + right_space_position, target)
)
return fragment_indices
def _map_characters_to_subwords(self, input_ids: List[int], input_ids_for_subwords: List[int]) -> List[int]:
""" Maps each single character to the position of its corresponding subword.
Args:
input_ids: List of character token ids.
input_ids_for_subwords: List of subword token ids.
Returns:
List of subword positions in input_ids_for_subwords. Its length is equal to len(input_ids)
Example:
input_ids: [101, 1037, 1055, 1056, 1054, 1051, 1050, ..., 1051, 102, 1040, ..., 1050, 102, 1037, ..., 1041, 102, ..., 102]
input_ids_for_subwords: [101, 26357, 2106, 2666, 2061, 8202, 1998, 13012, 16643, 2319, 1043, 7174, 102, 2106, 3771, 7842, 2819, 2239, 102, ..., 102]
result: [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, ... , 45, 46, 46, 46, 46, 46, 47]
"""
character_pos_to_subword_pos = [0 for _ in input_ids]
## '[CLS]', 'a', 's', 't', 'r', 'o', 'n', 'o', 'm', 'e', 'r', 's', '_', 'd', 'i', ..., 'l', 'o', '[SEP]', 'd', 'i', 'd', 'i', 'e', 'r', '_', 's', 'a', 'u', 'm', 'o', 'n', ..., '[SEP]'
tokens = self._tokenizer.convert_ids_to_tokens(input_ids)
## '[CLS]', 'astronomers', 'did', '##ie', 'so', '##mon', 'and', 'tri', '##sti', '##an', 'g', '##llo', '[SEP]', 'did', '##ier', 'sa', '##um', '##on', '[SEP]', 'astro', '##no', '##mie', '[SEP]', 'tristan', 'gui', '##llo', '##t', '[SEP]', ..., '[SEP]', 'mercy', '[SEP]']
tokens_for_subwords = self._tokenizer.convert_ids_to_tokens(input_ids_for_subwords)
j = 0 # index for tokens_for_subwords
j_offset = 0 # current letter index within subword
for i in range(len(tokens)):
character = tokens[i]
subword = tokens_for_subwords[j]
if character == "[CLS]" and subword == "[CLS]":
character_pos_to_subword_pos[i] = j
j += 1
continue
if character == "[SEP]" and subword == "[SEP]":
character_pos_to_subword_pos[i] = j
j += 1
continue
if character == "[CLS]" or character == "[SEP]" or subword == "[CLS]" or subword == "[SEP]":
raise IndexError(
"character["
+ str(i)
+ "]="
+ character
+ "; subword["
+ str(j)
+ ";="
+ subword
+ "subwords="
+ str(tokens_for_subwords)
)
# At this point we expect that
# subword either 1) is a normal first token of a word or 2) starts with "##" (not first word token)
# character either 1) is a normal character or 2) is a space character "_"
if character == "_":
character_pos_to_subword_pos[i] = j - 1 # space is assigned to previous subtoken
continue
if j_offset < len(subword):
if character == subword[j_offset]:
character_pos_to_subword_pos[i] = j
j_offset += 1
else:
raise IndexError(
"character mismatch:"
+ "i="
+ str(i)
+ "j="
+ str(j)
+ "j_offset="
+ str(j_offset)
+ "; len(tokens)="
+ str(len(tokens))
+ "; len(subwords)="
+ str(len(tokens_for_subwords))
)
# if subword is finished, increase j
if j_offset >= len(subword):
j += 1
j_offset = 0
if j >= len(tokens_for_subwords):
break
if tokens_for_subwords[j].startswith("##"):
j_offset = 2
# check that all subword tokens are processed
if j < len(tokens_for_subwords):
raise IndexError(
"j="
+ str(j)
+ "; len(tokens)="
+ str(len(tokens))
+ "; len(subwords)="
+ str(len(tokens_for_subwords))
)
return character_pos_to_subword_pos
def _get_input_features(
self, hyp: str, ref: str, tags: List[int]
) -> Tuple[List[int], List[int], List[int], List[int], List[int], List[str], List[int]]:
"""Converts given ASR-hypothesis(hyp) and candidate string(ref) to features(token ids, mask, segment ids, etc).
Args:
hyp: Hypothesis text.
ref: Candidate customization variants divided by ';'
tags: List of labels corresponding to each token of ASR-hypothesis or None when building an example during inference.
Returns:
Features (input_ids, input_mask, segment_ids, labels_mask, labels, hyp_tokens, token_start_indices)
Note that this method is called both for character-based example and for word-based example (to split to subwords).
Character-based example:
hyp: "a s t r o n o m e r s _ d i d i e _ s o m o n _ a n d _ t r i s t i a n _ g l l o"
ref: "d i d i e r _ s a u m o n;a s t r o n o m i e;t r i s t a n _ g u i l l o t;t r i s t e s s e;m o n a d e;c h r i s t i a n;a s t r o n o m e r;s o l o m o n;d i d i d i d i d i;m e r c y"
tags: "0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0 0 0 0 3 3 3 3 3 3 3 3 3 3 3 3 3"
resulting token sequence:
'[CLS]', 'a', 's', 't', 'r', 'o', 'n', 'o', 'm', 'e', 'r', 's', '_', 'd', 'i', ..., 'l', 'o', '[SEP]', 'd', 'i', 'd', 'i', 'e', 'r', '_', 's', 'a', 'u', 'm', 'o', 'n', ..., '[SEP]'
Word-based example:
hyp: "astronomers didie somon and tristian gllo"
ref: "didier saumon;astronomie;tristan guillot;tristesse;monade;christian;astronomer;solomon;dididididi;mercy"
tags: None (not used for word-based case)
resulting token sequence:
'[CLS]', 'astronomers', 'did', '##ie', 'so', '##mon', 'and', 'tri', '##sti', '##an', 'g', '##llo', '[SEP]', 'did', '##ier', 'sa', '##um', '##on', '[SEP]', 'astro', '##no', '##mie', '[SEP]', 'tristan', 'gui', '##llo', '##t', '[SEP]', ..., '[SEP]', 'mercy', '[SEP]']
"""
labels_mask = []
labels = []
if tags is None:
hyp_tokens, token_start_indices = self._split_to_wordpieces(hyp.split())
else:
hyp_tokens, labels, token_start_indices = self._split_to_wordpieces_with_labels(hyp.split(), tags)
references = ref.split(";")
all_ref_tokens = []
all_ref_segment_ids = []
for i in range(len(references)):
ref_tokens, _ = self._split_to_wordpieces(references[i].split())
all_ref_tokens.extend(ref_tokens + ["[SEP]"])
all_ref_segment_ids.extend([i + 1] * (len(ref_tokens) + 1))
input_tokens = ["[CLS]"] + hyp_tokens + ["[SEP]"] + all_ref_tokens # ends with [SEP]
input_ids = self._tokenizer.convert_tokens_to_ids(input_tokens)
input_mask = [1] * len(input_ids)
segment_ids = [0] + [0] * len(hyp_tokens) + [0] + all_ref_segment_ids
if len(input_ids) != len(segment_ids):
raise ValueError(
"len(input_ids)="
+ str(len(input_ids))
+ " is different from len(segment_ids)="
+ str(len(segment_ids))
)
if tags:
labels_mask = [0] + [1] * len(labels) + [0] + [0] * len(all_ref_tokens)
labels = [0] + labels + [0] + [0] * len(all_ref_tokens)
return (input_ids, input_mask, segment_ids, labels_mask, labels, hyp_tokens, token_start_indices)
def _split_to_wordpieces_with_labels(
self, tokens: List[str], labels: List[int]
) -> Tuple[List[str], List[int], List[int]]:
"""Splits tokens (and the labels accordingly) to WordPieces.
Args:
tokens: Tokens to be split.
labels: Labels (one per token) to be split.
Returns:
3-tuple with the split tokens, split labels, and the indices of starting tokens of words
"""
bert_tokens = [] # Original tokens split into wordpieces.
bert_labels = [] # Label for each wordpiece.
# Index of each wordpiece that starts a new token.
token_start_indices = []
for i, token in enumerate(tokens):
# '+ 1' is because bert_tokens will be prepended by [CLS] token later.
token_start_indices.append(len(bert_tokens) + 1)
pieces = self._tokenizer.tokenize(token)
bert_tokens.extend(pieces)
bert_labels.extend([labels[i]] * len(pieces))
return bert_tokens, bert_labels, token_start_indices
def _split_to_wordpieces(self, tokens: List[str]) -> Tuple[List[str], List[int]]:
"""Splits tokens to WordPieces.
Args:
tokens: Tokens to be split.
Returns:
tuple with the split tokens, and the indices of the WordPieces that start a token.
"""
bert_tokens = [] # Original tokens split into wordpieces.
# Index of each wordpiece that starts a new token.
token_start_indices = []
for i, token in enumerate(tokens):
# '+ 1' is because bert_tokens will be prepended by [CLS] token later.
token_start_indices.append(len(bert_tokens) + 1)
pieces = self._tokenizer.tokenize(token)
bert_tokens.extend(pieces)
return bert_tokens, token_start_indices
def read_input_file(
self, input_filename: str, infer: bool = False
) -> Union[List['BertExample'], Tuple[List['BertExample'], Tuple[str, str]]]:
"""Reads in Tab Separated Value file and converts to training/inference-ready examples.
Args:
input_filename: Path to the TSV input file.
infer: If true, input examples do not contain target info.
Returns:
examples: List of converted examples (BertExample),
or
(examples, hyps_refs): if infer==True, also returns a list of (ASR-hypothesis, candidate_str) tuples, one per example.
"""
if not path.exists(input_filename):
raise ValueError("Cannot find file: " + input_filename)
examples = [] # output list of BertExample
hyps_refs = [] # output list of tuples (ASR-hypothesis, candidate_str)
with open(input_filename, 'r') as f:
for line in f:
if len(examples) % 1000 == 0:
logging.info("{} examples processed.".format(len(examples)))
if infer:
parts = line.rstrip('\n').split('\t')
hyp, ref, target, span_info = parts[0], parts[1], None, None
if len(parts) == 4:
target, span_info = parts[2], parts[3]
try:
example = self.build_bert_example(hyp, ref, target=target, span_info=span_info, infer=infer)
except Exception as e:
logging.warning(str(e))
logging.warning(line)
continue
if example is None:
logging.info("cannot create example: ")
logging.info(line)
continue
hyps_refs.append((hyp, ref))
examples.append(example)
else:
hyp, ref, target, semiotic_info = line.rstrip('\n').split('\t')
try:
example = self.build_bert_example(
hyp, ref, target=target, span_info=semiotic_info, infer=infer
)
except Exception as e:
logging.warning(str(e))
logging.warning(line)
continue
if example is None:
logging.info("cannot create example: ")
logging.info(line)
continue
examples.append(example)
logging.info(f"Done. {len(examples)} examples converted.")
if infer:
return examples, hyps_refs
return examples
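# Illustrative usage sketch (not part of the original module). The tokenizer name,
# label/semiotic-class maps, and file path below are all hypothetical assumptions;
# the input file is expected to follow the format documented in build_bert_example().
if __name__ == "__main__":
    from transformers import AutoTokenizer

    builder = BertExampleBuilder(
        label_map={str(i): i for i in range(11)},  # assumed: one label per candidate index, "0".."10"
        semiotic_classes={"PLAIN": 0, "CUSTOM": 1},  # "CUSTOM" is used by _get_spans(); "PLAIN" is assumed
        tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"),
        max_seq_length=256,
    )
    examples, hyps_refs = builder.read_input_file("/path/to/test.tsv", infer=True)
    print(f"{len(examples)} examples built")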
| NeMo-main | nemo/collections/nlp/data/spellchecking_asr_customization/bert_example.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.spellchecking_asr_customization.dataset import (
SpellcheckingAsrCustomizationDataset,
SpellcheckingAsrCustomizationTestDataset,
TarredSpellcheckingAsrCustomizationDataset,
)
| NeMo-main | nemo/collections/nlp/data/spellchecking_asr_customization/__init__.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
from io import BytesIO
from typing import Dict, List, Optional, Tuple
import braceexpand
import numpy as np
import torch
import webdataset as wd
from nemo.collections.nlp.data.spellchecking_asr_customization.bert_example import BertExampleBuilder
from nemo.core.classes.dataset import Dataset, IterableDataset
from nemo.core.neural_types import ChannelType, IntType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
__all__ = [
"SpellcheckingAsrCustomizationDataset",
"SpellcheckingAsrCustomizationTestDataset",
"TarredSpellcheckingAsrCustomizationDataset",
]
def collate_train_dataset(
batch: List[
Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
]
],
pad_token_id: int,
) -> Tuple[
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
]:
"""collate batch of training items
Args:
batch: A list of tuples of (input_ids, input_mask, segment_ids, input_ids_for_subwords, input_mask_for_subwords, segment_ids_for_subwords, character_pos_to_subword_pos, labels_mask, labels, spans).
pad_token_id: integer id of padding token (to use in padded_input_ids, padded_input_ids_for_subwords)
"""
max_length = 0
max_length_for_subwords = 0
max_length_for_spans = 1 # to avoid empty tensor
for (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
spans,
) in batch:
if len(input_ids) > max_length:
max_length = len(input_ids)
if len(input_ids_for_subwords) > max_length_for_subwords:
max_length_for_subwords = len(input_ids_for_subwords)
if len(spans) > max_length_for_spans:
max_length_for_spans = len(spans)
padded_input_ids = []
padded_input_mask = []
padded_segment_ids = []
padded_input_ids_for_subwords = []
padded_input_mask_for_subwords = []
padded_segment_ids_for_subwords = []
padded_character_pos_to_subword_pos = []
padded_labels_mask = []
padded_labels = []
padded_spans = []
for (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
spans,
) in batch:
if len(input_ids) < max_length:
pad_length = max_length - len(input_ids)
padded_input_ids.append(np.pad(input_ids, pad_width=[0, pad_length], constant_values=pad_token_id))
padded_input_mask.append(np.pad(input_mask, pad_width=[0, pad_length], constant_values=0))
padded_segment_ids.append(np.pad(segment_ids, pad_width=[0, pad_length], constant_values=0))
padded_labels_mask.append(np.pad(labels_mask, pad_width=[0, pad_length], constant_values=0))
padded_labels.append(np.pad(labels, pad_width=[0, pad_length], constant_values=0))
padded_character_pos_to_subword_pos.append(
np.pad(character_pos_to_subword_pos, pad_width=[0, pad_length], constant_values=0)
)
else:
padded_input_ids.append(input_ids)
padded_input_mask.append(input_mask)
padded_segment_ids.append(segment_ids)
padded_labels_mask.append(labels_mask)
padded_labels.append(labels)
padded_character_pos_to_subword_pos.append(character_pos_to_subword_pos)
if len(input_ids_for_subwords) < max_length_for_subwords:
pad_length = max_length_for_subwords - len(input_ids_for_subwords)
padded_input_ids_for_subwords.append(
np.pad(input_ids_for_subwords, pad_width=[0, pad_length], constant_values=pad_token_id)
)
padded_input_mask_for_subwords.append(
np.pad(input_mask_for_subwords, pad_width=[0, pad_length], constant_values=0)
)
padded_segment_ids_for_subwords.append(
np.pad(segment_ids_for_subwords, pad_width=[0, pad_length], constant_values=0)
)
else:
padded_input_ids_for_subwords.append(input_ids_for_subwords)
padded_input_mask_for_subwords.append(input_mask_for_subwords)
padded_segment_ids_for_subwords.append(segment_ids_for_subwords)
if len(spans) < max_length_for_spans:
padded_spans.append(np.ones((max_length_for_spans, 3), dtype=int) * -1) # pad value is [-1, -1, -1]
if len(spans) > 0:
padded_spans[-1][: spans.shape[0], : spans.shape[1]] = spans # copy actual spans to the beginning
else:
padded_spans.append(spans)
return (
torch.LongTensor(np.array(padded_input_ids)),
torch.LongTensor(np.array(padded_input_mask)),
torch.LongTensor(np.array(padded_segment_ids)),
torch.LongTensor(np.array(padded_input_ids_for_subwords)),
torch.LongTensor(np.array(padded_input_mask_for_subwords)),
torch.LongTensor(np.array(padded_segment_ids_for_subwords)),
torch.LongTensor(np.array(padded_character_pos_to_subword_pos)),
torch.LongTensor(np.array(padded_labels_mask)),
torch.LongTensor(np.array(padded_labels)),
torch.LongTensor(np.array(padded_spans)),
)
def collate_test_dataset(
batch: List[Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]],
pad_token_id: int,
) -> Tuple[
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
torch.LongTensor,
]:
"""collate batch of test items
Args:
batch: A list of tuples of (input_ids, input_mask, segment_ids, input_ids_for_subwords, input_mask_for_subwords, segment_ids_for_subwords, character_pos_to_subword_pos, fragment_indices).
pad_token_id: integer id of padding token (to use in padded_input_ids, padded_input_ids_for_subwords)
"""
max_length = 0
max_length_for_subwords = 0
max_length_for_fragment_indices = 1 # to avoid empty tensor
for (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
fragment_indices,
) in batch:
if len(input_ids) > max_length:
max_length = len(input_ids)
if len(input_ids_for_subwords) > max_length_for_subwords:
max_length_for_subwords = len(input_ids_for_subwords)
if len(fragment_indices) > max_length_for_fragment_indices:
max_length_for_fragment_indices = len(fragment_indices)
padded_input_ids = []
padded_input_mask = []
padded_segment_ids = []
padded_input_ids_for_subwords = []
padded_input_mask_for_subwords = []
padded_segment_ids_for_subwords = []
padded_character_pos_to_subword_pos = []
padded_fragment_indices = []
for (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
fragment_indices,
) in batch:
if len(input_ids) < max_length:
pad_length = max_length - len(input_ids)
padded_input_ids.append(np.pad(input_ids, pad_width=[0, pad_length], constant_values=pad_token_id))
padded_input_mask.append(np.pad(input_mask, pad_width=[0, pad_length], constant_values=0))
padded_segment_ids.append(np.pad(segment_ids, pad_width=[0, pad_length], constant_values=0))
padded_character_pos_to_subword_pos.append(
np.pad(character_pos_to_subword_pos, pad_width=[0, pad_length], constant_values=0)
)
else:
padded_input_ids.append(input_ids)
padded_input_mask.append(input_mask)
padded_segment_ids.append(segment_ids)
padded_character_pos_to_subword_pos.append(character_pos_to_subword_pos)
if len(input_ids_for_subwords) < max_length_for_subwords:
pad_length = max_length_for_subwords - len(input_ids_for_subwords)
padded_input_ids_for_subwords.append(
np.pad(input_ids_for_subwords, pad_width=[0, pad_length], constant_values=pad_token_id)
)
padded_input_mask_for_subwords.append(
np.pad(input_mask_for_subwords, pad_width=[0, pad_length], constant_values=0)
)
padded_segment_ids_for_subwords.append(
np.pad(segment_ids_for_subwords, pad_width=[0, pad_length], constant_values=0)
)
else:
padded_input_ids_for_subwords.append(input_ids_for_subwords)
padded_input_mask_for_subwords.append(input_mask_for_subwords)
padded_segment_ids_for_subwords.append(segment_ids_for_subwords)
if len(fragment_indices) < max_length_for_fragment_indices:
# we use [0, 1, 0] as padding value for fragment_indices, it corresponds to [CLS] token, which is ignored and won't affect anything
p = np.zeros((max_length_for_fragment_indices, 3), dtype=int)
p[:, 1] = 1
p[:, 2] = 0
padded_fragment_indices.append(p)
if len(fragment_indices) > 0:
padded_fragment_indices[-1][
: fragment_indices.shape[0], : fragment_indices.shape[1]
] = fragment_indices # copy actual fragment_indices to the beginning
else:
padded_fragment_indices.append(fragment_indices)
return (
torch.LongTensor(np.array(padded_input_ids)),
torch.LongTensor(np.array(padded_input_mask)),
torch.LongTensor(np.array(padded_segment_ids)),
torch.LongTensor(np.array(padded_input_ids_for_subwords)),
torch.LongTensor(np.array(padded_input_mask_for_subwords)),
torch.LongTensor(np.array(padded_segment_ids_for_subwords)),
torch.LongTensor(np.array(padded_character_pos_to_subword_pos)),
torch.LongTensor(np.array(padded_fragment_indices)),
)
class SpellcheckingAsrCustomizationDataset(Dataset):
"""
Dataset as used by the SpellcheckingAsrCustomizationModel for training and validation pipelines.
Args:
input_file (str): path to tsv-file with data
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
"input_ids_for_subwords": NeuralType(('B', 'T'), ChannelType()),
"input_mask_for_subwords": NeuralType(('B', 'T'), MaskType()),
"segment_ids_for_subwords": NeuralType(('B', 'T'), ChannelType()),
"character_pos_to_subword_pos": NeuralType(('B', 'T'), ChannelType()),
"labels_mask": NeuralType(('B', 'T'), MaskType()),
"labels": NeuralType(('B', 'T'), LabelsType()),
"spans": NeuralType(('B', 'T', 'C'), IntType()),
}
def __init__(self, input_file: str, example_builder: BertExampleBuilder) -> None:
self.example_builder = example_builder
self.examples = self.example_builder.read_input_file(input_file, infer=False)
self.pad_token_id = self.example_builder._pad_id
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
example = self.examples[idx]
input_ids = np.array(example.features["input_ids"], dtype=np.int16)
input_mask = np.array(example.features["input_mask"], dtype=np.int8)
segment_ids = np.array(example.features["segment_ids"], dtype=np.int8)
input_ids_for_subwords = np.array(example.features["input_ids_for_subwords"], dtype=np.int16)
input_mask_for_subwords = np.array(example.features["input_mask_for_subwords"], dtype=np.int8)
segment_ids_for_subwords = np.array(example.features["segment_ids_for_subwords"], dtype=np.int8)
character_pos_to_subword_pos = np.array(example.features["character_pos_to_subword_pos"], dtype=np.int16)
labels_mask = np.array(example.features["labels_mask"], dtype=np.int8)
labels = np.array(example.features["labels"], dtype=np.int8)
spans = np.array(example.features["spans"], dtype=np.int16)
return (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
spans,
)
def _collate_fn(self, batch):
"""collate batch of items
Args:
batch: A list of tuples of (input_ids, input_mask, segment_ids, input_ids_for_subwords, input_mask_for_subwords, segment_ids_for_subwords, character_pos_to_subword_pos, labels_mask, labels, spans).
"""
return collate_train_dataset(batch, pad_token_id=self.pad_token_id)
class TarredSpellcheckingAsrCustomizationDataset(IterableDataset):
"""
This Dataset loads training examples from tarred tokenized pickle files.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
Additionally, please note that the len() of this DataLayer is assumed to be the number of tokens
of the text data. Shard strategy is scatter - each node gets a unique set of shards, which are permanently
pre-allocated and never changed at runtime.
Args:
text_tar_filepaths: a string (can be brace-expandable).
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
            Defaults to 1.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
pad_token_id: id of pad token (used in collate_fn)
"""
def __init__(
self,
text_tar_filepaths: str,
shuffle_n: int = 1,
global_rank: int = 0,
world_size: int = 1,
pad_token_id: int = -1, # use real value or get error
):
super(TarredSpellcheckingAsrCustomizationDataset, self).__init__()
if pad_token_id < 0:
raise ValueError("use non-negative pad_token_id: " + str(pad_token_id))
self.pad_token_id = pad_token_id
# Replace '(', '[', '<' and '_OP_' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "{")
# Replace ')', ']', '>' and '_CL_' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "}")
# Brace expand
text_tar_filepaths = list(braceexpand.braceexpand(text_tar_filepaths))
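        # Illustrative example (paths are hypothetical, not from the original code): a value like
        # "shards/tarred_(0..3).tar" or "shards/tarred__OP_0..3_CL_.tar" is first rewritten to
        # "shards/tarred_{0..3}.tar" above and then brace-expanded here into
        # ["shards/tarred_0.tar", "shards/tarred_1.tar", "shards/tarred_2.tar", "shards/tarred_3.tar"].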
logging.info("Tarred dataset shards will be scattered evenly across all nodes.")
if len(text_tar_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(text_tar_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size}). "
f"Some shards will not be used ({len(text_tar_filepaths) % world_size})."
)
begin_idx = (len(text_tar_filepaths) // world_size) * global_rank
end_idx = begin_idx + (len(text_tar_filepaths) // world_size)
logging.info('Begin Index : %d' % (begin_idx))
logging.info('End Index : %d' % (end_idx))
text_tar_filepaths = text_tar_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
self.tarpath = text_tar_filepaths
# Put together WebDataset
self._dataset = wd.WebDataset(urls=text_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n, initial=shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = self._dataset.rename(pkl='pkl', key='__key__').to_tuple('pkl', 'key').map(f=self._build_sample)
def _build_sample(self, fname):
# Load file
pkl_file, _ = fname
pkl_file = BytesIO(pkl_file)
data = pickle.load(pkl_file)
pkl_file.close()
input_ids = data["input_ids"]
input_mask = data["input_mask"]
segment_ids = data["segment_ids"]
input_ids_for_subwords = data["input_ids_for_subwords"]
input_mask_for_subwords = data["input_mask_for_subwords"]
segment_ids_for_subwords = data["segment_ids_for_subwords"]
character_pos_to_subword_pos = data["character_pos_to_subword_pos"]
labels_mask = data["labels_mask"]
labels = data["labels"]
spans = data["spans"]
return (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
labels_mask,
labels,
spans,
)
def __iter__(self):
return self._dataset.__iter__()
def _collate_fn(self, batch):
"""collate batch of items
Args:
batch: A list of tuples of (input_ids, input_mask, segment_ids, input_ids_for_subwords, input_mask_for_subwords, segment_ids_for_subwords, character_pos_to_subword_pos, labels_mask, labels, spans).
"""
return collate_train_dataset(batch, pad_token_id=self.pad_token_id)
class SpellcheckingAsrCustomizationTestDataset(Dataset):
"""
Dataset for inference pipeline.
Args:
sents: list of strings
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
"input_ids_for_subwords": NeuralType(('B', 'T'), ChannelType()),
"input_mask_for_subwords": NeuralType(('B', 'T'), MaskType()),
"segment_ids_for_subwords": NeuralType(('B', 'T'), ChannelType()),
"character_pos_to_subword_pos": NeuralType(('B', 'T'), ChannelType()),
"fragment_indices": NeuralType(('B', 'T', 'C'), IntType()),
}
def __init__(self, input_file: str, example_builder: BertExampleBuilder) -> None:
self.example_builder = example_builder
self.examples, self.hyps_refs = self.example_builder.read_input_file(input_file, infer=True)
self.pad_token_id = self.example_builder._pad_id
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
example = self.examples[idx]
input_ids = np.array(example.features["input_ids"])
input_mask = np.array(example.features["input_mask"])
segment_ids = np.array(example.features["segment_ids"])
input_ids_for_subwords = np.array(example.features["input_ids_for_subwords"])
input_mask_for_subwords = np.array(example.features["input_mask_for_subwords"])
segment_ids_for_subwords = np.array(example.features["segment_ids_for_subwords"])
character_pos_to_subword_pos = np.array(example.features["character_pos_to_subword_pos"], dtype=np.int64)
fragment_indices = np.array(example.features["fragment_indices"], dtype=np.int16)
return (
input_ids,
input_mask,
segment_ids,
input_ids_for_subwords,
input_mask_for_subwords,
segment_ids_for_subwords,
character_pos_to_subword_pos,
fragment_indices,
)
def _collate_fn(self, batch):
"""collate batch of items
Args:
batch: A list of tuples of (input_ids, input_mask, segment_ids, input_ids_for_subwords, input_mask_for_subwords, segment_ids_for_subwords, character_pos_to_subword_pos).
"""
return collate_test_dataset(batch, pad_token_id=self.pad_token_id)
| NeMo-main | nemo/collections/nlp/data/spellchecking_asr_customization/dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import random
import re
from collections import defaultdict, namedtuple
from typing import Dict, List, Set, Tuple, Union
import numpy as np
from numba import jit
"""Utility functions for Spellchecking ASR Customization."""
def replace_diacritics(text):
text = re.sub(r"[éèëēêęěė]", "e", text) # latin
text = re.sub(r"[ё]", "е", text) # cyrillic
text = re.sub(r"[ãâāáäăàąåạảǎ]", "a", text)
text = re.sub(r"[úūüùưûů]", "u", text)
text = re.sub(r"[ôōóöõòőø]", "o", text)
text = re.sub(r"[ćçč]", "c", text)
text = re.sub(r"[ïīíîıì]", "i", text)
text = re.sub(r"[ñńňņ]", "n", text)
text = re.sub(r"[țťţ]", "t", text)
text = re.sub(r"[łľļ]", "l", text)
text = re.sub(r"[żžź]", "z", text)
text = re.sub(r"[ğ]", "g", text)
text = re.sub(r"[ďđ]", "d", text)
text = re.sub(r"[ķ]", "k", text)
text = re.sub(r"[ř]", "r", text)
text = re.sub(r"[ý]", "y", text)
text = re.sub(r"[æ]", "ae", text)
text = re.sub(r"[œ]", "oe", text)
text = re.sub(r"[șşšś]", "s", text)
return text
def load_ngram_mappings(input_name: str, max_misspelled_freq: int = 1000000000) -> Tuple[defaultdict, Set]:
"""Loads n-gram mapping vocabularies in form required by dynamic programming
Args:
input_name: file with n-gram mappings
max_misspelled_freq: threshold on misspelled n-gram frequency
Returns:
vocab: dict {key=original_ngram, value=dict{key=misspelled_ngram, value=frequency}}
ban_ngram: set of banned misspelled n-grams
    Input format: original \t misspelled \t joint_freq \t original_freq \t misspelled_freq
u t o u+i t o 49 8145 114
u t o <DELETE> t e 63 8145 16970
u t o o+_ t o 42 8145 1807
"""
vocab = defaultdict(dict)
ban_ngram = set()
with open(input_name, "r", encoding="utf-8") as f:
for line in f:
orig, misspelled, joint_freq, orig_freq, misspelled_freq = line.strip().split("\t")
if orig == "" or misspelled == "":
raise ValueError("Empty n-gram: orig=" + orig + "; misspelled=" + misspelled)
misspelled = misspelled.replace("<DELETE>", "=")
if misspelled.replace("=", "").strip() == "": # skip if resulting ngram doesn't contain any real character
continue
if int(misspelled_freq) > max_misspelled_freq:
ban_ngram.add(misspelled + " ") # space at the end is required within get_index function
vocab[orig][misspelled] = int(joint_freq) / int(orig_freq)
return vocab, ban_ngram
def load_ngram_mappings_for_dp(input_name: str) -> Tuple[defaultdict, defaultdict, defaultdict, int]:
"""Loads n-gram mapping vocabularies in form required by dynamic programming
Args:
input_name: file with n-gram mappings
Returns:
joint_vocab: dict where key=(original_ngram, misspelled_ngram), value=frequency
orig_vocab: dict where key=original_ngram, value=frequency
misspelled_vocab: dict where key=misspelled_ngram, value=frequency
max_len: maximum n-gram length seen in vocabulary
Input format: original \t misspelled \t joint_freq \t original_freq \t misspelled_freq
u t o u+i t o 49 8145 114
u t o <DELETE> t e 63 8145 16970
u t o o+_ t o 42 8145 1807
"""
joint_vocab = defaultdict(int)
orig_vocab = defaultdict(int)
misspelled_vocab = defaultdict(int)
max_len = 0
with open(input_name, "r", encoding="utf-8") as f:
for line in f:
orig, misspelled, joint_freq, _, _ = line.strip().split("\t")
if orig == "" or misspelled == "":
raise ValueError("Emty n-gram: orig=" + orig + "; misspelled=" + misspelled)
misspelled = misspelled.replace("<DELETE>", " ").replace("+", " ")
misspelled = " ".join(misspelled.split())
if misspelled == "": # skip if resulting ngram doesn't contain any real character
continue
max_len = max(max_len, orig.count(" ") + 1, misspelled.count(" ") + 1)
joint_vocab[(orig, misspelled)] += int(joint_freq)
orig_vocab[orig] += int(joint_freq)
misspelled_vocab[misspelled] += int(joint_freq)
return joint_vocab, orig_vocab, misspelled_vocab, max_len
def get_alignment_by_dp(
ref_phrase: str, hyp_phrase: str, dp_data: Tuple[defaultdict, defaultdict, defaultdict, int]
) -> List[Tuple[str, str, float, float, int, int, int]]:
"""Get best alignment path between a reference and (possibly) misspelled phrase using n-gram mappings vocabulary.
Args:
ref_phrase: candidate reference phrase (letters separated by space, real space replaced by underscore)
hyp_phrase: (possibly) misspelled phrase (letters separated by space, real space replaced by underscore)
dp_data: n-gram mapping vocabularies used by dynamic programming
Returns:
list of tuples (hyp_ngram, ref_ngram, logprob, sum_logprob, joint_freq, orig_freq, misspelled_freq)
This is best alignment path.
Example:
ref_phrase: "a n h y d r i d e"
hyp_phrase: "a n d _ h y d r o d"
Result:
[("*", "*", 0.0, 0.0, 0, 0, 0)
("a n d _ h", "a n h", -2.34, -2.34, 226, 2338, 2203)
("y d r o", "y d r i", -2.95, -5.29, 11, 211, 1584)
("d", "d e", -1.99, -7.28, 60610, 444714, 2450334)
]
Final path score is in path[-1][3]: -7.28
Note that the order of ref_phrase and hyp_phrase matters, because n-gram mappings vocabulary is not symmetrical.
"""
joint_vocab, orig_vocab, misspelled_vocab, max_len = dp_data
hyp_letters = ["*"] + hyp_phrase.split()
ref_letters = ["*"] + ref_phrase.split()
DpInfo = namedtuple(
"DpInfo", ["hyp_pos", "ref_pos", "best_hyp_ngram_len", "best_ref_ngram_len", "score", "sum_score"]
)
history = defaultdict(DpInfo)
history[(0, 0)] = DpInfo(
hyp_pos=0, ref_pos=0, best_hyp_ngram_len=1, best_ref_ngram_len=1, score=0.0, sum_score=0.0
)
for hyp_pos in range(len(hyp_letters)):
for ref_pos in range(len(ref_letters)):
if hyp_pos == 0 and ref_pos == 0: # cell (0, 0) is already defined
continue
# consider cell (hyp_pos, ref_pos) and find best path to get there
best_hyp_ngram_len = 0
best_ref_ngram_len = 0
best_ngram_score = float("-inf")
best_sum_score = float("-inf")
# loop over paths ending on non-empty ngram mapping
for hyp_ngram_len in range(1, 1 + min(max_len, hyp_pos + 1)):
hyp_ngram = " ".join(hyp_letters[(hyp_pos - hyp_ngram_len + 1) : (hyp_pos + 1)])
for ref_ngram_len in range(1, 1 + min(max_len, ref_pos + 1)):
ref_ngram = " ".join(ref_letters[(ref_pos - ref_ngram_len + 1) : (ref_pos + 1)])
if (ref_ngram, hyp_ngram) not in joint_vocab:
continue
joint_freq = joint_vocab[(ref_ngram, hyp_ngram)]
orig_freq = orig_vocab.get(ref_ngram, 1)
ngram_score = math.log(joint_freq / orig_freq)
previous_cell = (hyp_pos - hyp_ngram_len, ref_pos - ref_ngram_len)
if previous_cell not in history:
print("cell ", previous_cell, "does not exist")
continue
previous_score = history[previous_cell].sum_score
sum_score = ngram_score + previous_score
if sum_score > best_sum_score:
best_sum_score = sum_score
best_ngram_score = ngram_score
best_hyp_ngram_len = hyp_ngram_len
best_ref_ngram_len = ref_ngram_len
# loop over two variants with deletion of one character
deletion_score = -6.0
insertion_score = -6.0
if hyp_pos > 0:
previous_cell = (hyp_pos - 1, ref_pos)
previous_score = history[previous_cell].sum_score
sum_score = deletion_score + previous_score
if sum_score > best_sum_score:
best_sum_score = sum_score
best_ngram_score = deletion_score
best_hyp_ngram_len = 1
best_ref_ngram_len = 0
if ref_pos > 0:
previous_cell = (hyp_pos, ref_pos - 1)
previous_score = history[previous_cell].sum_score
sum_score = insertion_score + previous_score
if sum_score > best_sum_score:
best_sum_score = sum_score
best_ngram_score = insertion_score
best_hyp_ngram_len = 0
best_ref_ngram_len = 1
if best_hyp_ngram_len == 0 and best_ref_ngram_len == 0:
raise ValueError("best_hyp_ngram_len = 0 and best_ref_ngram_len = 0")
# save cell to history
history[(hyp_pos, ref_pos)] = DpInfo(
hyp_pos=hyp_pos,
ref_pos=ref_pos,
best_hyp_ngram_len=best_hyp_ngram_len,
best_ref_ngram_len=best_ref_ngram_len,
score=best_ngram_score,
sum_score=best_sum_score,
)
# now trace back on best path starting from last positions
path = []
hyp_pos = len(hyp_letters) - 1
ref_pos = len(ref_letters) - 1
cell_info = history[(hyp_pos, ref_pos)]
path.append(cell_info)
while hyp_pos > 0 or ref_pos > 0:
hyp_pos -= cell_info.best_hyp_ngram_len
ref_pos -= cell_info.best_ref_ngram_len
cell_info = history[(hyp_pos, ref_pos)]
path.append(cell_info)
result = []
for info in reversed(path):
hyp_ngram = " ".join(hyp_letters[(info.hyp_pos - info.best_hyp_ngram_len + 1) : (info.hyp_pos + 1)])
ref_ngram = " ".join(ref_letters[(info.ref_pos - info.best_ref_ngram_len + 1) : (info.ref_pos + 1)])
joint_freq = joint_vocab.get((ref_ngram, hyp_ngram), 0)
orig_freq = orig_vocab.get(ref_ngram, 0)
misspelled_freq = misspelled_vocab.get(hyp_ngram, 0)
result.append((hyp_ngram, ref_ngram, info.score, info.sum_score, joint_freq, orig_freq, misspelled_freq))
return result
def get_index(
custom_phrases: List[str],
vocab: defaultdict,
ban_ngram_global: Set[str],
min_log_prob: float = -4.0,
max_phrases_per_ngram: int = 100,
) -> Tuple[List[str], Dict[str, List[Tuple[int, int, int, float]]]]:
"""Given a restricted vocabulary of replacements,
loops through custom phrases,
generates all possible conversions and creates index.
Args:
custom_phrases: list of all custom phrases, characters should be split by space, real space replaced to underscore.
vocab: n-gram mappings vocabulary - dict {key=original_ngram, value=dict{key=misspelled_ngram, value=frequency}}
ban_ngram_global: set of banned misspelled n-grams
min_log_prob: minimum log probability, after which we stop growing this n-gram.
max_phrases_per_ngram: maximum phrases that we allow to store per one n-gram. N-grams exceeding that quantity get banned.
Returns:
phrases - list of phrases. Position in this list is used as phrase_id.
ngram2phrases - resulting index, i.e. dict where key=ngram, value=list of tuples (phrase_id, begin_pos, size, logprob)
"""
ban_ngram_local = set() # these ngrams are banned only for given custom_phrases
ngram_to_phrase_and_position = defaultdict(list)
for custom_phrase in custom_phrases:
inputs = custom_phrase.split(" ")
begin = 0
        index_keys = [{} for _ in inputs]  # list index = starting position in phrase; each dict maps letter n-gram -> log prob
for begin in range(len(inputs)):
for end in range(begin + 1, min(len(inputs) + 1, begin + 5)):
inp = " ".join(inputs[begin:end])
if inp not in vocab:
continue
for rep in vocab[inp]:
lp = math.log(vocab[inp][rep])
for b in range(max(0, end - 5), end): # try to grow previous ngrams with new replacement
new_ngrams = {}
for ngram in index_keys[b]:
lp_prev = index_keys[b][ngram]
if len(ngram) + len(rep) <= 10 and b + ngram.count(" ") == begin:
if lp_prev + lp > min_log_prob:
new_ngrams[ngram + rep + " "] = lp_prev + lp
index_keys[b].update(new_ngrams) # join two dictionaries
# add current replacement as ngram
if lp > min_log_prob:
index_keys[begin][rep + " "] = lp
for b in range(len(index_keys)):
for ngram, lp in sorted(index_keys[b].items(), key=lambda item: item[1], reverse=True):
if ngram in ban_ngram_global: # here ngram ends with a space
continue
real_length = ngram.count(" ")
ngram = ngram.replace("+", " ").replace("=", " ")
ngram = " ".join(ngram.split()) # here ngram doesn't end with a space anymore
if ngram + " " in ban_ngram_global: # this can happen after deletion of + and =
continue
if ngram in ban_ngram_local:
continue
ngram_to_phrase_and_position[ngram].append((custom_phrase, b, real_length, lp))
if len(ngram_to_phrase_and_position[ngram]) > max_phrases_per_ngram:
ban_ngram_local.add(ngram)
del ngram_to_phrase_and_position[ngram]
continue
phrases = [] # id to phrase
phrase2id = {} # phrase to id
ngram2phrases = defaultdict(list) # ngram to list of tuples (phrase_id, begin, length, logprob)
for ngram in ngram_to_phrase_and_position:
for phrase, b, length, lp in ngram_to_phrase_and_position[ngram]:
if phrase not in phrase2id:
phrases.append(phrase)
phrase2id[phrase] = len(phrases) - 1
ngram2phrases[ngram].append((phrase2id[phrase], b, length, lp))
return phrases, ngram2phrases
def load_index(input_name: str) -> Tuple[List[str], Dict[str, List[Tuple[int, int, int, float]]]]:
""" Load index from file
Args:
input_name: file with index
Returns:
phrases: List of all phrases in custom vocabulary. Position corresponds to phrase_id.
ngram2phrases: dict where key=ngram, value=list of tuples (phrase_id, begin_pos, size, logprob)
"""
phrases = [] # id to phrase
phrase2id = {} # phrase to id
ngram2phrases = defaultdict(list) # ngram to list of tuples (phrase_id, begin_pos, size, logprob)
with open(input_name, "r", encoding="utf-8") as f:
for line in f:
ngram, phrase, b, size, lp = line.split("\t")
b = int(b)
size = int(size)
lp = float(lp)
if phrase not in phrase2id:
phrases.append(phrase)
phrase2id[phrase] = len(phrases) - 1
ngram2phrases[ngram].append((phrase2id[phrase], b, size, lp))
return phrases, ngram2phrases
def search_in_index(
ngram2phrases: Dict[str, List[Tuple[int, int, int, float]]], phrases: List[str], letters: Union[str, List[str]]
) -> Tuple[np.ndarray, List[Set[str]]]:
""" Function used to search in index
Args:
ngram2phrases: dict where key=ngram, value=list of tuples (phrase_id, begin_pos, size, logprob)
phrases: List of all phrases in custom vocabulary. Position corresponds to phrase_id.
letters: list of letters of ASR-hypothesis. Should not contain spaces - real spaces should be replaced with underscores.
Returns:
phrases2positions: a matrix of size (len(phrases), len(letters)).
It is filled with 1.0 (hits) on intersection of letter n-grams and phrases that are indexed by these n-grams, 0.0 - elsewhere.
It is used later to find phrases with many hits within a contiguous window - potential matching candidates.
position2ngrams: positions in ASR-hypothesis mapped to sets of ngrams starting from that position.
It is used later to check how well each found candidate is covered by n-grams (to avoid cases where some repeating n-gram gives many hits to a phrase, but the phrase itself is not well covered).
"""
if " " in letters:
raise ValueError("letters should not contain space: " + str(letters))
phrases2positions = np.zeros((len(phrases), len(letters)), dtype=float)
# positions mapped to sets of ngrams starting from that position
position2ngrams = [set() for _ in range(len(letters))]
begin = 0
for begin in range(len(letters)):
for end in range(begin + 1, min(len(letters) + 1, begin + 7)):
ngram = " ".join(letters[begin:end])
if ngram not in ngram2phrases:
continue
for phrase_id, b, size, lp in ngram2phrases[ngram]:
phrases2positions[phrase_id, begin:end] = 1.0
position2ngrams[begin].add(ngram)
return phrases2positions, position2ngrams
@jit(nopython=True) # Set "nopython" mode for best performance, equivalent to @njit
def get_all_candidates_coverage(phrases, phrases2positions):
"""Get maximum hit coverage for each phrase - within a moving window of length of the phrase.
Args:
phrases: List of all phrases in custom vocabulary. Position corresponds to phrase_id.
phrases2positions: a matrix of size (len(phrases), len(ASR-hypothesis)).
It is filled with 1.0 (hits) on intersection of letter n-grams and phrases that are indexed by these n-grams, 0.0 - elsewhere.
Returns:
candidate2coverage: list of size len(phrases) containing coverage (0.0 to 1.0) in best window.
candidate2position: list of size len(phrases) containing starting position of best window.
"""
candidate2coverage = [0.0] * len(phrases)
candidate2position = [-1] * len(phrases)
for i in range(len(phrases)):
phrase_length = phrases[i].count(" ") + 1
all_coverage = np.sum(phrases2positions[i]) / phrase_length
# if total coverage on whole ASR-hypothesis is too small, there is no sense in using moving window
if all_coverage < 0.4:
continue
moving_sum = np.sum(phrases2positions[i, 0:phrase_length])
max_sum = moving_sum
best_pos = 0
for pos in range(1, phrases2positions.shape[1] - phrase_length + 1):
moving_sum -= phrases2positions[i, pos - 1]
moving_sum += phrases2positions[i, pos + phrase_length - 1]
if moving_sum > max_sum:
max_sum = moving_sum
best_pos = pos
coverage = max_sum / (phrase_length + 2) # smoothing
candidate2coverage[i] = coverage
candidate2position[i] = best_pos
return candidate2coverage, candidate2position
def get_candidates(
ngram2phrases: Dict[str, List[Tuple[int, int, int, float]]],
phrases: List[str],
letters: Union[str, List[str]],
pool_for_random_candidates: List[str],
min_phrase_coverage: float = 0.8,
) -> List[Tuple[str, int, int, float, float]]:
"""Given an index of custom vocabulary and an ASR-hypothesis retrieve 10 candidates.
Args:
ngram2phrases: dict where key=ngram, value=list of tuples (phrase_id, begin_pos, size, logprob)
phrases: List of all phrases in custom vocabulary. Position corresponds to phrase_id.
letters: list of letters of ASR-hypothesis. Should not contain spaces - real spaces should be replaced with underscores.
pool_for_random_candidates: large list of strings, from which to sample random candidates in case when there are less than 10 real candidates
        min_phrase_coverage: we discard candidates that are not covered by n-grams at least to this extent
(to avoid cases where some repeating n-gram gives many hits to a phrase, but the phrase itself is not well covered).
Returns:
candidates: list of tuples (candidate_text, approximate_begin_position, length, coverage of window in ASR-hypothesis, coverage of phrase itself).
"""
phrases2positions, position2ngrams = search_in_index(ngram2phrases, phrases, letters)
candidate2coverage, candidate2position = get_all_candidates_coverage(phrases, phrases2positions)
    # mask for each custom phrase marking which of its symbols are covered by input n-grams
phrases2coveredsymbols = [[0 for x in phrases[i].split(" ")] for i in range(len(phrases))]
candidates = []
k = 0
for idx, coverage in sorted(enumerate(candidate2coverage), key=lambda item: item[1], reverse=True):
begin = candidate2position[idx] # this is most likely beginning of this candidate
phrase_length = phrases[idx].count(" ") + 1
for pos in range(begin, begin + phrase_length):
# we do not know exact end of custom phrase in text, it can be different from phrase length
if pos >= len(position2ngrams):
break
for ngram in position2ngrams[pos]:
for phrase_id, b, size, lp in ngram2phrases[ngram]:
if phrase_id != idx:
continue
for ppos in range(b, b + size):
if ppos >= phrase_length:
break
phrases2coveredsymbols[phrase_id][ppos] = 1
k += 1
if k > 100:
break
real_coverage = sum(phrases2coveredsymbols[idx]) / len(phrases2coveredsymbols[idx])
if real_coverage < min_phrase_coverage:
continue
candidates.append((phrases[idx], begin, phrase_length, coverage, real_coverage))
# no need to process this sentence further if it does not contain any real candidates
if len(candidates) == 0:
print("WARNING: no real candidates", candidates)
return []
while len(candidates) < 10:
dummy = random.choice(pool_for_random_candidates)
dummy = " ".join(list(dummy.replace(" ", "_")))
candidates.append((dummy, -1, dummy.count(" ") + 1, 0.0, 0.0))
candidates = candidates[:10]
random.shuffle(candidates)
if len(candidates) != 10:
print("WARNING: cannot get 10 candidates", candidates)
return []
return candidates
def read_spellmapper_predictions(filename: str) -> List[Tuple[str, List[Tuple[int, int, str, float]], List[int]]]:
"""Read results of SpellMapper inference from file.
Args:
filename: file with SpellMapper results
Returns:
list of tuples (sent, list of fragment predictions, list of letter predictions)
One fragment prediction is a tuple (begin, end, replacement_text, prob)
"""
results = []
with open(filename, "r", encoding="utf-8") as f:
for line in f:
text, candidate_str, fragment_predictions_str, letter_predictions_str = line.strip().split("\t")
text = text.replace(" ", "").replace("_", " ")
candidate_str = candidate_str.replace(" ", "").replace("_", " ")
candidates = candidate_str.split(";")
letter_predictions = list(map(int, letter_predictions_str.split()))
if len(candidates) != 10:
raise IndexError("expect 10 candidates, got: ", len(candidates))
if len(text) != len(letter_predictions):
raise IndexError("len(text)=", len(text), "; len(letter_predictions)=", len(letter_predictions))
replacements = []
if fragment_predictions_str != "":
for prediction in fragment_predictions_str.split(";"):
begin, end, candidate_id, prob = prediction.split(" ")
begin = int(begin)
end = int(end)
candidate_id = int(candidate_id)
prob = float(prob)
replacements.append((begin, end, candidates[candidate_id - 1], prob))
replacements.sort() # it will sort by begin, then by end
results.append((text, replacements, letter_predictions))
return results
def substitute_replacements_in_text(
text: str, replacements: List[Tuple[int, int, str, float]], replace_hyphen_to_space: bool
) -> str:
"""Substitute replacements to the input text, iterating from end to beginning, so that indexing does not change.
Note that we expect intersecting replacements to be already filtered.
Args:
text: sentence;
replacements: list of replacements, each is a tuple (begin, end, text, probability);
replace_hyphen_to_space: if True, hyphens in replacements will be converted to spaces;
Returns:
corrected sentence
"""
replacements.sort()
last_begin = len(text) + 1
corrected_text = text
for begin, end, candidate, prob in reversed(replacements):
if end > last_begin:
print("WARNING: skip intersecting replacement [", candidate, "] in text: ", text)
continue
if replace_hyphen_to_space:
candidate = candidate.replace("-", " ")
corrected_text = corrected_text[:begin] + candidate + corrected_text[end:]
last_begin = begin
return corrected_text
def apply_replacements_to_text(
text: str,
replacements: List[Tuple[int, int, str, float]],
min_prob: float = 0.5,
replace_hyphen_to_space: bool = False,
dp_data: Tuple[defaultdict, defaultdict, defaultdict, int] = None,
min_dp_score_per_symbol: float = -99.9,
) -> str:
"""Filter and apply replacements to the input sentence.
Args:
text: input sentence;
replacements: list of proposed replacements (probably intersecting), each is a tuple (begin, end, text, probability);
min_prob: threshold on replacement probability;
replace_hyphen_to_space: if True, hyphens in replacements will be converted to spaces;
dp_data: n-gram mapping vocabularies used by dynamic programming, if None - dynamic programming is not used;
min_dp_score_per_symbol: threshold on dynamic programming sum score averaged by hypothesis length
Returns:
corrected sentence
"""
# sort replacements by positions
replacements.sort()
# filter replacements
# Note that we do not skip replacements with same text, otherwise intersecting candidates with lower probability can win
filtered_replacements = []
for j in range(len(replacements)):
replacement = replacements[j]
begin, end, candidate, prob = replacement
fragment = text[begin:end]
candidate_spaced = " ".join(list(candidate.replace(" ", "_")))
fragment_spaced = " ".join(list(fragment.replace(" ", "_")))
# apply penalty if candidate length is bigger than fragment length
# to avoid cases like "forward-looking" replacing "looking" in "forward looking" resulting in "forward forward looking"
if len(candidate) > len(fragment):
penalty = len(fragment) / len(candidate)
prob *= penalty
# skip replacement with low probability
if prob < min_prob:
continue
# skip replacements with some predefined templates, e.g. "*'s" => "*s"
if check_banned_replacements(fragment, candidate):
continue
if dp_data is not None:
path = get_alignment_by_dp(candidate_spaced, fragment_spaced, dp_data)
# path[-1][3] is the sum of logprobs for best path of dynamic programming: divide sum_score by length
if path[-1][3] / (len(fragment)) < min_dp_score_per_symbol:
continue
# skip replacement if it intersects with previous replacement and has lower probability, otherwise remove previous replacement
if len(filtered_replacements) > 0 and filtered_replacements[-1][1] > begin:
if filtered_replacements[-1][3] > prob:
continue
else:
filtered_replacements.pop()
filtered_replacements.append((begin, end, candidate, prob))
return substitute_replacements_in_text(text, filtered_replacements, replace_hyphen_to_space)
def update_manifest_with_spellmapper_corrections(
input_manifest_name: str,
short2full_name: str,
output_manifest_name: str,
spellmapper_results_name: str,
min_prob: float = 0.5,
replace_hyphen_to_space: bool = True,
field_name: str = "pred_text",
use_dp: bool = True,
ngram_mappings: Union[str, None] = None,
min_dp_score_per_symbol: float = -1.5,
) -> None:
"""Post-process SpellMapper predictions and write corrected sentence to the specified field of nemo manifest.
The previous content of this field will be copied to "*_before_correction" field.
If the sentence was split into fragments before running SpellMapper, all replacements will be first gathered together and then applied to the original long sentence.
Args:
input_manifest_name: input nemo manifest;
short2full_name: text file with two columns: short_sent \t full_sent;
output_manifest_name: output nemo manifest;
spellmapper_results_name: text file with SpellMapper inference results;
min_prob: threshold on replacement probability;
replace_hyphen_to_space: if True, hyphens in replacements will be converted to spaces;
field_name: name of json field whose text we want to correct;
        use_dp: if True, additional replacement filtering will be applied using dynamic programming (slow);
ngram_mappings: file with n-gram mappings, only needed if use_dp=True
min_dp_score_per_symbol: threshold on dynamic programming sum score averaged by hypothesis length
"""
short2full_sent = defaultdict(list)
sent2corrections = defaultdict(dict)
with open(short2full_name, "r", encoding="utf-8") as f:
for line in f:
s = line.strip()
short_sent, full_sent = s.split("\t")
short2full_sent[short_sent].append(full_sent)
sent2corrections[full_sent] = []
spellmapper_results = read_spellmapper_predictions(spellmapper_results_name)
dp_data = None
if use_dp:
dp_data = load_ngram_mappings_for_dp(ngram_mappings)
for text, replacements, _ in spellmapper_results:
short_sent = text
if short_sent not in short2full_sent:
continue
# it can happen that one short sentence occurred in multiple full sentences
for full_sent in short2full_sent[short_sent]:
offset = full_sent.find(short_sent)
for begin, end, candidate, prob in replacements:
sent2corrections[full_sent].append((begin + offset, end + offset, candidate, prob))
out = open(output_manifest_name, "w", encoding="utf-8")
with open(input_manifest_name, "r", encoding="utf-8") as f:
for line in f:
record = json.loads(line.strip())
sent = record[field_name]
record[field_name + "_before_correction"] = record[field_name]
if sent in sent2corrections:
record[field_name] = apply_replacements_to_text(
sent,
sent2corrections[sent],
min_prob=min_prob,
replace_hyphen_to_space=replace_hyphen_to_space,
dp_data=dp_data,
min_dp_score_per_symbol=min_dp_score_per_symbol,
)
out.write(json.dumps(record) + "\n")
out.close()
def extract_and_split_text_from_manifest(
input_name: str, output_name: str, field_name: str = "pred_text", len_in_words: int = 16, step_in_words: int = 8
) -> None:
"""Extract text of the specified field in nemo manifest and split it into fragments (possibly with intersection).
The result is saved to a text file with two columns: short_sent \t full_sent.
This is useful if we want to process shorter sentences and then apply the results to the original long sentence.
Args:
input_name: input nemo manifest,
output_name: output text file,
field_name: name of json field from which we extract the sentence text,
len_in_words: maximum number of words in a fragment,
step_in_words: on how many words we move at each step.
For example, if the len_in_words=16 and step_in_words=8 the fragments will be intersected by half.
"""
short2full_sent = set()
with open(input_name, "r", encoding="utf-8") as f:
for line in f:
record = json.loads(line.strip())
sent = record[field_name]
if " " in sent:
raise ValueError("found multiple space in: " + sent)
words = sent.split()
for i in range(0, len(words), step_in_words):
short_sent = " ".join(words[i : i + len_in_words])
short2full_sent.add((short_sent, sent))
with open(output_name, "w", encoding="utf-8") as out:
for short_sent, full_sent in short2full_sent:
out.write(short_sent + "\t" + full_sent + "\n")
def check_banned_replacements(src: str, dst: str) -> bool:
"""This function is used to check is a pair of words/phrases is matching some common template that we don't want to replace with one another.
Args:
src: first phrase
dst: second phrase
Returns True if this replacement should be banned.
"""
# customers' => customer's
if src.endswith("s'") and dst.endswith("'s") and src[0:-2] == dst[0:-2]:
return True
# customer's => customers'
if src.endswith("'s") and dst.endswith("s'") and src[0:-2] == dst[0:-2]:
return True
# customers => customer's
if src.endswith("s") and dst.endswith("'s") and src[0:-1] == dst[0:-2]:
return True
# customer's => customers
if src.endswith("'s") and dst.endswith("s") and src[0:-2] == dst[0:-1]:
return True
# customers => customers'
if src.endswith("s") and dst.endswith("s'") and src[0:-1] == dst[0:-2]:
return True
# customers' => customers
if src.endswith("s'") and dst.endswith("s") and src[0:-2] == dst[0:-1]:
return True
# utilities => utility's
if src.endswith("ies") and dst.endswith("y's") and src[0:-3] == dst[0:-3]:
return True
# utility's => utilities
if src.endswith("y's") and dst.endswith("ies") and src[0:-3] == dst[0:-3]:
return True
# utilities => utility
if src.endswith("ies") and dst.endswith("y") and src[0:-3] == dst[0:-1]:
return True
# utility => utilities
if src.endswith("y") and dst.endswith("ies") and src[0:-1] == dst[0:-3]:
return True
# group is => group's
if src.endswith(" is") and dst.endswith("'s") and src[0:-3] == dst[0:-2]:
return True
# group's => group is
if src.endswith("'s") and dst.endswith(" is") and src[0:-2] == dst[0:-3]:
return True
# trex's => trex
if src.endswith("'s") and src[0:-2] == dst:
return True
# trex => trex's
if dst.endswith("'s") and dst[0:-2] == src:
return True
# increases => increase (but trimass => trimas is ok)
if src.endswith("s") and (not src.endswith("ss")) and src[0:-1] == dst:
return True
    # increase => increases (but trimas => trimass is ok)
if dst.endswith("s") and (not dst.endswith("ss")) and dst[0:-1] == src:
return True
# anticipate => anticipated
if src.endswith("e") and dst.endswith("ed") and src[0:-1] == dst[0:-2]:
return True
# anticipated => anticipate
if src.endswith("ed") and dst.endswith("e") and src[0:-2] == dst[0:-1]:
return True
# blocks => blocked
if src.endswith("s") and dst.endswith("ed") and src[0:-1] == dst[0:-2]:
return True
# blocked => blocks
if src.endswith("ed") and dst.endswith("s") and src[0:-2] == dst[0:-1]:
return True
# lives => lived
if src.endswith("es") and dst.endswith("ed") and src[0:-2] == dst[0:-2]:
return True
# lived => lives
if src.endswith("ed") and dst.endswith("es") and src[0:-2] == dst[0:-2]:
return True
# regarded => regard
if src.endswith("ed") and src[0:-2] == dst:
return True
# regard => regarded
if dst.endswith("ed") and dst[0:-2] == src:
return True
    # regarding => regard
if src.endswith("ing") and src[0:-3] == dst:
return True
# regard => regarding
if dst.endswith("ing") and dst[0:-3] == src:
return True
# longer => long
if src.endswith("er") and src[0:-2] == dst:
return True
# long => longer
if dst.endswith("er") and dst[0:-2] == src:
return True
# discussed => discussing
if src.endswith("ed") and dst.endswith("ing") and src[0:-2] == dst[0:-3]:
return True
# discussing => discussed
if src.endswith("ing") and dst.endswith("ed") and src[0:-3] == dst[0:-2]:
return True
# live => living
if src.endswith("e") and dst.endswith("ing") and src[0:-1] == dst[0:-3]:
return True
# living => live
if src.endswith("ing") and dst.endswith("e") and src[0:-3] == dst[0:-1]:
return True
# discussion => discussing
if src.endswith("ion") and dst.endswith("ing") and src[0:-3] == dst[0:-3]:
return True
# discussing => discussion
if src.endswith("ing") and dst.endswith("ion") and src[0:-3] == dst[0:-3]:
return True
# alignment => aligning
if src.endswith("ment") and dst.endswith("ing") and src[0:-4] == dst[0:-3]:
return True
# aligning => alignment
if src.endswith("ing") and dst.endswith("ment") and src[0:-3] == dst[0:-4]:
return True
# dispensers => dispensing
if src.endswith("ers") and dst.endswith("ing") and src[0:-3] == dst[0:-3]:
return True
# dispensing => dispensers
if src.endswith("ing") and dst.endswith("ers") and src[0:-3] == dst[0:-3]:
return True
# integrate => integrity
if src.endswith("ate") and dst.endswith("ity") and src[0:-3] == dst[0:-3]:
return True
# integrity => integrate
if src.endswith("ity") and dst.endswith("ate") and src[0:-3] == dst[0:-3]:
return True
# discussion => discussed
if src.endswith("ion") and dst.endswith("ed") and src[0:-3] == dst[0:-2]:
return True
# discussed => discussion
if src.endswith("ed") and dst.endswith("ion") and src[0:-2] == dst[0:-3]:
return True
# anticipation => anticipate
if src.endswith("ion") and dst.endswith("e") and src[0:-3] == dst[0:-1]:
return True
# anticipate => anticipation
if src.endswith("e") and dst.endswith("ion") and src[0:-1] == dst[0:-3]:
return True
# incremental => increment
if src.endswith("ntal") and dst.endswith("nt") and src[0:-4] == dst[0:-2]:
return True
# increment => incremental
if src.endswith("nt") and dst.endswith("ntal") and src[0:-2] == dst[0:-4]:
return True
# national => nation
if src.endswith("nal") and dst.endswith("n") and src[0:-3] == dst[0:-1]:
return True
# nation => national
if src.endswith("n") and dst.endswith("nal") and src[0:-1] == dst[0:-3]:
return True
# significantly => significant
if src.endswith("ntly") and dst.endswith("nt") and src[0:-4] == dst[0:-2]:
return True
# significant => significantly
if src.endswith("nt") and dst.endswith("ntly") and src[0:-2] == dst[0:-4]:
return True
# delivery => deliverer
if src.endswith("ery") and dst.endswith("erer") and src[0:-3] == dst[0:-4]:
return True
# deliverer => delivery
if src.endswith("erer") and dst.endswith("ery") and src[0:-4] == dst[0:-3]:
return True
# deliver => deliverer
if src.endswith("er") and dst.endswith("erer") and src[0:-2] == dst[0:-4]:
return True
# deliverer => deliver
if src.endswith("erer") and dst.endswith("er") and src[0:-4] == dst[0:-2]:
return True
# comparably => comparable
if src.endswith("bly") and dst.endswith("ble") and src[0:-3] == dst[0:-3]:
return True
# comparable => comparably
if src.endswith("ble") and dst.endswith("bly") and src[0:-3] == dst[0:-3]:
return True
# comparably => comparability
if src.endswith("bly") and dst.endswith("bility") and src[0:-3] == dst[0:-6]:
return True
# comparability => comparably
if src.endswith("bility") and dst.endswith("bly") and src[0:-6] == dst[0:-3]:
return True
# beautiful => beautifully
if src.endswith("l") and dst.endswith("lly") and src[0:-1] == dst[0:-3]:
return True
# beautifully => beautiful
if src.endswith("lly") and dst.endswith("l") and src[0:-3] == dst[0:-1]:
return True
# active => actively
if src.endswith("e") and dst.endswith("ely") and src[0:-1] == dst[0:-3]:
return True
# actively => active
if src.endswith("ely") and dst.endswith("e") and src[0:-3] == dst[0:-1]:
return True
# america => american
if src.endswith("a") and dst.endswith("an") and src[0:-1] == dst[0:-2]:
return True
# american => america
if src.endswith("an") and dst.endswith("a") and src[0:-2] == dst[0:-1]:
return True
# reinvesting => investing
if src.startswith("re") and src[2:] == dst:
return True
# investing => reinvesting
if dst.startswith("re") and dst[2:] == src:
return True
# unchanged => changed
if src.startswith("un") and src[2:] == dst:
return True
# changed => unchanged
if dst.startswith("un") and dst[2:] == src:
return True
# disrespected => respected
if src.startswith("dis") and src[3:] == dst:
return True
# respected => disrespected
if dst.startswith("dis") and dst[3:] == src:
return True
# outperformance => performance
if src.startswith("out") and src[3:] == dst:
return True
# performance => outperformance
if dst.startswith("out") and dst[3:] == src:
return True
return False
| NeMo-main | nemo/collections/nlp/data/spellchecking_asr_customization/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import os
import pickle
import random
import re
import string
from collections import Counter
import numpy as np
import torch
from tqdm.auto import tqdm
from nemo.utils import logging
from nemo.utils.env_var_parsing import get_envint
__all__ = [
"DataProcessor",
"get_label_stats",
"get_multi_label_stats",
"partition_data",
"write_files",
"write_data",
"create_dataset",
"read_csv",
"get_dataset",
"partition",
"map_entities",
"get_entities",
"get_data",
"reverse_dict",
"get_intent_labels",
"get_stats",
"DATABASE_EXISTS_TMP",
"MODE_EXISTS_TMP",
"is_whitespace",
"write_vocab",
"if_exist",
"remove_punctuation_from_sentence",
"dataset_to_ids",
"get_freq_weights",
"get_freq_weights_bce_with_logits_loss",
"fill_class_weights",
"normalize_answer",
"get_labels_to_labels_id_mapping",
"get_vocab",
"find_newlines",
"load_data_indices",
"chinese_punctuation",
"check_chinese_char",
"normalize_chinese_answer",
]
DATABASE_EXISTS_TMP = "{} dataset has already been processed and stored at {}"
MODE_EXISTS_TMP = "{} mode of {} dataset has already been processed and stored at {}"
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# if sys.version_info[0] == 2:
# line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
chinese_punctuation = {
"——",
"‘",
"’",
"“",
"”",
"…",
"、",
"。",
"〈",
"〉",
"《",
"》",
"「",
"」",
"『",
"』",
"【",
"】",
"〔",
"〕",
"!",
"(",
")",
",",
".",
":",
";",
"?",
}
def check_chinese_char(ch):
"""Check if a character is in Chinese."""
if "\u4e00" <= ch <= "\u9fff" or ch in chinese_punctuation:
return True
else:
return False
def normalize_chinese_answer(text):
"""Remove the Chinese punctuation and separate Chinese answers to char-level"""
def remove_punc(text):
exclude = chinese_punctuation
return "".join(ch for ch in text if ch not in exclude)
def separate_char(text):
ch_list = []
for ch in text:
ch_list.append(ch)
return ch_list
return separate_char(remove_punc(text))
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r"\b(a|an|the)\b", " ", text)
def white_space_fix(text):
return " ".join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
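# Illustrative example of the SQuAD-style normalization above; the input string is made up:
# >>> normalize_answer("The Quick, Brown Fox!")
# 'quick brown fox'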
def get_label_stats(labels, outfile="stats.tsv", verbose=True):
"""
Args:
labels: list of all labels
outfile: path to the file where to save label stats
Returns:
total (int): total number of labels
freq_dict (dict): mapping from each label to its frequency
max id of the labels
"""
labels = Counter(labels)
total = sum(labels.values())
out = open(outfile, "w")
i = 0
freq_dict = {}
label_frequencies = labels.most_common()
for k, v in label_frequencies:
out.write(f"{k}\t\t{round(v/total,5)}\t\t{v}\n")
if verbose and i < 3:
logging.info(f"label: {k}, {v} out of {total} ({(v / total)*100.0:.2f}%).")
i += 1
freq_dict[k] = v
out.close()
return total, freq_dict, max(labels.keys())
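# Illustrative usage sketch; the label list and output path below are hypothetical:
# >>> total, freq_dict, max_id = get_label_stats([0, 0, 1, 2], outfile="/tmp/stats.tsv")
# total == 4, freq_dict == {0: 2, 1: 1, 2: 1}, max_id == 2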
def get_multi_label_stats(labels, outfile="stats.tsv", verbose=True):
"""
Args:
labels: list of tuples containing labels for each utterance
Example: If there are 5 intents in total, then (0,1,1,1,0) represents the labels
for an individual utterance. (0,1,1,1,0) indicates that the utterance has labels
at index/line 1,2, and 3 in dict.intents. The list of tuples contain labels for
all utterances.
outfile: path to the file where to save label stats
Returns:
total (int): total number of utterances
class_count_dict (dict): maps each class id to a [negative_count, positive_count] pair
"""
total = len(labels)
positive_class_total = 0
class_count_dict = {}
# Get the count of each label in the label dictionary, both the positive and negative classes
for label in labels:
for label_index, val in enumerate(label):
if label_index not in class_count_dict:
class_count_dict[label_index] = [0, 0]
if val == 1:
positive_class_total += 1
class_count_dict[label_index][1] += 1
else:
class_count_dict[label_index][0] += 1
if verbose:
three_most_frequent_classes = sorted(class_count_dict, key=lambda idx: class_count_dict[idx][1], reverse=True)
for cnt, idx in enumerate(three_most_frequent_classes):
if cnt > 2:
break
positives = class_count_dict[idx][1]
logging.info(
f"label: {idx}, {positives} out of {positive_class_total} ({(positives / positive_class_total)*100.0:.2f}%)."
)
return total, class_count_dict, len(labels[0]) - 1
def partition_data(intent_queries, slot_tags, split=0.1):
n = len(intent_queries)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev_intents, dev_slots, train_intents, train_slots = [], [], [], []
dev_intents.append("sentence\tlabel\n")
train_intents.append("sentence\tlabel\n")
for i, item in enumerate(intent_queries):
if i in dev_idx:
dev_intents.append(item)
dev_slots.append(slot_tags[i])
else:
train_intents.append(item)
train_slots.append(slot_tags[i])
return train_intents, train_slots, dev_intents, dev_slots
def write_files(data, outfile):
with open(outfile, "w") as f:
for item in data:
item = f"{item.strip()}\n"
f.write(item)
def write_data(data, slot_dict, intent_dict, outfold, mode, uncased):
intent_file = open(f"{outfold}/{mode}.tsv", "w")
intent_file.write("sentence\tlabel\n")
slot_file = open(f"{outfold}/{mode}_slots.tsv", "w")
for tokens, slots, intent in data:
text = " ".join(tokens)
if uncased:
text = text.lower()
intent_file.write(f"{text}\t{intent_dict[intent]}\n")
slots = [str(slot_dict[slot]) for slot in slots]
slot_file.write(" ".join(slots) + "\n")
intent_file.close()
slot_file.close()
def create_dataset(train, dev, slots, intents, uncased, outfold):
os.makedirs(outfold, exist_ok=True)
if "O" in slots:
slots.remove("O")
slots = sorted(list(slots)) + ["O"]
intents = sorted(list(intents))
slots = write_vocab(slots, f"{outfold}/dict.slots.csv")
intents = write_vocab(intents, f"{outfold}/dict.intents.csv")
write_data(train, slots, intents, outfold, "train", uncased)
write_data(dev, slots, intents, outfold, "test", uncased)
def read_csv(file_path):
rows = []
with open(file_path, "r") as csvfile:
read_csv = csv.reader(csvfile, delimiter=",")
for row in read_csv:
rows.append(row)
return rows
def get_dataset(files, dev_split=0.1):
# entity2value, value2entity = get_entities(files)
data, slots, intents = get_data(files)
if len(data) == 1:
train, dev = partition(data[0], split=dev_split)
else:
train, dev = data[0], data[1]
return train, dev, slots, intents
def partition(data, split=0.1):
n = len(data)
n_dev = int(n * split)
dev_idx = set(random.sample(range(n), n_dev))
dev, train = [], []
for i, item in enumerate(data):
if i in dev_idx:
dev.append(item)
else:
train.append(item)
return train, dev
def map_entities(entity2value, entities):
for key in entities:
if "data" in entities[key]:
if key not in entity2value:
entity2value[key] = set([])
values = []
for value in entities[key]["data"]:
values.append(value["value"])
values.extend(value["synonyms"])
entity2value[key] = entity2value[key] | set(values)
return entity2value
def get_entities(files):
entity2value = {}
for file in files:
with open(file, "r") as json_file:
data = json.load(json_file)
entity2value = map_entities(entity2value, data["entities"])
value2entity = reverse_dict(entity2value)
return entity2value, value2entity
def get_data(files):
all_data, all_slots, all_intents = [], set(["O"]), set()
for file in files:
file_data = []
with open(file, "r") as json_file:
data = json.load(json_file)
for intent in data["intents"]:
all_intents.add(intent)
utterances = data["intents"][intent]["utterances"]
for utterance in utterances:
tokens, slots = [], []
for frag in utterance["data"]:
frag_tokens = frag["text"].strip().split()
tokens.extend(frag_tokens)
if "slot_name" not in frag:
slot = "O"
else:
slot = frag["slot_name"]
all_slots.add(slot)
slots.extend([slot] * len(frag_tokens))
file_data.append((tokens, slots, intent))
all_data.append(file_data)
return all_data, all_slots, all_intents
def reverse_dict(entity2value):
value2entity = {}
for entity in entity2value:
for value in entity2value[entity]:
value2entity[value] = entity
return value2entity
def get_intent_labels(intent_file):
labels = {}
label = 0
with open(intent_file, "r") as f:
for line in f:
intent = line.strip()
labels[intent] = label
label += 1
return labels
def get_stats(lengths):
logging.info("Some stats of the lengths of the sequences:")
lengths = np.asarray(lengths)
logging.info(
f"Min: {np.min(lengths)} | \
Max: {np.max(lengths)} | \
Mean: {np.mean(lengths)} | \
Median: {np.median(lengths)}"
)
logging.info(f"75 percentile: {np.percentile(lengths, 75):.2f}")
logging.info(f"99 percentile: {np.percentile(lengths, 99):.2f}")
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def write_vocab(items, outfile):
vocab = {}
idx = 0
with open(outfile, "w") as f:
for item in items:
f.write(item + "\n")
vocab[item] = idx
idx += 1
return vocab
def get_labels_to_labels_id_mapping(file):
"""
Reads labels from the file and returns labels to id mapping dictionary
Args:
file: path to file
Returns:
labels to id mapping dictionary
"""
lines = open(file, "r").readlines()
lines = [line.strip() for line in lines if line.strip()]
label_ids = {lines[i]: i for i in range(len(lines))}
return label_ids
def if_exist(outfold, files):
if not os.path.exists(outfold):
return False
for file in files:
if not os.path.exists(f"{outfold}/{file}"):
return False
return True
def remove_punctuation_from_sentence(sentence):
sentence = re.sub("[" + string.punctuation + "]", "", sentence)
sentence = sentence.lower()
return sentence
def dataset_to_ids(
dataset,
tokenizer,
cache_ids=False,
add_bos_eos=True,
cache_data_per_node=False,
use_cache=False,
remove_trailing_newline=False,
):
"""
Reads a dataset from file line by line, tokenizes each line with the tokenizer,
and returns a list of lists which correspond to the ids of the tokenized strings.
Args:
dataset (str): path to dataset
tokenizer: tokenizer to convert text into ids
cache_ids (bool): if True, ids are saved to disk as pickle file
with similar name (e.g., data.txt --> data.txt.pkl)
add_bos_eos (bool): whether to add <s> and </s> symbols (e.g., for NMT)
cache_data_per_node (bool): Cache data on local_rank 0. Use when there is not a shared-filesystem.
use_cache (bool): Use cached ids if they exist.
remove_trailing_newline (bool): Remove trailing newline character.
Returns:
ids: list of ids which correspond to tokenized strings of the dataset
"""
cached_ids_dataset = dataset + str(".pkl")
if use_cache and os.path.isfile(cached_ids_dataset):
logging.info("Loading cached tokenized dataset ...")
ids = pickle.load(open(cached_ids_dataset, "rb"))
else:
logging.info(f"Tokenizing dataset {dataset}...")
data = open(dataset, "rb").readlines()
ids = []
for sentence in tqdm(data, desc="Tokenizing sentence"):
text = sentence.decode("utf-8")
if remove_trailing_newline:
text = text.rstrip("\n")
sent_ids = tokenizer.text_to_ids(text)
if add_bos_eos:
sent_ids = [tokenizer.bos_id] + sent_ids + [tokenizer.eos_id]
ids.append(sent_ids)
if cache_ids and (
not torch.distributed.is_initialized() or (cache_data_per_node and get_envint("LOCAL_RANK", 0) == 0)
):
logging.info("Caching tokenized dataset ...")
pickle.dump(ids, open(cached_ids_dataset, "wb"))
return ids
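# Illustrative usage sketch; `my_tokenizer` is a hypothetical NeMo tokenizer object exposing
# text_to_ids/bos_id/eos_id, and the dataset path is made up:
# >>> ids = dataset_to_ids("/data/train.src", my_tokenizer, add_bos_eos=True)
# `ids` then holds one list of token ids per line of /data/train.src.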
def get_freq_weights(label_freq):
"""
Computes inverse-frequency class weights so that classes with fewer samples receive
larger weights and contribute comparably to the more frequent classes. The weight of
each label is total_frequency / (num_labels * label_frequency).
"""
total_size = 0
for lf in label_freq.values():
total_size += lf
weighted_slots = {label: (total_size / (len(label_freq) * freq)) for label, freq in label_freq.items()}
return weighted_slots
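# Worked example with hypothetical label frequencies:
# >>> get_freq_weights({"O": 80, "B-LOC": 20})
# {'O': 0.625, 'B-LOC': 2.5}   # total=100, 2 labels: 100/(2*80) and 100/(2*20)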
def get_freq_weights_bce_with_logits_loss(label_freq):
"""
Calculate positive class weights to be passed to BCEWithLogitsLoss
https://pytorch.org/docs/1.9.1/generated/torch.nn.BCEWithLogitsLoss.html
Args:
label_freq: dictionary where each key is a class id and the value holds the counts of
negative and positive examples for that class (negative count at index 0, positive at index 1)
Returns:
weights: dictionary of labels with their weights
"""
weights = {}
for label_id, class_values in label_freq.items():
positive_class = class_values[1]
negative_class = class_values[0]
if positive_class == 0:
weights[label_id] = 0
else:
weights[label_id] = float(negative_class) / float(positive_class)
return weights
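# Worked example with hypothetical [negative_count, positive_count] pairs, as produced by
# get_multi_label_stats:
# >>> get_freq_weights_bce_with_logits_loss({0: [90, 10], 1: [50, 50], 2: [100, 0]})
# {0: 9.0, 1: 1.0, 2: 0}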
def fill_class_weights(weights, max_id=-1):
"""
Converts a dictionary of label weights into a dense list indexed by label id.
Labels missing from the dictionary get weight 1.
Args:
weights: dictionary of weights for labels, labels as keys and weights are their values
max_id: the largest label id in the dataset, default=-1 would consider the largest label in the weights dictionary as max_id
Returns:
weights_list: list of weights for labels
"""
if max_id < 0:
max_id = 0
for l in weights.keys():
max_id = max(max_id, l)
all_weights = [1.0] * (max_id + 1)
for i in range(len(all_weights)):
if i in weights:
all_weights[i] = weights[i]
return all_weights
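# Worked example with hypothetical weights:
# >>> fill_class_weights({0: 2.0, 3: 0.5}, max_id=4)
# [2.0, 1.0, 1.0, 0.5, 1.0]   # labels 1, 2 and 4 fall back to weight 1.0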
def get_vocab(file):
lines = open(file, "r").readlines()
lines = [line.strip() for line in lines if line.strip()]
labels = {i: lines[i] for i in range(len(lines))}
return labels
def find_newlines(contents):
"""
Yields the starting byte offset of each non-empty line in the given file contents.
"""
start = 0
while True:
try:
# index and split are much faster than Python for loops
new_start = contents.index(b"\n", start)
line = (
contents[start:new_start]
.replace(b"\xc2\x99", b" ")
.replace(b"\xc2\xa0", b" ")
.decode("utf-8", errors="ignore")
)
if len(line.split()) > 0:
yield start
start = new_start + 1
except ValueError:
break
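# Illustrative example with made-up byte contents:
# >>> list(find_newlines(b"first line\nsecond line\n"))
# [0, 11]   # byte offsets at which the two non-empty lines start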
def load_data_indices(idx_file: str, data_file: str, savename: str):
"""
Loads dataset index file if it exists
"""
data_dir = data_file[: data_file.rfind("/")]
mode = data_file[data_file.rfind("/") + 1 : data_file.rfind(".")]
idx_file = f"{data_dir}/{mode}_{savename}.pkl"
if os.path.isfile(idx_file):
# If the sentence indices file already exists, load from it
with open(idx_file, "rb") as f:
indices = pickle.load(f)
return indices, idx_file, data_dir
return None, idx_file, data_dir
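# Illustrative usage sketch with hypothetical paths:
# >>> indices, idx_file, data_dir = load_data_indices(
# ...     idx_file="unused.pkl", data_file="/data/squad/train.json", savename="sentence_indices")
# This looks for /data/squad/train_sentence_indices.pkl and returns (None, idx_file, data_dir)
# when that pickle does not exist yet.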
| NeMo-main | nemo/collections/nlp/data/data_utils/data_preprocessing.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.data_utils.data_preprocessing import *
| NeMo-main | nemo/collections/nlp/data/data_utils/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import (
TarredTranslationDataset,
TranslationDataset,
)
| NeMo-main | nemo/collections/nlp/data/machine_translation/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytorch Dataset for training Neural Machine Translation."""
import io
import json
import pickle
from collections import OrderedDict
from dataclasses import dataclass
from typing import Any, List, Optional
import braceexpand
import numpy as np
import webdataset as wd
from torch.utils.data import IterableDataset
from nemo.collections.nlp.data.data_utils.data_preprocessing import dataset_to_ids
from nemo.core import Dataset
from nemo.utils import logging
__all__ = ['TranslationDataset', 'TarredTranslationDataset']
@dataclass
class TranslationDataConfig:
src_file_name: Optional[Any] = None # Any = str or List[str]
tgt_file_name: Optional[Any] = None # Any = str or List[str]
use_tarred_dataset: bool = False
tar_files: Optional[Any] = None # Any = str or List[str]
metadata_file: Optional[Any] = None # Any = str or List[str]
lines_per_dataset_fragment: Optional[int] = 1000000
num_batches_per_tarfile: Optional[int] = 1000
shard_strategy: Optional[str] = 'scatter'
tokens_in_batch: int = 512
clean: bool = False
max_seq_length: int = 512
min_seq_length: int = 1
cache_ids: bool = False
cache_data_per_node: bool = False
use_cache: bool = False
shuffle: bool = False
num_samples: int = -1
drop_last: bool = False
pin_memory: bool = False
num_workers: int = 8
reverse_lang_direction: bool = False
load_from_tarred_dataset: bool = False
metadata_path: Optional[str] = None
tar_shuffle_n: int = 100
n_preproc_jobs: int = -2
tar_file_prefix: str = 'parallel'
concat_sampling_technique: Optional[str] = 'temperature'
concat_sampling_temperature: Optional[int] = 5
concat_sampling_probabilities: Optional[List[float]] = None
class TranslationDataset(Dataset):
def __init__(
self,
dataset_src: str,
dataset_tgt: str,
tokens_in_batch: int = 1024,
clean: bool = False,
max_seq_length: int = 512,
min_seq_length: int = 1,
max_seq_length_diff: int = 512,
max_seq_length_ratio: int = 512,
cache_ids: bool = False,
cache_data_per_node: bool = False,
use_cache: bool = False,
reverse_lang_direction: bool = False,
prepend_id: int = None,
add_bos_eos_to_encoder: bool = True,
):
self.dataset_src = dataset_src
self.dataset_tgt = dataset_tgt
self.tokens_in_batch = tokens_in_batch
self.cache_ids = cache_ids
self.use_cache = use_cache
self.clean = clean
self.cache_data_per_node = cache_data_per_node
self.max_seq_length = max_seq_length
self.min_seq_length = min_seq_length
self.max_seq_length_diff = max_seq_length_diff
self.max_seq_length_ratio = max_seq_length_ratio
self.reverse_lang_direction = reverse_lang_direction
self.prepend_id = prepend_id
self.add_bos_eos_to_encoder = add_bos_eos_to_encoder
# deprecation warnings for cache_ids, use_cache, and cache_data_per_node
if self.cache_ids is True or self.use_cache is True or self.cache_data_per_node is True:
logging.warning(
'Deprecation warning. self.cache_ids, self.use_cache, and self.cache_data_per_node will be removed. Data caching to be done with tarred datasets moving forward.'
)
def batchify(self, tokenizer_src, tokenizer_tgt):
src_ids = dataset_to_ids(
self.dataset_src,
tokenizer_src,
cache_ids=self.cache_ids,
cache_data_per_node=self.cache_data_per_node,
use_cache=self.use_cache,
add_bos_eos=self.add_bos_eos_to_encoder,
remove_trailing_newline=True,
)
tgt_ids = dataset_to_ids(
self.dataset_tgt,
tokenizer_tgt,
cache_ids=self.cache_ids,
cache_data_per_node=self.cache_data_per_node,
use_cache=self.use_cache,
remove_trailing_newline=True,
)
if self.clean:
src_ids, tgt_ids = self.clean_src_and_target(
src_ids,
tgt_ids,
max_tokens=self.max_seq_length,
min_tokens=self.min_seq_length,
max_tokens_diff=self.max_seq_length_diff,
max_tokens_ratio=self.max_seq_length_ratio,
)
self.src_pad_id = tokenizer_src.pad_id
self.tgt_pad_id = tokenizer_tgt.pad_id
self.batch_indices = self.pack_data_into_batches(src_ids, tgt_ids)
self.batches = self.pad_batches(src_ids, tgt_ids, self.batch_indices)
def __len__(self):
return len(self.batches)
def __getitem__(self, idx):
src_ids = self.batches[idx]["src"]
tgt = self.batches[idx]["tgt"]
if self.reverse_lang_direction:
src_ids, tgt = tgt, src_ids
labels = tgt[:, 1:]
tgt_ids = tgt[:, :-1]
if self.prepend_id:
src_ids = np.insert(src_ids, 0, self.prepend_id, axis=-1)
src_mask = (src_ids != self.src_pad_id).astype(np.int32)
tgt_mask = (tgt_ids != self.tgt_pad_id).astype(np.int32)
return src_ids, src_mask, tgt_ids, tgt_mask, labels
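# Note on the item returned above: each index corresponds to a whole pre-packed batch, so
# src_ids/tgt_ids are 2-D arrays of token ids, src_mask/tgt_mask are the matching 0/1 padding
# masks, and labels are the target ids shifted left by one position for teacher forcing.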
def pad_batches(self, src_ids, tgt_ids, batch_indices):
"""
Augments source and target ids in the batches with padding symbol
to make the lengths of all sentences in the batches equal.
"""
batches = {}
for batch_idx, b in enumerate(batch_indices):
src_len = max([len(src_ids[i]) for i in b])
tgt_len = max([len(tgt_ids[i]) for i in b])
src_ids_ = self.src_pad_id * np.ones((len(b), src_len), dtype=np.int64)
tgt_ids_ = self.tgt_pad_id * np.ones((len(b), tgt_len), dtype=np.int64)
for i, sentence_idx in enumerate(b):
src_ids_[i][: len(src_ids[sentence_idx])] = src_ids[sentence_idx]
tgt_ids_[i][: len(tgt_ids[sentence_idx])] = tgt_ids[sentence_idx]
batches[batch_idx] = {"src": src_ids_, "tgt": tgt_ids_}
return batches
def pack_data_into_batches(self, src_ids, tgt_ids):
"""
Takes two lists of source and target sentences, sorts them, and packs them
into batches to minimize the use of padding tokens. Returns a list of
batches where each batch contains the indices of the sentences included in it.
"""
# create buckets sorted by the number of src tokens
# each bucket is also sorted by the number of tgt tokens
buckets = {}
for i, src_id in enumerate(src_ids):
src_len, tgt_len = len(src_id), len(tgt_ids[i])
if src_len not in buckets:
buckets[src_len] = [(tgt_len, i)]
else:
buckets[src_len].append((tgt_len, i))
for b_idx in buckets:
buckets[b_idx] = sorted(buckets[b_idx])
buckets = OrderedDict(sorted(buckets.items()))
indices = list(buckets.keys())
batches = [[]]
num_batches = 0
batch_size = 0
i = 0
src_len = 0
tgt_len = 0
while i < len(buckets):
while buckets[indices[i]]:
i_src = max(src_len, indices[i])
i_tgt = max(tgt_len, buckets[indices[i]][0][0])
try:
ip1_src = max(src_len, indices[i + 1])
ip1_tgt = max(tgt_len, buckets[indices[i + 1]][0][0])
except IndexError:
ip1_src = i_src + 1
ip1_tgt = i_tgt + 1
if i_src + i_tgt <= ip1_src + ip1_tgt:
src_len = i_src
tgt_len = i_tgt
_, idx = buckets[indices[i]].pop(0)
else:
src_len = ip1_src
tgt_len = ip1_tgt
_, idx = buckets[indices[i + 1]].pop(0)
batches[num_batches].append(idx)
batch_size += 1
if batch_size * (src_len + tgt_len) > self.tokens_in_batch:
num_examples_to_split = len(batches[num_batches])
batches_to_evict = 8 * ((num_examples_to_split - 1) // 8)
if batches_to_evict == 0:
batches_to_evict = num_examples_to_split
batches.append(batches[num_batches][batches_to_evict:])
batches[num_batches] = batches[num_batches][:batches_to_evict]
batch_size = num_examples_to_split - batches_to_evict
num_batches += 1
if batch_size > 0:
src_len = max([len(src_ids[j]) for j in batches[num_batches]])
tgt_len = max([len(tgt_ids[j]) for j in batches[num_batches]])
else:
src_len = 0
tgt_len = 0
break
if not buckets[indices[i]]:
i = i + 1
if not batches[-1]:
batches.pop(-1)
return batches
def clean_src_and_target(
self,
src_ids,
tgt_ids,
max_tokens=None,
min_tokens=None,
max_tokens_diff=None,
max_tokens_ratio=None,
filter_equal_src_and_dest=False,
):
"""
Cleans source and target sentences to get rid of noisy data.
Specifically, a pair of sentences is removed if
-- either source or target is longer than *max_tokens*
-- either source or target is shorter than *min_tokens*
-- absolute difference between source and target is larger than
*max_tokens_diff*
-- one sentence is *max_tokens_ratio* times longer than the other
"""
if len(src_ids) != len(tgt_ids):
raise ValueError("Source and target corpora have different lengths!")
src_ids_, tgt_ids_ = [], []
for i in range(len(src_ids)):
src_len, tgt_len = len(src_ids[i]), len(tgt_ids[i])
if (
(max_tokens is not None and (src_len > max_tokens or tgt_len > max_tokens))
or (min_tokens is not None and (src_len < min_tokens or tgt_len < min_tokens))
or (filter_equal_src_and_dest and src_ids[i] == tgt_ids[i])
or (max_tokens_diff is not None and np.abs(src_len - tgt_len) > max_tokens_diff)
):
continue
if max_tokens_ratio is not None:
ratio = max(src_len - 2, 1) / max(tgt_len - 2, 1)
if ratio > max_tokens_ratio or ratio < (1 / max_tokens_ratio):
continue
src_ids_.append(src_ids[i])
tgt_ids_.append(tgt_ids[i])
return src_ids_, tgt_ids_
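# Illustrative example with made-up token-id lists; `ds` stands for a TranslationDataset instance.
# With max_tokens=5 the second pair is dropped because its source side has 6 tokens:
# >>> src = [[1, 2, 3], [1, 2, 3, 4, 5, 6]]; tgt = [[7, 8], [7, 8, 9]]
# >>> ds.clean_src_and_target(src, tgt, max_tokens=5)
# ([[1, 2, 3]], [[7, 8]])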
class TarredTranslationDataset(IterableDataset):
"""
A similar Dataset to the TranslationDataset, but which loads tarred tokenized pickle files.
Accepts a single JSON metadata file containing the total number of batches
as well as the path(s) to the tarball(s) containing the pickled parallel dataset batch files.
Valid formats for the text_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/text.tar' or 'path/to/text_{1..100}.tar', or
(2) a list of file paths that will not be brace-expanded, e.g. ['text_1.tar', 'text_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
Additionally, please note that the len() of this DataLayer is taken to be the number of batches
in the tarred dataset. An incorrect metadata file may lead to some DataLoader issues down the line.
Args:
text_tar_filepaths: Either a list of tokenized text tarball filepaths, or a
string (can be brace-expandable).
metadata_path (str): Path to the metadata manifest.
encoder_tokenizer: AutoTokenizer-wrapped BPE tokenizer model, such as YTTM
decoder_tokenizer: AutoTokenizer-wrapped BPE tokenizer model, such as YTTM
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 1.
reverse_lang_direction (bool): When True, swaps the source and target directions when returning minibatches.
prepend_id (int): Prepends the specified token id to the start of every source sentence. Defaults to None.
"""
def __init__(
self,
text_tar_filepaths: str,
metadata_path: str,
encoder_tokenizer: str,
decoder_tokenizer: str,
shuffle_n: int = 1,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 1,
reverse_lang_direction: bool = False,
prepend_id: int = None,
):
super(TarredTranslationDataset, self).__init__()
self.encoder_tokenizer = encoder_tokenizer
self.decoder_tokenizer = decoder_tokenizer
self.reverse_lang_direction = reverse_lang_direction
self.src_pad_id = encoder_tokenizer.pad_id
self.tgt_pad_id = decoder_tokenizer.pad_id
self.prepend_id = prepend_id
valid_shard_strategies = ['scatter', 'replicate']
if shard_strategy not in valid_shard_strategies:
raise ValueError(
f"Invalid shard strategy of type {type(shard_strategy)} "
f"{repr(shard_strategy) if len(repr(shard_strategy)) < 100 else repr(shard_strategy)[:100] + '...'}! "
f"Allowed values are: {valid_shard_strategies}."
)
with open(metadata_path, 'r') as f:
metadata = json.load(f)
self.metadata = metadata
if isinstance(text_tar_filepaths, str):
# Replace '(', '[', '<' and '_OP_' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "{")
# Replace ')', ']', '>' and '_CL_' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "}")
if isinstance(text_tar_filepaths, str):
# Brace expand
text_tar_filepaths = list(braceexpand.braceexpand(text_tar_filepaths))
if shard_strategy == 'scatter':
logging.info("Tarred dataset shards will be scattered evenly across all nodes.")
if len(text_tar_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(text_tar_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size}). "
f"Some shards will not be used ({len(text_tar_filepaths) % world_size})."
)
batches_per_tar = self.metadata['num_batches'] // len(text_tar_filepaths)
begin_idx = (len(text_tar_filepaths) // world_size) * global_rank
end_idx = begin_idx + (len(text_tar_filepaths) // world_size)
logging.info('Begin Index : %d' % (begin_idx))
logging.info('End Index : %d' % (end_idx))
text_tar_filepaths = text_tar_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
self.length = batches_per_tar * len(text_tar_filepaths) * world_size
elif shard_strategy == 'replicate':
logging.info("All tarred dataset shards will be replicated across all nodes.")
self.length = self.metadata['num_batches']
else:
raise ValueError(f"Invalid shard strategy ! Allowed values are : {valid_shard_strategies}")
self.tarpath = text_tar_filepaths
# Put together WebDataset
self._dataset = wd.WebDataset(urls=text_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n, initial=shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = self._dataset.rename(pkl='pkl', key='__key__').to_tuple('pkl', 'key').map(f=self._build_sample)
def _build_sample(self, fname):
# Unpack the (pickled batch bytes, tar member key) tuple produced by WebDataset
pkl_file, _ = fname
pkl_file = io.BytesIO(pkl_file)
data = pickle.load(pkl_file) # loads np.int64 vector
pkl_file.close()
src_ids = data["src"]
tgt = data["tgt"]
if self.reverse_lang_direction:
src_ids, tgt = tgt, src_ids
labels = tgt[:, 1:]
tgt_ids = tgt[:, :-1]
if self.prepend_id:
src_ids = np.insert(src_ids, 0, self.prepend_id, axis=-1)
src_mask = (src_ids != self.src_pad_id).astype(np.int32)
tgt_mask = (tgt_ids != self.tgt_pad_id).astype(np.int32)
return src_ids, src_mask, tgt_ids, tgt_mask, labels
def __iter__(self):
return self._dataset.__iter__()
def __len__(self):
return self.length
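# Illustrative construction sketch; the paths, brace-expansion pattern and tokenizer objects
# below are hypothetical:
# >>> dataset = TarredTranslationDataset(
# ...     text_tar_filepaths="/data/parallel.batches.tokens.16000._OP_0..31_CL_.tar",
# ...     metadata_path="/data/metadata.tokens.16000.json",
# ...     encoder_tokenizer=enc_tokenizer, decoder_tokenizer=dec_tokenizer,
# ...     shuffle_n=100, shard_strategy="scatter", global_rank=0, world_size=1)
# Iterating over it yields (src_ids, src_mask, tgt_ids, tgt_mask, labels) batches.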
| NeMo-main | nemo/collections/nlp/data/machine_translation/machine_translation_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import os
import pickle
import tarfile
import tempfile
import youtokentome as yttm
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf
from pytorch_lightning import Trainer
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer, create_spt_model
from nemo.collections.nlp.data.language_modeling.sentence_dataset import SentenceDataset
from nemo.collections.nlp.data.machine_translation.machine_translation_dataset import TranslationDataset
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_config import MTEncDecModelConfig
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer, get_tokenizer
from nemo.utils import logging
class MTDataPreproc:
""" Automatically trains tokenizers and preprocesses machine translation data based on the MTEncDecModelConfig.
For training NMT models with datasets larger than 5M sentence pairs,
it can be inefficient to train them without first creating a tarred dataset.
If the user wants to change the tokenizer, vocab size, or batch size, for example,
they must reprocess the data with the correct configuration.
With MTDataPreproc users can sweep through data configurations and the tarred dataset will
be automatically created according to the model configuration.
To train tokenizer model and create tarred dataset specify in configuration:
model.preproc_out_dir=/path/to/preproc_out
model.encoder_tokenizer.vocab_size=32000
model.decoder_tokenizer.vocab_size=32000
model.train_ds.use_tarred_dataset=True
model.train_ds.src_file_name=/path/to/src.txt
model.train_ds.tgt_file_name=/path/to/tgt.txt
model.train_ds.tokens_in_batch=16000
Once a dataset has been constructed based on this configuration, MTDataPreproc will not process it again.
If a previously trained tokenizer model or tarred dataset is found, MTDataPreproc will not preprocess the data.
Note: tokenizer training from scratch is currently supported only for the YouTokenToMe and SentencePiece tokenizers.
"""
def __init__(self, cfg: MTEncDecModelConfig, trainer: Trainer = None) -> None:
self._cfg = cfg
self.global_rank = 0
self.world_size = 1
if trainer is not None:
self.global_rank = (trainer.node_rank * trainer.num_devices) + trainer.local_rank
self.world_size = trainer.num_nodes * trainer.num_devices
if hasattr(cfg, 'train_ds'):
supported_tokenizers = ['yttm', 'huggingface', 'sentencepiece', 'megatron', 'byte-level']
supported_multilingual_tokenizers = ['sentencepiece', 'byte-level']
supported_train_tokenizers = ['yttm', 'sentencepiece']
if (
cfg.encoder_tokenizer.get('library') not in supported_tokenizers
or cfg.decoder_tokenizer.get('library') not in supported_tokenizers
):
raise NotImplementedError(f"Currently we only support {supported_tokenizers}.")
if cfg.get('multilingual') and (
cfg.encoder_tokenizer.get('library') not in supported_multilingual_tokenizers
or cfg.decoder_tokenizer.get('library') not in supported_multilingual_tokenizers
):
raise NotImplementedError(
f"Currently we only support {supported_multilingual_tokenizers} for multilingual models."
)
if cfg.get('shared_tokenizer') and cfg.encoder_tokenizer.get('library') != cfg.decoder_tokenizer.get(
'library'
):
raise ValueError("Shared tokenizers cannot be from different libraries.")
# Prepare tokenizers
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
or cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
):
# Train tokenizer models if using yttm or sentencepiece and they don't exist
if (
cfg.encoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.encoder_tokenizer.get('tokenizer_model') is None
) or (
cfg.decoder_tokenizer.get('library') in supported_train_tokenizers
and cfg.decoder_tokenizer.get('tokenizer_model') is None
):
if cfg.get('preproc_out_dir') is None:
raise ValueError('Tokenizer model training required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to train tokenizers but could not be found.'
)
src_fname = cfg.train_ds.get('src_file_name')
tgt_fname = cfg.train_ds.get('tgt_file_name')
src_language = cfg.get('src_language')
tgt_language = cfg.get('tgt_language')
spt_symbols = None
tempdir = tempfile.TemporaryDirectory()
if cfg.get('multilingual'):
spt_symbols = []
if isinstance(src_fname, ListConfig):
fnames = (" ").join(src_fname)
src_fname = os.path.join(tempdir.name, 'src.txt')
os.system('cat %s > %s' % (fnames, src_fname))
if isinstance(tgt_fname, ListConfig):
fnames = (" ").join(tgt_fname)
tgt_fname = os.path.join(tempdir.name, 'tgt.txt')
os.system('cat %s > %s' % (fnames, tgt_fname))
if isinstance(src_language, ListConfig):
for lng in src_language:
spt_symbols.append("<" + lng + ">")
if isinstance(tgt_language, ListConfig):
for lng in tgt_language:
spt_symbols.append("<" + lng + ">")
# train tokenizer model on training data
self.encoder_tokenizer_model, self.decoder_tokenizer_model = MTDataPreproc.train_tokenizers(
out_dir=cfg.get('preproc_out_dir'),
src_fname=src_fname,
tgt_fname=tgt_fname,
shared_tokenizer=cfg.get('shared_tokenizer'),
encoder_tokenizer_vocab_size=cfg.encoder_tokenizer.get('vocab_size'),
decoder_tokenizer_vocab_size=cfg.decoder_tokenizer.get('vocab_size'),
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
encoder_tokenizer_coverage=cfg.encoder_tokenizer.get('coverage', 0.999),
decoder_tokenizer_coverage=cfg.decoder_tokenizer.get('coverage', 0.999),
global_rank=self.global_rank,
encoder_training_sample_size=cfg.encoder_tokenizer.get('training_sample_size', -1),
decoder_training_sample_size=cfg.decoder_tokenizer.get('training_sample_size', -1),
encoder_special_tokens=OmegaConf.to_container(cfg.encoder_tokenizer.special_tokens)
if cfg.encoder_tokenizer.special_tokens
else None,
decoder_special_tokens=OmegaConf.to_container(cfg.decoder_tokenizer.special_tokens)
if cfg.decoder_tokenizer.special_tokens
else None,
spt_symbols=spt_symbols,
)
# update config
self._cfg.encoder_tokenizer.tokenizer_model = self.encoder_tokenizer_model
self._cfg.decoder_tokenizer.tokenizer_model = self.decoder_tokenizer_model
tempdir.cleanup()
else:
self.encoder_tokenizer_model = cfg.encoder_tokenizer.get('tokenizer_model')
self.decoder_tokenizer_model = cfg.decoder_tokenizer.get('tokenizer_model')
self.encoder_tokenizer, self.decoder_tokenizer = self.get_enc_dec_tokenizers(
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=getattr(self, "encoder_tokenizer_model", None),
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
encoder_r2l=cfg.encoder_tokenizer.get('r2l', False),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=getattr(self, "decoder_tokenizer_model", None),
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
decoder_r2l=cfg.decoder_tokenizer.get('r2l', False),
encoder_tokenizer_legacy=cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
decoder_tokenizer_legacy=cfg.decoder_tokenizer.get('sentencepiece_legacy', False),
)
# If using tarred dataset for training, automatically create it if needed
if cfg.train_ds.get('use_tarred_dataset'):
if cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is None:
if cfg.get('preproc_out_dir') is None:
raise ValueError('Data preprocessing required but cfg.preproc_out_dir is None.')
if cfg.train_ds.get('src_file_name') is None or cfg.train_ds.get('tgt_file_name') is None:
raise ValueError(
'src_file_name and tgt_file_name needed to create tarred dataset but could not be found.'
)
# Preprocess data and cache for use during training
if self.global_rank == 0:
logging.info(
f"Creating tarred dataset for src: {cfg.train_ds.get('src_file_name')} and tgt: {cfg.train_ds.get('tgt_file_name')}"
)
if isinstance(cfg.train_ds.get('src_file_name'), str):
src_file_list = [cfg.train_ds.get('src_file_name')]
tgt_file_list = [cfg.train_ds.get('tgt_file_name')]
outdir_list = [cfg.get('preproc_out_dir')]
else:
src_file_list = cfg.train_ds.get('src_file_name')
tgt_file_list = cfg.train_ds.get('tgt_file_name')
if isinstance(cfg.get('src_language'), ListConfig):
langs = cfg.get('src_language')
elif isinstance(cfg.get('tgt_language'), ListConfig):
langs = cfg.get('tgt_language')
outdir_list = []
for lang in langs:
outdir_list.append(os.path.join(cfg.get('preproc_out_dir'), lang))
if len(src_file_list) != len(tgt_file_list) or len(src_file_list) != len(outdir_list):
raise ValueError(
"Number of source files, target files, and multilingual language pairs must be the same."
)
# TODO: have to get tokenizers inside .preprocess_parallel because they can't be pickled
metadata_file_list = []
for idx, src_file in enumerate(src_file_list):
self.train_tar_files, self.train_metadata_file = MTDataPreproc.preprocess_parallel_dataset(
clean=cfg.train_ds.clean,
src_fname=src_file,
tgt_fname=tgt_file_list[idx],
out_dir=outdir_list[idx],
encoder_tokenizer_name=cfg.encoder_tokenizer.get('library'),
encoder_model_name=cfg.encoder.get('model_name'),
encoder_tokenizer_model=getattr(self, "encoder_tokenizer_model", None),
encoder_bpe_dropout=cfg.encoder_tokenizer.get('bpe_dropout', 0.0),
encoder_tokenizer_r2l=cfg.encoder_tokenizer.get('r2l', False),
encoder_tokenizer_legacy=cfg.encoder_tokenizer.get('sentencepiece_legacy', False),
decoder_tokenizer_name=cfg.decoder_tokenizer.get('library'),
decoder_model_name=cfg.decoder.get('model_name'),
decoder_tokenizer_model=getattr(self, "decoder_tokenizer_model", None),
decoder_bpe_dropout=cfg.decoder_tokenizer.get('bpe_dropout', 0.0),
decoder_tokenizer_r2l=cfg.decoder_tokenizer.get('r2l', False),
decoder_tokenizer_legacy=cfg.decoder_tokenizer.get('sentencepiece_legacy', False),
max_seq_length=cfg.train_ds.get('max_seq_length', 512),
tokens_in_batch=cfg.train_ds.get('tokens_in_batch', 8192),
lines_per_dataset_fragment=cfg.train_ds.get('lines_per_dataset_fragment', 1000000),
num_batches_per_tarfile=cfg.train_ds.get('num_batches_per_tarfile', 1000),
min_seq_length=1,
global_rank=self.global_rank,
world_size=self.world_size,
n_jobs=cfg.train_ds.get('n_preproc_jobs', -2),
tar_file_prefix=cfg.train_ds.get('tar_file_prefix', 'parallel'),
)
metadata_file_list.append(self.train_metadata_file)
# update config
# self._cfg.train_ds.tar_files = self.tar_files_to_string(self.train_tar_files)
# self._cfg.train_ds.tar_files = self.train_tar_files
if isinstance(cfg.train_ds.get('metadata_file'), str):
self._cfg.train_ds.metadata_file = metadata_file_list[0]
else:
self._cfg.train_ds.metadata_file = metadata_file_list
logging.info(
f"Using tarred dataset created in folder(s) {outdir_list} and metadata created at {self._cfg.train_ds.metadata_file}"
)
elif cfg.train_ds.get('tar_files') is not None and cfg.train_ds.get('metadata_file') is None:
raise ValueError('A metadata file is required for tarred dataset but cfg.metadata_file is None.')
elif cfg.train_ds.get('tar_files') is None and cfg.train_ds.get('metadata_file') is not None:
if isinstance(cfg.train_ds.get('metadata_file'), str):
metadata_file_list = [cfg.train_ds.get('metadata_file')]
else:
metadata_file_list = cfg.train_ds.get('metadata_file')
for metadata_file in metadata_file_list:
with open(metadata_file) as metadata_reader:
metadata = json.load(metadata_reader)
if metadata['tar_files']:
logging.info(f"Using tarred dataset: {metadata['tar_files']}")
else:
raise ValueError(f'tar_files not provided and metadata does not have tar files')
else:
self.train_tar_files = cfg.train_ds.get('tar_files')
self.train_metadata_file = cfg.train_ds.get('metadata_file')
logging.info(
f"Using tarred dataset from config at {self.train_tar_files} and metadata from {self.train_metadata_file}"
)
def tar_files_to_string(self, tar_files):
""" Tar files are generated in the following format: basename.number.tar
Where number is an integer from 1 to the number of tar files.
We convert this list to a string that can be used in the model config to specify
tarred datasets: basename_OP_1..num_tar_files_CL_.tar
Args:
tar_files (List[str]): List of tar files generated by preprocess_parallel_dataset
"""
num_tar_files = len(tar_files)
split_on_dot = tar_files[0].split('.')
basename = '.'.join(split_on_dot[0:-2])
tar_file_string = f'{basename}._OP_1..{num_tar_files}_CL_.tar'
return tar_file_string
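# Illustrative example; `preproc` stands for an MTDataPreproc instance and the file names are made up:
# >>> preproc.tar_files_to_string(
# ...     ["parallel.batches.tokens.16000.1.tar", "parallel.batches.tokens.16000.2.tar"])
# 'parallel.batches.tokens.16000._OP_1..2_CL_.tar'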
@staticmethod
def get_enc_dec_tokenizers(
encoder_tokenizer_name=None,
encoder_tokenizer_model=None,
encoder_bpe_dropout=0.0,
encoder_model_name=None,
encoder_r2l=False,
decoder_tokenizer_name=None,
decoder_tokenizer_model=None,
decoder_bpe_dropout=0.0,
decoder_model_name=None,
decoder_r2l=False,
encoder_tokenizer_legacy=False,
decoder_tokenizer_legacy=False,
):
# if encoder_tokenizer_name != 'yttm' or decoder_tokenizer_name != 'yttm':
# raise NotImplementedError(f"Currently we only support yttm tokenizer.")
if encoder_bpe_dropout is None:
encoder_bpe_dropout = 0.0
if decoder_bpe_dropout is None:
decoder_bpe_dropout = 0.0
encoder_tokenizer = get_nmt_tokenizer(
library=encoder_tokenizer_name,
model_name=encoder_model_name,
tokenizer_model=encoder_tokenizer_model,
bpe_dropout=encoder_bpe_dropout,
r2l=encoder_r2l,
legacy=encoder_tokenizer_legacy,
)
decoder_tokenizer = get_nmt_tokenizer(
library=decoder_tokenizer_name,
model_name=decoder_model_name,
tokenizer_model=decoder_tokenizer_model,
bpe_dropout=decoder_bpe_dropout,
r2l=decoder_r2l,
legacy=decoder_tokenizer_legacy,
)
return encoder_tokenizer, decoder_tokenizer
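# Illustrative usage sketch; the tokenizer model paths below are hypothetical:
# >>> enc_tok, dec_tok = MTDataPreproc.get_enc_dec_tokenizers(
# ...     encoder_tokenizer_name="yttm", encoder_tokenizer_model="/models/tokenizer.encoder.32000.BPE.model",
# ...     decoder_tokenizer_name="yttm", decoder_tokenizer_model="/models/tokenizer.decoder.32000.BPE.model")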
@staticmethod
def get_monolingual_tokenizer(
tokenizer_name=None, tokenizer_model=None, bpe_dropout=0.0,
):
if tokenizer_name == 'yttm':
if bpe_dropout is None:
bpe_dropout = 0.0
tokenizer = get_tokenizer(
tokenizer_name=tokenizer_name, tokenizer_model=tokenizer_model, bpe_dropout=bpe_dropout,
)
elif tokenizer_name == 'sentencepiece':
tokenizer = SentencePieceTokenizer(model_path=tokenizer_model)
else:
try:
tokenizer = get_tokenizer(tokenizer_name, special_tokens={"pad_token": "[PAD]"})
except Exception as e:
raise ValueError(f'{tokenizer_name} is not supported by either NeMo or HuggingFace. {e}')
return tokenizer
# TODO: add local or global rank 0 decorator
@staticmethod
def preprocess_parallel_dataset(
clean,
src_fname,
tgt_fname,
out_dir,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_tokenizer_r2l,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
global_rank,
world_size,
n_jobs=-2,
tar_file_prefix='parallel',
encoder_tokenizer_legacy=False,
decoder_tokenizer_legacy=False,
):
"""Create tarred dataset from large paired translation data.
Args:
clean (bool): if True, cleans source and target sentences to get rid of noisy data.
src_fname (str): path to source text data
tgt_fname (str): path to target text data
out_dir (str): path to write tarred dataset
encoder_tokenizer_name / encoder_tokenizer_model: library name and model path used to build the encoder tokenizer
decoder_tokenizer_name / decoder_tokenizer_model: library name and model path used to build the decoder tokenizer
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
tar_file_prefix (str) : add string prefix to tar files
n_jobs (int): number of processes to use for data processing (-2 uses all CPUs but one)
"""
os.makedirs(out_dir, exist_ok=True)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
if global_rank == 0:
tar_files_in_out_dir = glob.glob(f'{out_dir}/*.tar')
if tar_files_in_out_dir:
logging.info(
f'Tarred dataset detected: {tar_files_in_out_dir} and will be used. Remove if reprocessing.'
)
else:
filenames = [src_fname, tgt_fname]
# get number of lines so that we can create a partition of the lines of the text file
num_src_lines, num_tgt_lines = Parallel(n_jobs=2)(
delayed(MTDataPreproc._get_num_lines)(filename) for filename in filenames
)
logging.info(f'Found {num_src_lines} source lines and {num_tgt_lines} target lines.')
assert num_src_lines == num_tgt_lines, 'Number of source lines should equal number of target lines.'
# create a partition of lines that we can parallelize over
lines_partition = MTDataPreproc._get_lines_partition(num_src_lines, lines_per_dataset_fragment)
logging.info(f"Found {len(lines_partition)} fragments to parallelize over.")
# create tarfiles for each fragment in parallel
results_list = Parallel(n_jobs=n_jobs)(
delayed(MTDataPreproc._process_fragment)(
src_filename=src_fname,
tgt_filename=tgt_fname,
lines_indices=lines_indices,
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
tokens_in_batch=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
fragment_index=fragment_index,
encoder_tokenizer_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_r2l=decoder_tokenizer_r2l,
encoder_tokenizer_legacy=encoder_tokenizer_legacy,
decoder_tokenizer_legacy=decoder_tokenizer_legacy,
)
for fragment_index, lines_indices in enumerate(lines_partition)
)
# compute total batches so far
total_batches = sum([batch_count for batch_count, _ in results_list])
# save batches from tar files containing the left over batches (if there's enough batches)
remainder_tar_file_ctr = 0
remainder_tar_file_path = os.path.join(
out_dir, f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar'
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w')
batch_in_tar_ctr = 0
for _, tar_file_path in results_list:
tar_file_ptr = tarfile.open(tar_file_path, 'r')
for member in tar_file_ptr.getmembers():
remainder_tar_file_ptr.addfile(member, tar_file_ptr.extractfile(member.name))
batch_in_tar_ctr += 1
if batch_in_tar_ctr == num_batches_per_tarfile:
remainder_tar_file_ctr += 1
remainder_tar_file_ptr.close()
remainder_tar_file_path = os.path.join(
out_dir,
f'remainder-batches.tokens.{tokens_in_batch}.tar_file_{remainder_tar_file_ctr}.tar',
)
remainder_tar_file_ptr = tarfile.open(remainder_tar_file_path, 'w',)
batch_in_tar_ctr = 0
tar_file_ptr.close()
os.remove(tar_file_path)
# log the number of batches remaining as they will be discarded
num_batches_discarded = len(remainder_tar_file_ptr.getmembers())
total_batches -= num_batches_discarded
logging.info(
f'Number of batches discarded: {num_batches_discarded}, total batches kept: {total_batches}'
)
remainder_tar_file_ptr.close()
os.remove(remainder_tar_file_path)
# dump metadata to json
metadata = {}
metadata['num_batches'] = total_batches
# rename tar files so they can be more easily used with CLI and YAML
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
for index, path in enumerate(tar_file_paths):
os.rename(
path, os.path.join(out_dir, f'{tar_file_prefix}.batches.tokens.{tokens_in_batch}.{index}.tar')
)
# add tar files to manifest
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
metadata['tar_files'] = tar_file_paths
json.dump(metadata, open(metadata_path, 'w'))
tar_file_paths = glob.glob(f'{out_dir}/*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
@staticmethod
def _get_num_lines(filename):
with open(filename) as f:
for i, l in enumerate(f):
pass
return i + 1
@staticmethod
def _get_lines_partition(num_lines, lines_per_dataset_fragment):
# create partition based on fragment size
fragment_indices = []
for i in range(0, num_lines, lines_per_dataset_fragment):
fragment_indices.append([i, i + lines_per_dataset_fragment])
# modify last indices
last_indices = fragment_indices.pop()
last_indices[1] = -1
fragment_indices.append(last_indices)
# if fragment_indices[-1][1] >= num_lines:
# fragment_indices.pop()
return fragment_indices
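# Worked example with made-up sizes:
# >>> MTDataPreproc._get_lines_partition(num_lines=2500, lines_per_dataset_fragment=1000)
# [[0, 1000], [1000, 2000], [2000, -1]]   # the last fragment's end index is left open-ended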
@staticmethod
def _process_fragment(
src_filename,
tgt_filename,
lines_indices,
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tokens_in_batch,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_bpe_dropout,
encoder_model_name,
encoder_tokenizer_r2l,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
fragment_index,
encoder_tokenizer_legacy,
decoder_tokenizer_legacy,
):
start = lines_indices[0]
stop = lines_indices[1]
# write lines in partition to temporary files to be consumed by write_parallel_batches_to_tarfiles
tmp_f_src = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f_tgt = tempfile.NamedTemporaryFile(delete=False, mode='w')
with open(src_filename, 'r') as src_in, open(tgt_filename) as tgt_in:
for line_number, (src_line, tgt_line) in enumerate(zip(src_in, tgt_in)):
if start <= line_number and line_number < stop:
if src_line and tgt_line:
tmp_f_src.write(src_line)
tmp_f_tgt.write(tgt_line)
tmp_f_src.close()
tmp_f_tgt.close()
num_batches_from_fragment, remainder_tar_file_path = MTDataPreproc.write_parallel_batches_to_tarfiles(
out_dir=out_dir,
num_batches_per_tarfile=num_batches_per_tarfile,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
src_fname=tmp_f_src.name,
tgt_fname=tmp_f_tgt.name,
num_tokens=tokens_in_batch,
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
encoder_tokenizer_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
decoder_tokenizer_r2l=decoder_tokenizer_r2l,
fragment_index=fragment_index,
encoder_tokenizer_legacy=encoder_tokenizer_legacy,
decoder_tokenizer_legacy=decoder_tokenizer_legacy,
)
os.remove(tmp_f_src.name)
os.remove(tmp_f_tgt.name)
return num_batches_from_fragment, remainder_tar_file_path
@staticmethod
def preprocess_monolingual_dataset(
clean,
fname,
out_dir,
tokenizer,
max_seq_length,
min_seq_length,
tokens_in_batch,
lines_per_dataset_fragment,
num_batches_per_tarfile,
pkl_file_prefix,
global_rank,
world_size,
):
"""Create tarred dataset from a large monolingual corpus.
Args:
clean (bool): if True, cleans sentences to get rid of very long or short sentences.
fname (str): Path to source text data
out_dir (str): Path to write tarred dataset
tokenizer (Any): Path to tokenizer model
max_seq_length (int): maximum sequence length
min_seq_length (int): minimum sequence length
tokens_in_batch (int): tokens per batch per GPU, effectively batch size
lines_per_dataset_fragment (int): number of lines to consider for bucketing and padding
num_batches_per_tarfile (int): number of batches (pickle files) within each tarfile
global_rank (int): if set to zero, data will be processed on this node
world_size (int): total number of processes being run (for training only, set to 1 when preproc only)
"""
os.makedirs(out_dir, exist_ok=True)
tar_file_ctr = 1
num_files_in_tar = 0
num_lines = 0
shard_num = 0
global_batch_ctr = 0
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, 1)), 'w'
)
metadata_path = os.path.join(out_dir, f'metadata.tokens.{tokens_in_batch}.json')
with open(fname, 'r') as f:
for line in f:
tmp_f.write(line)
num_lines += 1
if num_lines == lines_per_dataset_fragment:
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
num_lines = 0
shard_num += 1
os.remove(tmp_f.name)
tmp_f = tempfile.NamedTemporaryFile(delete=False, mode='w')
tmp_f.close()
(
tar_file_ptr,
global_batch_ctr,
num_files_in_tar,
tar_file_ctr,
) = MTDataPreproc.write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
tmp_f.name,
tokens_in_batch,
tokenizer,
num_files_in_tar=num_files_in_tar,
tar_file_ptr=tar_file_ptr,
tar_file_ctr=tar_file_ctr,
global_batch_ctr=global_batch_ctr,
pkl_file_prefix=pkl_file_prefix,
)
tar_file_ptr.close()
os.remove(tmp_f.name)
if num_files_in_tar != num_batches_per_tarfile:
os.remove(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, tokens_in_batch, tar_file_ctr))
)
global_batch_ctr -= num_files_in_tar
logging.info('Dropping %d batches because of overflow' % (num_files_in_tar))
json.dump({'num_batches': global_batch_ctr}, open(os.path.join(out_dir, 'metadata.json'), 'w'))
tar_file_paths = glob.glob(f'{out_dir}/{pkl_file_prefix}-batches.tokens.{tokens_in_batch}.*.tar')
num_tar_files = len(tar_file_paths)
if num_tar_files < world_size:
raise ValueError(
(
f'Number of tar files found: {num_tar_files} is less than world size: {world_size}. '
f'There should be at least one tar file per GPU (ideally many tar files per GPU). '
f'This may be due to dataset size, it is advisable to use at least 5M sentence pairs for tarred datasets. '
f'Decrease num_batches_per_tarfile or num_tokens_per_batch to increase the number of tarfiles. '
f'Also using shard_strategy=replicate will use all available tarfiles for every GPU. '
)
)
return tar_file_paths, metadata_path
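    # A minimal usage sketch (paths and parameter values below are hypothetical, not taken
    # from any particular config; `tokenizer` is assumed to be an already-built tokenizer object):
    #
    #   tar_files, metadata = MTDataPreproc.preprocess_monolingual_dataset(
    #       clean=True,
    #       fname='train.mono.en',
    #       out_dir='tarred_mono_en',
    #       tokenizer=tokenizer,
    #       max_seq_length=512,
    #       min_seq_length=1,
    #       tokens_in_batch=8000,
    #       lines_per_dataset_fragment=1000000,
    #       num_batches_per_tarfile=100,
    #       pkl_file_prefix='mono',
    #       global_rank=0,
    #       world_size=1,
    #   )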
@staticmethod
def train_tokenizers(
out_dir,
src_fname,
tgt_fname,
shared_tokenizer,
encoder_tokenizer_name,
encoder_tokenizer_vocab_size,
encoder_tokenizer_coverage,
decoder_tokenizer_name,
decoder_tokenizer_vocab_size,
decoder_tokenizer_coverage,
global_rank,
encoder_training_sample_size=-1,
decoder_training_sample_size=-1,
encoder_special_tokens=None,
decoder_special_tokens=None,
spt_symbols=None,
byte_fallback=False,
split_digits=False,
split_by_whitespace=True,
):
"""Trains a tokenizer with requested parameters, returns None if the tokenizer is not trainable"""
encoder_tokenizer_model = None
decoder_tokenizer_model = None
os.makedirs(out_dir, exist_ok=True)
supported_train_tokenizers = ['yttm', 'sentencepiece']
if encoder_special_tokens:
if isinstance(encoder_special_tokens, dict):
encoder_special_tokens = list(encoder_special_tokens.values())
            logging.info(f'Encoder special tokens: {encoder_special_tokens}')
if decoder_special_tokens:
if isinstance(decoder_special_tokens, dict):
decoder_special_tokens = list(decoder_special_tokens.values())
if shared_tokenizer:
if encoder_tokenizer_name in supported_train_tokenizers:
encoder_tokenizer_model = os.path.join(
out_dir, 'shared_tokenizer.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
decoder_tokenizer_model = encoder_tokenizer_model
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Shared tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
with tempfile.TemporaryDirectory() as tmp:
concat_data_path = os.path.join(tmp, 'concat_dataset.txt')
os.system('cat %s %s > %s' % (src_fname, tgt_fname, concat_data_path))
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
model=encoder_tokenizer_model,
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
create_spt_model(
data_file=concat_data_path,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=out_dir,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
byte_fallback=byte_fallback,
split_digits=split_digits,
split_by_whitespace=split_by_whitespace,
)
os.rename(
os.path.join(out_dir, 'tokenizer.model'), encoder_tokenizer_model,
)
else:
if encoder_tokenizer_name in supported_train_tokenizers:
encoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.encoder.%d.BPE.model' % (encoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(encoder_tokenizer_model):
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Encoder tokenizer model {encoder_tokenizer_model} not found. Training tokenizer model.'
)
if encoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
model=encoder_tokenizer_model,
coverage=encoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(encoder_tokenizer_model)
create_spt_model(
data_file=src_fname,
vocab_size=encoder_tokenizer_vocab_size,
sample_size=encoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=encoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=encoder_special_tokens,
byte_fallback=byte_fallback,
split_digits=split_digits,
split_by_whitespace=split_by_whitespace,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), encoder_tokenizer_model)
if decoder_tokenizer_name in supported_train_tokenizers:
decoder_tokenizer_model = os.path.join(
out_dir, 'tokenizer.decoder.%d.BPE.model' % (decoder_tokenizer_vocab_size)
)
if global_rank == 0:
if os.path.isfile(decoder_tokenizer_model):
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} already exists. Remove file if training a new tokenizer model.'
)
else:
logging.info(
f'Decoder tokenizer model {decoder_tokenizer_model} not found. Training tokenizer model.'
)
if decoder_tokenizer_name == "yttm":
yttm.BPE.train(
data=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
model=decoder_tokenizer_model,
coverage=decoder_tokenizer_coverage,
n_threads=-1,
)
else:
dir_name = os.path.dirname(decoder_tokenizer_model)
create_spt_model(
data_file=tgt_fname,
vocab_size=decoder_tokenizer_vocab_size,
sample_size=decoder_training_sample_size,
do_lower_case=False,
tokenizer_type='bpe',
character_coverage=decoder_tokenizer_coverage,
output_dir=dir_name,
bos=True,
eos=True,
pad=True,
control_symbols=spt_symbols,
user_defined_symbols=decoder_special_tokens,
byte_fallback=byte_fallback,
split_digits=split_digits,
split_by_whitespace=split_by_whitespace,
)
os.rename(os.path.join(dir_name, 'tokenizer.model'), decoder_tokenizer_model)
return encoder_tokenizer_model, decoder_tokenizer_model
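    # A minimal usage sketch for training a shared BPE tokenizer (file names and
    # hyperparameter values below are hypothetical):
    #
    #   enc_model, dec_model = MTDataPreproc.train_tokenizers(
    #       out_dir='tokenizers',
    #       src_fname='train.en',
    #       tgt_fname='train.de',
    #       shared_tokenizer=True,
    #       encoder_tokenizer_name='yttm',
    #       encoder_tokenizer_vocab_size=32000,
    #       encoder_tokenizer_coverage=0.999,
    #       decoder_tokenizer_name='yttm',
    #       decoder_tokenizer_vocab_size=32000,
    #       decoder_tokenizer_coverage=0.999,
    #       global_rank=0,
    #   )
    #   # With shared_tokenizer=True both returned paths point to the same model file;
    #   # for tokenizer types other than 'yttm'/'sentencepiece' the corresponding return value is None.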
@staticmethod
def write_parallel_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
src_fname,
tgt_fname,
num_tokens,
encoder_tokenizer_name,
encoder_tokenizer_model,
encoder_tokenizer_r2l,
encoder_bpe_dropout,
encoder_model_name,
decoder_tokenizer_name,
decoder_tokenizer_model,
decoder_bpe_dropout,
decoder_model_name,
decoder_tokenizer_r2l,
fragment_index,
encoder_tokenizer_legacy=False,
decoder_tokenizer_legacy=False,
):
"""
Writes current fragment of the overall parallel corpus to tarfiles by:
        (1) Creating minibatches using a TranslationDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = TranslationDataset(
dataset_src=src_fname,
dataset_tgt=tgt_fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
max_seq_length_diff=max_seq_length,
max_seq_length_ratio=max_seq_length,
cache_ids=False,
cache_data_per_node=False,
use_cache=False,
)
encoder_tokenizer, decoder_tokenizer = MTDataPreproc.get_enc_dec_tokenizers(
encoder_tokenizer_name=encoder_tokenizer_name,
encoder_tokenizer_model=encoder_tokenizer_model,
encoder_bpe_dropout=encoder_bpe_dropout,
encoder_model_name=encoder_model_name,
encoder_r2l=encoder_tokenizer_r2l,
decoder_tokenizer_name=decoder_tokenizer_name,
decoder_tokenizer_model=decoder_tokenizer_model,
decoder_bpe_dropout=decoder_bpe_dropout,
decoder_model_name=decoder_model_name,
decoder_r2l=decoder_tokenizer_r2l,
encoder_tokenizer_legacy=encoder_tokenizer_legacy,
decoder_tokenizer_legacy=decoder_tokenizer_legacy,
)
# validate no token is negative for sentencepiece tokenizers and add missing special tokens.
for tok_name, tok_library, tok_model, legacy in [
("encoder_tokenizer", encoder_tokenizer_name, encoder_tokenizer, encoder_tokenizer_legacy),
("decoder_tokenizer", decoder_tokenizer_name, decoder_tokenizer, decoder_tokenizer_legacy),
]:
if tok_library == 'sentencepiece':
negative_tokens = []
for n in ["eos_id", "bos_id", "unk_id", "pad_id"]:
v = getattr(tok_model.tokenizer, n)()
if v < 0:
negative_tokens.append(f"{n}={v}")
if negative_tokens and not legacy:
raise ValueError(
f"{tok_name}=sentencepiece has invalid negative special tokens = {negative_tokens}"
)
# If using the legacy sentencepiece tokenizer, we can add the missing tokens as "special" tokens.
else:
# If using sentencepiece legacy, eos, bos and pad need to be set/added differently.
if legacy:
# bos, eos, pad and unk may be present in the provided spm .model file, if they are, use it.
if not hasattr(tok_model, 'pad_token'):
if hasattr(tok_model.tokenizer, 'pad_id') and tok_model.tokenizer.pad_id() > 0:
tok_model.pad_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.pad_id())
else:
tok_model.add_special_tokens({'pad_token': '<pad>'})
else:
tok_model.add_special_tokens({'pad_token': '<pad>'})
if not hasattr(tok_model, 'bos_token'):
if hasattr(tok_model.tokenizer, 'bos_id') and tok_model.tokenizer.bos_id() > 0:
tok_model.bos_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.bos_id())
else:
tok_model.add_special_tokens({'bos_token': '<bos>'})
else:
tok_model.add_special_tokens({'bos_token': '<s>'})
if not hasattr(tok_model, 'eos_token'):
if hasattr(tok_model.tokenizer, 'eos_id') and tok_model.tokenizer.eos_id() > 0:
tok_model.eos_token = tok_model.tokenizer.id_to_piece(tok_model.tokenizer.eos_id())
else:
tok_model.add_special_tokens({'eos_token': '<eos>'})
else:
tok_model.add_special_tokens({'eos_token': '</s>'})
dataset.batchify(encoder_tokenizer, decoder_tokenizer)
tar_file_ctr = 0
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w')
total_batch_ctr = 0
batch_ctr = 0
for _, batch in dataset.batches.items():
total_batch_ctr += 1
batch_ctr += 1
pickle.dump(
batch,
open(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)), 'wb'),
)
tar_file_ptr.add(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
os.remove(os.path.join(out_dir, 'fragment-%s-batch-%d.pkl' % (fragment_index, total_batch_ctr)))
if batch_ctr == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_path = os.path.join(
out_dir, 'fragment-%s-batches.tokens.%d.%d.tar' % (fragment_index, num_tokens, tar_file_ctr)
)
tar_file_ptr = tarfile.open(tar_file_path, 'w',)
batch_ctr = 0
# return tar files paths that have batches remaining
remainder_tar_file_path = tar_file_ptr.name
tar_file_ptr.close()
return total_batch_ctr, remainder_tar_file_path
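    # Sketch of the on-disk layout produced for one corpus fragment (names are
    # illustrative, assuming fragment_index=0 and num_tokens=8000):
    #
    #   out_dir/
    #       fragment-0-batches.tokens.8000.0.tar   <- full tarfile with num_batches_per_tarfile batches
    #       fragment-0-batches.tokens.8000.1.tar
    #       ...
    #       fragment-0-batches.tokens.8000.N.tar   <- "remainder" tarfile, possibly not full; its path
    #                                                 is returned so the caller can repack or drop the
    #                                                 leftover batches.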
@staticmethod
def write_monolingual_batches_to_tarfiles(
out_dir,
num_batches_per_tarfile,
clean,
max_seq_length,
min_seq_length,
fname,
num_tokens,
tokenizer,
num_files_in_tar,
tar_file_ptr,
tar_file_ctr,
global_batch_ctr,
pkl_file_prefix,
):
"""
        Writes the current fragment of the overall monolingual corpus to tarfiles by:
        (1) Creating minibatches using a SentenceDataset object.
(2) Writing each minibatch to a pickle file.
(3) Adding pickle files to a tarfile until it reaches num_batches_per_tarfile.
"""
dataset = SentenceDataset(
tokenizer=tokenizer,
dataset=fname,
tokens_in_batch=num_tokens,
clean=clean,
max_seq_length=max_seq_length,
min_seq_length=min_seq_length,
cache_ids=False,
)
for batch in dataset.batches:
global_batch_ctr += 1
batch = {'src': batch}
pickle.dump(
batch, open(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)), 'wb')
)
if num_files_in_tar == num_batches_per_tarfile:
tar_file_ctr += 1
tar_file_ptr.close()
tar_file_ptr = tarfile.open(
os.path.join(out_dir, '%s-batches.tokens.%d.%d.tar' % (pkl_file_prefix, num_tokens, tar_file_ctr)),
'w',
)
num_files_in_tar = 0
tar_file_ptr.add(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
num_files_in_tar += 1
os.remove(os.path.join(out_dir, '%s-batch-%d.pkl' % (pkl_file_prefix, global_batch_ctr)))
return tar_file_ptr, global_batch_ctr, num_files_in_tar, tar_file_ctr
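    # Note: the returned (tar_file_ptr, global_batch_ctr, num_files_in_tar, tar_file_ctr) state
    # is passed back in on the next call (see preprocess_monolingual_dataset above), so a
    # partially filled tarfile keeps accumulating batches across successive corpus fragments
    # until it reaches num_batches_per_tarfile.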
@property
def cfg(self):
return self._cfg
| NeMo-main | nemo/collections/nlp/data/machine_translation/preproc_mt_data.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.information_retrieval.information_retrieval_dataset import (
BertInformationRetrievalDataset,
)
| NeMo-main | nemo/collections/nlp/data/information_retrieval/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing as mp
import os
import pickle
import random
from typing import Optional
import numpy as np
from torch.utils.data import Dataset
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
__all__ = ["BertInformationRetrievalDataset"]
class BaseInformationRetrievalDataset(Dataset):
"""
Base information retrieval dataset on which other datasets are built.
Args:
tokenizer: tokenizer
max_query_length: maximum length of query in tokens
max_passage_length: maximum length of passage in tokens
"""
def __init__(
self, tokenizer: TokenizerSpec, max_query_length: Optional[int] = 31, max_passage_length: Optional[int] = 190,
):
self.tokenizer = tokenizer
self.max_query_length = max_query_length
self.max_passage_length = max_passage_length
def parse_npz(self, file, max_seq_length):
"""
Function which parses passages (documents) in npz format.
After pre-processing and tokenization, the dataset will be saved
as numpy matrix, i_th entry of which corresponds to i_th passage (document)
and has the following form:
[n, token_1, ..., token_n, 0, ..., 0]
where n is the passage length (in tokens) and 0s correspond to pad tokens.
Args:
file: str, path to file with passages (documents)
max_seq_length: maximum length of sequence in tokens
"""
cached_collection = file + ".npz"
if os.path.isfile(cached_collection):
dataset_npz = np.load(cached_collection)["data"]
else:
dataset_dict = self.tokenize_dataset(file, max_seq_length)
dataset_npz = np.zeros((len(dataset_dict), max_seq_length + 1))
for key in dataset_dict:
dataset_npz[key][0] = len(dataset_dict[key])
dataset_npz[key][1 : len(dataset_dict[key]) + 1] = dataset_dict[key]
np.savez(cached_collection, data=dataset_npz)
return dataset_npz
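    # For example, a passage tokenized to the (hypothetical) ids [101, 2054, 102] with
    # max_seq_length=6 is stored as the row
    #   [3, 101, 2054, 102, 0, 0, 0]
    # i.e. the first entry holds the token count, followed by the tokens and pad zeros.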
def parse_pkl(self, file, max_seq_length):
"""
Function which parses passages (documents, queries) in pkl format.
After pre-processing and tokenization, the dataset will be saved
as pkl dict, i_th entry of which corresponds to i_th passage (document, query)
and has the following form:
{passage_id: [token_1, ..., token_n]}
where n is the passage length (in tokens).
Args:
file: str, path to file with passages (documents)
max_seq_length: maximum length of sequence in tokens
"""
cached_collection = file + ".pkl"
if os.path.isfile(cached_collection):
dataset_dict = pickle.load(open(cached_collection, "rb"))
else:
dataset_dict = self.tokenize_dataset(file, max_seq_length)
pickle.dump(dataset_dict, open(cached_collection, "wb"))
return dataset_dict
def tokenize_dataset(self, file, max_seq_length):
"""
Function which pre-tokenizes the dataset.
"""
lines = open(file, "r").readlines()
with mp.Pool() as pool:
dataset_dict = pool.map(self.preprocess_line, lines)
dataset_dict = {id_: tokens[:max_seq_length] for (id_, tokens) in dataset_dict}
return dataset_dict
def preprocess_line(self, line):
"""
Parse a single entry (line) of tsv file.
"""
if "\t" not in line:
raise ValueError(f"Provided dataset does not have a form of tsv file")
id_, text = line.split("\t")
token_ids = self.tokenizer.text_to_ids(text.strip())
return int(id_), token_ids
def construct_input(self, token_ids1, max_seq_length, token_ids2=None):
"""
Function which constructs a valid input to BERT from tokens.
If only one list of tokens (token_ids1) is passed, the input will be
[CLS] token_ids1 [SEP]
if two lists of tokens are passed, the input will be
[CLS] token_ids1 [SEP] token_ids2 [SEP]
"""
input_ids = [self.tokenizer.pad_id] * max_seq_length
bert_input = [self.tokenizer.cls_id] + token_ids1 + [self.tokenizer.sep_id]
sentence1_length = len(bert_input)
if token_ids2 is not None:
bert_input = bert_input + token_ids2 + [self.tokenizer.sep_id]
bert_input = bert_input[:max_seq_length]
num_nonpad_tokens = len(bert_input)
input_ids[:num_nonpad_tokens] = bert_input
        input_ids = np.array(input_ids, dtype=np.int64)
input_mask = input_ids != self.tokenizer.pad_id
input_type_ids = np.ones_like(input_ids)
input_type_ids[:sentence1_length] = 0
return input_ids, input_mask, input_type_ids
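    # A small worked example (hypothetical ids, with cls_id=101, sep_id=102, pad_id=0 and
    # max_seq_length=8) for token_ids1=[7, 8] and token_ids2=[9]:
    #
    #   input_ids      = [101, 7, 8, 102, 9, 102, 0, 0]
    #   input_mask     = [  1, 1, 1,   1, 1,   1, 0, 0]
    #   input_type_ids = [  0, 0, 0,   0, 1,   1, 1, 1]
    #
    # Type ids of the trailing pad positions are 1, but they are ignored because the
    # corresponding mask entries are 0.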
def preprocess_bert(self, query_id, psg_ids):
"""
Transforms query id (Q) and a list of passages ids (P1, ..., Pk)
into a tensor of size [k, max_length] with the following rows:
[CLS] Q_text [SEP] Pi_text [SEP], i = 1, ..., k
"""
max_seq_length = self.max_query_length + self.max_passage_length + 3
input_ids, input_mask, input_type_ids = [], [], []
for psg_id in psg_ids:
inputs = self.construct_input(self.queries[query_id], max_seq_length, self._psgid2tokens(psg_id))
input_ids.append(inputs[0])
input_mask.append(inputs[1])
input_type_ids.append(inputs[2])
input_ids = np.stack(input_ids)
input_mask = np.stack(input_mask)
input_type_ids = np.stack(input_type_ids)
return input_ids, input_mask, input_type_ids
def preprocess_dpr(self, query_id, psg_ids):
"""
Transforms query id (Q) and a list of passages ids (P1, ..., Pk)
into two tensors of sizes [1, max_q_length] and [k, max_p_length]
with the following rows:
1) [CLS] Q_text [SEP]
2) [CLS] Pi_text [SEP], i = 1, ..., k
"""
q_input_ids, q_input_mask, q_type_ids = self.construct_input(self.queries[query_id], self.max_query_length + 2)
input_ids, input_mask, input_type_ids = [], [], []
for psg_id in psg_ids:
inputs = self.construct_input(self._psgid2tokens(psg_id), self.max_passage_length + 2)
input_ids.append(inputs[0])
input_mask.append(inputs[1])
input_type_ids.append(inputs[2])
input_ids = np.stack(input_ids)
input_mask = np.stack(input_mask)
input_type_ids = np.stack(input_type_ids)
return (
q_input_ids[None, ...],
q_input_mask[None, ...],
q_type_ids[None, ...],
input_ids,
input_mask,
input_type_ids,
)
def _psgid2tokens(self, psg_id):
"""
Internal function which maps passage id to its tokens.
"""
pass
def psgid2tokens_npz(self, psg_id):
"""
Mapping from passage id to its tokens in case of npz cache format.
"""
seq_len = self.passages[psg_id][0]
return self.passages[psg_id][1 : seq_len + 1].tolist()
def psgid2tokens_pkl(self, psg_id):
"""
Mapping from passage id to its tokens in case of pkl cache format.
"""
return self.passages[psg_id]
class BertInformationRetrievalDataset(BaseInformationRetrievalDataset):
def __init__(
self,
tokenizer: TokenizerSpec,
passages: str,
queries: str,
query_to_passages: str,
max_query_length: Optional[int] = 31,
max_passage_length: Optional[int] = 190,
num_negatives: Optional[int] = 10,
preprocess_fn: Optional[str] = "preprocess_bert",
psg_cache_format: Optional[str] = "npz",
):
"""
Dataset for training information retrieval models.
Args:
tokenizer: tokenizer
passages: path to tsv with [psg_id, psg_text] entries
queries: path to tsv with [query_id, query_text] entries
query_to_passages: path to tsv with
[query_id, pos_psg_id, neg_psg_id_1, ..., neg_psg_id_k] entries
max_query_length: maximum length of query in tokens
max_passage_length: maximum length of passage in tokens
num_negatives: number of negative passages per positive to use for training
preprocess_fn: either preprocess_bert or preprocess_dpr
preprocess_bert: joint input: [CLS] query [SEP] passage [SEP]
preprocess_dpr: separate inputs: [CLS] query [SEP], [CLS] passage [SEP]
psg_cache_format: either pkl or npz
"""
super().__init__(tokenizer, max_query_length, max_passage_length)
self.num_negatives = num_negatives
self.passages = getattr(self, f"parse_{psg_cache_format}")(passages, max_passage_length)
self._psgid2tokens = getattr(self, f"psgid2tokens_{psg_cache_format}")
self.queries = self.parse_pkl(queries, max_query_length)
self.idx2psgs = self.parse_query_to_passages(query_to_passages)
self._preprocess_fn = getattr(self, preprocess_fn)
def __getitem__(self, idx):
query_and_psgs = self.idx2psgs[idx]
query_id, psg_ids = query_and_psgs[0], query_and_psgs[1:]
inputs = self._preprocess_fn(query_id, psg_ids)
return [*inputs, query_id, np.array(psg_ids)]
def __len__(self):
return len(self.idx2psgs)
def parse_query_to_passages(self, file):
"""
Function which parses query to passages correspondence file.
"""
idx2psgs = {}
idx = 0
for line in open(file, "r").readlines():
if "\t" not in line:
raise ValueError(f"Provided dataset does not have a form of tsv file")
query_and_psgs = line.split("\t")
query_and_psgs_ids = [int(id_) for id_ in query_and_psgs]
query_and_rel_psg_ids, irrel_psgs_ids = query_and_psgs_ids[:2], query_and_psgs_ids[2:]
random.shuffle(irrel_psgs_ids)
num_samples = len(irrel_psgs_ids) // self.num_negatives
for j in range(num_samples):
left = self.num_negatives * j
right = self.num_negatives * (j + 1)
idx2psgs[idx] = query_and_rel_psg_ids + irrel_psgs_ids[left:right]
idx += 1
return idx2psgs
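    # A minimal usage sketch (the tsv file names below are hypothetical; each file is
    # expected to follow the formats described in the constructor docstring):
    #
    #   dataset = BertInformationRetrievalDataset(
    #       tokenizer=tokenizer,
    #       passages='collection.tsv',
    #       queries='queries.train.tsv',
    #       query_to_passages='query2passages.train.tsv',
    #       num_negatives=10,
    #       preprocess_fn='preprocess_bert',
    #       psg_cache_format='npz',
    #   )
    #   input_ids, input_mask, input_type_ids, query_id, psg_ids = dataset[0]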
| NeMo-main | nemo/collections/nlp/data/information_retrieval/information_retrieval_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.data_processor.sgd_data_processor import DialogueSGDDataProcessor
from nemo.collections.nlp.data.dialogue.dataset import (
DialogueBERTDataset,
DialogueGPTClassificationDataset,
DialogueSGDBERTDataset,
DialogueZeroShotIntentDataset,
)
from nemo.collections.nlp.data.dialogue.sgd.schema import Schema
| NeMo-main | nemo/collections/nlp/data/dialogue/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
__all__ = ['DialogueNearestNeighbourDataset']
class DialogueNearestNeighbourDataset(DialogueDataset):
"""
Dataset for training a Nearest Neighbour model for zero shot intent recognition.
"""
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
"""
Args:
dataset_split: dataset split
dialogues_processor: Data generator for dialogues
            tokenizer: tokenizer to split text into sub-word tokens
            cfg: config container for the dataset
        """
self.cfg = cfg
self.tokenizer = tokenizer
self.raw_features = dialogues_processor.get_dialog_examples(dataset_split)
self.max_n = self.find_max_n_candidates()
self.examples = self._create_examples(self.raw_features)
def find_max_n_candidates(self):
max_n = 0
for idx in range(len(self.raw_features)):
ex = self.raw_features[idx].data
n = len(ex["possible_labels"]["intent"])
max_n = max(max_n, n)
return max_n
def _create_examples(self, raw_features):
"""Creates examples for the training and dev sets."""
examples = []
seen_utterances = set()
for idx in range(len(raw_features)):
ex = self.raw_features[idx].data
user_utterance = ex["utterance"]
if user_utterance in seen_utterances:
continue
seen_utterances.add(user_utterance)
intent = ex["labels"]["intent"]
sentences = [user_utterance]
labels = [-1]
for candidate_intent in ex["possible_labels"]["intent"]:
text_b = "{} {}".format(self.cfg.prompt_template, candidate_intent)
label = 1 if candidate_intent == intent else 0
labels.append(label)
sentences.append(text_b)
while self.max_n > len(labels) - 1:
labels.append(label)
sentences.append(text_b)
encoded_input = self.tokenizer.tokenizer(
sentences,
padding='max_length',
truncation=True,
return_tensors='pt',
max_length=self.cfg.max_seq_length,
)
examples.append((encoded_input['input_ids'], encoded_input['attention_mask'], torch.tensor(labels)))
return examples
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
return self.examples[idx]
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_nearest_neighbour_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import random
from collections import defaultdict
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
from nemo.utils import logging
class DialogueGPTClassificationDataset(DialogueDataset):
'''
Designed for classification tasks such as intent/domain classification as well as slot tagging
Dataset Class
1. Performs Model-dependent (but Data-independent) operations (tokenization etc)
2. This can allow the same model preprocessing for multiple datasources
    3. Users can configure which labels to use for modelling
(e.g. intent classification, slot filling or both together etc)
'''
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
""" Constructor
Args:
dataset_split: dataset split
dialogues_processor: Data generator for SGD dialogues
tokenizer: tokenizer
cfg: cfg container for dataset
"""
self.cfg = cfg
if self.cfg.target_template == "with_slots" and self.cfg.eval_mode != "generation":
raise ValueError(
"slot-filling is not supported by eval_mode {}, please set model.dataset.eval_mode=generation instead".format(
self.cfg.eval_mode
)
)
if self.cfg.target_template != "with_slots" and self.cfg.field == "slots":
raise ValueError("please set model.dataset.target_template='with_slots' if model.dataset.field='slots'")
self.label_type = self.cfg.field
if self.cfg.target_template == "with_description":
self.label_to_description = defaultdict(str)
self.all_possible_labels = set()
self.tokenizer = tokenizer
self.tokenizer.tokenizer.padding_side = "right"
self.max_candidates = 2
if not isinstance(dataset_split, str):
dataset_split = dataset_split[0]
self.features = dialogues_processor.get_dialog_examples(dataset_split)
for idx in range(len(self.features)):
self.preprocess_feature(idx)
if self.cfg.debug_mode:
self.features = self.features[:16]
# for few shot learning to append in the prompt
self.lm_features = self.get_lm_samples()
def transform(self, label):
"""
Normalize labels by replacing underscore with space
Args:
label: str
Returns:
normalized_label: str
"""
if self.cfg.task == "assistant" and self.cfg.prompt_template != "prompt_tuning":
label = label.replace('_', ' ')
return label
def __len__(self):
return len(self.features)
def get_n_tokens_in_sentence(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding=False, return_tensors="pt"
)
output = torch.squeeze(encodings_dict['input_ids'])
return len(output) if len(output.size()) > 0 else 0
def preprocess_feature(self, idx):
ex = self.features[idx].data
label = ex["labels"][self.label_type]
candidates = ex["possible_labels"][self.label_type]
if self.label_type in ["service", "intent"]:
label = self.transform(label)
candidates = [self.transform(candidate) for candidate in candidates]
self.features[idx].data["labels"][self.label_type] = label
self.features[idx].data["possible_labels"][self.label_type] = candidates
if self.cfg.target_template == "with_description":
description = ex["description"][self.label_type]
self.label_to_description[label] = description
for candidate in candidates:
self.all_possible_labels.add(candidate)
self.max_candidates = max(self.max_candidates, len(candidates))
def default_encode(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding="max_length", return_tensors="pt"
)
input_ids = torch.squeeze(encodings_dict['input_ids'])
attn_masks = torch.squeeze(encodings_dict['attention_mask'])
return encodings_dict, input_ids, attn_masks
@staticmethod
def linearize_slots(slots):
"""
Serialize slots into a linear text
Args:
slots: dict with each slot_name as key and possible slot values as value
Returns:
linear_slots: text based representation of slot names and values
"""
if not slots:
return "None"
return ", ".join(
["{}({})".format(slot, value if isinstance(value, str) else value[0]) for slot, value in slots.items()]
)
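    # For example (hypothetical slot names and values):
    #   linearize_slots({"time": "7 am", "day": ["tomorrow"]}) -> "time(7 am), day(tomorrow)"
    #   linearize_slots({}) -> "None"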
def format_target(self, target, slots=None):
"""
Formats the back part of the training example, after the base_template
for instance, "restaurant" in "<utterance> service: restaurant"
or "set alarm\nslots: <slot_name1>(<slot_value1>), <slot_name1>(<slot_value1>)" in \
"<utterance>\nintent: set alarm\nslots: <slot_name1>(<slot_value1>), <slot_name1>(<slot_value1>)"
"""
if self.cfg.target_template == "with_description":
return target + ' (' + self.label_to_description[target] + ')'
elif self.cfg.target_template == "default":
return target
elif self.cfg.target_template == "with_slots" and slots is not None and self.cfg.field == "intent":
return target + '\nslots: ' + DialogueGPTClassificationDataset.linearize_slots(slots)
elif self.cfg.target_template == "with_slots" and slots is not None and self.cfg.field == "slots":
return DialogueGPTClassificationDataset.linearize_slots(slots)
else:
raise ValueError("Please choose a target format from {default, with_description, with_slots}")
def get_lm_samples(self):
max_sample_length = 0
lm_features = []
for idx in range(len(self.features)):
ex = self.features[idx].data
utterance = ex["utterance"]
label = ex["labels"][self.label_type]
slots = ex["labels"]["slots"] if self.cfg.target_template == "with_slots" else None
lm_feature = self.format_prompt(utterance) + ' ' + self.format_target(label, slots=slots)
feature_len = self.get_n_tokens_in_sentence(lm_feature)
max_sample_length = max(max_sample_length, feature_len)
lm_features.append(lm_feature)
logging.info("max feature length per sample with label: ".format(max_sample_length))
logging.info(
"please adjust max seq len to at least {} * ({} + 1) = {} but not too much more for efficiency".format(
max_sample_length, self.cfg.few_shot, max_sample_length * (1 + self.cfg.few_shot)
)
)
return lm_features
def format_prompt(self, utterance, few_shot=0, idx=None):
if self.cfg.prompt_template == "default":
base_template = utterance + ' ' + self.label_type + ':'
elif self.cfg.prompt_template == "i_want_to":
base_template = utterance + ' ' + 'I want to'
elif self.cfg.prompt_template == "prompt_tuning":
base_template = utterance + '\n' + self.label_type + ':'
elif self.cfg.prompt_template == "prompt_tuning_with_options":
base_template = (
'possible intents: '
+ ', '.join(sorted(list(self.all_possible_labels)))
+ '\n\n'
+ utterance
+ '\n'
+ self.label_type
+ ':'
)
if few_shot > 0:
few_shot_indices = random.sample(range(len(self.features)), few_shot + 1)
few_shot_indices = [i for i in few_shot_indices if i != idx][:few_shot]
few_shot_samples = [self.lm_features[i] for i in few_shot_indices]
base_template = (
self.tokenizer.tokenizer.pad_token.join(few_shot_samples)
+ self.tokenizer.tokenizer.pad_token
+ base_template
)
return base_template
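    # Sketch of the resulting prompt for a hypothetical utterance "set an alarm for 7 am"
    # with label_type == "intent", depending on cfg.prompt_template:
    #   default:                    "set an alarm for 7 am intent:"
    #   i_want_to:                  "set an alarm for 7 am I want to"
    #   prompt_tuning:              "set an alarm for 7 am\nintent:"
    #   prompt_tuning_with_options: "possible intents: <sorted intent list>\n\nset an alarm for 7 am\nintent:"
    # With few_shot > 0, that many complete "<prompt> <target>" samples are prepended,
    # joined by the tokenizer's pad token.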
def collate_fn(self, batch):
"""
Truncates elements to max length in batch
"""
_, _, _, _, candidate_attn_masks, _, _, _ = zip(*batch)
# determine max length in batch
batch_max_length = 0
for candidate_attn_mask in candidate_attn_masks:
for one_attn_mask in candidate_attn_mask:
batch_max_length = max(batch_max_length, torch.sum(one_attn_mask).item())
# padding for tp=2 situation
if batch_max_length % 2:
batch_max_length += 1
all_items = []
for item in zip(*batch):
if isinstance(item[0], int):
item = [torch.tensor(i) for i in item]
item_stack = torch.stack(item)
# if item_stack is 1d, elements refers to indexes and there is no need to truncate
if len(item_stack.size()) == 1:
all_items.append(item_stack)
# otherwise, truncate last dimension to max length in batch
else:
all_items.append(item_stack[..., :batch_max_length])
return all_items
def __getitem__(self, idx: int):
'''
State how the input and output samples look like
This template can be changed
Training example:
e.g. <utterance> service: restaurant
e.g. <task description> <utterance> service: restaurant
e.g. <utterance>\nintent: set alarm\nslots: <slot_name1>(<slot_value1>), <slot_name1>(<slot_value1>)
Generation example:
e.g. <utterance> service:
'''
ex = self.features[idx].data
utterance = ex["utterance"]
utterance_length = self.get_n_tokens_in_sentence(utterance)
label = ex["labels"][self.label_type]
candidates = ex["possible_labels"][self.label_type]
slots = ex["labels"]["slots"] if self.cfg.target_template == "with_slots" else None
base_template = self.format_prompt(utterance, few_shot=self.cfg.few_shot, idx=idx)
sentence_without_answer = base_template
sentence = base_template + ' ' + self.format_target(label, slots=slots)
if self.cfg.eval_mode == "binary_score":
candidate_sentences = []
for candidate in candidates:
positive_answer = base_template + ' ' + candidate + ' Answer: ' + 'yes'
negative_answer = base_template + ' ' + candidate + ' Answer: ' + 'no'
if candidate == label:
correct_candidate = len(candidate_sentences) // 2
candidate_sentences.append(positive_answer)
candidate_sentences.append(negative_answer)
else:
candidate_sentences.append(negative_answer)
candidate_sentences.append(positive_answer)
else:
correct_candidate = 0
candidate_sentences = [
base_template + ' ' + self.format_target(candidate, slots=slots) for candidate in candidates
]
encodings_dict, input_ids, attn_masks = self.default_encode(sentence)
candidate_tokenized_sentences = [
self.default_encode(candidate_sentence) for candidate_sentence in candidate_sentences
]
# ensure all samples have the same number of candidates for collating into tensor
while len(candidate_tokenized_sentences) < self.max_candidates:
candidate_tokenized_sentences.append(candidate_tokenized_sentences[0])
candidate_input_ids = torch.stack([i[1] for i in candidate_tokenized_sentences])
candidate_attn_masks = torch.stack([i[2] for i in candidate_tokenized_sentences])
labels = copy.copy(torch.squeeze(encodings_dict['input_ids']))
training_mask_end = self.get_n_tokens_in_sentence(sentence_without_answer)
labels.data = torch.tensor(
[-100 if i < training_mask_end else labels.data[i] for i in range(len(labels.data))]
)
return (
input_ids,
attn_masks,
labels,
candidate_input_ids,
candidate_attn_masks,
training_mask_end,
utterance_length,
correct_candidate,
)
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_gpt_classification_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
class DialogueGPTGenerationDataset(DialogueDataset):
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
""" Constructor
Designed for free form generation tasks such as Dialogue Response Generation
Args:
dataset_split: dataset split
dialogues_processor: dialogues processor
tokenizer: tokenizer
cfg: cfg container for dataset
"""
self.cfg = cfg
self.input_label_type = self.cfg.input_field
self.output_label_type = self.cfg.output_field
self.tokenizer = tokenizer
self.tokenizer.tokenizer.padding_side = "right"
if not isinstance(dataset_split, str):
dataset_split = dataset_split[0]
self.features = dialogues_processor.get_dialog_examples(dataset_split)
self.features = self.remove_invalid_samples(self.features)
if self.cfg.debug_mode:
self.features = self.features[:16]
def remove_invalid_samples(self, features):
valid_idxs = []
all_fields = self.input_label_type.split('+') + self.output_label_type.split('+')
for i in range(len(features)):
features[i].data["labels"]["utterance"] = features[i].data["utterance"]
all_fields_non_empty = True
for field in all_fields:
if not features[i].data["labels"][field] or not features[i].data["labels"][field].strip():
all_fields_non_empty = False
if all_fields_non_empty:
valid_idxs.append(i)
return [features[i] for i in valid_idxs]
def __len__(self):
return len(self.features)
def get_n_tokens_in_sentence(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding=False, return_tensors="pt"
)
output = torch.squeeze(encodings_dict['input_ids'])
return len(output) if len(output.size()) > 0 else 0
def default_encode(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding="max_length", return_tensors="pt"
)
input_ids = torch.squeeze(encodings_dict['input_ids'])
attn_masks = torch.squeeze(encodings_dict['attention_mask'])
return encodings_dict, input_ids, attn_masks
def format_prompt(self, ex):
'''
Formats training prompt based on self.input_field_type
Training example:
e.g. response: <response> # input_label_type = response
e.g. utterance: <utterance> # input_label_type = utterance
e.g. passage: <passage> utterance: <utterance> # input_label_type = passage+utterance
'''
ex["labels"]["utterance"] = ex["utterance"]
parts = self.input_label_type.split('+')
input_sentence = ' '.join([part + ': ' + ex["labels"][part] for part in parts])
return input_sentence
def __getitem__(self, idx: int):
'''
        For each example, this function determines the format of input and output sequences based on user-specified configuration.
This is controlled by model.dataset.input_field and model.dataset.output_field
For instance:
If model.dataset.input_field == response and model.dataset.output_field == fluent_response:
Input = "response: <response>" and output = "response: <response> fluent_response: <fluent_response>" (with loss calculated from <fluent_response> only)
If model.dataset.input_field == utterance and model.dataset.output_field == response:
Input = "utterance: <utterance>" and output = "utterance: <utterance> response: <response>" (with loss calculated from <response> only)
If model.dataset.input_field == passage+utterance and model.dataset.output_field == response:
Input = "passage: <passage> utterance: <utterance>" and output="passage: <passage> utterance: <utterance> response: <response>" (with loss calculated from <response> only)
'''
ex = self.features[idx].data
input_sentence = self.format_prompt(ex)
utterance_length = self.get_n_tokens_in_sentence(input_sentence)
output_sentence = ex["labels"][self.output_label_type]
base_template = input_sentence
sentence_without_answer = base_template + ' ' + self.output_label_type + ':'
sentence = sentence_without_answer + ' ' + output_sentence
encodings_dict, input_ids, attn_masks = self.default_encode(sentence)
labels = copy.copy(torch.squeeze(encodings_dict['input_ids']))
training_mask_end = self.get_n_tokens_in_sentence(sentence_without_answer)
labels.data = torch.tensor(
[-100 if i < training_mask_end else labels.data[i] for i in range(len(labels.data))]
)
return (input_ids, attn_masks, labels, training_mask_end, utterance_length)
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_gpt_generation_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.dataset.dialogue_bert_dataset import DialogueBERTDataset
from nemo.collections.nlp.data.dialogue.dataset.dialogue_gpt_classification_dataset import (
DialogueGPTClassificationDataset,
)
from nemo.collections.nlp.data.dialogue.dataset.dialogue_sgd_bert_dataset import DialogueSGDBERTDataset
from nemo.collections.nlp.data.dialogue.dataset.dialogue_zero_shot_intent_dataset import DialogueZeroShotIntentDataset
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional, Union
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.glue_benchmark.data_processors import InputExample
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import GLUEDataset
from nemo.core.neural_types import CategoricalValuesType, ChannelType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['DialogueZeroShotIntentDataset']
class DialogueZeroShotIntentDataset(GLUEDataset):
"""
Dataset for training a NLI model for zero shot intent recognition. Similar to GLUE/MNLI
dataset, but allows the user to specify which columns in the data files contain the
premise, hypothesis, and gold label.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'labels': NeuralType(tuple('B'), CategoricalValuesType()),
}
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
"""
Args:
dataset_split: dataset split
dialogues_processor: Data generator for dialogues
tokenizer: tokenizer to split text into sub-word tokens
cfg: config dict for dataset
num_classes: number of classes in the data (should be either 2 or 3, corresponding to
labels ['entailment', 'not_entailment'] or ["contradiction", "entailment", "neutral"])
"""
self.cfg = cfg
self.tokenizer = tokenizer
if self.cfg.num_classes not in [2, 3]:
raise ValueError("num_classes must be either 2 or 3!")
self.label_list = (
["contradiction", "entailment", "neutral"]
if self.cfg.num_classes == 3
else ['not_entailment', 'entailment']
)
token_params = {
'bos_token': None,
'eos_token': tokenizer.eos_token,
'pad_token': tokenizer.pad_token,
'cls_token': tokenizer.cls_token,
'sep_token_extra': tokenizer.eos_token
if hasattr(tokenizer, 'name') and 'roberta' in tokenizer.name.lower()
else None,
}
self.raw_features = dialogues_processor.get_dialog_examples(dataset_split)
self.examples = self._create_examples(self.raw_features, dataset_split)
self.features = self.convert_examples_to_features(
self.examples,
[0, 1, 2, 3],
self.cfg.max_seq_length,
tokenizer,
output_mode="classification",
**token_params,
)
def _create_examples(self, raw_features, dataset_split: str):
"""Creates examples for the training and dev sets."""
examples = []
for idx in range(len(raw_features)):
ex = self.raw_features[idx].data
user_utterance = ex["utterance"]
intent = ex["labels"]["intent"]
for candidate_idx, candidate_intent in enumerate(ex["possible_labels"]["intent"]):
guid = "{}-{}-{}".format(dataset_split, idx, candidate_idx)
text_a = user_utterance
text_b = "{} {}".format(self.cfg.prompt_template, candidate_intent)
label = 1 if candidate_intent == intent else 0
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def convert_examples_to_features(
self,
examples: List[str],
label_list: List[int],
max_seq_length: int,
tokenizer: TokenizerSpec,
output_mode: str,
bos_token: str = None,
eos_token: str = '[SEP]',
pad_token: str = '[PAD]',
cls_token: str = '[CLS]',
sep_token_extra: str = None,
cls_token_at_end: bool = False,
cls_token_segment_id: int = 0,
pad_token_segment_id: int = 0,
pad_on_left: bool = False,
mask_padding_with_zero: bool = True,
sequence_a_segment_id: int = 0,
sequence_b_segment_id: int = 1,
):
"""
Loads a data file into a list of `InputBatch`s.
The `cls_token_at_end` defines the location of the CLS token:
* False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
* True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
The `cls_token_segment_id` defines the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
The convention in BERT is:
a. For sequence pairs:
* tokens: [CLS] is this jack ##ville ? [SEP] no it is not . [SEP]
* type_ids: 0 0 0 0 0 0 0 1 1 1 1 1 1
b. For single sequences:
* tokens: [CLS] the dog is hairy . [SEP]
* type_ids: 0 0 0 0 0 0 0
Where "type_ids" are used to indicate whether this is the first
sequence or the second sequence. The embedding vectors for `type=0`
and `type=1` were learned during pre-training and are added to the
wordpiece embedding vector (and position vector). This is
        not *strictly* necessary since the [SEP] token unambiguously separates
the sequences, but it makes it easier for the model to learn
the concept of sequences.
For classification tasks, the first vector (corresponding to [CLS])
        is used as the "sentence vector". Note that this only makes sense
because the entire model is fine-tuned.
The convention for NMT is:
a. For sequence pairs:
* tokens:<BOS> is this jack ##ville ? <EOS> <BOS> no it is not . <EOS>
* type_ids:0 0 0 0 0 0 0 1 1 1 1 1 1 1
b. For single sequences:
* tokens: <BOS> the dog is hairy . <EOS>
* type_ids: 0 0 0 0 0 0 0
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if example.label == "-": # skip examples without a consensus label (e.g. in SNLI data set)
continue
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d" % (ex_index, len(examples)))
if hasattr(tokenizer, 'text_to_tokens'):
tokens_a = tokenizer.text_to_tokens(example.text_a)
else:
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
if hasattr(tokenizer, 'text_to_tokens'):
tokens_b = tokenizer.text_to_tokens(example.text_b)
else:
tokens_b = tokenizer.tokenize(example.text_b)
special_tokens_count = 2 if eos_token else 0
special_tokens_count += 1 if sep_token_extra else 0
special_tokens_count += 2 if bos_token else 0
special_tokens_count += 1 if cls_token else 0
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
special_tokens_count = 1 if eos_token else 0
special_tokens_count += 1 if sep_token_extra else 0
special_tokens_count += 1 if bos_token else 0
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[: max_seq_length - special_tokens_count]
# Add special tokens to sequence_a
tokens = tokens_a
if bos_token:
tokens = [bos_token] + tokens
if eos_token:
tokens += [eos_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
# Add sequence separator between sequences
if tokens_b and sep_token_extra:
tokens += [sep_token_extra]
segment_ids += [sequence_a_segment_id]
# Add special tokens to sequence_b
if tokens_b:
if bos_token:
tokens += [bos_token]
segment_ids += [sequence_b_segment_id]
tokens += tokens_b
segment_ids += [sequence_b_segment_id] * (len(tokens_b))
if eos_token:
tokens += [eos_token]
segment_ids += [sequence_b_segment_id]
# Add classification token - for BERT models
if cls_token:
if cls_token_at_end:
tokens += [cls_token]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
if hasattr(tokenizer, 'tokens_to_ids'):
input_ids = tokenizer.tokens_to_ids(tokens)
else:
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if hasattr(tokenizer, 'tokens_to_ids'):
pad_token_id = tokenizer.tokens_to_ids([pad_token])[0]
else:
pad_token_id = tokenizer.convert_tokens_to_ids([pad_token])[0]
if pad_on_left:
input_ids = ([pad_token_id] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token_id] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
if len(input_ids) != max_seq_length:
raise ValueError("input_ids must be of length max_seq_length")
if len(input_mask) != max_seq_length:
raise ValueError("input_mask must be of length max_seq_length")
if len(segment_ids) != max_seq_length:
raise ValueError("segment_ids must be of length max_seq_length")
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = np.float32(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s" % (example.guid))
logging.info("tokens: %s" % " ".join(list(map(str, tokens))))
logging.info("input_ids: %s" % " ".join(list(map(str, input_ids))))
logging.info("input_mask: %s" % " ".join(list(map(str, input_mask))))
logging.info("segment_ids: %s" % " ".join(list(map(str, segment_ids))))
logging.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)
)
return features
class InputFeatures(object):
"""A single set of features of data.
Args:
input_ids: input/token ids
input_mask: masks out subword tokens
segment_ids: distinguish one sentence from the other one (if present)
        label_id: label for the current example
"""
def __init__(
self, input_ids: List[int], input_mask: List[int], segment_ids: List[int], label_id: Union[float, int]
):
"""Initialized InputFeatures."""
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_zero_shot_intent_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.core.classes import Dataset
__all__ = ['DialogueDataset']
class DialogueDataset(Dataset):
'''
Base class for Dialogue Datasets
1. Performs Model-dependent (but Data-independent) operations (tokenization etc)
2. This can allow the same model preprocessing for multiple datasources
    3. Users can configure which labels to use for modelling
(e.g. intent classification, slot filling or sequence generation etc)
'''
def __init__(self, dataset_split: str, dialogues_processor: object, **kwargs):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __getitem__(self, idx: int):
raise NotImplementedError
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Optional
import numpy as np
from nemo.collections.nlp.data.data_utils import get_stats
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
__all__ = ['DialogueBERTDataset', 'DialogueIntentSlotInferenceDataset']
class DialogueBERTDataset(DialogueDataset):
"""
Creates a dataset to use for the task of joint intent
and slot classification with pretrained model.
For a dataset to use during inference without labels, see
IntentSlotDataset.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'intent_labels': NeuralType(('B'), LabelsType()),
'slot_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
"""
Args:
dataset_split: dataset split
dialogues_processor: Data generator for dialogues
tokenizer: tokenizer
cfg: config container for dataset
"""
self.cfg = cfg
self.all_possible_labels = dialogues_processor.intents
self.label_to_label_id = {self.all_possible_labels[i]: i for i in range(len(self.all_possible_labels))}
self.all_possible_slots = dialogues_processor.slots
self.slot_name_to_slot_id = {self.all_possible_slots[i]: i for i in range(len(self.all_possible_slots))}
self.empty_slot_name = 'O'
self.features = dialogues_processor.get_dialog_examples(dataset_split)
self.features = self.features if self.cfg.num_samples == -1 else self.features[: self.cfg.num_samples]
queries = [feature.data["utterance"] for feature in self.features]
if self.cfg.do_lowercase:
queries = [query.lower() for query in queries]
intents = [self.label_to_label_id[feature.data["labels"]["intent"]] for feature in self.features]
word_level_slots = [self.convert_slot_position_to_slot_ids(feature.data) for feature in self.features]
features = DialogueBERTDataset.get_features(
queries,
self.cfg.max_seq_length,
tokenizer,
pad_label=self.cfg.pad_label,
word_level_slots=word_level_slots,
ignore_extra_tokens=self.cfg.ignore_extra_tokens,
ignore_start_end=self.cfg.ignore_start_end,
)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_loss_mask = features[3]
self.all_subtokens_mask = features[4]
self.all_slots = features[5]
self.all_intents = intents
def convert_slot_position_to_slot_ids(self, feature):
slot_ids = [self.slot_name_to_slot_id[self.empty_slot_name] for i in range(len(feature["utterance"].split()))]
slot_name_to_positions = feature["label_positions"]["slots"]
for slot_name in slot_name_to_positions:
slot_id = self.slot_name_to_slot_id[slot_name]
start = slot_name_to_positions[slot_name]["start"]
exclusive_end = slot_name_to_positions[slot_name]["exclusive_end"]
for to_replace_position in range(start, min(exclusive_end, len(slot_ids))):
slot_ids[to_replace_position] = slot_id
return slot_ids
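    # Worked example (values are hypothetical): for an utterance "book a table at the blue fin"
    # with label_positions {"slots": {"restaurant_name": {"start": 5, "exclusive_end": 7}}}
    # and slot_name_to_slot_id {"O": 0, "restaurant_name": 1}, the returned word-level
    # slot ids are [0, 0, 0, 0, 0, 1, 1].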
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.long),
np.array(self.all_loss_mask[idx]),
np.array(self.all_subtokens_mask[idx]),
self.all_intents[idx],
np.array(self.all_slots[idx]),
)
@staticmethod
def truncate_and_pad(
max_seq_length,
ignore_start_end,
with_label,
pad_label,
tokenizer,
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
):
too_long_count = 0
for i, subtokens in enumerate(all_subtokens):
if len(subtokens) > max_seq_length:
subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :]
all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :]
all_loss_mask[i] = [1 - ignore_start_end] + all_loss_mask[i][-max_seq_length + 1 :]
all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :]
if with_label:
all_slots[i] = [pad_label] + all_slots[i][-max_seq_length + 1 :]
too_long_count += 1
all_input_ids.append([tokenizer.tokens_to_ids(t) for t in subtokens])
if len(subtokens) < max_seq_length:
extra = max_seq_length - len(subtokens)
all_input_ids[i] = all_input_ids[i] + [0] * extra
all_loss_mask[i] = all_loss_mask[i] + [0] * extra
all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra
all_input_mask[i] = all_input_mask[i] + [0] * extra
if with_label:
all_slots[i] = all_slots[i] + [pad_label] * extra
all_segment_ids.append([0] * max_seq_length)
        logging.info(f'{too_long_count} sequences are longer than {max_seq_length} and were truncated')
return (
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
)
@staticmethod
def get_features(
queries,
max_seq_length,
tokenizer,
pad_label=128,
word_level_slots=None,
ignore_extra_tokens=False,
ignore_start_end=False,
):
"""
Convert queries (utterance, intent label and slot labels) to BERT input format
"""
all_subtokens = []
all_loss_mask = []
all_subtokens_mask = []
all_segment_ids = []
all_input_ids = []
all_input_mask = []
sent_lengths = []
all_slots = []
with_label = word_level_slots is not None
for i, query in enumerate(queries):
words = query.strip().split()
subtokens = [tokenizer.cls_token]
loss_mask = [1 - ignore_start_end]
subtokens_mask = [0]
if with_label:
slots = [pad_label]
for j, word in enumerate(words):
word_tokens = tokenizer.text_to_tokens(word)
# to handle emojis that could be neglected during tokenization
if len(word.strip()) > 0 and len(word_tokens) == 0:
word_tokens = [tokenizer.ids_to_tokens(tokenizer.unk_id)]
subtokens.extend(word_tokens)
# mask all sub-word tokens except the first token in a word
# use the label for the first sub-word token as the label for the entire word to eliminate need for disambiguation
loss_mask.append(1)
loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1))
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_tokens) - 1))
if with_label:
slots.extend([word_level_slots[i][j]] * len(word_tokens))
subtokens.append(tokenizer.sep_token)
loss_mask.append(1 - ignore_start_end)
subtokens_mask.append(0)
sent_lengths.append(len(subtokens))
all_subtokens.append(subtokens)
all_loss_mask.append(loss_mask)
all_subtokens_mask.append(subtokens_mask)
all_input_mask.append([1] * len(subtokens))
if with_label:
slots.append(pad_label)
all_slots.append(slots)
max_seq_length_data = max(sent_lengths)
max_seq_length = min(max_seq_length, max_seq_length_data) if max_seq_length > 0 else max_seq_length_data
logging.info(f'Setting max length to: {max_seq_length}')
get_stats(sent_lengths)
# truncate and pad samples
(
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
) = DialogueBERTDataset.truncate_and_pad(
max_seq_length,
ignore_start_end,
with_label,
pad_label,
tokenizer,
all_slots,
all_subtokens,
all_input_mask,
all_loss_mask,
all_subtokens_mask,
all_input_ids,
all_segment_ids,
)
# log examples for debugging
logging.debug("*** Some Examples of Processed Data ***")
for i in range(min(len(all_input_ids), 5)):
logging.debug("i: %s" % (i))
logging.debug("subtokens: %s" % " ".join(list(map(str, all_subtokens[i]))))
logging.debug("loss_mask: %s" % " ".join(list(map(str, all_loss_mask[i]))))
logging.debug("input_mask: %s" % " ".join(list(map(str, all_input_mask[i]))))
logging.debug("subtokens_mask: %s" % " ".join(list(map(str, all_subtokens_mask[i]))))
if with_label:
logging.debug("slots_label: %s" % " ".join(list(map(str, all_slots[i]))))
return (all_input_ids, all_segment_ids, all_input_mask, all_loss_mask, all_subtokens_mask, all_slots)
class DialogueIntentSlotInferenceDataset(DialogueBERTDataset):
"""
Creates dataset to use for the task of joint intent
and slot classification with pretrained model.
This is to be used during inference only.
It uses list of queries as the input.
Args:
queries (list): list of queries to run inference on
max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]
tokenizer (Tokenizer): such as NemoBertTokenizer
pad_label (int): pad value use for slot labels.
by default, it's the neutral label.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""
Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
}
def __init__(self, queries, max_seq_length, tokenizer, do_lower_case):
if do_lower_case:
queries = [query.lower() for query in queries]
features = DialogueBERTDataset.get_features(queries, max_seq_length, tokenizer)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_loss_mask = features[3]
self.all_subtokens_mask = features[4]
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.long),
np.array(self.all_loss_mask[idx]),
np.array(self.all_subtokens_mask[idx]),
)
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_bert_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
class DialogueS2SGenerationDataset(DialogueDataset):
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, cfg):
""" Constructor
Designed for free form generation tasks such as Dialogue Response Generation
Args:
dataset_split: dataset split
dialogues_processor: dialogues processor
tokenizer: tokenizer
cfg: cfg container for dataset
"""
self.cfg = cfg
self.input_label_type = self.cfg.input_field
self.output_label_type = self.cfg.output_field
self.tokenizer = tokenizer
if not isinstance(dataset_split, str):
dataset_split = dataset_split[0]
self.features = dialogues_processor.get_dialog_examples(dataset_split)
self.features = self.remove_invalid_samples(self.features)
if self.cfg.debug_mode:
self.features = self.features[:16]
@staticmethod
def format_actions(prompt_template, actions):
"""
Formats actions based on prompt_template
Args:
prompt_template: determines whether acts, slot-names, slot-values are necessary in formatted actions
            actions: list of actions, each a dict with keys 'act', 'slot' and 'values'
Returns:
formatted_actions: string representations of actions, formatted based on the fields needed.
"""
actions_str = []
for action in actions:
act = action['act'].lower()
slot = action['slot']
value = action['values'][0] if action['values'] else ''
if prompt_template == 'values':
action_str = value
elif prompt_template == 'slots_values':
if value:
action_str = '{} ({})'.format(slot, value)
else:
action_str = slot
elif prompt_template == 'acts_slots_values':
if value:
action_str = '{} {} ({})'.format(act, slot, value)
elif slot:
action_str = '{} {}'.format(act, slot)
else:
action_str = act
else:
raise ValueError(
"Please set model.dataset.prompt_template to acts_slots_values, slots_values or values"
)
actions_str.append(action_str)
return ' '.join(actions_str)
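    # Worked example (values are hypothetical): for actions = [{"act": "REQUEST", "slot": "time", "values": ["6 pm"]}],
    # format_actions returns "6 pm" with prompt_template='values', "time (6 pm)" with 'slots_values',
    # and "request time (6 pm)" with 'acts_slots_values'.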
def remove_invalid_samples(self, features):
valid_idxs = []
for i in range(len(features)):
for field in ['utterance', 'system_utterance', 'system_actions']:
if field in features[i].data:
features[i].data["labels"][field] = features[i].data[field]
all_fields = self.input_label_type.split('+') + self.output_label_type.split('+')
all_fields_non_empty = True
for field in all_fields:
if not features[i].data["labels"][field]:
all_fields_non_empty = False
if all_fields_non_empty:
valid_idxs.append(i)
return [features[i] for i in valid_idxs]
def __len__(self):
return len(self.features)
def get_n_tokens_in_sentence(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding=False, return_tensors="pt"
)
output = torch.squeeze(encodings_dict['input_ids'])
return len(output) if len(output.size()) > 0 else 0
def default_encode(self, sentence):
encodings_dict = self.tokenizer.tokenizer(
sentence, truncation=True, max_length=self.cfg.max_seq_length, padding="max_length", return_tensors="pt"
)
input_ids = torch.squeeze(encodings_dict['input_ids'])
attn_masks = torch.squeeze(encodings_dict['attention_mask'])
return encodings_dict, input_ids, attn_masks
def format_prompt(self, ex):
'''
Formats training prompt based on self.input_field_type
Training example:
e.g. response: <response> # input_label_type = response
e.g. utterance: <utterance> # input_label_type = utterance
e.g. passage: <passage> utterance: <utterance> # input_label_type = passage+utterance
'''
parts = self.input_label_type.split('+')
input_sentence = ' '.join([part + ': ' + ex["labels"][part] for part in parts])
return input_sentence
def __getitem__(self, idx: int):
'''
State how the input and output samples look like
This template can be changed
Training example:
e.g. INPUT - "response: <response>" OUTPUT - "<fluent_response>" # input_label_type = response, output_label_type = fluent_response
e.g. INPUT - "utterance: <utterance>" OUTPUT - "<response>" # input_label_type = utterance, output_label_type = response
e.g. INPUT - "passage: <passage> utterance: <utterance>" OUTPUT - "<response>" # input_label_type = passage+utterance, output_label_type = response
'''
ex = self.features[idx].data
for field in ['utterance', 'system_utterance']:
if field in ex:
ex["labels"][field] = ex[field]
if 'system_actions' in ex:
ex["labels"]['system_actions'] = DialogueS2SGenerationDataset.format_actions(
self.cfg.prompt_template, ex['system_actions']
)
input_sentence = self.format_prompt(ex)
output_sentence = ex["labels"][self.output_label_type]
_, input_ids, attn_masks = self.default_encode(input_sentence)
_, labels, _ = self.default_encode(output_sentence)
labels[labels == self.tokenizer.tokenizer.pad_token_id] = -100
return input_ids, attn_masks, labels
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_s2s_generation_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst
"""
import os
import re
from typing import List
import numpy as np
from nemo.collections.nlp.data.dialogue.dataset.dialogue_dataset import DialogueDataset
from nemo.collections.nlp.data.dialogue.input_example.sgd_input_example import SGDInputExample
__all__ = ['DialogueSGDBERTDataset']
class DialogueSGDBERTDataset(DialogueDataset):
'''
Dataset Class
1. Performs Model-dependent (but Data-independent) operations (tokenization etc)
2. This can allow the same model preprocessing for multiple datasources
        3. Users can configure which labels to use for modelling
(e.g. intent classification, slot filling or both together etc)
'''
def __init__(self, dataset_split: str, dialogues_processor: object, tokenizer, schemas, schema_config, cfg):
""" Constructor
Args:
dataset_split: dataset split
dialogues_processor: Data generator for SGD dialogues
tokenizer: tokenizer
schemas: SGD schema for domain, intent and slots
schema_config: config dict for schemas
cfg: cfg container for dataset
"""
self.dataset_split = dataset_split
self.tokenizer = tokenizer
self.schemas = schemas
self.schema_config = schema_config
self.dialogues_processor = dialogues_processor
self.cfg = cfg
self.subsample = self.dialogues_processor._subsample
dial_file = f"{dialogues_processor._task_name}_{dataset_split}_examples_bert.processed"
self.dial_file = os.path.join(self.cfg.data_dir, dial_file)
if self.cfg.use_cache and os.path.exists(self.dial_file):
self.load_features()
else:
self.process_features()
self.save_features()
def load_features(self):
with open(self.dial_file, "rb") as f:
self.features = np.load(f, allow_pickle=True)
def process_features(self):
self.features = []
self.raw_features = self.dialogues_processor.get_dialog_examples(self.dataset_split)
for idx in range(len(self.raw_features)):
self.bert_process_one_sample(idx)
def save_features(self):
with open(self.dial_file, "wb") as f:
np.save(f, self.features)
def _tokenize(self, utterance: str):
"""
Tokenize the utterance
Args:
utterance: A string containing the utterance to be tokenized.
Returns:
bert_tokens: A list of tokens obtained by word-piece tokenization of the
utterance.
alignments: A dict mapping indices of characters corresponding to start
and end positions of words (not subwords) to corresponding indices in
bert_tokens list.
inverse_alignments: A list of size equal to bert_tokens. Each element is a
tuple containing the index of the starting and inclusive ending
character of the word corresponding to the subword. This list is used
during inference to map word-piece indices to spans in the original
utterance.
"""
# utterance = tokenization.convert_to_unicode(utterance)
# After _naive_tokenize, spaces and punctuation marks are all retained, i.e.
# direct concatenation of all the tokens in the sequence will be the
# original string.
tokens = DialogueSGDBERTDataset._naive_tokenize(utterance)
# ['I', ' ', 'am', ' ', 'feeling', ' ', 'hungry', ' ', 'so', ' ', 'I', ' ', 'would', ' ', 'like', ' ', 'to', ' ', 'find', ' ', 'a', ' ', 'place', ' ', 'to', ' ', 'eat', '.']
# Filter out empty tokens and obtain aligned character index for each token.
alignments = {}
char_index = 0
        # bert_tokens will hold the word-piece tokens, e.g.
        # ['I', 'am', 'feeling', 'hungry', 'so', 'I', 'would', 'like', 'to', 'find', 'a', 'place', 'to', 'eat', '.']
        bert_tokens = []
# These lists store inverse alignments to be used during inference.
bert_tokens_start_chars = []
bert_tokens_end_chars = []
for token in tokens:
if token.strip():
subwords = self.tokenizer.text_to_tokens(token)
# Store the alignment for the index of starting character and the
# inclusive ending character of the token.
alignments[char_index] = len(bert_tokens)
bert_tokens_start_chars.extend([char_index] * len(subwords))
bert_tokens.extend(subwords)
# The inclusive ending character index corresponding to the word.
inclusive_char_end = char_index + len(token) - 1
alignments[inclusive_char_end] = len(bert_tokens) - 1
bert_tokens_end_chars.extend([inclusive_char_end] * len(subwords))
char_index += len(token)
inverse_alignments = list(zip(bert_tokens_start_chars, bert_tokens_end_chars))
return bert_tokens, alignments, inverse_alignments
@classmethod
def _naive_tokenize(cls, s: str):
"""
Tokenizes a string, separating words, spaces and punctuations.
Args:
s: a string
Returns:
seq_tok: list of words, spaces and punctuations from the string
"""
# Spaces and punctuation marks are all retained, i.e. direct concatenation
# of all the tokens in the sequence will be the original string.
seq_tok = [tok for tok in re.split(r"([^a-zA-Z0-9])", s) if tok]
return seq_tok
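    # Worked example: _naive_tokenize("I'd like 2 tickets.") returns
    # ['I', "'", 'd', ' ', 'like', ' ', '2', ' ', 'tickets', '.'],
    # so concatenating the tokens reproduces the original string exactly.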
def __len__(self):
return len(self.features)
def __getitem__(self, idx: int):
ex = self.features[idx]
return (
np.array(ex.example_id_num),
np.array(ex.example_id_num[-1]), # service_id
np.array(ex.utterance_ids),
np.array(ex.utterance_segment),
np.array(ex.utterance_mask, dtype=np.long),
np.array(ex.intent_status, dtype=np.float32),
np.array(ex.requested_slot_status, dtype=np.float32),
np.array(ex.categorical_slot_status),
np.array(ex.categorical_slot_value_status, dtype=np.float32),
np.array(ex.noncategorical_slot_status),
np.array(ex.noncategorical_slot_value_start),
np.array(ex.noncategorical_slot_value_end),
np.array(ex.start_char_idx), # noncat_alignment_start
np.array(ex.end_char_idx), # noncat_alignment_end
            np.array(ex.task_mask),
)
def bert_process_one_sample(self, idx):
"""
Creates an example for each frame in the user turn.
Args:
turn_id: turn number
system_utterance: last system utterance
user_utterance: lst user utterance
system_frames: all system utterances and slot - slot value pairs
user_frames: all user utterances and slot - slot value pairs
prev_states: slot - slot value pairs from the previous turns
schemas: schema for all services of all datasets
subsample: whether to balance postive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
prev_states: updated dialogue state e.g. {'Restaurants_1': {'city': ['San Jose'], 'cuisine': ['American']}}
"""
ex = self.raw_features[idx].data
example_id_num = ex["example_id_num"]
example_id = ex["example_id"]
user_utterance = ex["utterance"]
system_utterance = ex["system_utterance"]
service = ex["labels"]["service"]
schemas = self.schemas
state_update = ex["labels"]["slots"]
system_slots = ex["system_slots"]
user_tokens, user_alignments, user_inv_alignments = self._tokenize(user_utterance)
system_tokens, system_alignments, system_inv_alignments = self._tokenize(system_utterance)
system_user_utterance = system_utterance + ' ' + user_utterance
system_user_tokens, system_user_alignments, system_user_inv_alignments = self._tokenize(system_user_utterance)
examples = []
base_example = SGDInputExample(schema_config=self.schema_config, tokenizer=self.tokenizer)
base_example.service_schema = self.schemas.get_service_schema(service)
base_example.service_id = example_id_num[-1]
base_example.example_id = example_id
base_example.example_id_num = example_id_num
for model_task in range(self.schema_config["NUM_TASKS"]):
if model_task == 0:
for intent_id, intent in enumerate(schemas.get_service_schema(service).intents):
task_example = base_example.make_copy()
task_example.task_mask[model_task] = 1
task_example.intent_id = intent_id
task_example.example_id += f"-{model_task}-{intent_id}-0"
task_example.example_id_num.extend([model_task, intent_id, 0])
intent_description = (
intent + " " + self.schemas.get_service_schema(service).intent_descriptions[intent]
)
intent_tokens, intent_alignments, intent_inv_alignments = self._tokenize(intent_description)
task_example.add_utterance_features(
intent_tokens,
intent_inv_alignments,
system_user_tokens,
system_user_inv_alignments,
intent_description,
system_user_utterance,
)
task_example.add_intents(ex)
examples.append(task_example)
if model_task == 1:
for slot_id, slot in enumerate(schemas.get_service_schema(service).slots):
task_example = base_example.make_copy()
task_example.task_mask[model_task] = 1
task_example.requested_slot_id = slot_id
task_example.example_id += f"-{model_task}-{slot_id}-0"
task_example.example_id_num.extend([model_task, slot_id, 0])
slot_description = slot + " " + self.schemas.get_service_schema(service).slot_descriptions[slot]
slot_tokens, slot_alignments, slot_inv_alignments = self._tokenize(slot_description)
task_example.add_utterance_features(
slot_tokens,
slot_inv_alignments,
user_tokens,
user_inv_alignments,
slot_description,
user_utterance,
)
task_example.add_requested_slots(ex)
examples.append(task_example)
if model_task == 2:
off_slots = []
on_slots = []
for slot_id, slot in enumerate(schemas.get_service_schema(service).categorical_slots):
task_example = base_example.make_copy()
task_example.task_mask[model_task] = 1
# assert task_example.task_mask == [0, 0, 1, 0, 0, 0]
task_example.categorical_slot_id = slot_id
task_example.example_id += f"-{model_task}-{slot_id}-0"
task_example.example_id_num.extend([model_task, slot_id, 0])
slot_description = slot + " " + schemas.get_service_schema(service).slot_descriptions[slot]
slot_tokens, slot_alignments, slot_inv_alignments = self._tokenize(slot_description)
task_example.add_utterance_features(
slot_tokens,
slot_inv_alignments,
system_user_tokens,
system_user_inv_alignments,
slot_description,
system_user_utterance,
)
task_example.add_categorical_slots(state_update)
if task_example.categorical_slot_status == 0:
off_slots.append(task_example)
else:
on_slots.append(task_example)
examples.append(task_example)
old_example = task_example
for value_id, value in enumerate(
schemas.get_service_schema(service).get_categorical_slot_values(slot)
):
if self.dataset_split != 'train' or task_example.categorical_slot_status == 1:
task_example = old_example.make_copy_of_categorical_features()
task_example.task_mask[3] = 1
# assert task_example.task_mask == [0, 0, 0, 1, 0, 0]
task_example.categorical_slot_id = slot_id
task_example.categorical_slot_value_id = value_id
task_example.example_id = base_example.example_id + f"-3-{slot_id}-{value_id}"
task_example.example_id_num = base_example.example_id_num + [3, slot_id, value_id]
slot_description = slot + " " + value # add slot description
slot_tokens, slot_alignments, slot_inv_alignments = self._tokenize(slot_description)
task_example.add_utterance_features(
slot_tokens,
slot_inv_alignments,
system_user_tokens,
system_user_inv_alignments,
slot_description,
system_user_utterance,
)
task_example.add_categorical_slots(state_update)
assert task_example.categorical_slot_status == old_example.categorical_slot_status
examples.append(task_example)
if self.dataset_split == 'train' and self.subsample:
num_on_slots = len(on_slots)
examples.extend(
np.random.choice(off_slots, replace=False, size=min(max(num_on_slots, 1), len(off_slots)))
)
else:
examples.extend(off_slots)
if model_task == 4: # noncat slot status
off_slots = []
on_slots = []
for slot_id, slot in enumerate(schemas.get_service_schema(service).non_categorical_slots):
task_example = base_example.make_copy()
task_example.task_mask[model_task] = 1
# assert task_example.task_mask == [0, 0, 0, 0, 1, 0]
task_example.noncategorical_slot_id = slot_id
task_example.example_id += f"-{model_task}-{slot_id}-0"
task_example.example_id_num.extend([model_task, slot_id, 0])
slot_description = slot + " " + schemas.get_service_schema(service).slot_descriptions[slot]
slot_tokens, slot_alignments, slot_inv_alignments = self._tokenize(slot_description)
task_example.add_utterance_features(
slot_tokens,
slot_inv_alignments,
system_user_tokens,
system_user_inv_alignments,
slot_description,
system_user_utterance,
)
user_span_boundaries = self._find_subword_indices(
state_update,
user_utterance,
ex["label_positions"]["slots"],
user_alignments,
user_tokens,
2 + len(slot_tokens) + len(system_tokens),
)
if system_slots is not None:
system_span_boundaries = self._find_subword_indices(
state_update,
system_utterance,
system_slots,
system_alignments,
system_tokens,
2 + len(slot_tokens),
)
else:
system_span_boundaries = {}
task_example.add_noncategorical_slots(state_update, user_span_boundaries, system_span_boundaries)
if task_example.noncategorical_slot_status == 0:
off_slots.append(task_example)
else:
on_slots.append(task_example)
examples.append(task_example)
if self.dataset_split != 'train' or task_example.noncategorical_slot_status == 1:
task_example = task_example.make_copy_of_non_categorical_features()
task_example.task_mask[5] = 1
# assert task_example.task_mask == [0, 0, 0, 0, 0, 1]
task_example.example_id = base_example.example_id + f"-5-{slot_id}-0"
task_example.example_id_num = base_example.example_id_num + [5, slot_id, 0]
examples.append(task_example)
if self.dataset_split == 'train' and self.subsample:
num_on_slots = len(on_slots)
examples.extend(
np.random.choice(off_slots, replace=False, size=min(max(num_on_slots, 1), len(off_slots)))
)
else:
examples.extend(off_slots)
for example in examples:
self.features.append(example)
def _find_subword_indices(
self,
slot_values: dict,
utterance: str,
char_slot_spans: dict,
alignments: List[int],
subwords: List[str],
bias: int,
) -> dict:
"""
Find indices for subwords corresponding to slot values.
Args:
slot_values: slot - slot value pairs
utterance: utterance
char_slot_spans: char - slot spans
alignments: alignments
subwords: subtokens mapping
bias: offset
Returns:
span_boundaries: span boundaries
"""
span_boundaries = {}
for slot, values in slot_values.items():
# Get all values present in the utterance for the specified slot.
value_char_spans = {}
for key, slot_span in char_slot_spans.items():
# print(key, slot, slot_span, char_slot_spans)
if slot_span["slot"] == slot:
value = utterance[slot_span["start"] : slot_span["exclusive_end"]]
start_tok_idx = alignments[slot_span["start"]]
end_tok_idx = alignments[slot_span["exclusive_end"] - 1]
if 0 <= start_tok_idx < len(subwords):
end_tok_idx = min(end_tok_idx, len(subwords) - 1)
value_char_spans[value] = (start_tok_idx + bias, end_tok_idx + bias)
for v in values:
if v in value_char_spans:
span_boundaries[slot] = value_char_spans[v]
break
return span_boundaries
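    # Worked example (values are hypothetical): if char_slot_spans has an entry
    # {"slot": "city", "start": 10, "exclusive_end": 18} with utterance[10:18] == "San Jose",
    # alignments maps char 10 -> subword 3 and char 17 -> subword 4, and bias == 5,
    # then span_boundaries == {"city": (8, 9)}, provided "San Jose" appears in slot_values["city"].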
| NeMo-main | nemo/collections/nlp/data/dialogue/dataset/dialogue_sgd_bert_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
class DialogueDesignInputExample(DialogueInputExample):
"""
Template for DialogueDesignInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"utterance": <utterance>,
"system_utterance": <system_utterance>,
"labels": {
"service": <service>,
"intent": <intent_description>,
"slots": {
<slot-name1>: '',
<slot-name2>: '',
}, # dataset does not contain ground truth slot values
},
"possible_labels": {
'intent': [<intent1>, <intent2>, ...],
"service": [<service1>, <service2>, ...],
"slots": {
"<slot-name1>": [<slot-value1>, <slot-value2>, ...],
"<slot-name2>": [<slot-value1>, <slot-value2>, ...],
}
},
"description": {
"service": <service>,
"intent": <intent_description>,
"slots": {
"<slot-name1>": "<slot-question1>",
"<slot-name2>": "<slot-question2>",
}
},
}
"""
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/design_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
class DialogueAssistantInputExample(DialogueInputExample):
"""
Template for DialogueAssistantInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"utterance": <utterance>,
"labels": {
"service": <service>,
"intent": <intent>,
"slots": {
"<slot-name1>": [<slot-value1>, <slot-value2>],
"<slot-name2>": [<slot-value2>],
}
},
"label_positions":{
"slots": {
"<slot-name1>": {
# note for the Assistant dataset, start and end are word positions rather than char position
# these are whitespace-delimited word positions rather than tokenization-specific sub-word tokens.
"exclusive_end": 3,
"slot": "restaurant_name",
"start": 1
},
}
},
"possible_labels": {
"service": [<service1>, <service2>, ...],
"intent": [<intent1>, <intent2>, ...],
"slots": {
# all slots for categorical variables
# empty list for extractive slots
# Assistant only support extractive slots
"<slot-name1>": [],
"<slot-name2>": [],
}
}
}
"""
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/assistant_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
class DialogueMSMarcoInputExample(DialogueInputExample):
"""
Template for DialogueMSMarcoInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"utterance": <utterance>,
"labels": {
"service": <service>, # this is the domain
"example_id": <example_id>,
"response": <response>,
"fluent_response": <fluent_response>, # written version of the response that is more fluent
"passage": <passage>, # passage which supports generating the response (answer) to the utterance (question)
},
"possible_labels": {
"service": [<service1>, <service2>, ...],
"passage": [<passage1>, <passage2>, ...],
}
}
"""
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/ms_marco_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.assistant_input_example import DialogueAssistantInputExample
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
from nemo.collections.nlp.data.dialogue.input_example.sgd_input_example import DialogueSGDInputExample, SGDInputExample
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['DialogueInputExample']
class DialogueInputExample(object):
"""
Generic Dialogue Input Example
Uses data: dict as a flexible interface to support various input types.
This ranges from classification labels, to complex nested labels such as those in SGD
{
"utterance": <utterance>,
"labels": {
"intent": <intent>,
"slots": { ... },
}
}
"""
def __init__(self, data: dict):
self.data = data
    def __repr__(self):
        return str(self.data)
    def __str__(self):
        return str(self.data)
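# Illustrative usage (values are made up):
#
#     example = DialogueInputExample({
#         "utterance": "what's the weather tomorrow",
#         "labels": {"intent": "weather_query", "slots": {"date": ["tomorrow"]}},
#     })
#     example.data["labels"]["intent"]  # -> "weather_query"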
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
class MellonQAInputExample(DialogueInputExample):
"""
Template for MellonQAInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"utterance": <utterance>,
"labels": {
"example_id": <example_id>,
"response": <response>,
"fluent_response": <fluent_response>, # written version of the response that is more fluent
"passage": <passage>, # passage which supports generating the response (answer) to the utterance (question)
}
}
"""
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/mellon_qa_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/data_utils.py
"""
from typing import List
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
from nemo.utils import logging
__all__ = [
'SGDInputExample',
'STR_DONTCARE',
'STATUS_OFF',
'STATUS_ACTIVE',
'STATUS_DONTCARE',
]
class DialogueSGDInputExample(DialogueInputExample):
"""
Template for DialogueSGDInputExample
Meant as a descriptor rather than to be instantiated
Please instantiate using the base class 'DialogueInputExample'
{
"example_id": <example_id>,
"example_id_num": <example_id_num>,
"utterance": <utterance>,
"system_utterance": <system_utterance>,
"system_slots": None or {
"<slot-name1>": {
"exclusive_end": 46,
"slot": "restaurant_name",
"start": 34
},
"system_actions": None or [{
"act": "INFORM",
"canonical_values": [
"2019-03-02"
],
"slot": "date",
"values": [
"March 2nd"
]
}, ...]
"labels": {
"service": <service>,
"intent": <intent>,
"slots": {
#only non-empty slots
#most slot values are list of length 1
#but there are some of length 2 as both are accepted
#e.g. 1930 and 7:30 pm
"<slot-name1>": [<slot-value1>, <slot-value2>],
"<slot-name2>": [<slot-value2>],
}
},
"label_positions":{
"slots": {
"<slot-name1>": {
"exclusive_end": 46,
"slot": "restaurant_name",
"start": 34
},
}
},
"possible_labels": {
"service": [<service1>, <service2>, ...],
"intent": [<intent1>, <intent2>, ...],
"slots": {
#all slots including empty
"<slot-name1>": [<slot-value1>, <slot-value2>, ...],
"<slot-name2>": [<slot-value1>, <slot-value2>, ...],
}
},
"description": {
"service": <service description>,
"intent": <intent description>,
"slots": {
#only non-empty slots
"<slot-name1>": <slot-name1 description>,
"<slot-name2>": <slot-name2 description>,
}
}
}
"""
STR_DONTCARE = "dontcare"
# These are used to represent the status of slots (off, active, dontcare) and
# intents (off, active) in dialogue state tracking.
STATUS_OFF = 0
STATUS_ACTIVE = 1
STATUS_DONTCARE = 2
class SGDInputExample(object):
"""An example for training/inference."""
def __init__(
self,
schema_config: dict,
tokenizer: object,
service_schema: object = None,
example_id: str = "NONE",
example_id_num: List[int] = [],
):
"""
Constructs an InputExample.
Args:
schema_config: configuration
tokenizer: tokenizer object
service_schema: A ServiceSchema object wrapping the schema for the service
corresponding to this example.
example_id: Unique identifier for the example, like: 'train-1_00000-00-Restaurants_1'
example_id_num: dialogue_id and turn_id combined and service id combined into a list of ints,
like: [1, 0, 0, 18]
"""
self.schema_config = schema_config
self.service_schema = service_schema
self.service_id = None
if service_schema:
self.service_id = service_schema.service_id
self.example_id = example_id
self.example_id_num = example_id_num
self._max_seq_length = schema_config["MAX_SEQ_LENGTH"]
self._tokenizer = tokenizer
if self._tokenizer is None:
raise ValueError("Must specify tokenizer")
self.user_utterance = ''
self.system_utterance = ''
# The id of each subword in the vocabulary for BERT.
self.utterance_ids = [0] * self._max_seq_length
# Denotes the identity of the sequence. Takes values 0 (schema description) and 1 (system and user utterance).
self.utterance_segment = [0] * self._max_seq_length
# Mask which takes the value 0 for padded tokens and 1 otherwise.
self.utterance_mask = [0] * self._max_seq_length
# Start and inclusive end character indices in the original utterance
# corresponding to the tokens. This is used to obtain the character indices
# from the predicted subword indices during inference.
# NOTE: A positive value indicates the character indices in the schema description
# whereas a negative value indicates the character indices in the
# utterance. The indices are offset by 1 to prevent ambiguity in the
# 0 index, which could be in either the schema description or utterance by the
# above convention. Now the 0 index corresponds to padded tokens.
self.start_char_idx = [0] * self._max_seq_length
self.end_char_idx = [0] * self._max_seq_length
# Id of categorical slot present in the example or 0 if not present.
self.categorical_slot_id = 0
# Id of non categorical slot present in the example or 0 if not present.
self.noncategorical_slot_id = 0
# The status of categorical slot in the example.
self.categorical_slot_status = STATUS_OFF
# The status of non categorical slot in the example.
self.noncategorical_slot_status = STATUS_OFF
# Masks out tasks not represented by example
self.task_mask = [0] * schema_config["NUM_TASKS"]
# The index of the starting subword corresponding to the slot span
# for a non-categorical slot value.
self.noncategorical_slot_value_start = 0
# The index of the ending (inclusive) subword corresponding to the slot span
# for a non-categorical slot value.
self.noncategorical_slot_value_end = 0
# Id of categorical slot value present in the example or 0 if not present.
self.categorical_slot_value_id = 0
# The status of categorical slot value in the example.
self.categorical_slot_value_status = STATUS_OFF
# Id of requested slot present in the example or 0 if not present.
self.requested_slot_id = 0
# Takes value 1 if the corresponding slot is requested, 0 otherwise.
self.requested_slot_status = STATUS_OFF
# ID of intent present in the example.
self.intent_id = 0
# Takes value 1 if the intent is active, 0 otherwise.
self.intent_status = STATUS_OFF
@property
def readable_summary(self):
"""Get a readable dict that summarizes the attributes of an InputExample."""
seq_length = sum(self.utterance_mask)
utt_toks = self._tokenizer.ids_to_tokens(self.utterance_ids[:seq_length])
utt_tok_mask_pairs = list(zip(utt_toks, self.utterance_segment[:seq_length]))
active_intent = (
self.service_schema.get_intent_from_id(self.intent_id) if self.intent_status == STATUS_ACTIVE else ""
)
slot_values_in_state = {}
if self.categorical_slot_status == STATUS_ACTIVE:
slot_values_in_state[
self.service_schema.get_categorical_slot_from_id(self.categorical_slot_id)
] = self.service_schema.get_categorical_slot_value_from_id(
self.categorical_slot_id, self.categorical_slot_value_id
)
elif self.categorical_slot_status == STATUS_DONTCARE:
slot_values_in_state[
self.service_schema.get_categorical_slot_from_id(self.categorical_slot_id)
] = STR_DONTCARE
if self.noncategorical_slot_status == STATUS_ACTIVE:
slot = self.service_schema.get_non_categorical_slot_from_id(self.noncategorical_slot_id)
start_id = self.noncategorical_slot_value_start[slot]
end_id = self.noncategorical_slot_value_end[slot]
# Token list is consisted of the subwords that may start with "##". We
# remove "##" to reconstruct the original value. Note that it's not a
# strict restoration of the original string. It's primarily used for
# debugging.
# ex. ["san", "j", "##ose"] --> "san jose"
readable_value = " ".join(utt_toks[start_id : end_id + 1]).replace(" ##", "")
slot_values_in_state[slot] = readable_value
elif self.noncategorical_slot_status == STATUS_DONTCARE:
slot = self.service_schema.get_non_categorical_slot_from_id(self.noncategorical_slot_id)
slot_values_in_state[slot] = STR_DONTCARE
summary_dict = {
"utt_tok_mask_pairs": utt_tok_mask_pairs,
"utt_len": seq_length,
"categorical_slot_id": self.categorical_slot_id,
"noncategorical_slot_id": self.noncategorical_slot_id,
"intent_id": self.intent_id,
"service_name": self.service_schema.service_name,
"active_intent": active_intent,
"slot_values_in_state": slot_values_in_state,
}
return summary_dict
def add_utterance_features(
self, system_tokens, system_inv_alignments, user_tokens, user_inv_alignments, system_utterance, user_utterance
):
"""Add utterance related features input to InputExample.
Note: this method modifies the system tokens and user_tokens in place to
make their total length <= the maximum input length for BERT model.
Args:
system_tokens: a list of strings which represents schema description.
system_inv_alignments: a list of tuples which denotes the start and end
                character of the token that a bert token originates from in the original
schema description.
user_tokens: a list of strings which represents utterance.
user_inv_alignments: a list of tuples which denotes the start and end
                character of the token that a bert token originates from in the original
system and user utterance.
"""
# Input sequence length for utterance BERT encoder
max_utt_len = self._max_seq_length
# Modify lengths of schema description & utterance so that length of total utt
        # (including cls_token and the two sep_tokens) is no more than max_utt_len
is_too_long = truncate_seq_pair(system_tokens, user_tokens, max_utt_len - 3)
if is_too_long:
logging.debug(
f'Utterance sequence truncated in example id - {self.example_id} from {len(system_tokens) + len(user_tokens)}.'
)
# Construct the tokens, segment mask and valid token mask which will be
# input to BERT, using the tokens for schema description (sequence A) and
# system and user utterance (sequence B).
utt_subword = []
utt_seg = []
utt_mask = []
start_char_idx = []
end_char_idx = []
utt_subword.append(self._tokenizer.cls_token)
utt_seg.append(0)
utt_mask.append(1)
start_char_idx.append(0)
end_char_idx.append(0)
for subword_idx, subword in enumerate(system_tokens):
utt_subword.append(subword)
utt_seg.append(0)
utt_mask.append(1)
st, en = system_inv_alignments[subword_idx]
start_char_idx.append(-(st + 1))
end_char_idx.append(-(en + 1))
utt_subword.append(self._tokenizer.sep_token)
utt_seg.append(0)
utt_mask.append(1)
start_char_idx.append(0)
end_char_idx.append(0)
for subword_idx, subword in enumerate(user_tokens):
utt_subword.append(subword)
utt_seg.append(1)
utt_mask.append(1)
st, en = user_inv_alignments[subword_idx]
start_char_idx.append(st + 1)
end_char_idx.append(en + 1)
utt_subword.append(self._tokenizer.sep_token)
utt_seg.append(1)
utt_mask.append(1)
start_char_idx.append(0)
end_char_idx.append(0)
utterance_ids = self._tokenizer.tokens_to_ids(utt_subword)
# Zero-pad up to the BERT input sequence length.
while len(utterance_ids) < max_utt_len:
utterance_ids.append(0)
utt_seg.append(0)
utt_mask.append(0)
start_char_idx.append(0)
end_char_idx.append(0)
self.utterance_ids = utterance_ids
self.utterance_segment = utt_seg
self.utterance_mask = utt_mask
self.start_char_idx = start_char_idx
self.end_char_idx = end_char_idx
self.user_utterance = user_utterance
self.system_utterance = system_utterance
def make_copy(self):
"""Make a copy of the current example with utterance features."""
new_example = SGDInputExample(
schema_config=self.schema_config,
service_schema=self.service_schema,
example_id=self.example_id,
example_id_num=self.example_id_num.copy(),
tokenizer=self._tokenizer,
)
return new_example
def make_copy_of_categorical_features(self):
"""Make a copy of the current example with utterance and categorical features."""
new_example = self.make_copy()
new_example.categorical_slot_status = self.categorical_slot_status
return new_example
def make_copy_of_non_categorical_features(self):
"""Make a copy of the current example with utterance features and non categorical features."""
new_example = self.make_copy()
new_example.noncategorical_slot_id = self.noncategorical_slot_id
new_example.noncategorical_slot_status = self.noncategorical_slot_status
new_example.utterance_ids = list(self.utterance_ids)
new_example.utterance_segment = list(self.utterance_segment)
new_example.utterance_mask = list(self.utterance_mask)
new_example.start_char_idx = list(self.start_char_idx)
new_example.end_char_idx = list(self.end_char_idx)
new_example.user_utterance = self.user_utterance
new_example.system_utterance = self.system_utterance
new_example.noncategorical_slot_status = self.noncategorical_slot_status
new_example.noncategorical_slot_value_start = self.noncategorical_slot_value_start
new_example.noncategorical_slot_value_end = self.noncategorical_slot_value_end
return new_example
def add_categorical_slots(self, state_update: dict):
"""Add features for categorical slots.
Args:
state_update: slot value pairs of the state update
"""
categorical_slots = self.service_schema.categorical_slots
if not categorical_slots:
return
slot = categorical_slots[self.categorical_slot_id]
values = state_update.get(slot, [])
if not values:
self.categorical_slot_status = STATUS_OFF
elif values[0] == STR_DONTCARE:
self.categorical_slot_status = STATUS_DONTCARE
else:
self.categorical_slot_status = STATUS_ACTIVE
self.categorical_slot_value_status = (
self.categorical_slot_value_id == self.service_schema.get_categorical_slot_value_id(slot, values[0])
)
def add_noncategorical_slots(self, state_update: dict, system_span_boundaries: dict, user_span_boundaries: dict):
"""Add features for non-categorical slots.
Args:
state_update: slot value pairs of state update
system_span_boundaries: span boundaries of schema description
user_span_boundaries: span boundaries of utterance
"""
noncategorical_slots = self.service_schema.non_categorical_slots
slot = noncategorical_slots[self.noncategorical_slot_id]
values = state_update.get(slot, [])
if not values:
self.noncategorical_slot_status = STATUS_OFF
elif values[0] == STR_DONTCARE:
self.noncategorical_slot_status = STATUS_DONTCARE
else:
self.noncategorical_slot_status = STATUS_ACTIVE
# Add indices of the start and end tokens for the first encountered
# value. Spans in user utterance are prioritized over the system
# utterance. If a span is not found, the slot value is ignored.
if slot in user_span_boundaries:
start, end = user_span_boundaries[slot]
elif slot in system_span_boundaries:
start, end = system_span_boundaries[slot]
else:
# A span may not be found because the value was cropped out or because
# the value was mentioned earlier in the dialogue. Since this model
# only makes use of the last two utterances to predict state updates,
# it will fail in such cases.
logging.debug(
f'"Slot values {str(values)} not found in user or system utterance in example with id - {self.example_id}.'
)
start = 0
end = 0
self.noncategorical_slot_value_start = start
self.noncategorical_slot_value_end = end
def add_requested_slots(self, frame: dict):
"""Add requested slots to InputExample
Args:
frame: frame object from which requested slots are extracted
"""
all_slots = self.service_schema.slots
slot = all_slots[self.requested_slot_id]
if slot in frame["labels"]["slots"]:
self.requested_slot_status = STATUS_ACTIVE
def add_intents(self, frame):
"""Add intents to InputExample
Args:
frame: frame object from which intents are extracted
"""
all_intents = self.service_schema.intents
intent = all_intents[self.intent_id]
if intent == frame["labels"]["intent"]:
self.intent_status = STATUS_ACTIVE
# Modified from run_classifier._truncate_seq_pair in the public bert model repo.
# https://github.com/google-research/bert/blob/master/run_classifier.py.
def truncate_seq_pair(tokens_a: List[int], tokens_b: List[int], max_length: int) -> bool:
"""Truncate a seq pair in place so that their total length <= max_length.
Args:
tokens_a: first token sequence
tokens_b: second token sequence
max_length: truncated sequence length
Returns:
is_too_long: whether combined sequences exceed maximum sequence length
"""
is_too_long = False
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
is_too_long = True
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
return is_too_long
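# Worked example: with tokens_a = ['a', 'b', 'c', 'd'], tokens_b = ['x', 'y'] and max_length == 4,
# truncate_seq_pair pops from the longer list until the combined length is 4, leaving
# tokens_a == ['a', 'b'], tokens_b == ['x', 'y'], and returns True.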
| NeMo-main | nemo/collections/nlp/data/dialogue/input_example/sgd_input_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueDesignDataProcessor']
class DialogueDesignDataProcessor(DialogueDataProcessor):
"""Data Processor for Design Dataset"""
def __init__(self, data_dir: str, tokenizer: object, cfg=None):
"""
Constructs DialogueDesignDataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
cfg: cfg container for dataset
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
def open_csv(self, filename):
"""
Reads file into a list
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
df = pd.read_csv(filename)
return df.to_dict(orient='index')
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
Dev set contains self.cfg.dev_proportion % of samples with the rest going into the train set
Test set contains the whole dataset (Dev + Train) as this dataset is small (~100) and primarily used in a zero shot setting
"""
examples = []
raw_examples = self.open_csv('mellon_design_OV.csv')
# remove disabled examples
raw_examples = [raw_examples[i] for i in range(len(raw_examples)) if raw_examples[i]['disabled'] != 'yes']
n_samples = len(raw_examples)
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, self.cfg.dev_proportion)
all_intents = sorted(list(set(raw_examples[i]['intent labels'] for i in range(len(raw_examples)))))
all_services = sorted(list(set(raw_examples[i]['domain'] for i in range(len(raw_examples)))))
for i in idxs:
raw_example = raw_examples[i]
utterances = [raw_example['example_{}'.format(i)] for i in range(1, 4)]
service = raw_example['domain']
intent = raw_example['intent']
intent_description = raw_example['intent labels']
system_utterance = raw_example['response']
slot_names = [raw_example['slot{}'.format(i)] for i in range(1, 3)]
# these are possible slot values not ground truth slot values
slot_values = [raw_example['slot{}_values'.format(i)] for i in range(1, 3)]
slot_questions = [raw_example['slot{}_values'.format(i)] for i in range(1, 3)]
for j in range(1, 3):
value = raw_example['slot{}'.format(j)]
if isinstance(value, str):
system_utterance = system_utterance.replace('slot{}'.format(j), value)
valid_slots_ids = [i for i, slot in enumerate(slot_names) if isinstance(slot, str)]
slot_names = [slot_names[i] for i in valid_slots_ids]
slot_values = [slot_values[i] if isinstance(slot_values[i], str) else '' for i in valid_slots_ids]
slot_questions = [slot_questions[i] if isinstance(slot_questions[i], str) else '' for i in valid_slots_ids]
for utterance in utterances:
if not isinstance(utterance, str):
continue
input_example = {
"utterance": utterance,
"system_utterance": system_utterance,
"labels": {
"service": service,
"intent": intent_description,
"slots": {
slot: '' for slot in slot_names
}, # dataset does not contain ground truth slot values
},
"possible_labels": {
'intent': all_intents,
"service": all_services,
"slots": {slot: slot_values[i] for i, slot in enumerate(slot_names)},
},
"description": {
"service": service,
"intent": intent_description,
"slots": {slot: slot_questions[i] for i, slot in enumerate(slot_names)},
},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/design_data_processor.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/data_utils.py
"""
import collections
import json
import os
import pickle
import re
from typing import List
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
from nemo.collections.nlp.data.dialogue.sgd.schema import Schema
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
__all__ = ['DialogueSGDDataProcessor']
FILE_RANGES = {
"sgd_single_domain": {"train": range(1, 44), "dev": range(1, 8), "test": range(1, 12)},
"sgd_multi_domain": {"train": range(44, 128), "dev": range(8, 21), "test": range(12, 35)},
"sgd_all": {"train": range(1, 128), "dev": range(1, 21), "test": range(1, 35)},
"sgd_all_single": {"train": range(1, 128), "dev": range(1, 8), "test": range(1, 12)},
"multiwoz": {"train": range(1, 18), "dev": range(1, 3), "test": range(1, 3)},
"debug_sample": {"train": range(1, 2), "dev": range(1, 2), "test": range(1, 2)},
}
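# Note (added for clarity): each range above indexes the numbered dialogue files of a split,
# e.g. for "sgd_single_domain" the dev split covers dialogues_001.json ... dialogues_007.json,
# as assembled by DialogueSGDDataProcessor.get_dialogue_files() below.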
class DialogueSGDDataProcessor(DialogueDataProcessor):
"""Data Processor for SGD dialogues.
More information at https://arxiv.org/abs/1909.05855
***Downloading the dataset***
# git clone https://github.com/google-research-datasets/dstc8-schema-guided-dialogue.git
***Data format***
SGD data comes with a JSON schema file and dialogue files for each dataset split.
In the following we will show an example for a service entry in the schema file.
* service_name
* description
* slots
* name
* description
* is_categorical
* possible values
* intents
* name
* description
* required_slots (not used)
* is_transactional (not used)
* optional_slots (not used)
* result_slots (not used)
In the following we will show an example for a dialogue.
* dialogue_id
* services
* turns
* frames
* actions
* act
* slot
* values
* service
* slots
* exclusive_end
* slot
* start
* state
* active_intent
                * requested_slots
* slot_values
* speaker - [USER, SYSTEM]
* utterance
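    A minimal illustrative dialogue fragment following the structure above (all values are
    made up for illustration and are not taken from the dataset):
    {
      "dialogue_id": "1_00000",
      "services": ["Restaurants_1"],
      "turns": [
        {"speaker": "USER", "utterance": "Book a table in San Jose",
         "frames": [{"service": "Restaurants_1", "slots": [], "actions": [],
                     "state": {"active_intent": "ReserveRestaurant",
                               "requested_slots": [],
                               "slot_values": {"city": ["San Jose"]}}}]}
      ]
    }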
"""
def __init__(
self, data_dir: str, dialogues_example_dir: str, tokenizer: object, cfg=None,
):
"""
Constructs DialogueSGDDataProcessor
Args:
data_dir: path to data directory
dialogues_example_dir: path to store processed dialogue examples
tokenizer: tokenizer object
cfg: cfg container for dataset
"""
self.data_dir = data_dir
self.cfg = cfg
self._task_name = self.cfg.task_name # e.g. "sgd_single_domain"
self._subsample = self.cfg.subsample
all_schema_json_paths = []
for dataset_split in ['train', 'test', 'dev']:
all_schema_json_paths.append(os.path.join(self.cfg.data_dir, dataset_split, "schema.json"))
self.schemas = Schema(all_schema_json_paths)
self.schema_config = {
"MAX_NUM_CAT_SLOT": self.cfg.max_num_cat_slot,
"MAX_NUM_NONCAT_SLOT": self.cfg.max_num_noncat_slot,
"MAX_NUM_VALUE_PER_CAT_SLOT": self.cfg.max_value_per_cat_slot,
"MAX_NUM_INTENT": self.cfg.max_num_intent,
"NUM_TASKS": self.cfg.num_tasks,
"MAX_SEQ_LENGTH": self.cfg.max_seq_length,
}
train_file_range = FILE_RANGES[self._task_name]["train"]
dev_file_range = FILE_RANGES[self._task_name]["dev"]
test_file_range = FILE_RANGES[self._task_name]["test"]
self._file_ranges = {
"train": train_file_range,
"dev": dev_file_range,
"test": test_file_range,
}
self._seen_services = {
"train": set(),
"dev": set(),
"test": set(),
}
self._tokenizer = tokenizer
self._dialogues_example_dir = dialogues_example_dir
self.dial_files = {}
# slots_relation_list.np would contain the candidate list of slots for each (service, slot) which would be
# looked into when a switch between two services happens in the dialogue and we can not find any value for a slot in the current user utterance.
# This file would get generated from the dialogues in the training set.
self.slots_relation_file = os.path.join(
dialogues_example_dir, f"{self._task_name}_train_slots_relation_list.np"
)
for dataset in ["train", "dev", "test"]:
# Process dialogue files
dial_file = f"{self._task_name}_{dataset}_examples.json"
dial_file = os.path.join(dialogues_example_dir, dial_file)
self.dial_files[(self._task_name, dataset)] = dial_file
dialog_paths = DialogueSGDDataProcessor.get_dialogue_files(data_dir, dataset, self._task_name)
dialogs = DialogueSGDDataProcessor.load_dialogues(dialog_paths)
for dialog in dialogs:
self._seen_services[dataset].update(set(dialog['services']))
if is_global_rank_zero():
overwrite_dial_files = not self.cfg.use_cache
self.save_dialog_examples(overwrite_dial_files=overwrite_dial_files)
def save_dialog_examples(self, overwrite_dial_files: bool):
"""
Preprocesses dialogues and saves to disk.
Args:
overwrite_dial_files: whether or not to overwrite saved file if already exists
"""
for dataset in ["train", "dev", "test"]:
dial_file = self.dial_files[(self._task_name, dataset)]
if not os.path.exists(dial_file) or overwrite_dial_files:
logging.info(f"Start generating the dialogue examples for {dataset} dataset.")
if not os.path.exists(self._dialogues_example_dir):
os.makedirs(self._dialogues_example_dir)
dial_examples, slots_relation_list = self._generate_dialog_examples(
dataset, self.schemas, self._subsample
)
with open(dial_file, "w", encoding="UTF-8") as f:
json.dump([i.data for i in dial_examples], f)
if dataset == "train":
with open(self.slots_relation_file, "wb") as f:
pickle.dump(slots_relation_list, f)
logging.info(f"The slot carry-over list for train set is stored at {self.slots_relation_file}")
logging.info(f"The dialogue examples for {dataset} dataset saved at {dial_file}")
logging.info(f"Finish generating the dialogue examples for {dataset} dataset.")
# common interface for Data Processor
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
def get_dialog_examples(self, dataset_split: str) -> List[object]:
"""
Loads preprocessed dialogue examples from disk.
Args:
dataset_split: dataset split
Returns:
dial_examples: list of InputExample's.
"""
if (self._task_name, dataset_split) not in self.dial_files or not os.path.exists(
self.dial_files[(self._task_name, dataset_split)]
):
raise ValueError(
f"{dataset_split} dialogue examples were not processed for {self._task_name} task. Re-initialize SGDDataProcessor and add {dataset_split} dataset split to datasets arg."
)
dial_file = self.dial_files[(self._task_name, dataset_split)]
logging.info(f"Loading dialogue examples from {dial_file}.")
with open(dial_file, "rb") as f:
dial_examples = json.load(f)
dial_examples = [DialogueInputExample(i) for i in dial_examples]
if not os.path.exists(self.slots_relation_file):
raise ValueError(
f"Slots relation file {self.slots_relation_file} does not exist. It is needed for the carry-over mechanism of state tracker for switches between services."
)
if os.path.getsize(self.slots_relation_file) > 0:
with open(self.slots_relation_file, "rb") as f:
self.schemas._slots_relation_list = pickle.load(f)
logging.info(
f"Loaded the slot relation list for value carry-over between services from {self.slots_relation_file}."
)
return dial_examples
def get_seen_services(self, dataset_split: str):
"""
Returns list of seen services, i.e. both in given and training split
Args:
dataset_split: data split
Returns:
seen_services: list of seen services
"""
seen_services = self._seen_services[dataset_split]
return seen_services
def _generate_dialog_examples(self, dataset_split: str, schemas: object, subsample: bool):
"""
Returns a list of `InputExample`s of the data splits' dialogues.
Args:
dataset_split: data split, can be "train", "dev", or "test".
schemas: schema for all services of all datasets
            subsample: whether to balance positive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
"""
logging.info(f'Creating examples and slot relation list from the dialogues started...')
dialog_paths = [
os.path.join(self.data_dir, dataset_split, "dialogues_{:03d}.json".format(i))
for i in self._file_ranges[dataset_split]
]
dialogs = DialogueSGDDataProcessor.load_dialogues(dialog_paths)
examples = []
slot_carryover_candlist = collections.defaultdict(int)
for dialog_idx, dialog in enumerate(dialogs):
if dialog_idx % 1000 == 0:
logging.info(f'Processed {dialog_idx} dialogues.')
examples.extend(
self._create_examples_from_dialog(dialog, schemas, dataset_split, slot_carryover_candlist, subsample)
)
slots_relation_list = collections.defaultdict(list)
for slots_relation, relation_size in slot_carryover_candlist.items():
if relation_size > 0:
slots_relation_list[(slots_relation[0], slots_relation[1])].append(
(slots_relation[2], slots_relation[3], relation_size)
)
slots_relation_list[(slots_relation[2], slots_relation[3])].append(
(slots_relation[0], slots_relation[1], relation_size)
)
return examples, slots_relation_list
def _create_examples_from_dialog(
self, dialog: dict, schemas: object, dataset_split: str, slot_carryover_candlist: dict, subsample: bool
):
"""
Create examples for every turn in the dialogue.
Args:
dialog: dialogue example
schemas: schema for all services of all datasets
dataset_split: data split
slot_carryover_candlist: a dictionary to keep and count the number of carry-over cases between two slots from two different services
            subsample: whether to balance positive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
"""
dialog_id = dialog["dialogue_id"]
prev_states = {}
examples = []
for turn_idx, turn in enumerate(dialog["turns"]):
# Generate an example for every frame in every user turn.
if turn["speaker"] == "USER":
user_utterance = turn["utterance"]
user_frames = {f["service"]: f for f in turn["frames"]}
if self.cfg.system_utterance == 'prev_turn':
if turn_idx > 0:
system_turn = dialog["turns"][turn_idx - 1]
system_utterance = system_turn["utterance"]
system_frames = {f["service"]: f for f in system_turn["frames"]}
else:
system_utterance = ""
system_frames = {}
else: # takes the system utterance of the next turn
system_turn = dialog["turns"][turn_idx + 1]
system_utterance = system_turn["utterance"]
system_frames = {f["service"]: f for f in system_turn["frames"]}
turn_id = "{}-{}-{:02d}".format(dataset_split, dialog_id, turn_idx)
turn_examples, prev_states, slot_carryover_values = self._create_examples_from_turn(
turn_id,
system_utterance,
user_utterance,
system_frames,
user_frames,
prev_states,
schemas,
subsample,
)
examples.extend(turn_examples)
for value, slots_list in slot_carryover_values.items():
if value in ["True", "False"]:
continue
if len(slots_list) > 1:
for service1, slot1 in slots_list:
for service2, slot2 in slots_list:
if service1 == service2:
continue
if service1 > service2:
service1, service2 = service2, service1
slot1, slot2 = slot2, slot1
slot_carryover_candlist[(service1, slot1, service2, slot2)] += 1
return examples
def _get_state_update(self, current_state: dict, prev_state: dict) -> dict:
"""
Updates dialogue state
Args:
current_state: slot values pairs for the current dialogue turn
prev_state: slot values pairs for the previous dialogue turns
Returns:
state_update: slot values pairs that are added/updated during the current dialogue turn
"""
state_update = dict(current_state)
for slot, values in current_state.items():
if slot in prev_state and prev_state[slot][0] in values:
# Remove the slot from state if its value didn't change.
state_update.pop(slot)
return state_update
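    # Illustrative example (values made up): with
    #   current_state = {"city": ["San Jose"], "time": ["6 pm"]}
    #   prev_state    = {"city": ["San Jose"]}
    # _get_state_update returns {"time": ["6 pm"]}, i.e. only slots whose value was added or changed.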
@staticmethod
def convert_camelcase_to_lower(label):
"""Converts camelcase to lowercase with spaces e.g. 'HelloWorld' --> 'hello world'"""
if label.lower() == "none":
return "none"
label = label.split("_")[0]
tokens = re.findall('[A-Z][^A-Z]*', label)
return ' '.join([token.lower() for token in tokens])
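    # Examples of the mapping above (intent names are illustrative):
    #   convert_camelcase_to_lower("ReserveRestaurant") -> "reserve restaurant"
    #   convert_camelcase_to_lower("NONE") -> "none"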
def preprocess_intent(self, intent, schemas, service):
if self.cfg.preprocess_intent_function == 'default':
return intent
elif self.cfg.preprocess_intent_function == 'lowercase':
return DialogueSGDDataProcessor.convert_camelcase_to_lower(intent)
elif self.cfg.preprocess_intent_function == 'description':
return schemas.get_service_schema(service).intent_descriptions[intent]
else:
raise ValueError(
'Only default, lowercase and description are allowed for model.dataset.preprocess_intent_function for SGD task'
)
def _create_examples_from_turn(
self,
turn_id: int,
system_utterance: str,
user_utterance: str,
system_frames: dict,
user_frames: dict,
prev_states: dict,
schemas: object,
subsample: bool,
):
"""
Creates an example for each frame in the user turn.
Args:
turn_id: turn number
system_utterance: last system utterance
            user_utterance: last user utterance
system_frames: all system utterances and slot - slot value pairs
user_frames: all user utterances and slot - slot value pairs
prev_states: slot - slot value pairs from the previous turns
schemas: schema for all services of all datasets
            subsample: whether to balance positive and negative samples in the dataset
Returns:
examples: a list of `InputExample`s.
prev_states: updated dialogue state e.g. {'Restaurants_1': {'city': ['San Jose'], 'cuisine': ['American']}}
"""
system_user_utterance = system_utterance + ' ' + user_utterance
states = {}
examples = []
slot_carryover_values = collections.defaultdict(list)
for service, user_frame in user_frames.items():
state = user_frame["state"]["slot_values"]
state_update = self._get_state_update(state, prev_states.get(service, {}))
states[service] = state
system_frame = system_frames.get(service, None)
dataset_split, dialog_id, turn_id_ = turn_id.split('-')
dialog_id_1, dialog_id_2 = dialog_id.split('_')
example_id = f"{turn_id}-{service}"
example_id_num = [
int(dialog_id_1),
int(dialog_id_2),
int(turn_id_),
schemas.get_service_id(service),
]
intent = user_frames[service]["state"]['active_intent']
all_possible_slots = schemas.get_service_schema(service).slots
categorical_slots = schemas.get_service_schema(service).categorical_slots
one_example = {
"example_id": example_id,
"example_id_num": example_id_num,
"utterance": user_utterance,
"system_utterance": system_utterance,
"system_slots": {slot["slot"]: slot for slot in system_frame["slots"]}
if system_frame is not None
else None,
"system_actions": system_frame["actions"] if system_frame is not None else None,
"labels": {
"service": service,
"intent": self.preprocess_intent(intent, schemas, service),
"slots": {slot: state[slot] for slot in state_update},
},
"label_positions": {"slots": {slot["slot"]: slot for slot in user_frames[service]["slots"]}},
"possible_labels": {
"service": schemas.services,
"intent": [
self.preprocess_intent(intent, schemas, service)
for intent in schemas.get_service_schema(service).intents
],
"slots": {
slot: schemas.get_service_schema(service).get_categorical_slot_values(slot)
if slot in categorical_slots
else []
for slot in all_possible_slots
},
},
"description": {
"service": schemas.get_service_schema(service).description,
"intent": schemas.get_service_schema(service).intent_descriptions[intent],
"slots": {
slot: schemas.get_service_schema(service).slot_descriptions[slot] for slot in state_update
},
},
}
examples.append(DialogueInputExample(one_example))
if service not in prev_states and int(turn_id_) > 0:
for slot_name, values in state_update.items():
for value in values:
slot_carryover_values[value].append((service, slot_name))
for prev_service, prev_slot_value_list in prev_states.items():
if prev_service == service:
continue
if prev_service in state:
prev_slot_value_list = state[prev_service]
for prev_slot_name, prev_values in prev_slot_value_list.items():
for prev_value in prev_values:
slot_carryover_values[prev_value].append((prev_service, prev_slot_name))
return examples, states, slot_carryover_values
def _find_subword_indices(
self,
slot_values: dict,
utterance: str,
char_slot_spans: dict,
alignments: List[int],
subwords: List[str],
bias: int,
) -> dict:
"""
Find indices for subwords corresponding to slot values.
Args:
slot_values: slot - slot value pairs
utterance: utterance
char_slot_spans: char - slot spans
alignments: alignments
subwords: subtokens mapping
bias: offset
Returns:
span_boundaries: span boundaries
"""
span_boundaries = {}
for slot, values in slot_values.items():
# Get all values present in the utterance for the specified slot.
value_char_spans = {}
for slot_span in char_slot_spans:
if slot_span["slot"] == slot:
value = utterance[slot_span["start"] : slot_span["exclusive_end"]]
start_tok_idx = alignments[slot_span["start"]]
end_tok_idx = alignments[slot_span["exclusive_end"] - 1]
if 0 <= start_tok_idx < len(subwords):
end_tok_idx = min(end_tok_idx, len(subwords) - 1)
value_char_spans[value] = (start_tok_idx + bias, end_tok_idx + bias)
for v in values:
if v in value_char_spans:
span_boundaries[slot] = value_char_spans[v]
break
return span_boundaries
@classmethod
def load_dialogues(cls, dialog_json_filepaths: List[str]) -> List[dict]:
"""
Obtain the list of all dialogues from specified json files.
Args:
dialog_json_filepaths: list of json files
Returns:
dialogs: the list of all dialogues
"""
dialogs = []
for dialog_json_filepath in sorted(dialog_json_filepaths):
with open(dialog_json_filepath, 'r', encoding="UTF-8") as f:
dialogs.extend(json.load(f))
f.close()
return dialogs
@classmethod
def get_dialogue_files(cls, data_dir: str, dataset_split: str, task_name: str):
"""
Obtain the list of all dialogue json files
Args:
data_dir: path to the data folder
dataset_split: data split
task_name: SGD task name, see keys of the FILE_RANGES
Returns:
dialog: the list of all dialogue json files paths
"""
return [
os.path.join(data_dir, dataset_split, 'dialogues_{:03d}.json'.format(fid))
for fid in FILE_RANGES[task_name][dataset_split]
]
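# Usage sketch (illustrative; the data_dir path is an assumption):
#   paths = DialogueSGDDataProcessor.get_dialogue_files("/data/sgd", "dev", "sgd_single_domain")
#   # -> ["/data/sgd/dev/dialogues_001.json", ..., "/data/sgd/dev/dialogues_007.json"]
#   dialogs = DialogueSGDDataProcessor.load_dialogues(paths)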
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/sgd_data_processor.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from nemo.collections.nlp.data.data_utils.data_preprocessing import DataProcessor
__all__ = ['DialogueDataProcessor']
class DialogueDataProcessor(DataProcessor):
"""
Base class for Data Processing for all data sources
    Data Processor is designed to be model-independent (but data-dependent) so that it
    - encourages experimentation with a variety of models \
      (BERT-style; GPT-style; T5-style), \
      which have different tokenization/preprocessing requirements
    - facilitates experiments with a variety of data sources,
      as data is processed into a common format
Roles
1. Processes raw files into Dialogue Input Examples.
    2. Keeps all possibly relevant information from the raw files, so that
       the Dataset class can then determine which labels to use
"""
def __init__(self):
raise NotImplementedError()
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
raise NotImplementedError()
@staticmethod
def get_relevant_idxs(dataset_split, n_samples, dev_proportion):
"""
Obtain indexes for each dataset_split, when train and dev sets are not in separate files
Args:
dataset_split: train, dev or test
n_samples: total number of samples
            dev_proportion: value from 1 to 99 that represents the proportion of data in the dev set
Returns:
idxs: indices for relevant samples
"""
if dataset_split in ["train", "dev"]:
n_dev = int(n_samples * (dev_proportion / 100))
dev_idxs = random.sample(list(range(n_samples)), n_dev)
if dataset_split == "dev":
idxs = dev_idxs
else:
dev_idxs_set = set(dev_idxs)
train_idxs = [idx for idx in list(range(n_samples)) if idx not in dev_idxs_set]
idxs = train_idxs
elif dataset_split == "test":
idxs = list(range(n_samples))
else:
raise ValueError("please select dataset split from train, dev and test")
return idxs
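    # Illustrative example (numbers made up): with n_samples=10 and dev_proportion=20,
    # the "dev" split receives int(10 * 0.2) = 2 randomly sampled indices, "train" the remaining 8,
    # and "test" always returns list(range(10)).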
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/data_processor.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ast import literal_eval
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueMSMarcoDataProcessor']
class DialogueMSMarcoDataProcessor(DialogueDataProcessor):
"""Data Processor for MS Marco dialogues. (https://github.com/microsoft/MSMARCO-Question-Answering)
Please agree to the Terms of Use before downloading data at
https://msmarco.blob.core.windows.net/msmarco/train_v2.1.json.gz
https://msmarco.blob.core.windows.net/msmarco/dev_v2.1.json.gz
"""
def __init__(self, data_dir: str, tokenizer: object, cfg=None):
"""
Constructs DialogueMSMarcoDataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
            cfg: cfg container for dataset (cfg.debug_mode reduces the number of samples loaded in order to increase the speed of processing)
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
def open_json(self, filename):
"""
Reads file into a list
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
data = json.load(f)
return data
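    # The loaded MS MARCO v2.1 json is keyed by field name, each field being a dict indexed by the
    # example id as a string, e.g. (illustrative values):
    #   {"query": {"0": "what is ..."}, "answers": {"0": ["..."]}, "wellFormedAnswers": {"0": "[]"},
    #    "query_type": {"0": "DESCRIPTION"}, "passages": {"0": [{"passage_text": "...", "is_selected": 1}]}}
    # get_dialog_examples() below relies on exactly these keys.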
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
For the MS Marco dataset, there is no explicit dev set (instead uses the test set as the dev set)
Therefore, this function creates a dev set and a new train set from the train set.
Dev set contains self.cfg.dev_proportion % of samples with the rest going into the train set
"""
examples = []
dataset_split_print = {"train": "train", "dev": "train", "test": "dev"}
raw_examples = self.open_json("{}_v2.1.json".format(dataset_split_print[dataset_split]))
n_samples = len(raw_examples['answers'])
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, self.cfg.dev_proportion)
if self.cfg.debug_mode:
idxs = idxs[:100]
for i in idxs:
utterance = raw_examples['query'][str(i)]
# answer need not be extracted from passage
# taking the first answer as the ground truth correct answer as only <1% has multiple answers
answer = raw_examples['answers'][str(i)]
answer = answer[0] if isinstance(answer, list) else answer
well_formed_answer = raw_examples['wellFormedAnswers'][str(i)]
well_formed_answer = (
well_formed_answer if isinstance(well_formed_answer, list) else literal_eval(well_formed_answer)
)
well_formed_answer = well_formed_answer[0] if well_formed_answer else None
query_type = raw_examples['query_type'][str(i)]
candidate_passages = raw_examples['passages'][str(i)]
passage = [
candidate_passage["passage_text"]
for candidate_passage in candidate_passages
if int(candidate_passage["is_selected"])
]
passage = passage[0] if passage else None
possible_passages = [candidate_passage["passage_text"] for candidate_passage in candidate_passages]
input_example = {
"utterance": utterance,
"example_id": i,
"labels": {
"service": query_type,
"response": answer,
"fluent_response": well_formed_answer,
"passage": passage,
},
"possible_labels": {
"service": "LOCATION,NUMERIC,PERSON,DESCRIPTION,ENTITY".split(','),
"passage": possible_passages,
},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/ms_marco_data_processor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueAssistantDataProcessor']
class DialogueAssistantDataProcessor(DialogueDataProcessor):
"""Data Processor for Assistant dialogues."""
def __init__(self, data_dir: str, tokenizer: object, cfg):
"""
Constructs DialogueAssistantDataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
self.intents = self.open_file("dict.intents.csv")
if self.cfg.preprocess_intent_function == 'remove_domain':
self.intents = [
DialogueAssistantDataProcessor.normalize_zero_shot_intent(intent) for intent in self.intents
]
self.slots = self.open_file("dict.slots.csv")
(
bio_slot_ids_to_unified_slot_ids,
unified_slots,
) = DialogueAssistantDataProcessor.map_bio_format_slots_to_unified_slots(self.slots)
self.slots = unified_slots
self.bio_slot_ids_to_unified_slot_ids = bio_slot_ids_to_unified_slot_ids
self.services = sorted(list(set([intent.split('_')[0] for intent in self.intents])))
self.empty_slot_id = [str(idx) for idx, slot_name in enumerate(self.slots) if slot_name == "O"][0]
@staticmethod
def normalize_zero_shot_intent(label):
label = label.split('.')[1]
if label == 'nomatch':
return 'no match'
else:
return label.replace('_', ' ')
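    # Examples of the normalization above (intent names are illustrative):
    #   normalize_zero_shot_intent("alarm.set_alarm") -> "set alarm"
    #   normalize_zero_shot_intent("general.nomatch") -> "no match"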
def open_file(self, filename):
"""
Reads file into a list
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
lines = [i.strip() for i in f.readlines()]
return lines
@staticmethod
def get_continuous_slots(slot_ids, empty_slot_id, bio_slot_ids_to_unified_slot_ids):
"""
Extract continuous spans of slot_ids
        To accommodate slots with distinct labels for B-label1 and I-label1,
        slot_id = bio_slot_ids_to_unified_slot_ids[slot_id] is called to map them both to label1
        Args:
            slot_ids: list of int representing the slot of each word token
For instance, 54 54 54 54 54 54 54 54 18 54 44 44 54 46 46 54 12
Corresponds to "please set an alarm clock for my next meeting with the team at three pm next friday"
Except for the empty_slot_id (54 in this case), we hope to extract the continuous spans of tokens,
each containing a start position and an exclusive end position
            E.g. {18: [8, 9], 44: [10, 12], 46: [13, 15], 12: [16, 17]}
"""
slot_id_stack = []
position_stack = []
for i in range(len(slot_ids)):
slot_id = slot_ids[i]
slot_id = bio_slot_ids_to_unified_slot_ids[slot_id]
if not slot_id_stack or slot_id != slot_id_stack[-1]:
slot_id_stack.append(slot_id)
position_stack.append([])
position_stack[-1].append(i)
slot_id_to_start_and_exclusive_end = {
slot_id_stack[i]: [position_stack[i][0], position_stack[i][-1] + 1]
for i in range(len(position_stack))
if slot_id_stack[i] != empty_slot_id
}
return slot_id_to_start_and_exclusive_end
@staticmethod
def map_bio_format_slots_to_unified_slots(slots):
"""
maps BIO format slots to unified slots (meaning that B-alarm_time and I-alarm_time both map to alarm_time)
        called even when slots do not contain BIO format, for a unified interface
in that case slots == unified_slots and bio_slot_ids_to_unified_slot_ids is an identity mapping i.e. {"0": "0", "1": "1"}
"""
bio_slot_ids_to_unified_slot_ids = {}
unified_slots = []
unified_idx = -1
for idx, slot in enumerate(slots):
if slot.replace('I-', '').replace('B-', '') not in unified_slots:
unified_idx += 1
unified_slots.append(slot.replace('I-', '').replace('B-', ''))
bio_slot_ids_to_unified_slot_ids[str(idx)] = str(unified_idx)
return bio_slot_ids_to_unified_slot_ids, unified_slots
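    # Illustrative example: for slots = ['O', 'B-alarm_time', 'I-alarm_time'] this returns
    #   ({'0': '0', '1': '1', '2': '1'}, ['O', 'alarm_time'])
    # so that the B-/I- variants of the same label share one unified slot id.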
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
For the assistant dataset, there is no explicit dev set (instead uses the test set as the dev set)
Therefore, this function creates a dev set and a new train set from the train set.
This is done by taking every 10th example and putting it into the dev set,
with all other examples going into the new train set.
"""
examples = []
dataset_split_print = {"train": "train", "dev": "train", "test": "test"}
raw_examples_intent = self.open_file("{}.tsv".format(dataset_split_print[dataset_split]))
# removes header of tsv file
raw_examples_intent = raw_examples_intent[1:]
raw_examples_slots = self.open_file("{}_slots.tsv".format(dataset_split_print[dataset_split]))
if dataset_split in ["train", "dev"]:
train_idx = []
dev_idx = []
for idx in range(len(raw_examples_intent)):
if idx % 10 == 0:
dev_idx.append(idx)
else:
train_idx.append(idx)
if dataset_split == "train":
raw_examples_intent = [raw_examples_intent[idx] for idx in train_idx]
raw_examples_slots = [raw_examples_slots[idx] for idx in train_idx]
elif dataset_split == "dev":
raw_examples_intent = [raw_examples_intent[idx] for idx in dev_idx]
raw_examples_slots = [raw_examples_slots[idx] for idx in dev_idx]
for i in range(len(raw_examples_intent)):
utterance, intent_id = raw_examples_intent[i].split('\t')
slot_ids = raw_examples_slots[i].split()
utterance_tokens = utterance.split()
intent = self.intents[int(intent_id)]
slot_id_to_start_and_exclusive_end = DialogueAssistantDataProcessor.get_continuous_slots(
slot_ids, self.empty_slot_id, self.bio_slot_ids_to_unified_slot_ids
)
slot_to_start_and_exclusive_end = {
self.slots[int(slot_id)]: position for slot_id, position in slot_id_to_start_and_exclusive_end.items()
}
slot_to_words = {
slot: ' '.join(utterance_tokens[position[0] : position[1]])
for slot, position in slot_to_start_and_exclusive_end.items()
}
input_example = {
"utterance": utterance,
"labels": {"service": intent.split('_')[0], "intent": intent, "slots": slot_to_words},
"label_positions": {
"slots": {
slot: {"start": position[0], "exclusive_end": position[1], "slot": slot,}
for slot, position in slot_to_start_and_exclusive_end.items()
}
},
"possible_labels": {
"service": self.services,
"intent": self.intents,
"slots": {
# this dataset does not support categorical slots (i.e. only extractive slots)
# therefore use empty list for all values
slot: []
for slot in self.slots
},
},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/assistant_data_processor.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pandas as pd
from nemo.collections.nlp.data.dialogue.data_processor.data_processor import DialogueDataProcessor
from nemo.collections.nlp.data.dialogue.input_example.input_example import DialogueInputExample
__all__ = ['DialogueMellonQADataProcessor']
class DialogueMellonQADataProcessor(DialogueDataProcessor):
"""Data Processor for Mellon QA dialogues.
"""
def __init__(self, data_dir: str, tokenizer: object, cfg=None):
"""
        Constructs DialogueMellonQADataProcessor
Args:
data_dir: path to data directory
tokenizer: tokenizer object
cfg: cfg container for dataset
"""
self.data_dir = data_dir
self._tokenizer = tokenizer
self.cfg = cfg
def open_csv(self, filename):
"""
Reads file into a list
"""
filename = os.path.join(self.data_dir, filename)
with open(filename, "r", encoding="UTF-8") as f:
df = pd.read_csv(filename)
return df.to_dict(orient='index')
def get_dialog_examples(self, dataset_split: str):
"""
Process raw files into DialogueInputExample
Args:
dataset_split: {train, dev, test}
For the Mellon QA dataset, there is no explicit dev set (instead uses the test set as the dev set)
Therefore, this function creates a dev set and a new train set from the train set.
Dev set contains self.cfg.dev_proportion % of samples with the rest going into the train set
Test set contains the whole dataset (Dev + Train) as this dataset is small (~100) and primarily used in a zero shot setting
"""
examples = []
raw_examples = self.open_csv('mellon_qa_data.csv')
raw_examples = list(raw_examples.values())
        # filter out examples with no answer
raw_examples = [
example
for example in raw_examples
if isinstance(example['Non Generative Question Answering '], str)
and isinstance(example['Generative Question Answering '], str)
]
n_samples = len(raw_examples)
idxs = DialogueDataProcessor.get_relevant_idxs(dataset_split, n_samples, self.cfg.dev_proportion)
for i in idxs:
utterance = str(raw_examples[i]['Question'])
answer = str(raw_examples[i]['Non Generative Question Answering '])
well_formed_answer = str(raw_examples[i]['Generative Question Answering '])
passage = raw_examples[i]['Passage']
input_example = {
"utterance": utterance,
"example_id": i,
"labels": {"response": answer, "fluent_response": well_formed_answer, "passage": passage,},
}
example = DialogueInputExample(input_example)
examples.append(example)
return examples
def get_train_examples(self):
"""Gets a collection of `InputExample`s for the train set."""
return self.get_dialog_examples("train")
def get_dev_examples(self):
"""Gets a collection of `InputExample`s for the dev set."""
return self.get_dialog_examples("dev")
def get_test_examples(self):
"""Gets a collection of `InputExample`s for the test set."""
return self.get_dialog_examples("test")
| NeMo-main | nemo/collections/nlp/data/dialogue/data_processor/mellon_qa_data_processor.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.dialogue.sgd.evaluate import evaluate, get_in_domain_services
from nemo.collections.nlp.data.dialogue.sgd.schema import Schema
| NeMo-main | nemo/collections/nlp/data/dialogue/sgd/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Prediction and evaluation-related utility functions.
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/baseline/pred_utils.py
"""
import json
import os
from collections import OrderedDict, defaultdict
from typing import Dict, List, Optional
from nemo.collections.nlp.data.dialogue.input_example.sgd_input_example import (
STATUS_ACTIVE,
STATUS_DONTCARE,
STR_DONTCARE,
)
from nemo.utils import logging
REQ_SLOT_THRESHOLD = 0.5
__all__ = ['write_predictions_to_file']
def set_cat_slot(predictions_status: dict, predictions_value: dict, cat_slot_values: Dict[str, List[str]]) -> dict:
"""
Extract predicted categorical slot information
Args:
predictions_status: predicted statuses
predictions_value: predicted slot values
cat_slot_values: possible categorical slots and their potential values for this service
Returns:
out_dict: predicted slot value pairs
"""
out_dict = {}
for slot_idx, slot in enumerate(cat_slot_values):
slot_status = predictions_status[slot_idx][0]["cat_slot_status"]
if slot_status == STATUS_DONTCARE:
out_dict[slot] = STR_DONTCARE
elif slot_status == STATUS_ACTIVE:
tmp = predictions_value[slot_idx]
value_idx = max(tmp, key=lambda k: tmp[k]['cat_slot_value_status'][0].item())
out_dict[slot] = cat_slot_values[slot][value_idx]
return out_dict
def set_noncat_slot(
predictions_status: dict,
predictions_value: dict,
non_cat_slots: List[str],
user_utterance: str,
sys_slots_agg: Optional[dict] = None,
) -> dict:
"""
Extract predicted non categorical slot information
Args:
predictions_status: predicted statuses
predictions_value: predicted slot values
non_cat_slots: list of possible non categorical slots for this service
user_utterance: system and user utterance
sys_slots_agg: system retrieval lookup table. Contains for each slot the most recent value seen in the history
Returns:
out_dict: predicted slot value pairs
"""
out_dict = {}
for slot_idx, slot in enumerate(non_cat_slots):
slot_status = predictions_status[slot_idx][0]["noncat_slot_status"]
if slot_status == STATUS_DONTCARE:
out_dict[slot] = STR_DONTCARE
elif slot_status == STATUS_ACTIVE:
tok_start_idx = predictions_value[slot_idx][0]["noncat_slot_start"]
tok_end_idx = predictions_value[slot_idx][0]["noncat_slot_end"]
ch_start_idx = predictions_value[slot_idx][0]["noncat_alignment_start"][tok_start_idx]
ch_end_idx = predictions_value[slot_idx][0]["noncat_alignment_end"][tok_end_idx]
if ch_start_idx > 0 and ch_end_idx > 0:
# Add span from the utterance.
out_dict[slot] = user_utterance[ch_start_idx - 1 : ch_end_idx]
elif sys_slots_agg and slot in sys_slots_agg:
# system retrieval
out_dict[slot] = sys_slots_agg[slot]
return out_dict
def get_predicted_dialog(dialog: dict, all_predictions: dict, schemas: object, state_tracker: str) -> dict:
"""Overwrite the labels in the turn with the predictions from the model. For test set, these labels are missing from the data and hence they are added.
Args:
dialog: ground truth dialog
all_predictions: predictions
schemas: schema object of all services of all datasets
state_tracker: state tracker option, e.g. nemotracker
Returns:
dialog: dialog overwritten with prediction information
"""
dialog_id = dialog["dialogue_id"]
if state_tracker == "baseline":
sys_slots_agg = {}
else:
sys_slots_agg = defaultdict(OrderedDict)
all_slot_values = defaultdict(dict)
for turn_idx, turn in enumerate(dialog["turns"]):
if turn["speaker"] == "SYSTEM" and state_tracker == 'nemotracker':
for frame in turn["frames"]:
if frame["service"] not in sys_slots_agg:
sys_slots_agg[frame["service"]] = OrderedDict()
for action in frame["actions"]:
if action["slot"] and len(action["values"]) > 0:
sys_slots_agg[frame["service"]][action["slot"]] = action["values"][0]
if turn["speaker"] == "USER":
user_utterance = turn["utterance"]
system_utterance = dialog["turns"][turn_idx - 1]["utterance"] if turn_idx else ""
system_user_utterance = system_utterance + ' ' + user_utterance
turn_id = "{:02d}".format(turn_idx)
for frame in turn["frames"]:
predictions = all_predictions[(dialog_id, turn_id, frame["service"])]
slot_values = all_slot_values[frame["service"]]
service_schema = schemas.get_service_schema(frame["service"])
# Remove the slot spans and state if present.
frame.pop("slots", None)
frame.pop("state", None)
# The baseline model doesn't predict slot spans. Only state predictions
# are added.
state = {}
                # Add prediction for active intent. No offset is subtracted since the schema now has a NONE intent at index 0
state["active_intent"] = get_predicted_intent(
predictions=predictions[0], intents=service_schema.intents
)
# Add prediction for requested slots.
state["requested_slots"] = get_requested_slot(predictions=predictions[1], slots=service_schema.slots)
# Add prediction for user goal (slot values).
# Categorical slots.
cat_out_dict = set_cat_slot(
predictions_status=predictions[2],
predictions_value=predictions[3],
cat_slot_values=service_schema.categorical_slot_values,
)
for k, v in cat_out_dict.items():
slot_values[k] = v
# Non-categorical slots.
noncat_out_dict = set_noncat_slot(
predictions_status=predictions[4],
predictions_value=predictions[5],
non_cat_slots=service_schema.non_categorical_slots,
user_utterance=system_user_utterance,
sys_slots_agg=sys_slots_agg.get(frame["service"], None),
)
for k, v in noncat_out_dict.items():
slot_values[k] = v
# Create a new dict to avoid overwriting the state in previous turns
# because of use of same objects.
state["slot_values"] = {s: [v] for s, v in slot_values.items()}
frame["state"] = state
return dialog
def get_predicted_intent(predictions: dict, intents: List[str]) -> str:
"""
Returns intent name with maximum score
Args:
predictions: predictions
intents: list of possible intents for this service
Returns:
intent: predicted intent
"""
assert len(predictions) == len(intents)
active_intent_id = max(predictions, key=lambda k: predictions[k][0]['intent_status'])
intent = intents[active_intent_id]
return intent
def get_requested_slot(predictions: dict, slots: List[str]) -> List[str]:
"""
Returns list of slots which are predicted to be requested
Args:
predictions: predictions
slots: list of possible slots
Returns:
requested_slots: list of requested slots
"""
active_indices = [k for k in predictions if predictions[k][0]["req_slot_status"] > REQ_SLOT_THRESHOLD]
requested_slots = list(map(lambda k: slots[k], active_indices))
return requested_slots
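# Illustrative example (scores made up): with slots = ['city', 'time'] and
#   predictions = {0: {0: {'req_slot_status': 0.9}}, 1: {0: {'req_slot_status': 0.2}}}
# only 'city' clears REQ_SLOT_THRESHOLD (0.5), so get_requested_slot returns ['city'].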
def write_predictions_to_file(
predictions: List[dict],
input_json_files: List[str],
output_dir: str,
schemas: object,
state_tracker: str,
eval_debug: bool,
in_domain_services: set,
):
"""Save predicted dialogues as json files.
Args:
predictions: An iterator containing model predictions. This is the output of
the predict method in the estimator.
input_json_files: A list of json paths containing the dialogues to run
inference on.
output_dir: The directory where output json files will be created.
schemas: Schemas to all services in the dst dataset
state_tracker: state tracker option
eval_debug: output evaluation debugging information
in_domain_services: in domain services
"""
logging.info(f"Writing predictions to {output_dir} started.")
# Index all predictions.
all_predictions = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
for idx, prediction in enumerate(predictions):
eval_dataset, dialog_id, turn_id, service_name, model_task, slot_intent_id, value_id = prediction[
'example_id'
].split('-')
all_predictions[(dialog_id, turn_id, service_name)][int(model_task)][int(slot_intent_id)][
int(value_id)
] = prediction
logging.info(f'Predictions for {idx} examples in {eval_dataset} dataset are getting processed.')
# Read each input file and write its predictions.
for input_file_path in input_json_files:
with open(input_file_path, encoding="UTF-8") as f:
dialogs = json.load(f)
logging.debug(f'{input_file_path} file is loaded')
pred_dialogs = []
for d in dialogs:
pred_dialog = get_predicted_dialog(d, all_predictions, schemas, state_tracker)
pred_dialogs.append(pred_dialog)
input_file_name = os.path.basename(input_file_path)
output_file_path = os.path.join(output_dir, input_file_name)
with open(output_file_path, "w", encoding="UTF-8") as f:
json.dump(pred_dialogs, f, indent=2, separators=(",", ": "), sort_keys=True)
| NeMo-main | nemo/collections/nlp/data/dialogue/sgd/prediction_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluate predictions JSON file, w.r.t. ground truth file.
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/evaluate.py
"""
import collections
import glob
import json
import os
import numpy as np
from nemo.collections.nlp.metrics.sgd_metrics import (
ACTIVE_INTENT_ACCURACY,
JOINT_CAT_ACCURACY,
JOINT_GOAL_ACCURACY,
JOINT_NONCAT_ACCURACY,
NAN_VAL,
REQUESTED_SLOTS_F1,
REQUESTED_SLOTS_PRECISION,
REQUESTED_SLOTS_RECALL,
SLOT_TAGGING_F1,
SLOT_TAGGING_PRECISION,
SLOT_TAGGING_RECALL,
get_active_intent_accuracy,
get_average_and_joint_goal_accuracy,
get_requested_slots_f1,
get_slot_tagging_f1,
)
from nemo.utils import logging
__all__ = ['get_in_domain_services']
ALL_SERVICES = "#ALL_SERVICES"
SEEN_SERVICES = "#SEEN_SERVICES"
UNSEEN_SERVICES = "#UNSEEN_SERVICES"
# Name of the file containing all predictions and their corresponding frame metrics.
PER_FRAME_OUTPUT_FILENAME = "dialogues_and_metrics.json"
def get_service_set(schema_path: str) -> set:
"""
Get the set of all services present in a schema.
Args:
schema_path: schema file path
Returns:
service_set: set of services in file
"""
service_set = set()
with open(schema_path, encoding="UTF-8") as f:
schema = json.load(f)
for service in schema:
service_set.add(service["service_name"])
f.close()
return service_set
def get_in_domain_services(schema_path: str, service_set: set) -> set:
"""Get the set of common services between a schema and set of services.
Args:
schema_path: path to schema file
service_set: set of services
Returns:
joint_services: joint services between schema path file and service set
"""
joint_services = get_service_set(schema_path) & service_set
return joint_services
def get_dataset_as_dict(file_path_patterns) -> dict:
"""Read the DSTC8/SGD json dialogue data as dictionary with dialog ID as keys.
Args:
file_path_patterns: list or directory of files
Returns:
dataset_dict: dataset dictionary with dialog ID as keys
"""
dataset_dict = {}
if isinstance(file_path_patterns, list):
list_fp = file_path_patterns
else:
list_fp = sorted(glob.glob(file_path_patterns))
for fp in list_fp:
if PER_FRAME_OUTPUT_FILENAME in fp:
continue
logging.debug("Loading file: %s", fp)
with open(fp, encoding="UTF-8") as f:
data = json.load(f)
if isinstance(data, list):
for dial in data:
dataset_dict[dial["dialogue_id"]] = dial
elif isinstance(data, dict):
dataset_dict.update(data)
f.close()
return dataset_dict
def get_metrics(
dataset_ref: dict,
dataset_hyp: dict,
service_schemas: dict,
in_domain_services: set,
joint_acc_across_turn: bool,
use_fuzzy_match: bool,
):
"""Calculate the DSTC8/SGD metrics.
Args:
dataset_ref: The ground truth dataset represented as a dict mapping dialogue id to the corresponding dialogue.
dataset_hyp: The predictions in the same format as `dataset_ref`.
service_schemas: A dict mapping service name to the schema for the service.
in_domain_services: The set of services which are present in the training set.
joint_acc_across_turn: Whether to compute joint accuracy across turn instead of across service. Should be set to True when conducting multiwoz style evaluation.
use_fuzzy_match: Whether to use fuzzy string matching when comparing non-categorical slot values. Should be set to False when conducting multiwoz style evaluation.
Returns:
all_metric_aggregate: A dict mapping a metric collection name to a dict containing the values
for various metrics. Each metric collection aggregates the metrics across a specific set of frames in the dialogues.
per_frame_metric: metrics aggregated for each frame
"""
# Metrics can be aggregated in various ways, eg over all dialogues, only for
# dialogues containing unseen services or for dialogues corresponding to a
# single service. This aggregation is done through metric_collections, which
# is a dict mapping a collection name to a dict, which maps a metric to a list
# of values for that metric. Each value in this list is the value taken by
# the metric on a frame.
metric_collections = collections.defaultdict(lambda: collections.defaultdict(list))
# Ensure the dialogs in dataset_hyp also occur in dataset_ref.
assert set(dataset_hyp.keys()).issubset(set(dataset_ref.keys()))
logging.debug("len(dataset_hyp)=%d, len(dataset_ref)=%d", len(dataset_hyp), len(dataset_ref))
# Store metrics for every frame for debugging.
per_frame_metric = {}
for dial_id, dial_hyp in dataset_hyp.items():
dial_ref = dataset_ref[dial_id]
if set(dial_ref["services"]) != set(dial_hyp["services"]):
raise ValueError(
"Set of services present in ground truth and predictions don't match "
"for dialogue with id {}".format(dial_id)
)
joint_metrics = [JOINT_GOAL_ACCURACY, JOINT_CAT_ACCURACY, JOINT_NONCAT_ACCURACY]
for turn_id, (turn_ref, turn_hyp) in enumerate(zip(dial_ref["turns"], dial_hyp["turns"])):
metric_collections_per_turn = collections.defaultdict(lambda: collections.defaultdict(lambda: 1.0))
if turn_ref["speaker"] != turn_hyp["speaker"]:
raise ValueError("Speakers don't match in dialogue with id {}".format(dial_id))
# Skip system turns because metrics are only computed for user turns.
if turn_ref["speaker"] != "USER":
continue
if turn_ref["utterance"] != turn_hyp["utterance"]:
logging.error("Ref utt: %s", turn_ref["utterance"])
logging.error("Hyp utt: %s", turn_hyp["utterance"])
raise ValueError("Utterances don't match for dialogue with id {}".format(dial_id))
hyp_frames_by_service = {frame["service"]: frame for frame in turn_hyp["frames"]}
# Calculate metrics for each frame in each user turn.
for frame_ref in turn_ref["frames"]:
service_name = frame_ref["service"]
if service_name not in hyp_frames_by_service:
raise ValueError(
"Frame for service {} not found in dialogue with id {}".format(service_name, dial_id)
)
service = service_schemas[service_name]
frame_hyp = hyp_frames_by_service[service_name]
active_intent_acc = get_active_intent_accuracy(frame_ref, frame_hyp)
slot_tagging_f1_scores = get_slot_tagging_f1(frame_ref, frame_hyp, turn_ref["utterance"], service)
requested_slots_f1_scores = get_requested_slots_f1(frame_ref, frame_hyp)
goal_accuracy_dict = get_average_and_joint_goal_accuracy(
frame_ref, frame_hyp, service, use_fuzzy_match
)
frame_metric = {
ACTIVE_INTENT_ACCURACY: active_intent_acc,
REQUESTED_SLOTS_F1: requested_slots_f1_scores.f1,
REQUESTED_SLOTS_PRECISION: requested_slots_f1_scores.precision,
REQUESTED_SLOTS_RECALL: requested_slots_f1_scores.recall,
}
if slot_tagging_f1_scores is not None:
frame_metric[SLOT_TAGGING_F1] = slot_tagging_f1_scores.f1
frame_metric[SLOT_TAGGING_PRECISION] = slot_tagging_f1_scores.precision
frame_metric[SLOT_TAGGING_RECALL] = slot_tagging_f1_scores.recall
frame_metric.update(goal_accuracy_dict)
frame_id = "{:s}-{:03d}-{:s}".format(dial_id, turn_id, frame_hyp["service"])
per_frame_metric[frame_id] = frame_metric
# Add the frame-level metric result back to dialogues.
frame_hyp["metrics"] = frame_metric
# Get the domain name of the service.
domain_name = frame_hyp["service"].split("_")[0]
domain_keys = [ALL_SERVICES, frame_hyp["service"], domain_name]
if frame_hyp["service"] in in_domain_services:
domain_keys.append(SEEN_SERVICES)
else:
domain_keys.append(UNSEEN_SERVICES)
for domain_key in domain_keys:
for metric_key, metric_value in frame_metric.items():
if metric_value != NAN_VAL:
if joint_acc_across_turn and metric_key in joint_metrics:
metric_collections_per_turn[domain_key][metric_key] *= metric_value
else:
metric_collections[domain_key][metric_key].append(metric_value)
if joint_acc_across_turn:
# Conduct multiwoz style evaluation that computes joint goal accuracy
# across all the slot values of all the domains for each turn.
for domain_key in metric_collections_per_turn:
for metric_key, metric_value in metric_collections_per_turn[domain_key].items():
metric_collections[domain_key][metric_key].append(metric_value)
all_metric_aggregate = {}
for domain_key, domain_metric_vals in metric_collections.items():
domain_metric_aggregate = {}
for metric_key, value_list in domain_metric_vals.items():
if value_list:
# Metrics are macro-averaged across all frames.
domain_metric_aggregate[metric_key] = round(float(np.mean(value_list)) * 100.0, 2)
else:
domain_metric_aggregate[metric_key] = NAN_VAL
all_metric_aggregate[domain_key] = domain_metric_aggregate
return all_metric_aggregate, per_frame_metric
def evaluate(
prediction_dir: str,
data_dir: str,
eval_dataset: str,
in_domain_services: set,
joint_acc_across_turn: bool,
use_fuzzy_match: bool,
) -> dict:
"""Calculate the DSTC8/SGD metrics for given data.
Args:
prediction_dir: prediction location
data_dir: ground truth data location.
eval_dataset: evaluation data split
in_domain_services: The set of services which are present in the training set.
joint_acc_across_turn: Whether to compute joint goal accuracy across turn instead of across service. Should be set to True when conducting multiwoz style evaluation.
use_fuzzy_match: Whether to use fuzzy string matching when comparing non-categorical slot values. Should be set to False when conducting multiwoz style evaluation.
Returns:
A dict mapping a metric collection name to a dict containing the values
for various metrics for all dialogues and all services
"""
with open(os.path.join(data_dir, eval_dataset, "schema.json"), encoding="UTF-8") as f:
eval_services = {}
list_services = json.load(f)
for service in list_services:
eval_services[service["service_name"]] = service
dataset_ref = get_dataset_as_dict(os.path.join(data_dir, eval_dataset, "dialogues_*.json"))
dataset_hyp = get_dataset_as_dict(os.path.join(prediction_dir, "*.json"))
    # has ALL_SERVICES, SEEN_SERVICES, UNSEEN_SERVICES, per-service and per-domain keys
all_metric_aggregate, _ = get_metrics(
dataset_ref, dataset_hyp, eval_services, in_domain_services, joint_acc_across_turn, use_fuzzy_match
)
if SEEN_SERVICES in all_metric_aggregate:
logging.info(f'Dialog metrics for {SEEN_SERVICES} : {sorted(all_metric_aggregate[SEEN_SERVICES].items())}')
if UNSEEN_SERVICES in all_metric_aggregate:
logging.info(f'Dialog metrics for {UNSEEN_SERVICES}: {sorted(all_metric_aggregate[UNSEEN_SERVICES].items())}')
if ALL_SERVICES in all_metric_aggregate:
logging.info(f'Dialog metrics for {ALL_SERVICES} : {sorted(all_metric_aggregate[ALL_SERVICES].items())}')
    # Write the per-frame metrics values with the corresponding dialogue frames.
with open(os.path.join(prediction_dir, PER_FRAME_OUTPUT_FILENAME), "w", encoding="UTF-8") as f:
json.dump(dataset_hyp, f, indent=2, separators=(",", ": "))
return all_metric_aggregate[ALL_SERVICES]
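# Example usage (illustrative sketch, not part of the original module): the paths, split name
# and service set below are placeholders. `in_domain_services` is normally derived from the
# schema of the training split.
if __name__ == "__main__":
    example_metrics = evaluate(
        prediction_dir="/path/to/predictions",  # placeholder: directory with the model's dialogue json files
        data_dir="/path/to/dstc8_data",  # placeholder: root directory of the SGD/DSTC8 data
        eval_dataset="dev",  # evaluation split, e.g. "dev" or "test"
        in_domain_services={"Restaurants_1"},  # placeholder: services also present in training
        joint_acc_across_turn=False,  # set True only for MultiWOZ-style evaluation
        use_fuzzy_match=True,  # set False only for MultiWOZ-style evaluation
    )
    print("Aggregate metrics for all services:", example_metrics)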
| NeMo-main | nemo/collections/nlp/data/dialogue/sgd/evaluate.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrappers for schemas of different services.
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/google-research/blob/master/schema_guided_dst/schema.py
"""
import json
from typing import List, Optional, Union
from nemo.utils import logging
__all__ = ['Schema']
class ServiceSchema(object):
"""A wrapper for schema for a service."""
def __init__(self, schema_json: dict, service_id: Optional[int] = None):
"""
Constructor for ServiceSchema.
Args:
schema_json: schema json dict
service_id: service ID
"""
self._service_name = schema_json["service_name"]
self._description = schema_json["description"]
self._schema_json = schema_json
self._service_id = service_id
# Construct the vocabulary for intents, slots, categorical slots,
# non-categorical slots and categorical slot values.
self._intents = ["NONE"] + sorted(i["name"] for i in schema_json["intents"])
self._intent_descriptions = {i["name"]: i["description"] for i in schema_json["intents"]}
self._intent_descriptions["NONE"] = "none"
self._slots = sorted(s["name"] for s in schema_json["slots"])
self._slots_descriptions = {s["name"]: s["description"] for s in schema_json["slots"]}
self._categorical_slots = sorted(
s["name"] for s in schema_json["slots"] if s["is_categorical"] and s["name"] in self.state_slots
)
self._non_categorical_slots = sorted(
s["name"] for s in schema_json["slots"] if not s["is_categorical"] and s["name"] in self.state_slots
)
slot_schemas = {s["name"]: s for s in schema_json["slots"]}
categorical_slot_values = {}
categorical_slot_value_ids = {}
categorical_slot_ids = {}
non_categorical_slot_ids = {}
for slot_id, slot in enumerate(self._categorical_slots):
slot_schema = slot_schemas[slot]
values = sorted(slot_schema["possible_values"])
categorical_slot_values[slot] = values
value_ids = {value: idx for idx, value in enumerate(values)}
categorical_slot_value_ids[slot] = value_ids
categorical_slot_ids[slot] = slot_id
for slot_id, slot in enumerate(self._non_categorical_slots):
non_categorical_slot_ids[slot] = slot_id
self._categorical_slot_values = categorical_slot_values
self._categorical_slot_value_ids = categorical_slot_value_ids
self._categorical_slot_ids = categorical_slot_ids
self._non_categorical_slot_ids = non_categorical_slot_ids
@property
def schema_json(self) -> dict:
"""Returns schema json dictionary"""
return self._schema_json
@property
def state_slots(self) -> set:
"""Set of slots which are permitted to be in the dialogue state."""
state_slots = set()
for intent in self._schema_json["intents"]:
state_slots.update(intent["required_slots"])
state_slots.update(intent["optional_slots"])
return state_slots
@property
def service_name(self):
return self._service_name
@property
def service_id(self):
return self._service_id
@property
def description(self):
return self._description
@property
def slots(self):
return self._slots
@property
def intents(self):
return self._intents
@property
def intent_descriptions(self):
return self._intent_descriptions
@property
def slot_descriptions(self):
return self._slots_descriptions
@property
def categorical_slots(self):
return self._categorical_slots
@property
def non_categorical_slots(self):
return self._non_categorical_slots
@property
def categorical_slot_values(self):
return self._categorical_slot_values
def get_categorical_slot_values(self, slot):
return self._categorical_slot_values[slot]
def get_slot_from_id(self, slot_id):
return self._slots[slot_id]
def get_intent_from_id(self, intent_id):
return self._intents[intent_id]
def get_categorical_slot_from_id(self, slot_id):
return self._categorical_slots[slot_id]
def get_non_categorical_slot_from_id(self, slot_id):
return self._non_categorical_slots[slot_id]
def get_categorical_slot_value_from_id(self, slot_id, value_id):
slot = self._categorical_slots[slot_id]
return self._categorical_slot_values[slot][value_id]
def get_categorical_slot_value_id(self, slot, value):
return self._categorical_slot_value_ids[slot][value]
def get_categorical_slot_id(self, slot):
return self._categorical_slot_ids[slot]
def get_non_categorical_slot_id(self, slot):
return self._non_categorical_slot_ids[slot]
class Schema(object):
"""Wrapper for schemas for all services in a dataset."""
def __init__(self, schema_json_paths: Union[str, List[str]]):
"""
        schema_json_paths: list of paths to .json schema files, or a single str with the path to the json file.
"""
# Load the schema from the json file.
if isinstance(schema_json_paths, str):
with open(schema_json_paths, "r") as f:
all_schemas = json.load(f)
else:
# load multiple schemas from the list of the json files
all_schemas = []
completed_services = []
for schema_json_path in schema_json_paths:
with open(schema_json_path, "r") as f:
schemas = json.load(f)
logging.debug("Num of services in %s: %s", schema_json_path, len(schemas))
for service in schemas:
if service['service_name'] not in completed_services:
completed_services.append(service['service_name'])
all_schemas.append(service)
self._services = sorted(schema["service_name"] for schema in all_schemas)
self._services_vocab = {v: k for k, v in enumerate(self._services)}
self._services_id_to_vocab = {v: k for k, v in self._services_vocab.items()}
service_schemas = {}
for schema in all_schemas:
service = schema["service_name"]
service_schemas[service] = ServiceSchema(schema, service_id=self.get_service_id(service))
self._service_schemas = service_schemas
self._schemas = all_schemas
self._slots_relation_list = {}
def get_service_id(self, service: str):
return self._services_vocab[service]
def get_service_from_id(self, service_id: int):
return self._services[service_id]
def get_service_schema(self, service: str):
return self._service_schemas[service]
@property
def services(self):
return self._services
def save_to_file(self, file_path):
"""
Saves schema object to file
Args:
file_path: path to store schema object at
"""
with open(file_path, "w") as f:
json.dump(self._schemas, f, indent=2)
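# Example usage (illustrative sketch, not part of the original module): "schema.json" is a
# placeholder path to an SGD-style schema file containing the fields used by ServiceSchema.
if __name__ == "__main__":
    schema = Schema("schema.json")  # a single path or a list of schema file paths
    for example_service_name in schema.services:
        example_service_schema = schema.get_service_schema(example_service_name)
        print(example_service_name, example_service_schema.intents, example_service_schema.categorical_slots)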
| NeMo-main | nemo/collections/nlp/data/dialogue/sgd/schema.py |
# Copyright 2019 The Google Research Authors.
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/lasertagger/blob/master/bert_example.py
"""
import logging
from collections import OrderedDict
from os import path
from typing import Dict, List, Optional, Tuple, Union
from transformers import PreTrainedTokenizerBase
from nemo.collections.nlp.data.text_normalization_as_tagging.tagging import EditingTask, Tag
from nemo.collections.nlp.data.text_normalization_as_tagging.utils import yield_sources_and_targets
"""Build BERT Examples from source, target pairs.
The difference from the original Lasertagger approach is that our target already consists of tags,
so the preprocessing is trivial.
"""
class BertExample(object):
"""Class for training and inference examples for BERT.
Attributes:
editing_task: The EditingTask from which this example was created. Needed
when realizing labels predicted for this example.
features: Feature dictionary.
"""
def __init__(
self,
input_ids: List[int],
input_mask: List[int],
segment_ids: List[int],
labels_mask: List[int],
tag_labels: List[int],
semiotic_labels: List[int],
semiotic_spans: List[Tuple[int, int, int]],
token_start_indices: List[int],
task: EditingTask,
default_label: int,
) -> None:
"""Inputs to the example wrapper
Args:
input_ids: indices of tokens which constitute batches of masked text segments
input_mask: bool tensor with 0s in place of source tokens to be masked
segment_ids: bool tensor with 0's and 1's to denote the text segment type
tag_labels: indices of tokens which should be predicted from each of the
corresponding input tokens
labels_mask: bool tensor with 0s in place of label tokens to be masked
token_start_indices: the indices of the WordPieces that start a token.
semiotic_labels: indices of semiotic classes which should be predicted from each of the
corresponding input tokens
semiotic_spans: list of tuples (class_id, start_wordpiece_idx, end_wordpiece_idx), end is exclusive
task: Example Text-Editing Task used by the LaserTagger model during inference.
default_label: The default label for the KEEP tag-ID
"""
input_len = len(input_ids)
if not (
input_len == len(input_mask)
and input_len == len(segment_ids)
and input_len == len(labels_mask)
and input_len == len(tag_labels)
and input_len == len(semiotic_labels)
):
raise ValueError('All feature lists should have the same length ({})'.format(input_len))
self.features = OrderedDict(
[
("input_ids", input_ids),
("input_mask", input_mask),
("segment_ids", segment_ids),
("labels_mask", labels_mask),
("tag_labels", tag_labels),
("semiotic_labels", semiotic_labels),
("semiotic_spans", semiotic_spans),
]
)
self._token_start_indices = token_start_indices
self.editing_task = task
self._default_label = default_label
def pad_to_max_length(self, max_seq_length: int, max_semiotic_length: int, pad_token_id: int) -> None:
"""Pad the feature vectors so that they all have max_seq_length.
Args:
max_seq_length: The length that all features, except semiotic_classes, will have after padding.
max_semiotic_length: The length that semiotic_classes will have after padding.
pad_token_id: input_ids feature is padded with this ID, other features
with ID 0.
"""
pad_len = max_seq_length - len(self.features['input_ids'])
self.features["semiotic_spans"].extend(
[(-1, -1, -1)] * (max_semiotic_length - len(self.features["semiotic_spans"]))
)
for key in self.features:
if key == "semiotic_spans":
continue
pad_id = pad_token_id if (key == "input_ids") else 0
self.features[key].extend([pad_id] * pad_len)
if len(self.features[key]) != max_seq_length:
raise ValueError(
"{} has length {} (should be {}).".format(key, len(self.features[key]), max_seq_length)
)
def get_token_labels(self, features_key: str) -> List[int]:
"""Returns labels/tags for the original tokens, not for wordpieces."""
labels = []
for idx in self._token_start_indices:
# For unmasked and untruncated tokens, use the label in the features, and
# for the truncated tokens, use the default label.
if idx < len(self.features[features_key]) and self.features["labels_mask"][idx]:
labels.append(self.features[features_key][idx])
else:
labels.append(self._default_label)
return labels
class BertExampleBuilder(object):
"""Builder class for BertExample objects."""
def __init__(
self,
label_map: Dict[str, int],
semiotic_classes: Dict[str, int],
tokenizer: PreTrainedTokenizerBase,
max_seq_length: int,
) -> None:
"""Initializes an instance of BertExampleBuilder.
Args:
label_map: Mapping from tags to tag IDs.
semiotic_classes: Mapping from semiotic classes to their ids.
tokenizer: Tokenizer object.
max_seq_length: Maximum sequence length.
"""
self._label_map = label_map
self._semiotic_classes = semiotic_classes
self._tokenizer = tokenizer
self._max_seq_length = max_seq_length
self._max_semiotic_length = max(4, int(max_seq_length / 2))
self._pad_id = self._tokenizer.pad_token_id
self._keep_tag_id = self._label_map["KEEP"]
def build_bert_example(
self, source: str, target: Optional[str] = None, semiotic_info: Optional[str] = None, infer: bool = False
) -> Optional[BertExample]:
"""Constructs a BERT Example.
Args:
source: Source text.
target: Target text or None when building an example during inference.
semiotic_info: String or None
infer: inference mode
Returns:
BertExample, or None if the conversion from text to tags was infeasible
"""
# Compute target labels.
task = EditingTask(source)
if (target is not None) and (not infer):
tags = BertExampleBuilder._compute_tags(task, target)
if not tags:
return None
else:
# If target is not provided, we set all target labels to KEEP.
tags = [Tag("KEEP") for _ in task.source_tokens]
source_tags = [self._label_map[str(tag)] for tag in tags]
tokens, tag_labels, token_start_indices = self._split_to_wordpieces(task.source_tokens, source_tags)
tokens = self._truncate_list(tokens)
tag_labels = self._truncate_list(tag_labels)
input_tokens = ["[CLS]"] + tokens + ["[SEP]"]
labels_mask = [0] + [1] * len(tag_labels) + [0]
tag_labels = [0] + tag_labels + [0]
if "PLAIN" not in self._semiotic_classes:
raise KeyError("PLAIN should be in self._semiotic_classes")
plain_cid = self._semiotic_classes["PLAIN"]
semiotic_labels = [plain_cid] * len(tag_labels) # we use the same mask for semiotic labels as for tag labels
input_ids = self._tokenizer.convert_tokens_to_ids(input_tokens)
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
semiotic_spans = []
if semiotic_info is not None:
# e.g. semiotic_info="CARDINAL 7 8;DATE 9 12"
# translate class name to its id, translate coords from tokens to wordpieces
semiotic_info_parts = semiotic_info.split(";")
previous_end = 0
for p in semiotic_info_parts:
if p == "":
break
c, start, end = p.split(" ")
if c not in self._semiotic_classes:
raise KeyError("c=" + c + " not found in self._semiotic_classes")
cid = self._semiotic_classes[c]
start = int(start)
end = int(end)
if start >= len(token_start_indices):
raise IndexError(
"start=" + str(start) + " is outside len(token_start_indices)=" + str(len(token_start_indices))
)
while previous_end < start:
subtoken_start = token_start_indices[previous_end]
subtoken_end = (
token_start_indices[previous_end + 1]
if previous_end + 1 < len(token_start_indices)
else len(input_ids) - 1
)
semiotic_spans.append((plain_cid, subtoken_start, subtoken_end))
previous_end += 1
subtoken_start = token_start_indices[start]
subtoken_end = token_start_indices[end] if end < len(token_start_indices) else len(input_ids) - 1
if subtoken_end >= self._max_seq_length: # possible if input_ids gets truncated to the max_seq_length
break
semiotic_spans.append((cid, subtoken_start, subtoken_end))
semiotic_labels[subtoken_start:subtoken_end] = [cid] * (subtoken_end - subtoken_start)
previous_end = end
while previous_end < len(token_start_indices):
subtoken_start = token_start_indices[previous_end]
subtoken_end = (
token_start_indices[previous_end + 1]
if previous_end + 1 < len(token_start_indices)
else len(input_ids) - 1
)
semiotic_spans.append((plain_cid, subtoken_start, subtoken_end))
previous_end += 1
if len(input_ids) > self._max_seq_length or len(semiotic_spans) > self._max_semiotic_length:
return None
example = BertExample(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
labels_mask=labels_mask,
tag_labels=tag_labels,
semiotic_labels=semiotic_labels,
semiotic_spans=semiotic_spans,
token_start_indices=token_start_indices,
task=task,
default_label=self._keep_tag_id,
)
example.pad_to_max_length(self._max_seq_length, self._max_semiotic_length, self._pad_id)
return example
def _split_to_wordpieces(self, tokens: List[str], labels: List[int]) -> Tuple[List[str], List[int], List[int]]:
"""Splits tokens (and the labels accordingly) to WordPieces.
Args:
tokens: Tokens to be split.
labels: Labels (one per token) to be split.
Returns:
3-tuple with the split tokens, split labels, and the indices of the
WordPieces that start a token.
"""
bert_tokens = [] # Original tokens split into wordpieces.
bert_labels = [] # Label for each wordpiece.
# Index of each wordpiece that starts a new token.
token_start_indices = []
for i, token in enumerate(tokens):
# '+ 1' is because bert_tokens will be prepended by [CLS] token later.
token_start_indices.append(len(bert_tokens) + 1)
pieces = self._tokenizer.tokenize(token)
bert_tokens.extend(pieces)
bert_labels.extend([labels[i]] * len(pieces))
return bert_tokens, bert_labels, token_start_indices
def _truncate_list(self, x: Union[List[str], List[int]]) -> Union[List[str], List[int]]:
"""Returns truncated version of x according to the self._max_seq_length."""
# Save two slots for the first [CLS] token and the last [SEP] token.
return x[: self._max_seq_length - 2]
def _get_pad_id(self) -> int:
"""Returns the ID of the [PAD] token (or 0 if it's not in the vocab)."""
try:
return self._tokenizer.pad_token_id
except KeyError:
return 0
@staticmethod
def _compute_tags(task: EditingTask, target: str) -> List[Tag]:
"""Computes tags needed for converting the source into the target.
Args:
task: tagging.EditingTask that specifies the input.
target: Target text.
Returns:
List of tagging.Tag objects.
"""
target_tokens = target.split(" ")
if len(target_tokens) != len(task.source_tokens):
raise ValueError("Length mismatch: " + str(task.source_tokens) + "\n" + target)
tags = []
for t in target_tokens:
if t == "<SELF>":
tags.append(Tag("KEEP"))
elif t == "<DELETE>":
tags.append(Tag("DELETE"))
else:
tags.append(Tag("DELETE|" + t))
return tags
def read_input_file(
example_builder: 'BertExampleBuilder', input_filename: str, infer: bool = False
) -> List['BertExample']:
"""Reads in Tab Separated Value file and converts to training/inference-ready examples.
Args:
example_builder: Instance of BertExampleBuilder
input_filename: Path to the TSV input file.
        infer: If True, examples are built for inference and the target column is ignored.
Returns:
examples: List of converted examples(features and Editing Tasks)
"""
if not path.exists(input_filename):
raise ValueError("Cannot find file: " + input_filename)
examples = []
for i, (source, target, semiotic_info) in enumerate(yield_sources_and_targets(input_filename)):
if len(examples) % 1000 == 0:
logging.info("{} examples processed.".format(len(examples)))
example = example_builder.build_bert_example(source, target, semiotic_info, infer)
if example is None:
continue
examples.append(example)
logging.info(f"Done. {len(examples)} examples converted.")
return examples
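# Example usage (illustrative sketch, not part of the original module): the tokenizer name, tag
# vocabulary, semiotic classes and the source/target strings below are placeholders; real label
# maps and inputs come from the dataset preparation scripts.
if __name__ == "__main__":
    from transformers import AutoTokenizer

    example_builder = BertExampleBuilder(
        label_map={"KEEP": 0, "DELETE": 1, "DELETE|_2015_": 2},  # placeholder tag vocabulary
        semiotic_classes={"PLAIN": 0, "DATE": 1},  # placeholder semiotic class vocabulary
        tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"),  # placeholder model name
        max_seq_length=64,
    )
    # "twenty fifteen" should be rewritten to "2015": the first token is replaced, the second deleted.
    example = example_builder.build_bert_example(source="twenty fifteen", target="_2015_ <DELETE>")
    if example is not None:
        print(example.features["tag_labels"])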
| NeMo-main | nemo/collections/nlp/data/text_normalization_as_tagging/bert_example.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List, Optional
import numpy as np
from nemo.collections.nlp.data.text_normalization_as_tagging.bert_example import BertExampleBuilder, read_input_file
from nemo.core.classes.dataset import Dataset
from nemo.core.neural_types import ChannelType, IntType, LabelsType, MaskType, NeuralType
__all__ = ["ThutmoseTaggerDataset", "ThutmoseTaggerTestDataset"]
class ThutmoseTaggerDataset(Dataset):
"""
Dataset as used by the ThutmoseTaggerModel for training, validation, and inference
pipelines.
Args:
input_file (str): path to tsv-file with data
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
"labels_mask": NeuralType(('B', 'T'), MaskType()),
"tag_labels": NeuralType(('B', 'T'), LabelsType()),
"semiotic_labels": NeuralType(('B', 'T'), LabelsType()),
"semiotic_spans": NeuralType(('B', 'T', 'C'), IntType()),
}
def __init__(self, input_file: str, example_builder: BertExampleBuilder) -> None:
self.examples = read_input_file(example_builder, input_file, infer=False)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
input_ids = np.array(self.examples[idx].features["input_ids"])
input_mask = np.array(self.examples[idx].features["input_mask"])
segment_ids = np.array(self.examples[idx].features["segment_ids"])
labels_mask = np.array(self.examples[idx].features["labels_mask"])
tag_labels = np.array(self.examples[idx].features["tag_labels"])
semiotic_labels = np.array(self.examples[idx].features["semiotic_labels"])
semiotic_spans = np.array(self.examples[idx].features["semiotic_spans"])
return input_ids, input_mask, segment_ids, labels_mask, tag_labels, semiotic_labels, semiotic_spans
class ThutmoseTaggerTestDataset(Dataset):
"""
Dataset for inference pipeline.
Args:
sents: list of strings
example_builder: instance of BertExampleBuilder
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(('B', 'T'), ChannelType()),
"input_mask": NeuralType(('B', 'T'), MaskType()),
"segment_ids": NeuralType(('B', 'T'), ChannelType()),
}
def __init__(self, sents: List[str], example_builder: BertExampleBuilder) -> None:
self.examples = []
for source in sents:
example = example_builder.build_bert_example(source, infer=True)
if example is None:
raise ValueError("Cannot build example from: " + source)
self.examples.append(example)
def __len__(self):
return len(self.examples)
def __getitem__(self, idx: int):
input_ids = np.array(self.examples[idx].features["input_ids"])
input_mask = np.array(self.examples[idx].features["input_mask"])
segment_ids = np.array(self.examples[idx].features["segment_ids"])
return input_ids, input_mask, segment_ids
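# Example usage (illustrative sketch, not part of the original module): the tokenizer name, tag
# and semiotic vocabularies and the input sentences below are placeholders.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from transformers import AutoTokenizer

    example_builder = BertExampleBuilder(
        label_map={"KEEP": 0, "DELETE": 1},  # placeholder tag vocabulary
        semiotic_classes={"PLAIN": 0},  # placeholder semiotic class vocabulary
        tokenizer=AutoTokenizer.from_pretrained("bert-base-uncased"),  # placeholder model name
        max_seq_length=64,
    )
    dataset = ThutmoseTaggerTestDataset(sents=["twenty fifteen", "one hundred"], example_builder=example_builder)
    loader = DataLoader(dataset, batch_size=2)
    for input_ids, input_mask, segment_ids in loader:
        print(input_ids.shape, input_mask.shape, segment_ids.shape)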
| NeMo-main | nemo/collections/nlp/data/text_normalization_as_tagging/thutmose_tagger_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.text_normalization_as_tagging.thutmose_tagger_dataset import (
ThutmoseTaggerDataset,
ThutmoseTaggerTestDataset,
)
| NeMo-main | nemo/collections/nlp/data/text_normalization_as_tagging/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from itertools import groupby
from typing import Dict, List, Tuple
import numpy as np
"""Utility functions for Thutmose Tagger."""
def get_token_list(text: str) -> List[str]:
"""Returns a list of tokens.
This function expects that the tokens in the text are separated by space
character(s). Example: "ca n't , touch". This is the case at least for the
public DiscoFuse and WikiSplit datasets.
Args:
text: String to be split into tokens.
"""
return text.split()
def yield_sources_and_targets(input_filename: str):
"""Reads and yields source lists and targets from the input file.
Args:
input_filename: Path to the input file.
Yields:
Tuple with (list of source texts, target text).
"""
# The format expects a TSV file with the source on the first and the
# target on the second column.
with open(input_filename, 'r') as f:
for line in f:
source, target, semiotic_info = line.rstrip('\n').split('\t')
yield source, target, semiotic_info
def read_label_map(path: str) -> Dict[str, int]:
"""Return label map read from the given path."""
with open(path, 'r') as f:
label_map = {}
empty_line_encountered = False
for tag in f:
tag = tag.strip()
if tag:
label_map[tag] = len(label_map)
else:
if empty_line_encountered:
raise ValueError('There should be no empty lines in the middle of the label map ' 'file.')
empty_line_encountered = True
return label_map
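# For illustration (not part of the original module): the label map file is expected to contain
# one tag per line, e.g.
#     KEEP
#     DELETE
#     DELETE|_2015_
# read_label_map assigns consecutive ids in file order, giving
#     {"KEEP": 0, "DELETE": 1, "DELETE|_2015_": 2}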
def read_semiotic_classes(path: str) -> Dict[str, int]:
"""Return semiotic classes map read from the given path."""
with open(path, 'r') as f:
semiotic_classes = {}
empty_line_encountered = False
for tag in f:
tag = tag.strip()
if tag:
semiotic_classes[tag] = len(semiotic_classes)
else:
if empty_line_encountered:
raise ValueError('There should be no empty lines in the middle of the label map ' 'file.')
empty_line_encountered = True
return semiotic_classes
def split_text_by_isalpha(s: str):
"""Split string into segments, so that alphabetic sequence is one segment"""
for k, g in groupby(s, str.isalpha):
yield ''.join(g)
def spoken_preprocessing(spoken: str) -> str:
"""Preprocess spoken input for Thuthmose tagger model.
Attention!
This function is used both during data preparation and during inference.
If you change it, you should rerun data preparation and retrain the model.
"""
spoken = spoken.casefold()
spoken = spoken.replace('_trans', '').replace('_letter_latin', '').replace('_letter', '')
# "долларов сэ ш а" => "долларов-сэ-ш-а" #join into one token to simplify alignment
spoken = re.sub(r" долларов сэ ш а", r" долларов-сэ-ш-а", spoken)
spoken = re.sub(r" доллара сэ ш а", r" доллара-сэ-ш-а", spoken)
spoken = re.sub(r" доллар сэ ш а", r" доллар-сэ-ш-а", spoken)
spoken = re.sub(r" фунтов стерлингов", r" фунтов-стерлингов", spoken)
spoken = re.sub(r" фунта стерлингов", r" фунта-стерлингов", spoken)
spoken = re.sub(r" фунт стерлингов", r" фунт-стерлингов", spoken)
spoken = re.sub(r" долларами сэ ш а", r" долларами-сэ-ш-а", spoken)
spoken = re.sub(r" долларам сэ ш а", r" долларам-сэ-ш-а", spoken)
spoken = re.sub(r" долларах сэ ш а", r" долларах-сэ-ш-а", spoken)
spoken = re.sub(r" долларе сэ ш а", r" долларе-сэ-ш-а", spoken)
spoken = re.sub(r" доллару сэ ш а", r" доллару-сэ-ш-а", spoken)
spoken = re.sub(r" долларом сэ ш а", r" долларом-сэ-ш-а", spoken)
spoken = re.sub(r" фунтами стерлингов", r" фунтами-стерлингов", spoken)
spoken = re.sub(r" фунтам стерлингов", r" фунтам-стерлингов", spoken)
spoken = re.sub(r" фунтах стерлингов", r" фунтах-стерлингов", spoken)
spoken = re.sub(r" фунте стерлингов", r" фунте-стерлингов", spoken)
spoken = re.sub(r" фунту стерлингов", r" фунту-стерлингов", spoken)
spoken = re.sub(r" фунтом стерлингов", r" фунтом-стерлингов", spoken)
return spoken
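# For example (illustrative): spoken_preprocessing("Пять долларов сэ ш а") returns
# "пять долларов-сэ-ш-а": the text is casefolded, transliteration markers such as "_trans"
# are stripped, and multi-word currency names are glued into single tokens to simplify alignment.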
## This function is used only in data preparation (examples/nlp/normalisation_as_tagging/dataset_preparation)
def get_src_and_dst_for_alignment(
semiotic_class: str, written: str, spoken: str, lang: str
) -> Tuple[str, str, str, str]:
"""Tokenize written and spoken span.
Args:
semiotic_class: str - lowercase semiotic class, ex. "cardinal"
written: str - written form, ex. "2015 году"
spoken: str - spoken form, ex. "две тысячи пятнадцатом году"
lang: str - language
Return:
src: str - written part, where digits and foreign letters are tokenized by characters, ex. "2 0 1 5"
dst: str - spoken part tokenized by space, ex. "две тысячи пятнадцатом"
same_begin: str
same_end: str
"""
written = written.casefold()
# ATTENTION!!! This is INPUT transformation! Need to do the same at inference time!
spoken = spoken_preprocessing(spoken)
# remove same fragments at the beginning or at the end of spoken and written form
written_parts = written.split()
spoken_parts = spoken.split()
same_from_begin = 0
same_from_end = 0
for i in range(min(len(written_parts), len(spoken_parts))):
if written_parts[i] == spoken_parts[i]:
same_from_begin += 1
else:
break
for i in range(min(len(written_parts), len(spoken_parts))):
if written_parts[-i - 1] == spoken_parts[-i - 1]:
same_from_end += 1
else:
break
same_begin = written_parts[0:same_from_begin]
same_end = []
if same_from_end == 0:
written = " ".join(written_parts[same_from_begin:])
spoken = " ".join(spoken_parts[same_from_begin:])
else:
written = " ".join(written_parts[same_from_begin:-same_from_end])
spoken = " ".join(spoken_parts[same_from_begin:-same_from_end])
same_end = written_parts[-same_from_end:]
fragments = list(split_text_by_isalpha(written))
written_tokens = []
for frag in fragments:
if frag.isalpha():
if semiotic_class == "plain" or semiotic_class == "letters" or semiotic_class == "electronic":
chars = list(frag.strip())
chars[0] = "_" + chars[0] # prepend first symbol of a word with underscore
chars[-1] = chars[-1] + "_" # append underscore to the last symbol
written_tokens += chars
else:
written_tokens.append("_" + frag + "_")
else:
chars = list(frag.strip().replace(" ", ""))
if len(chars) > 0:
chars[0] = "_" + chars[0] # prepend first symbol of a non-alpha fragment with underscore
chars[-1] = chars[-1] + "_" # append underscore to the last symbol of a non-alpha fragment
written_tokens += chars
written_str = " ".join(written_tokens)
# _н_ _._ _г_ _._ => _н._ _г._
written_str = re.sub(
r"([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя])_ _\._", r"\g<1>._", written_str
)
# _тыс_ _. $ => _тыс._ _$
    written_str = re.sub(
        r"([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя])_ _\. ([^_])", r"\g<1>._ _\g<2>", written_str
    )
if semiotic_class == "ordinal":
# _8 2 -_ _ом_ => _8 2-ом_
written_str = re.sub(
r"([\d]) -_ _([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]+)_",
r"\g<1>-\g<2>_",
written_str,
)
# _8 8_ _й_ _8 8й_
written_str = re.sub(
r"([\d])_ _([abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]+)_", r"\g<1>\g<2>_", written_str
)
if semiotic_class == "cardinal":
# _2 5 -_ _ти_ => _2 5-ти_
written_str = re.sub(r"([\d]) -_ _(ти)_", r"\g<1>-\g<2>_", written_str)
written_str = re.sub(r"([\d]) -_ _(и)_", r"\g<1>-\g<2>_", written_str)
written_str = re.sub(r"([\d]) -_ _(мя)_", r"\g<1>-\g<2>_", written_str)
written_str = re.sub(r"([\d]) -_ _(ех)_", r"\g<1>-\g<2>_", written_str)
# _i b m_ _'_ _s_ => _i b m's_
if lang == "en":
written_str = re.sub(r"_ _'_ _s_", r"'s_", written_str)
if semiotic_class == "date" and lang == "en":
# _1 9 8 0_ _s_ => _1 9 8 0s_
written_str = re.sub(r"([\d])_ _s_", r"\g<1>s_", written_str)
# _1 9 5 0 '_ _s_ => _1 9 5 0's_
written_str = re.sub(r"([\d]) '_ _s_", r"\g<1>'s_", written_str)
# _wednesday_ _2 6_ _th_ _september_ _2 0 1 2_ => _wednesday_ _2 6th_ _september_ _2 0 1 2_
written_str = re.sub(r"([\d])_ _th_", r"\g<1>th_", written_str)
# _wednesday_ _may_ _2 1_ _st_ _, 2 0 1 4_ => _wednesday_ _may_ _2 1st_ _, 2 0 1 4_
written_str = re.sub(r"([\d])_ _st_", r"\g<1>st_", written_str)
# _wednesday_ _2 3_ _rd_ _july_ _2 0 1 4_ => _wednesday_ _2 3rd_ _july_ _2 0 1 4_
written_str = re.sub(r"([\d])_ _rd_", r"\g<1>rd_", written_str)
# _wednesday_ _2 2_ _nd_ _july_ _2 0 1 4_ => _wednesday_ _2 2nd_ _july_ _2 0 1 4_
written_str = re.sub(r"([\d])_ _nd_", r"\g<1>nd_", written_str)
written_str = re.sub(r"_mon_ _\. ", r"_mon._ ", written_str)
written_str = re.sub(r"_tue_ _\. ", r"_tue._ ", written_str)
written_str = re.sub(r"_wen_ _\. ", r"_wen._ ", written_str)
written_str = re.sub(r"_thu_ _\. ", r"_thu._ ", written_str)
written_str = re.sub(r"_fri_ _\. ", r"_fri._ ", written_str)
written_str = re.sub(r"_sat_ _\. ", r"_sat._ ", written_str)
written_str = re.sub(r"_sun_ _\. ", r"_sun._ ", written_str)
written_str = re.sub(r"_jan_ _\. ", r"_jan._ ", written_str)
written_str = re.sub(r"_feb_ _\. ", r"_feb._ ", written_str)
written_str = re.sub(r"_mar_ _\. ", r"_mar._ ", written_str)
written_str = re.sub(r"_apr_ _\. ", r"_apr._ ", written_str)
written_str = re.sub(r"_may_ _\. ", r"_may._ ", written_str)
written_str = re.sub(r"_jun_ _\. ", r"_jun._ ", written_str)
written_str = re.sub(r"_jul_ _\. ", r"_jul._ ", written_str)
written_str = re.sub(r"_aug_ _\. ", r"_aug._ ", written_str)
written_str = re.sub(r"_sep_ _\. ", r"_sep._ ", written_str)
written_str = re.sub(r"_oct_ _\. ", r"_oct._ ", written_str)
written_str = re.sub(r"_nov_ _\. ", r"_nov._ ", written_str)
written_str = re.sub(r"_dec_ _\. ", r"_dec._ ", written_str)
if semiotic_class == "date" and lang == "ru":
# _1 8 . 0 8 . 2 0 0 1_ => _1 8_ .08. _2 0 0 1_
# _1 8 / 0 8 / 2 0 0 1_ => _1 8_ /08/ _2 0 0 1_
# _1 8 - 0 8 - 2 0 0 1_ => _1 8_ -08- _2 0 0 1_
written_str = re.sub(r"([\d]) \. ([01]) ([0123456789]) \. ([\d])", r"\g<1>_ .\g<2>\g<3>. _\g<4>", written_str)
written_str = re.sub(r"([\d]) / ([01]) ([0123456789]) / ([\d])", r"\g<1>_ /\g<2>\g<3>/ _\g<4>", written_str)
written_str = re.sub(r"([\d]) - ([01]) ([0123456789]) - ([\d])", r"\g<1>_ -\g<2>\g<3>- _\g<4>", written_str)
# _1 8 . 8 . 2 0 0 1_ => _1 8_ .8. _2 0 0 1_
# _1 8 / 8 / 2 0 0 1_ => _1 8_ /8/ _2 0 0 1_
# _1 8 - 8 - 2 0 0 1_ => _1 8_ -8- _2 0 0 1_
written_str = re.sub(r"([\d]) \. ([123456789]) \. ([\d])", r"\g<1>_ .\g<2>. _\g<3>", written_str)
written_str = re.sub(r"([\d]) / ([123456789]) / ([\d])", r"\g<1>_ /\g<2>/ _\g<3>", written_str)
written_str = re.sub(r"([\d]) - ([123456789]) - ([\d])", r"\g<1>_ -\g<2>- _\g<3>", written_str)
if semiotic_class == "money":
        # if a span starts with a currency symbol, move it to the end
        # "_$ 2 5_" => "_2 5_ _$<<"  # "<<" means "at post-processing move to the beginning of the semiotic span"
written_str = re.sub(
r"^(_[^0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя]) ([\d].*)$",
r"_\g<2> \g<1><<",
written_str,
)
# "_us_ _$ 7 0 0_" => "_us__$ 7 0 0_"
written_str = re.sub(r"^_us_ _\$ ([\d].*)$", r"_\g<1> _us__$<<", written_str)
# "_2 5 $_" => "_2 5_ _$_" #insert space between last digit and dollar sign
written_str = re.sub(
r"([\d]) ([^0123456789abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя_]_)",
r"\g<1>_ _\g<2>",
written_str,
)
if semiotic_class == "time":
# "_pm_ _1 0_" => "_1 0_ _pm_<<"
written_str = re.sub(r"^(_[ap]m_) (_[\d].*)$", r"\g<2> \g<1><<", written_str)
# "_8 : 0 0_ _a._ _m._ => _8:00_ _a._ _m._"
# "_1 2 : 0 0_ _a._ _m._ => _1 2:00_ _a._ _m._"
written_str = re.sub(r"(\d) [:.] 0 0_", r"\g<1>:00_", written_str)
# "_2 : 4 2 : 4 4_" => "_2: 4 2: 4 4_"
written_str = re.sub(r"(\d) [:.] ", r"\g<1>: ", written_str)
if semiotic_class == "measure":
# "_6 5 8_ _см_ _³ ._" => " _6 5 8_ _³> _см._"
# > means "at post-processing swap with the next token to the right"
written_str = re.sub(
r"(_[abcdefghijklmnopqrstuvwxyzабвгдеёжзийклмнопрстуфхцчшщъыьэюя.]+_) (_[³²]_?)",
r"\g<2>> \g<1>",
written_str,
)
return written_str, spoken, " ".join(same_begin), " ".join(same_end)
def fill_alignment_matrix(
fline2: str, fline3: str, gline2: str, gline3: str
) -> Tuple[np.ndarray, List[str], List[str]]:
"""Parse Giza++ direct and reverse alignment results and represent them as an alignment matrix
Args:
fline2: e.g. "_2 0 1 4_"
fline3: e.g. "NULL ({ }) twenty ({ 1 }) fourteen ({ 2 3 4 })"
gline2: e.g. "twenty fourteen"
gline3: e.g. "NULL ({ }) _2 ({ 1 }) 0 ({ }) 1 ({ }) 4_ ({ 2 })"
Returns:
matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
the corresponding words were aligned to one another in direct and reverse alignment runs, 1 and 2 mean that the
words were aligned only in one direction, 0 - no alignment.
srctokens: e.g. ["twenty", "fourteen"]
dsttokens: e.g. ["_2", "0", "1", "4_"]
For example, the alignment matrix for the above example may look like:
[[3, 0, 0, 0]
[0, 2, 2, 3]]
"""
if fline2 is None or gline2 is None or fline3 is None or gline3 is None:
raise ValueError(f"empty params")
srctokens = gline2.split()
dsttokens = fline2.split()
pattern = r"([^ ]+) \(\{ ([^\(\{\}\)]*) \}\)"
    # add an extra space inside empty alignment sets "({ })" so that they match the pattern above
    src2dst = re.findall(pattern, fline3.replace("({ })", "({  })"))
    dst2src = re.findall(pattern, gline3.replace("({ })", "({  })"))
if len(src2dst) != len(srctokens) + 1:
raise ValueError(
"length mismatch: len(src2dst)="
+ str(len(src2dst))
+ "; len(srctokens)"
+ str(len(srctokens))
+ "\n"
+ gline2
+ "\n"
+ fline3
)
if len(dst2src) != len(dsttokens) + 1:
raise ValueError(
"length mismatch: len(dst2src)="
+ str(len(dst2src))
+ "; len(dsttokens)"
+ str(len(dsttokens))
+ "\n"
+ fline2
+ "\n"
+ gline3
)
matrix = np.zeros((len(srctokens), len(dsttokens)))
for i in range(1, len(src2dst)):
token, to_str = src2dst[i]
if to_str == "":
continue
to = list(map(int, to_str.split()))
for t in to:
matrix[i - 1][t - 1] = 2
for i in range(1, len(dst2src)):
token, to_str = dst2src[i]
if to_str == "":
continue
to = list(map(int, to_str.split()))
for t in to:
matrix[t - 1][i - 1] += 1
return matrix, srctokens, dsttokens
def check_monotonicity(matrix: np.ndarray) -> bool:
"""Check if alignment is monotonous - i.e. the relative order is preserved (no swaps).
Args:
matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
the corresponding words were aligned to one another in direct and reverse alignment runs, 1 and 2 mean that the
words were aligned only in one direction, 0 - no alignment.
"""
is_sorted = lambda k: np.all(k[:-1] <= k[1:])
a = np.argwhere(matrix == 3)
b = np.argwhere(matrix == 2)
c = np.vstack((a, b))
d = c[c[:, 1].argsort()] # sort by second column (less important)
d = d[d[:, 0].argsort(kind="mergesort")]
return is_sorted(d[:, 1])
def get_targets(matrix: np.ndarray, dsttokens: List[str], delimiter: str) -> List[str]:
"""Join some of the destination tokens, so that their number becomes the same as the number of input words.
Unaligned tokens tend to join to the left aligned token.
Args:
matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
the corresponding words were aligned to one another in direct and reverse alignment runs, 1 and 2 mean that the
words were aligned only in one direction, 0 - no alignment.
dsttokens: e.g. ["_2", "0", "1", "4_"]
Returns:
targets: list of string tokens, with one-to-one correspondence to matrix.shape[0]
Example:
If we get
matrix=[[3, 0, 0, 0]
    [0, 0, 0, 3]]
dsttokens=["_2", "0", "1", "4_"]
it gives
targets = ["_201", "4_"]
Actually, this is a mistake instead of ["_20", "14_"]. That will be further corrected by regular expressions.
"""
targets = []
last_covered_dst_id = -1
for i in range(len(matrix)):
dstlist = []
for j in range(last_covered_dst_id + 1, len(dsttokens)):
# matrix[i][j] == 3: safe alignment point
if matrix[i][j] == 3 or (
j == last_covered_dst_id + 1
and np.all(matrix[i, :] == 0) # if the whole line does not have safe points
and np.all(matrix[:, j] == 0) # and the whole column does not have safe points, match them
):
if len(targets) == 0: # if this is first safe point, attach left unaligned columns to it, if any
for k in range(0, j):
if np.all(matrix[:, k] == 0): # if column k does not have safe points
dstlist.append(dsttokens[k])
else:
break
dstlist.append(dsttokens[j])
last_covered_dst_id = j
for k in range(j + 1, len(dsttokens)):
if np.all(matrix[:, k] == 0): # if column k does not have safe points
dstlist.append(dsttokens[k])
last_covered_dst_id = k
else:
break
if len(dstlist) > 0:
targets.append(delimiter.join(dstlist))
else:
targets.append("<DELETE>")
return targets
def get_targets_from_back(matrix: np.ndarray, dsttokens: List[str], delimiter: str) -> List[str]:
"""Join some of the destination tokens, so that their number becomes the same as the number of input words.
Unaligned tokens tend to join to the right aligned token.
Args:
matrix: a numpy array of shape (src_len, dst_len) filled with [0, 1, 2, 3], where 3 means a reliable alignment
the corresponding words were aligned to one another in direct and reverse alignment runs, 1 and 2 mean that the
words were aligned only in one direction, 0 - no alignment.
dsttokens: e.g. ["_2", "0", "1", "4_"]
Returns:
targets: list of string tokens, with one-to-one correspondence to matrix.shape[0]
Example:
If we get
matrix=[[3, 0, 0, 0]
    [0, 0, 0, 3]]
dsttokens=["_2", "0", "1", "4_"]
it gives
targets = ["_2", "014_"]
Actually, this is a mistake instead of ["_20", "14_"]. That will be further corrected by regular expressions.
"""
targets = []
last_covered_dst_id = len(dsttokens)
for i in range(len(matrix) - 1, -1, -1):
dstlist = []
for j in range(last_covered_dst_id - 1, -1, -1):
if matrix[i][j] == 3 or (
j == last_covered_dst_id - 1 and np.all(matrix[i, :] == 0) and np.all(matrix[:, j] == 0)
):
if len(targets) == 0:
for k in range(len(dsttokens) - 1, j, -1):
if np.all(matrix[:, k] == 0):
dstlist.append(dsttokens[k])
else:
break
dstlist.append(dsttokens[j])
last_covered_dst_id = j
for k in range(j - 1, -1, -1):
if np.all(matrix[:, k] == 0):
dstlist.append(dsttokens[k])
last_covered_dst_id = k
else:
break
if len(dstlist) > 0:
targets.append(delimiter.join(list(reversed(dstlist))))
else:
targets.append("<DELETE>")
return list(reversed(targets))
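# Minimal runnable sketch (not part of the original module) showing how the alignment helpers fit
# together; the Giza++ strings are the ones from the fill_alignment_matrix docstring above.
if __name__ == "__main__":
    demo_matrix, demo_src, demo_dst = fill_alignment_matrix(
        fline2="_2 0 1 4_",
        fline3="NULL ({ }) twenty ({ 1 }) fourteen ({ 2 3 4 })",
        gline2="twenty fourteen",
        gline3="NULL ({ }) _2 ({ 1 }) 0 ({ }) 1 ({ }) 4_ ({ 2 })",
    )
    print(demo_src, demo_dst)  # ['twenty', 'fourteen'] ['_2', '0', '1', '4_']
    print(demo_matrix)  # [[3. 0. 0. 0.] [0. 2. 2. 3.]]
    print(check_monotonicity(demo_matrix))  # True: the relative order of aligned words is preserved
    # Unaligned (all-zero) columns are attached to the left neighbour by get_targets:
    print(get_targets(np.array([[3, 0, 0, 0], [0, 0, 0, 3]]), ["_2", "0", "1", "4_"], delimiter=""))
    # -> ['_201', '4_']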
| NeMo-main | nemo/collections/nlp/data/text_normalization_as_tagging/utils.py |
# Copyright 2019 The Google Research Authors.
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file contains code artifacts adapted from the original implementation:
https://github.com/google-research/lasertagger/blob/master/tagging.py
"""
import re
from enum import Enum
from typing import List, Tuple
import nemo.collections.nlp.data.text_normalization_as_tagging.utils as utils
"""Classes representing a tag and a text editing task.
Tag corresponds to an edit operation, while EditingTask is a container for the
input that LaserTagger takes. EditingTask also has a method for realizing the
output text given the predicted tags.
"""
class SwapType(Enum):
"""Type of swap"""
LONG_LEFT = 1 # token should be moved to the leftmost position of the whole semiotic span
LONG_RIGHT = 2 # token should be moved to the rightmost position of the whole semiotic span
SHORT_LEFT = 3 # token should be swapped with the left adjacent token
SHORT_RIGHT = 4 # token should be swapped with the right adjacent token
class Token:
"""Class for the output token"""
def __init__(self, inp: str, tag: str, out: str) -> None:
self.inp = inp
self.tag = tag
self.out = out
self.swap = None
if self.out.endswith(">>"):
self.swap = SwapType.LONG_RIGHT
elif self.out.endswith("<<"):
self.swap = SwapType.LONG_LEFT
elif self.out.endswith(">"):
self.swap = SwapType.SHORT_RIGHT
elif self.out.endswith("<"):
self.swap = SwapType.SHORT_LEFT
@property
def is_begin(self) -> bool:
return self.out.startswith("_")
@property
def is_end(self) -> bool:
return self.out.endswith("_")
class TagType(Enum):
"""Base tag which indicates the type of an edit operation."""
# Keep the tagged token.
KEEP = 1
# Delete the tagged token.
DELETE = 2
class Tag(object):
"""Tag that corresponds to a token edit operation.
Attributes:
tag_type: TagType of the tag.
added_phrase: A phrase that's inserted before the tagged token (can be
empty).
"""
def __init__(self, tag: str) -> None:
"""Constructs a Tag object by parsing tag to tag_type and added_phrase.
Args:
tag: String representation for the tag which should have the following
format "<TagType>|<added_phrase>" or simply "<TagType>" if no phrase
is added before the tagged token. Examples of valid tags include "KEEP",
"DELETE|and".
Raises:
ValueError: If <TagType> is invalid.
"""
if '|' in tag:
pos_pipe = tag.index('|')
tag_type, added_phrase = tag[:pos_pipe], tag[pos_pipe + 1 :]
else:
tag_type, added_phrase = tag, ''
try:
self.tag_type = TagType[tag_type]
except KeyError:
raise ValueError('TagType should be KEEP or DELETE, not {}'.format(tag_type))
self.added_phrase = added_phrase
def __str__(self) -> str:
if not self.added_phrase:
return self.tag_type.name
else:
return '{}|{}'.format(self.tag_type.name, self.added_phrase)
class EditingTask(object):
"""Text-editing task.
Attributes:
source_tokens: Tokens of the source texts concatenated into a single list.
first_tokens: The indices of the first tokens of each source text.
"""
def __init__(self, source: str) -> None:
"""Initializes an instance of EditingTask.
Args:
source: string.
"""
token_list = utils.get_token_list(source)
# Tokens of the source texts concatenated into a single list.
self.source_tokens = []
# The indices of the first tokens of each source text.
self.first_tokens = []
self.first_tokens.append(len(self.source_tokens))
self.source_tokens.extend(token_list)
def realize_output(self, tags: List[Tag], semiotic_labels: List[str]) -> Tuple[str, str, str, str]:
"""Realize output text based on the source tokens and predicted tags.
Args:
tags: Predicted tags (one for each token in `self.source_tokens`).
semiotic_labels: Predicted semiotic labels (one for each token in `self.source_tokens`).
Returns:
The realizer output text.
Raises:
ValueError: If the number of tags doesn't match the number of source
tokens.
"""
if len(tags) != len(self.source_tokens) or len(tags) != len(semiotic_labels):
raise ValueError(
'The number of tags ({}) should match the number of '
                'source tokens ({}) and semiotic labels ({})'.format(
len(tags), len(self.source_tokens), len(semiotic_labels)
)
)
sequence = []
for inp_token, tag in zip(self.source_tokens, tags):
if tag.added_phrase:
sequence.append(Token(inp_token, tag.added_phrase, tag.added_phrase))
elif tag.tag_type == TagType.KEEP:
sequence.append(Token(inp_token, "<SELF>", inp_token))
else:
sequence.append(Token(inp_token, "<DELETE>", ""))
if len(sequence) != len(semiotic_labels):
raise ValueError(
"Length mismatch: len(sequence)="
+ str(len(sequence))
+ "; len(semiotic_labels)="
+ str(len(semiotic_labels))
)
out_tokens_with_swap = [t.out for t in sequence]
out_tags_with_swap = [t.tag for t in sequence]
out_tags_without_swap = [t.tag for t in sequence]
previous_semiotic_label_end = -1
current_semiotic_label = ""
for i in range(len(sequence)):
if sequence[i].swap == SwapType.SHORT_LEFT or sequence[i - 1].swap == SwapType.SHORT_RIGHT:
out_tokens_with_swap[i - 1], out_tokens_with_swap[i] = (
out_tokens_with_swap[i],
out_tokens_with_swap[i - 1],
)
out_tags_with_swap[i - 1], out_tags_with_swap[i] = out_tags_with_swap[i], out_tags_with_swap[i - 1]
if semiotic_labels[i] != current_semiotic_label:
previous_semiotic_label_end = i - 1
current_semiotic_label = semiotic_labels[i]
if sequence[i].swap == SwapType.LONG_LEFT:
token = out_tokens_with_swap.pop(i)
tag = out_tags_with_swap.pop(i)
out_tokens_with_swap.insert(previous_semiotic_label_end + 1, token)
out_tags_with_swap.insert(previous_semiotic_label_end + 1, tag)
# detokenize
output_tokens_str = " ".join(out_tokens_with_swap).replace("<", "").replace(">", "")
output_tags_with_swap_str = " ".join(out_tags_with_swap)
frags = re.split(r"(_[^ ][^_]+[^ ]_)", output_tokens_str)
output_tokens = []
for frag in frags:
if frag.startswith("_") and frag.endswith("_"):
output_tokens.append(frag.replace(" ", "").replace("_", ""))
else:
output_tokens.append(frag.strip().replace("_", ""))
output_str = " ".join(output_tokens)
output_str = re.sub(r" +", " ", output_str)
return (
output_str,
" ".join(self.source_tokens),
" ".join(out_tags_without_swap),
output_tags_with_swap_str,
)
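# Minimal runnable sketch (not part of the original module): the tag strings below mimic what the
# tagger would predict for an ITN example, with each spoken token kept, deleted, or replaced by an
# underscore-delimited written fragment.
if __name__ == "__main__":
    demo_task = EditingTask("twenty fifteen year")
    demo_tags = [Tag("DELETE|_2015_"), Tag("DELETE"), Tag("KEEP")]
    demo_semiotic_labels = ["DATE", "DATE", "PLAIN"]
    demo_output, demo_input, demo_tags_str, demo_tags_with_swap_str = demo_task.realize_output(
        demo_tags, demo_semiotic_labels
    )
    print(demo_output.strip())  # -> "2015 year"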
| NeMo-main | nemo/collections/nlp/data/text_normalization_as_tagging/tagging.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import SequenceToSequenceDataset
| NeMo-main | nemo/collections/nlp/data/common/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import (
get_indexed_dataset_,
get_samples_mapping,
)
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import TextMemMapDataset
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['SequenceToSequenceDataset', 'TextMemmapSequenceToSequenceDataset']
class SequenceToSequenceDataset(Dataset):
"""Sequence to Sequence Dataset in memory."""
def __init__(
self,
src_file_name: str,
tgt_file_name: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
add_bos_to_input: bool = True,
add_eos_to_input: bool = True,
replace_bos_with_pad: bool = False,
):
super().__init__()
self.src_file_name = src_file_name
self.tgt_file_name = tgt_file_name
self.src_tokenizer = src_tokenizer
self.tgt_tokenizer = tgt_tokenizer
self.max_src_seq_length = max_src_seq_length
self.max_tgt_seq_length = max_tgt_seq_length
self.add_bos_to_input = add_bos_to_input
self.add_eos_to_input = add_eos_to_input
self.replace_bos_with_pad = replace_bos_with_pad
assert self.max_src_seq_length > 0
assert self.max_tgt_seq_length > 0
self._check_files_exist()
self._get_examples()
def _check_files_exist(self):
if not os.path.exists(self.src_file_name):
raise FileNotFoundError(f"Source file {self.src_file_name} not found")
if not os.path.exists(self.tgt_file_name):
raise FileNotFoundError(f"Source file {self.src_file_name} not found")
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
example = self.examples[idx]
text_enc = example['src']
text_dec = example['tgt'][:-1]
labels = example['tgt'][1:]
return {'text_enc': text_enc, 'text_dec': text_dec, 'labels': labels}
def _get_examples(self):
self.examples = []
with open(self.src_file_name, encoding='utf8') as f_src, open(self.tgt_file_name, encoding='utf8') as f_tgt:
for i, (src, tgt) in enumerate(zip(f_src, f_tgt)):
if i % 10000 == 0 and i != 0:
logging.info(f"Read {i} lines from {self.src_file_name} & {self.tgt_file_name}")
src = self.src_tokenizer.text_to_ids(src.strip())
if self.add_bos_to_input:
src = [self.src_tokenizer.pad_id if self.replace_bos_with_pad else self.src_tokenizer.bos_id] + src
if self.add_eos_to_input:
src = src + [self.src_tokenizer.eos_id]
tgt = (
[self.tgt_tokenizer.pad_id if self.replace_bos_with_pad else self.tgt_tokenizer.bos_id]
+ self.tgt_tokenizer.text_to_ids(tgt.strip())
+ [self.tgt_tokenizer.eos_id]
)
# Truncate to max sequence length.
if len(src) > self.max_src_seq_length:
src = src[-self.max_src_seq_length + 1 :]
if len(tgt) > self.max_tgt_seq_length:
tgt = tgt[-self.max_tgt_seq_length + 1 :]
self.examples.append({'src': src, 'tgt': tgt})
logging.info(f'Dataset Length : {len(self.examples)}')
def collate_fn(self, batch):
text_enc = [item['text_enc'] for item in batch]
text_dec = [item['text_dec'] for item in batch]
labels = [item['labels'] for item in batch]
if isinstance(text_enc[0], np.ndarray):
text_enc = [x.tolist() for x in text_enc]
if isinstance(text_dec[0], np.ndarray):
text_dec = [x.tolist() for x in text_dec]
if isinstance(labels[0], np.ndarray):
labels = [x.tolist() for x in labels]
max_dec_input_length = max([len(item) for item in text_dec]) if text_dec else 0
max_enc_input_length = max([len(item) for item in text_enc]) if text_enc else 0
max_label_length = max([len(item) for item in labels]) if labels else 0
loss_mask = [([1] * (len(item))) + ([0] * (max_label_length - len(item))) for item in labels]
text_enc = [item + [self.src_tokenizer.pad_id] * (max_enc_input_length - len(item)) for item in text_enc]
text_dec = [item + [self.tgt_tokenizer.pad_id] * (max_dec_input_length - len(item)) for item in text_dec]
labels = [item + [self.tgt_tokenizer.pad_id] * (max_label_length - len(item)) for item in labels]
text_enc = torch.LongTensor(text_enc)
text_dec = torch.LongTensor(text_dec)
labels = torch.LongTensor(labels)
loss_mask = torch.LongTensor(loss_mask)
enc_mask = (text_enc != self.src_tokenizer.pad_id).long()
dec_mask = (text_dec != self.tgt_tokenizer.pad_id).long()
return {
'text_enc': text_enc,
'text_dec': text_dec,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
class IndexedSequenceToSequenceDataset(SequenceToSequenceDataset):
"""Abstract class for TextMemmapSequenceToSequenceDataset and BinarizedMemmapSequenceToSequenceDataset.
    It is not meant to be used standalone; it serves only as a base class for the two subclasses.
"""
def __init__(
self,
src_file_name: str,
tgt_file_name: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
seed: int = 1234,
add_bos_to_enc: bool = True,
add_eos_to_enc: bool = True,
max_num_samples: int = None,
prepend_id: int = None,
):
"""
src_file_name: Path to a single source file on disk. This is either the path to a raw text file or the prefix to the processed src_file_name.bin/idx files.
        tgt_file_name: Path to a single target file on disk. This is either the path to a raw text file or the prefix to the processed tgt_file_name.bin/idx files.
src_tokenizer: Tokenizer for the source dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
tgt_tokenizer: Tokenizer for the target dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
max_src_seq_length: Maximum length of the source sequences. Lines above this length will be truncated.
max_tgt_seq_length: Maximum length of the target sequences. Lines above this length will be truncated.
seed: Random seed for data shuffling.
max_num_samples: Maximum number of samples to load. This can be > dataset length if you want to oversample data. If None, all samples will be loaded.
prepend_id: If not None, prepend this id to the encoder input.
"""
super().__init__(
src_file_name=src_file_name,
tgt_file_name=tgt_file_name,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
)
self.seed = seed
self.max_num_samples = max_num_samples
self.add_bos_to_enc = add_bos_to_enc
self.add_eos_to_enc = add_eos_to_enc
self.prepend_id = prepend_id
logging.info(f'Desired number of samples : {self.max_num_samples}')
logging.info(f'Source Dataset Length : {len(self.src_indexed_dataset)}')
logging.info(f'Target Dataset Length : {len(self.tgt_indexed_dataset)}')
def __len__(self):
if self.max_num_samples is None:
return len(self.src_indexed_dataset)
else:
return self.max_num_samples
def _get_sample(self, idx):
if isinstance(idx, np.int64):
idx = idx.item()
if self.samples_mapping is not None:
assert idx < len(self.samples_mapping)
idx, _, _ = self.samples_mapping[idx]
if isinstance(idx, np.uint32):
idx = idx.item()
assert idx < len(self.src_indexed_dataset)
src = self.src_indexed_dataset[idx]
tgt = self.tgt_indexed_dataset[idx]
return src, tgt
def __getitem__(self, idx):
src, tgt = self._get_sample(idx)
offset = 0
if self.add_bos_to_enc:
offset += 1
if self.add_eos_to_enc:
offset += 1
if self.prepend_id is not None:
offset += 1
if len(src) > self.max_src_seq_length - offset:
src = src[: self.max_src_seq_length - offset]
if self.add_bos_to_enc:
src = np.concatenate([[self.src_tokenizer.bos_id], src])
if self.prepend_id is not None:
src = np.concatenate([[self.prepend_id], src])
if self.add_eos_to_enc:
src = np.concatenate([src, [self.src_tokenizer.eos_id]])
if len(tgt) > self.max_tgt_seq_length - 2:
tgt = tgt[: self.max_tgt_seq_length - 2]
text_dec = np.concatenate([[self.tgt_tokenizer.bos_id], tgt])
labels = np.concatenate([tgt, [self.tgt_tokenizer.eos_id]])
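        # Illustrative result of the decoration above (token ids are made up; optional pieces depend on the flags):
        #   src = [s1, s2]      -> text_enc = [prepend_id][bos] s1 s2 [eos]
        #   tgt = [t1, t2, t3]  -> text_dec = [bos] t1 t2 t3,   labels = t1 t2 t3 [eos]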
return {'text_enc': src, 'text_dec': text_dec, 'labels': labels}
def _build_samples_mapping(self):
if self.max_num_samples is not None:
# This means max src and max tgt sequence length need to be the same
if self.max_src_seq_length != self.max_tgt_seq_length:
raise ValueError(
f"max_src_seq_length ({self.max_src_seq_length}) != max_tgt_seq_length ({self.max_tgt_seq_length}). This is needed for max_samples based training for now."
)
self.samples_mapping = get_samples_mapping(
indexed_dataset=self.src_indexed_dataset,
data_prefix=self.src_file_name,
num_epochs=None,
max_num_samples=self.max_num_samples,
max_seq_length=self.max_src_seq_length - 2,
short_seq_prob=0,
seed=self.seed,
name=self.src_file_name.split('/')[-1],
binary_head=False,
)
else:
self.samples_mapping = None
class TextMemmapSequenceToSequenceDataset(IndexedSequenceToSequenceDataset):
"""Memory-mapped text sequence to sequence dataset. Operates on raw text files and tokenizes the text on-the-fly."""
def __init__(
self,
src_file_name: str,
tgt_file_name: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
seed: int = 1234,
max_num_samples: int = None,
add_bos_to_enc: bool = True,
add_eos_to_enc: bool = True,
prepend_id: int = None,
):
"""
src_file_name: Path to a single source file on disk. The file should contain one sentence per line and be raw text.
tgt_file_name: Path to a single target file on disk. The file should contain one sentence per line aligned with src_file_name and be raw text.
src_tokenizer: Tokenizer for the source dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
tgt_tokenizer: Tokenizer for the target dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
max_src_seq_length: Maximum length of the source sequences. Lines above this length will be truncated.
max_tgt_seq_length: Maximum length of the target sequences. Lines above this length will be truncated.
seed: Random seed for data shuffling.
max_num_samples: Maximum number of samples to load. This can be > dataset length if you want to oversample data. If None, all samples will be loaded.
add_bos_to_enc: Add BOS token to the encoder input.
add_eos_to_enc: Add EOS token to the encoder input.
prepend_id: If not None, prepend this id to the encoder input.
"""
self.seed = seed
self.max_num_samples = max_num_samples
super().__init__(
src_file_name=src_file_name,
tgt_file_name=tgt_file_name,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
seed=seed,
max_num_samples=max_num_samples,
add_bos_to_enc=add_bos_to_enc,
add_eos_to_enc=add_eos_to_enc,
prepend_id=prepend_id,
)
def _get_examples(self):
self.src_indexed_dataset = TextMemMapDataset(
dataset_paths=[self.src_file_name], tokenizer=self.src_tokenizer, header_lines=0
)
self.tgt_indexed_dataset = TextMemMapDataset(
dataset_paths=[self.tgt_file_name], tokenizer=self.tgt_tokenizer, header_lines=0
)
assert len(self.src_indexed_dataset) == len(
self.tgt_indexed_dataset
), "src and tgt has different number of lines"
self._build_samples_mapping()
class BinarizedMemmapSequenceToSequenceDataset(IndexedSequenceToSequenceDataset):
"""Memory-mapped text sequence to sequence dataset. Operates pre-tokenized binarized data files."""
def __init__(
self,
src_dataset_prefix: str,
tgt_dataset_prefix: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
seed: int = 1234,
max_num_samples: int = None,
add_bos_to_enc: bool = True,
add_eos_to_enc: bool = True,
prepend_id: int = None,
):
"""
        src_dataset_prefix: Path to the *prefix* of a single source bin/idx file on disk. This requires the existence of src_dataset_prefix.bin and src_dataset_prefix.idx.
        tgt_dataset_prefix: Path to the *prefix* of a single target bin/idx file on disk, aligned with the source. This requires the existence of tgt_dataset_prefix.bin and tgt_dataset_prefix.idx.
src_tokenizer: Tokenizer for the source dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
tgt_tokenizer: Tokenizer for the target dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
max_src_seq_length: Maximum length of the source sequences. Lines above this length will be truncated.
max_tgt_seq_length: Maximum length of the target sequences. Lines above this length will be truncated.
seed: Random seed for data shuffling.
max_num_samples: Maximum number of samples to load. This can be > dataset length if you want to oversample data. If None, all samples will be loaded.
add_bos_to_enc: Add BOS token to the encoder input.
add_eos_to_enc: Add EOS token to the encoder input.
prepend_id: If not None, prepend this id to the encoder input.
"""
self.src_dataset_prefix = src_dataset_prefix
self.tgt_dataset_prefix = tgt_dataset_prefix
self.seed = seed
self.max_num_samples = max_num_samples
super().__init__(
src_file_name=src_dataset_prefix,
tgt_file_name=tgt_dataset_prefix,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
seed=seed,
max_num_samples=max_num_samples,
add_bos_to_enc=add_bos_to_enc,
add_eos_to_enc=add_eos_to_enc,
prepend_id=prepend_id,
)
def _check_files_exist(self):
if not os.path.exists(self.src_dataset_prefix + ".bin") or not os.path.exists(
self.src_dataset_prefix + ".idx"
):
raise FileNotFoundError(f"{self.src_dataset_prefix}.bin or {self.src_dataset_prefix}.idx not found")
if not os.path.exists(self.tgt_dataset_prefix + ".bin") or not os.path.exists(
self.tgt_dataset_prefix + ".idx"
):
raise FileNotFoundError(f"{self.tgt_dataset_prefix}.bin or {self.tgt_dataset_prefix}.idx not found")
def _get_examples(self):
self.src_indexed_dataset = self._get_indexed_dataset(
self.src_dataset_prefix, data_impl='mmap', skip_warmup=True
)
self.tgt_indexed_dataset = self._get_indexed_dataset(
self.tgt_dataset_prefix, data_impl='mmap', skip_warmup=True
)
assert len(self.src_indexed_dataset) == len(self.tgt_indexed_dataset)
self._build_samples_mapping()
def _get_indexed_dataset(self, data_prefix, data_impl, skip_warmup):
indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup)
return indexed_dataset
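# A minimal usage sketch (illustrative only; the file paths and tokenizer objects below are hypothetical
# placeholders, not part of this module):
#
#   train_ds = TextMemmapSequenceToSequenceDataset(
#       src_file_name="train.src.txt",        # one raw source sentence per line
#       tgt_file_name="train.tgt.txt",        # aligned target sentences
#       src_tokenizer=src_tokenizer,          # any TokenizerSpec implementation
#       tgt_tokenizer=tgt_tokenizer,
#       max_src_seq_length=512,
#       max_tgt_seq_length=512,
#   )
#   loader = torch.utils.data.DataLoader(train_ds, batch_size=32, collate_fn=train_ds.collate_fn)
#
# BinarizedMemmapSequenceToSequenceDataset is used the same way, except that the src/tgt arguments are the
# *prefixes* of pre-tokenized .bin/.idx file pairs instead of raw text files.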
| NeMo-main | nemo/collections/nlp/data/common/sequence_to_sequence_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.text2sparql.text2sparql_dataset import Text2SparqlDataset
| NeMo-main | nemo/collections/nlp/data/text2sparql/__init__.py |
# Copyright (c) 2020, MeetKai Inc. All rights reserved.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from typing import Dict, List, Optional, Tuple
import numpy as np
from nemo.collections.common.tokenizers import AutoTokenizer
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, MaskType, NeuralType
__all__ = ["Text2SparqlDataset"]
class Text2SparqlDataset(Dataset):
"""A dataset class that converts raw data to a dataset that can be used by NeuralMachineTranslationModel.
Args:
        filepath: path to a .tsv file containing sentence-label pairs.
            The first line is a header (sentence [tab] label);
            each subsequent line should be [sentence][tab][label].
encoder_tokenizer: encoder tokenizer object such as AutoTokenizer
decoder_tokenizer: decoder tokenizer object. If using BART or end to end model, set this to encoder_tokenizer
max_seq_length: max sequence length including bos and eos tokens
        num_samples: number of samples to use from the dataset. If -1, the entire dataset is used. Useful for testing.
convert_labels: if true, converts labels for masked lm and updates pad_id to -100
for hf masked loss
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
"input_ids": NeuralType(("B", "T"), ChannelType()),
"attention_mask": NeuralType(("B", "T"), MaskType()),
"decoder_input_ids": NeuralType(("B", "T"), ChannelType()),
"lm_labels": NeuralType(("B", "T"), ChannelType()),
}
def __init__(
self,
filepath: str,
encoder_tokenizer: AutoTokenizer,
decoder_tokenizer: AutoTokenizer,
encoder_add_special_tokens: bool,
decoder_add_special_tokens: bool,
max_seq_length: int,
num_samples: int = -1,
convert_labels: bool = False,
):
self.filepath = filepath
self.encoder_tokenizer = encoder_tokenizer
self.decoder_tokenizer = decoder_tokenizer
self.encoder_add_special_tokens = encoder_add_special_tokens
self.decoder_add_special_tokens = decoder_add_special_tokens
self.max_seq_length = max_seq_length
self.num_samples = num_samples
self.convert_labels = convert_labels
if num_samples == 0:
raise ValueError("num_samples has to be positive.", num_samples)
if self.max_seq_length and self.max_seq_length <= 2:
self.max_seq_length = None
if not os.path.exists(filepath):
raise FileNotFoundError(
f"{filepath} not found. The filepath must be set in train_ds.filepath and validation_ds.filepath."
)
with open(filepath) as f:
lines = f.readlines()[1:]
if num_samples > 0:
lines = lines[:num_samples]
input_ids, input_masks, label_ids = [], [], []
for line in lines:
try:
sentence, label = line.split("\t")
except ValueError:
raise ValueError("Each line of input file should contain the format [sentence][tab][label].")
ids, mask = self.text_to_ids(
sentence, tokenizer=encoder_tokenizer, add_special_tokens=encoder_add_special_tokens
)
input_ids.append(ids)
input_masks.append(mask)
label_ids.append(
self.text_to_ids(label, tokenizer=decoder_tokenizer, add_special_tokens=decoder_add_special_tokens)[0]
)
self.input_ids = np.asarray(input_ids)
self.input_masks = np.asarray(input_masks)
self.label_ids = np.asarray(label_ids)
def text_to_ids(
self, text: str, tokenizer: AutoTokenizer, add_special_tokens=False
) -> Tuple[List[int], List[int]]:
"""Converts text to ids. Truncates and adds padding."""
text_tokens = tokenizer.text_to_ids(text.strip())
num_special_tokens = 2 if add_special_tokens else 0
if self.max_seq_length and self.max_seq_length > num_special_tokens:
text_tokens = text_tokens[: self.max_seq_length - num_special_tokens]
if add_special_tokens:
text_tokens = [tokenizer.bos_id] + text_tokens + [tokenizer.eos_id]
mask = [1] * len(text_tokens)
if self.max_seq_length and self.max_seq_length > num_special_tokens:
pad_length = self.max_seq_length - len(text_tokens)
text_tokens += [tokenizer.pad_id] * pad_length
mask += [0] * pad_length
return text_tokens, mask
def __len__(self):
return len(self.input_ids)
def convert_label_ids(self, label_ids: List[int]) -> Tuple[List[int], List[int]]:
decoder_input_ids = label_ids[:-1]
lm_labels = label_ids[1:].copy()
lm_labels[label_ids[1:] == self.decoder_tokenizer.pad_id] = -100 # for huggingface masked lm loss
return decoder_input_ids, lm_labels
def __getitem__(self, idx):
if self.convert_labels:
decoder_input_ids, lm_labels = self.convert_label_ids(self.label_ids[idx])
else:
decoder_input_ids = self.label_ids[idx]
lm_labels = self.label_ids[idx]
return self.input_ids[idx], self.input_masks[idx], decoder_input_ids, lm_labels
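# A minimal usage sketch (illustrative only; the file path and tokenizer below are hypothetical placeholders):
#
#   dataset = Text2SparqlDataset(
#       filepath="train.tsv",                  # header line + [sentence][tab][label] rows
#       encoder_tokenizer=tokenizer,           # e.g. an AutoTokenizer instance
#       decoder_tokenizer=tokenizer,           # reuse the encoder tokenizer for BART-style / end-to-end models
#       encoder_add_special_tokens=True,
#       decoder_add_special_tokens=True,
#       max_seq_length=150,
#       convert_labels=True,                   # shift labels and replace pad ids with -100 for the HF masked loss
#   )
#   input_ids, attention_mask, decoder_input_ids, lm_labels = dataset[0]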
| NeMo-main | nemo/collections/nlp/data/text2sparql/text2sparql_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
from tqdm import tqdm
from transformers import PreTrainedTokenizerBase
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.nlp.data.text_normalization import constants
from nemo.collections.nlp.data.text_normalization.utils import read_data_file
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['TextNormalizationTaggerDataset']
class TextNormalizationTaggerDataset(Dataset):
"""
Creates dataset to use to train a DuplexTaggerModel.
Converts from raw data to an instance that can be used by Dataloader.
For dataset to use to do end-to-end inference, see TextNormalizationTestDataset.
Args:
input_file: path to the raw data file (e.g., train.tsv). For more info about the data format, refer to the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
tokenizer: tokenizer of the model that will be trained on the dataset
tokenizer_name: name of the tokenizer,
mode: should be one of the values ['tn', 'itn', 'joint']. `tn` mode is for TN only. `itn` mode is for ITN only. `joint` is for training a system that can do both TN and ITN at the same time.
tagger_data_augmentation (bool): a flag indicates whether to augment the dataset with additional data instances
lang: language of the dataset
use_cache: Enables caching to use pickle format to store and read data from,
max_insts: Maximum number of instances (-1 means no limit)
"""
def __init__(
self,
input_file: str,
tokenizer: PreTrainedTokenizerBase,
tokenizer_name: str,
mode: str,
tagger_data_augmentation: bool,
lang: str,
max_seq_length: int,
use_cache: bool = False,
max_insts: int = -1,
):
assert mode in constants.MODES
assert lang in constants.SUPPORTED_LANGS
self.mode = mode
self.lang = lang
self.use_cache = use_cache
self.max_insts = max_insts
# Get cache path
data_dir, filename = os.path.split(input_file)
tokenizer_name_normalized = tokenizer_name.replace('/', '_')
cached_data_file = os.path.join(
data_dir, f'cached_tagger_{filename}_{tokenizer_name_normalized}_{lang}_{max_insts}_{max_seq_length}.pkl',
)
if use_cache and os.path.exists(cached_data_file):
logging.warning(
f"Processing of {input_file} is skipped as caching is enabled and a cache file "
f"{cached_data_file} already exists."
)
with open(cached_data_file, 'rb') as f:
data = pickle.load(f)
self.insts, self.tag2id, self.encodings, self.labels = data
else:
# Read the input raw data file, returns list of sentences parsed as list of class, w_words, s_words
raw_insts = read_data_file(input_file, lang=lang)
if max_insts >= 0:
raw_insts = raw_insts[:max_insts]
# Convert raw instances to TaggerDataInstance
insts = []
for (_, w_words, s_words) in tqdm(raw_insts):
for inst_dir in constants.INST_DIRECTIONS:
if inst_dir == constants.INST_BACKWARD and mode == constants.TN_MODE:
continue
if inst_dir == constants.INST_FORWARD and mode == constants.ITN_MODE:
continue
# filter out examples that are longer than the maximum sequence length value
if (
len(tokenizer(w_words, is_split_into_words=True, padding=False, truncation=True)['input_ids'])
>= max_seq_length
or len(
tokenizer(s_words, is_split_into_words=True, padding=False, truncation=True)['input_ids']
)
>= max_seq_length
):
continue
# Create a new TaggerDataInstance
inst = TaggerDataInstance(w_words, s_words, inst_dir, lang=self.lang)
insts.append(inst)
# Data Augmentation (if enabled)
if tagger_data_augmentation:
filtered_w_words, filtered_s_words = [], []
for ix, (w, s) in enumerate(zip(w_words, s_words)):
if not s in constants.SPECIAL_WORDS:
filtered_w_words.append(w)
filtered_s_words.append(s)
if len(filtered_s_words) > 1:
inst = TaggerDataInstance(filtered_w_words, filtered_s_words, inst_dir, lang)
insts.append(inst)
self.insts = insts
texts = [inst.input_words for inst in insts]
tags = [inst.labels for inst in insts]
# Tags Mapping
self.tag2id = {tag: id for id, tag in enumerate(constants.ALL_TAG_LABELS)}
# Finalize
self.encodings = tokenizer(texts, is_split_into_words=True, padding=False, truncation=True)
self.labels = self.encode_tags(tags, self.encodings)
# Write to cache (if use_cache)
if use_cache:
with open(cached_data_file, 'wb') as out_file:
data = self.insts, self.tag2id, self.encodings, self.labels
pickle.dump(data, out_file, protocol=pickle.HIGHEST_PROTOCOL)
def __getitem__(self, idx: int):
"""
Args:
idx: item index
Returns:
item: dictionary with input_ids and attention_mask as dictionary keys and the tensors at given idx as values
"""
item = {key: val[idx] for key, val in self.encodings.items()}
item['labels'] = self.labels[idx]
return item
def __len__(self):
return len(self.labels)
def encode_tags(self, tags, encodings):
encoded_labels = []
for i, label in enumerate(tags):
word_ids = encodings.word_ids(batch_index=i)
previous_word_idx = None
label_ids = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label
# to -100 (LABEL_PAD_TOKEN_ID) so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(constants.LABEL_PAD_TOKEN_ID)
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_id = self.tag2id[constants.B_PREFIX + label[word_idx]]
label_ids.append(label_id)
# We set the label for the other tokens in a word
else:
if 'SAME' in label[word_idx]:
label_id = self.tag2id[constants.B_PREFIX + label[word_idx]]
else:
label_id = self.tag2id[constants.I_PREFIX + label[word_idx]]
label_ids.append(label_id)
previous_word_idx = word_idx
encoded_labels.append(label_ids)
return encoded_labels
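    # Illustrative subword/label alignment produced by encode_tags (the wordpiece split below is made up;
    # the string tags shown are looked up in self.tag2id to obtain integer ids):
    #   words      :        "1"      "on"     "2012"
    #   word tags  :        TASK     SAME     TRANSFORM
    #   wordpieces : [CLS]  "1"      "on"     "20"         "##12"        [SEP]
    #   label ids  : -100   B-TASK   B-SAME   B-TRANSFORM  I-TRANSFORM   -100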
class TaggerDataInstance:
"""
This class represents a data instance in a TextNormalizationTaggerDataset.
Args:
w_words: List of words in a sentence in the written form
s_words: List of words in a sentence in the spoken form
direction: Indicates the direction of the instance (i.e., INST_BACKWARD for ITN or INST_FORWARD for TN).
lang: Language
"""
def __init__(self, w_words, s_words, direction, lang):
# moses tokenization before LM tokenization
# e.g., don't -> don 't, 12/3 -> 12 / 3
processor = MosesProcessor(lang_id=lang)
# Build input_words and labels
input_words, labels = [], []
# Task Prefix
if direction == constants.INST_BACKWARD:
input_words.append(constants.ITN_PREFIX)
if direction == constants.INST_FORWARD:
input_words.append(constants.TN_PREFIX)
labels.append(constants.TASK_TAG)
# Main Content
for w_word, s_word in zip(w_words, s_words):
w_word = processor.tokenize(w_word)
if not s_word in constants.SPECIAL_WORDS:
s_word = processor.tokenize(s_word)
# Update input_words and labels
if s_word == constants.SIL_WORD and direction == constants.INST_BACKWARD:
continue
if s_word in constants.SPECIAL_WORDS:
input_words.append(w_word)
labels.append(constants.SAME_TAG)
else:
if direction == constants.INST_BACKWARD:
input_words.append(s_word)
if direction == constants.INST_FORWARD:
input_words.append(w_word)
labels.append(constants.TRANSFORM_TAG)
self.input_words = input_words
self.labels = labels
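# Illustrative example of what TaggerDataInstance builds for the TN direction (words are made up; Moses
# tokenization may split some tokens further):
#   w_words = ["on", "May", "5"], s_words = ["<self>", "<self>", "five"], direction = INST_FORWARD
#   input_words -> ["1", "on", "May", "5"]                  # "1" is the TN task prefix
#   labels      -> ["TASK", "SAME", "SAME", "TRANSFORM"]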
| NeMo-main | nemo/collections/nlp/data/text_normalization/tagger_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DECODE_CTX_SIZE = 3 # the size of the input context to be provided to the DuplexDecoderModel
LABEL_PAD_TOKEN_ID = -100
# Split names
TRAIN, DEV, TEST = 'train', 'dev', 'test'
SPLIT_NAMES = [TRAIN, DEV, TEST]
# Languages
ENGLISH = 'en'
RUSSIAN = 'ru'
GERMAN = 'de'
MULTILINGUAL = 'multilingual'
SUPPORTED_LANGS = [ENGLISH, RUSSIAN, GERMAN, MULTILINGUAL]
# Task Prefixes
ITN_TASK = 0
TN_TASK = 1
ITN_PREFIX = str(ITN_TASK)
TN_PREFIX = str(TN_TASK)
# Tagger Labels Prefixes
B_PREFIX = 'B-' # Denote beginning
I_PREFIX = 'I-' # Denote middle
TAGGER_LABELS_PREFIXES = [B_PREFIX, I_PREFIX]
# Modes
TN_MODE = 'tn'
ITN_MODE = 'itn'
JOINT_MODE = 'joint'
MODES = [TN_MODE, ITN_MODE, JOINT_MODE]
TASK_ID_TO_MODE = {ITN_TASK: ITN_MODE, TN_TASK: TN_MODE}
MODE_TO_TASK_ID = {v: k for k, v in TASK_ID_TO_MODE.items()}
# Instance Directions
INST_BACKWARD = 'BACKWARD'
INST_FORWARD = 'FORWARD'
INST_DIRECTIONS = [INST_BACKWARD, INST_FORWARD]
DIRECTIONS_TO_ID = {INST_BACKWARD: ITN_TASK, INST_FORWARD: TN_TASK}
DIRECTIONS_ID_TO_NAME = {ITN_TASK: INST_BACKWARD, TN_TASK: INST_FORWARD}
DIRECTIONS_TO_MODE = {ITN_MODE: INST_BACKWARD, TN_MODE: INST_FORWARD}
# TAGS
SAME_TAG = 'SAME' # Tag indicates that a token can be kept the same without any further transformation
TASK_TAG = 'TASK' # Tag indicates that a token belongs to a task prefix (the prefix indicates whether the current task is TN or ITN)
PUNCT_TAG = 'PUNCT' # Tag indicates that a token is a punctuation
TRANSFORM_TAG = 'TRANSFORM' # Tag indicates that a token needs to be transformed by the decoder
ALL_TAGS = [TASK_TAG, SAME_TAG, TRANSFORM_TAG]
# ALL_TAG_LABELS
ALL_TAG_LABELS = []
for prefix in TAGGER_LABELS_PREFIXES:
for tag in ALL_TAGS:
ALL_TAG_LABELS.append(prefix + tag)
ALL_TAG_LABELS.sort()
LABEL_IDS = {l: idx for idx, l in enumerate(ALL_TAG_LABELS)}
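# For reference, the two structures above resolve to:
#   ALL_TAG_LABELS = ['B-SAME', 'B-TASK', 'B-TRANSFORM', 'I-SAME', 'I-TASK', 'I-TRANSFORM']
#   LABEL_IDS      = {'B-SAME': 0, 'B-TASK': 1, 'B-TRANSFORM': 2, 'I-SAME': 3, 'I-TASK': 4, 'I-TRANSFORM': 5}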
# Special Words
SIL_WORD = 'sil'
SELF_WORD = '<self>'
SPECIAL_WORDS = [SIL_WORD, SELF_WORD]
# IDs for special tokens for encoding inputs of the decoder models
EXTRA_ID_0 = '<extra_id_0>'
EXTRA_ID_1 = '<extra_id_1>'
EN_GREEK_TO_SPOKEN = {
'Τ': 'tau',
'Ο': 'omicron',
'Δ': 'delta',
'Η': 'eta',
'Κ': 'kappa',
'Ι': 'iota',
'Θ': 'theta',
'Α': 'alpha',
'Σ': 'sigma',
'Υ': 'upsilon',
'Μ': 'mu',
'Χ': 'chi',
'Π': 'pi',
'Ν': 'nu',
'Λ': 'lambda',
'Γ': 'gamma',
'Β': 'beta',
'Ρ': 'rho',
'τ': 'tau',
'υ': 'upsilon',
'φ': 'phi',
'α': 'alpha',
'λ': 'lambda',
'ι': 'iota',
'ς': 'sigma',
'ο': 'omicron',
'σ': 'sigma',
'η': 'eta',
'π': 'pi',
'ν': 'nu',
'γ': 'gamma',
'κ': 'kappa',
'ε': 'epsilon',
'β': 'beta',
'ρ': 'rho',
'ω': 'omega',
'χ': 'chi',
}
| NeMo-main | nemo/collections/nlp/data/text_normalization/constants.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.text_normalization.decoder_dataset import TextNormalizationDecoderDataset
from nemo.collections.nlp.data.text_normalization.tagger_dataset import TextNormalizationTaggerDataset
from nemo.collections.nlp.data.text_normalization.test_dataset import TextNormalizationTestDataset
| NeMo-main | nemo/collections/nlp/data/text_normalization/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import pickle
import random
from collections import OrderedDict
from typing import List, Optional, Tuple
import braceexpand
import numpy as np
import torch
import webdataset as wd
from torch.utils.data import IterableDataset
from tqdm import tqdm
from transformers import PreTrainedTokenizerBase
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.nlp.data.text_normalization import constants
from nemo.collections.nlp.data.text_normalization.utils import read_data_file
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['TextNormalizationDecoderDataset', 'TarredTextNormalizationDecoderDataset']
class TextNormalizationDecoderDataset(Dataset):
"""
Creates dataset to use to train a DuplexDecoderModel.
Converts from raw data to an instance that can be used by Dataloader.
For dataset to use to do end-to-end inference, see TextNormalizationTestDataset.
Args:
input_file: path to the raw data file (e.g., train.tsv).
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization/nn_text_normalization.rst>`.
raw_instances: processed raw instances in the Google TN dataset format (used for tarred dataset)
tokenizer: tokenizer of the model that will be trained on the dataset
tokenizer_name: name of the tokenizer,
mode: should be one of the values ['tn', 'itn', 'joint']. `tn` mode is for TN only.
`itn` mode is for ITN only. `joint` is for training a system that can do both TN and ITN at the same time.
max_len: maximum length of sequence in tokens. The code will discard any training instance whose input or
output is longer than the specified max_len.
decoder_data_augmentation (bool): a flag indicates whether to augment the dataset with additional data
instances that may help the decoder become more robust against the tagger's errors.
Refer to the doc for more info.
lang: language of the dataset
use_cache: Enables caching to use pickle format to store and read data from
max_insts: Maximum number of instances (-1 means no limit)
do_tokenize: Tokenize each instance (set to False for Tarred dataset)
initial_shuffle: Set to True to shuffle the data
"""
def __init__(
self,
input_file: str,
tokenizer: PreTrainedTokenizerBase,
tokenizer_name: str,
raw_instances: Optional[List[List[str]]] = None,
mode: str = "joint",
max_len: int = 512,
decoder_data_augmentation: bool = False,
lang: str = "en",
use_cache: bool = False,
max_insts: int = -1,
do_tokenize: bool = True,
initial_shuffle: bool = False,
):
assert mode in constants.MODES
assert lang in constants.SUPPORTED_LANGS
self.mode = mode
self.lang = lang
self.use_cache = use_cache
self.max_insts = max_insts
self.tokenizer = tokenizer
self.max_seq_len = max_len
self.mode = mode
# Get cache path
data_dir, filename = os.path.split(input_file)
tokenizer_name_normalized = tokenizer_name.replace('/', '_')
cached_data_file = os.path.join(
data_dir, f'cached_decoder_{filename}_{tokenizer_name_normalized}_{lang}_{max_insts}_{mode}_{max_len}.pkl',
)
if use_cache and os.path.exists(cached_data_file):
logging.warning(
f"Processing of {input_file} is skipped as caching is enabled and a cache file "
f"{cached_data_file} already exists."
)
with open(cached_data_file, 'rb') as f:
data = pickle.load(f)
self.insts, self.inputs, self.examples, self.tn_count, self.itn_count, self.label_ids_semiotic = data
else:
if raw_instances is None:
raw_instances = read_data_file(fp=input_file, lang=self.lang, max_insts=max_insts)
else:
raw_instances = raw_instances[:max_insts]
if initial_shuffle:
random.shuffle(raw_instances)
logging.debug(f"Converting raw instances to DecoderDataInstance for {input_file}...")
self.insts, all_semiotic_classes = self.__process_raw_entries(
raw_instances, decoder_data_augmentation=decoder_data_augmentation
)
logging.debug(
f"Extracted {len(self.insts)} DecoderDateInstances out of {len(raw_instances)} raw instances."
)
self.label_ids_semiotic = OrderedDict({l: idx for idx, l in enumerate(all_semiotic_classes)})
logging.debug(f'Label_ids: {self.label_ids_semiotic}')
# save labels list from the training file to the input_file to the file
dir_name, file_name = os.path.split(input_file)
if 'train' in file_name:
with open(os.path.join(dir_name, f"label_ids_{file_name}"), 'w') as f:
f.write('\n'.join(self.label_ids_semiotic.keys()))
if do_tokenize:
logging.debug(f'Processing samples, total number: {len(self.insts)}')
self.__tokenize_samples(use_cache=use_cache, cached_data_file=cached_data_file)
def __process_raw_entries(self, raw_instances: List[Tuple[str]], decoder_data_augmentation):
"""
Converts raw instances to DecoderDataInstance
raw_instances: raw entries: (semiotic class, written words, spoken words)
decoder_data_augmentation (bool): a flag indicates whether to augment the dataset with additional data
instances that may help the decoder become more robust against the tagger's errors.
Refer to the doc for more info.
Returns:
converted instances and all semiotic classes present in the data
"""
all_semiotic_classes = set([])
insts = []
for (classes, w_words, s_words) in tqdm(raw_instances):
for ix, (_class, w_word, s_word) in enumerate(zip(classes, w_words, s_words)):
all_semiotic_classes.update([_class])
if s_word in constants.SPECIAL_WORDS:
continue
for inst_dir in constants.INST_DIRECTIONS:
if inst_dir == constants.INST_BACKWARD and self.mode == constants.TN_MODE:
continue
if inst_dir == constants.INST_FORWARD and self.mode == constants.ITN_MODE:
continue
# Create a DecoderDataInstance
inst = DecoderDataInstance(
w_words, s_words, inst_dir, start_idx=ix, end_idx=ix + 1, lang=self.lang, semiotic_class=_class
)
insts.append(inst)
if decoder_data_augmentation:
noise_left = random.randint(1, 2)
noise_right = random.randint(1, 2)
inst = DecoderDataInstance(
w_words,
s_words,
inst_dir,
start_idx=ix - noise_left,
end_idx=ix + 1 + noise_right,
semiotic_class=_class,
lang=self.lang,
)
insts.append(inst)
all_semiotic_classes = list(all_semiotic_classes)
all_semiotic_classes.sort()
return insts, all_semiotic_classes
def __tokenize_samples(self, use_cache: bool = False, cached_data_file: str = None):
"""
Tokenizes the entries, samples longer than max_seq_len are discarded
Args:
use_cache: Enables caching to use pickle format to store and read data from
cached_data_file: path the cache file
"""
inputs = [inst.input_str.strip() for inst in self.insts]
inputs_center = [inst.input_center_str.strip() for inst in self.insts]
targets = [inst.output_str.strip() for inst in self.insts]
classes = [self.label_ids_semiotic[inst.semiotic_class] for inst in self.insts]
directions = [constants.DIRECTIONS_TO_ID[inst.direction] for inst in self.insts]
# Tokenization
self.inputs, self.examples, _inputs_center = [], [], []
self.tn_count, self.itn_count, long_examples_filtered = 0, 0, 0
input_max_len, target_max_len = 0, 0
for idx in tqdm(range(len(inputs))):
# Input
_input = self.tokenizer([inputs[idx]])
input_len = len(_input['input_ids'][0])
if input_len > self.max_seq_len:
long_examples_filtered += 1
continue
# Target
_target = self.tokenizer([targets[idx]])
target_len = len(_target['input_ids'][0])
if target_len > self.max_seq_len:
long_examples_filtered += 1
continue
# Update
self.inputs.append(inputs[idx])
_input['labels'] = _target['input_ids']
_input['semiotic_class_id'] = [[classes[idx]]]
_input['direction'] = [[directions[idx]]]
_inputs_center.append(inputs_center[idx])
self.examples.append(_input)
if inputs[idx].startswith(constants.TN_PREFIX):
self.tn_count += 1
if inputs[idx].startswith(constants.ITN_PREFIX):
self.itn_count += 1
input_max_len = max(input_max_len, input_len)
target_max_len = max(target_max_len, target_len)
logging.info(f'long_examples_filtered: {long_examples_filtered}')
logging.info(f'input_max_len: {input_max_len} | target_max_len: {target_max_len}')
# we need to pad input_center, so we first collect all values, and then batch_tokenize with padding
_input_centers = self.tokenizer(_inputs_center, padding=True)
for idx in range(len(self.examples)):
self.examples[idx]['input_center'] = [_input_centers['input_ids'][idx]]
# Write to cache (if use_cache)
if use_cache:
with open(cached_data_file, 'wb') as out_file:
data = (
self.insts,
self.inputs,
self.examples,
self.tn_count,
self.itn_count,
self.label_ids_semiotic,
)
pickle.dump(data, out_file, protocol=pickle.HIGHEST_PROTOCOL)
def __getitem__(self, idx):
"""
Returns a dataset item
Args:
idx: ID of the item
Returns:
A dictionary that represents the item, the dictionary contains the following fields:
input_ids: input ids
attention_mask: attention mask
labels: ground truth labels
semiotic_class_id: id of the semiotic class of the example
                direction: id of the TN/ITN task (see constants for the values)
inputs_center: ids of input center (only semiotic span, no special tokens and context)
"""
example = self.examples[idx]
item = {key: val[0] for key, val in example.items()}
return item
def __len__(self):
return len(self.examples)
def batchify(self, batch_size: int):
"""
Creates a batch
Args:
batch_size: the size of the batch
"""
logging.info("Padding the data and creating batches...")
long_examples_filtered = 0
inputs_all = [inst.input_str.strip() for inst in self.insts]
targets_all = [inst.output_str.strip() for inst in self.insts]
batch, batches = [], []
for idx in tqdm(range(len(self.insts))):
# exclude examples that are longer than maximum sequence length
# Input
_input = self.tokenizer([inputs_all[idx]])
input_len = len(_input['input_ids'][0])
if input_len > self.max_seq_len:
long_examples_filtered += 1
continue
# Target
_target = self.tokenizer([targets_all[idx]])
target_len = len(_target['input_ids'][0])
if target_len > self.max_seq_len:
long_examples_filtered += 1
continue
batch.append(self.insts[idx])
if len(batch) == batch_size:
inputs = [inst.input_str.strip() for inst in batch]
inputs_center = [inst.input_center_str.strip() for inst in batch]
targets = [inst.output_str.strip() for inst in batch]
# Here we assume that every input_file contains examples from every semiotic class
classes = [[self.label_ids_semiotic[inst.semiotic_class]] for inst in batch]
directions = [[constants.DIRECTIONS_TO_ID[inst.direction]] for inst in batch]
batch = self.tokenizer(inputs, padding=True)
batch['input_center'] = self.tokenizer(inputs_center, padding=True)['input_ids']
batch['direction'] = directions
batch['semiotic_class_id'] = classes
labels = self.tokenizer(targets, padding=True)['input_ids']
batch['decoder_input_ids'] = np.insert(
[x[:-1] for x in labels], 0, self.tokenizer.pad_token_id, axis=-1
)
# use LABEL_PAD_TOKEN_ID to disregard padded values for the loss calculations
batch['labels'] = [[x if x != 0 else constants.LABEL_PAD_TOKEN_ID for x in l] for l in labels]
batches.append(batch)
batch = []
logging.info(f'long_examples_filtered: {long_examples_filtered}')
self.batches = batches
class DecoderDataInstance:
"""
This class represents a data instance in a TextNormalizationDecoderDataset.
Intuitively, each data instance can be thought as having the following form:
Input: <Left Context of Input> <Input Span> <Right Context of Input>
Output: <Output Span>
where the context size is determined by the constant DECODE_CTX_SIZE.
Args:
w_words: List of words in the written form
s_words: List of words in the spoken form
inst_dir: Indicates the direction of the instance (i.e., INST_BACKWARD for ITN or INST_FORWARD for TN).
start_idx: The starting index of the input span in the original input text
end_idx: The ending index of the input span (exclusively)
lang: Language of the instance
semiotic_class: The semiotic class of the input span (can be set to None if not available)
"""
def __init__(
self,
w_words: List[str],
s_words: List[str],
inst_dir: str,
start_idx: int,
end_idx: int,
lang: str,
semiotic_class: str = None,
):
processor = MosesProcessor(lang_id=lang)
start_idx = max(start_idx, 0)
end_idx = min(end_idx, len(w_words))
ctx_size = constants.DECODE_CTX_SIZE
extra_id_0 = constants.EXTRA_ID_0
extra_id_1 = constants.EXTRA_ID_1
# Extract center words
c_w_words = w_words[start_idx:end_idx]
c_s_words = s_words[start_idx:end_idx]
# Extract context
w_left = w_words[max(0, start_idx - ctx_size) : start_idx]
w_right = w_words[end_idx : end_idx + ctx_size]
s_left = s_words[max(0, start_idx - ctx_size) : start_idx]
s_right = s_words[end_idx : end_idx + ctx_size]
# Process sil words and self words
for jx in range(len(s_left)):
if s_left[jx] == constants.SIL_WORD:
s_left[jx] = ''
if s_left[jx] == constants.SELF_WORD:
s_left[jx] = w_left[jx]
for jx in range(len(s_right)):
if s_right[jx] == constants.SIL_WORD:
s_right[jx] = ''
if s_right[jx] == constants.SELF_WORD:
s_right[jx] = w_right[jx]
for jx in range(len(c_s_words)):
if c_s_words[jx] == constants.SIL_WORD:
c_s_words[jx] = c_w_words[jx]
if inst_dir == constants.INST_BACKWARD:
c_w_words[jx] = ''
c_s_words[jx] = ''
if c_s_words[jx] == constants.SELF_WORD:
c_s_words[jx] = c_w_words[jx]
# Extract input_words and output_words
c_w_words = processor.tokenize(' '.join(c_w_words)).split()
c_s_words = processor.tokenize(' '.join(c_s_words)).split()
# for cases when nearby words are actually multiple tokens, e.g. '1974,'
w_left = processor.tokenize(' '.join(w_left)).split()[-constants.DECODE_CTX_SIZE :]
w_right = processor.tokenize(' '.join(w_right)).split()[: constants.DECODE_CTX_SIZE]
w_input = w_left + [extra_id_0] + c_w_words + [extra_id_1] + w_right
s_input = s_left + [extra_id_0] + c_s_words + [extra_id_1] + s_right
if inst_dir == constants.INST_BACKWARD:
input_center_words = c_s_words
input_words = [constants.ITN_PREFIX] + s_input
output_words = c_w_words
if inst_dir == constants.INST_FORWARD:
input_center_words = c_w_words
input_words = [constants.TN_PREFIX] + w_input
output_words = c_s_words
# Finalize
self.input_str = ' '.join(input_words)
self.input_center_str = ' '.join(input_center_words)
self.output_str = ' '.join(output_words)
self.direction = inst_dir
self.semiotic_class = semiotic_class
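# Illustrative example of what DecoderDataInstance builds for the TN direction (words are made up; Moses
# tokenization may split some tokens further):
#   w_words = ["on", "May", "5", "2012", "in", "London"]
#   s_words = ["<self>", "<self>", "the fifth", "twenty twelve", "<self>", "<self>"]
#   start_idx = 2, end_idx = 3, inst_dir = INST_FORWARD
#   input_str        -> "1 on May <extra_id_0> 5 <extra_id_1> 2012 in London"
#   input_center_str -> "5"
#   output_str       -> "the fifth"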
class TarredTextNormalizationDecoderDataset(IterableDataset):
"""
A similar Dataset to the TextNormalizationDecoderDataset, but which loads tarred tokenized pickle files.
Accepts a single JSON metadata file containing the total number of batches
as well as the path(s) to the tarball(s) containing the pickled dataset batch files.
Valid formats for the text_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/text.tar' or 'path/to/text_{1..100}.tar', or
(2) a list of file paths that will not be brace-expanded, e.g. ['text_1.tar', 'text_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    Additionally, please note that the len() of this DataLayer is the number of pickled batches, derived from
    num_batches (and adjusted for shard partitioning). An incorrect value may lead to some DataLoader issues down the line.
Args:
text_tar_filepaths: Either a list of tokenized text tarball filepaths, or a string (can be brace-expandable).
num_batches: total number of batches
        shuffle_n: How many samples to look ahead and load to be shuffled. See WebDataset documentation for more details.
shard_strategy: Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank: Worker rank, used for partitioning shards.
world_size: Total number of processes, used for partitioning shards.
"""
def __init__(
self,
text_tar_filepaths: str,
num_batches: int,
shuffle_n: int = 0,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 1,
):
super(TarredTextNormalizationDecoderDataset, self).__init__()
valid_shard_strategies = ['scatter', 'replicate']
if shard_strategy not in valid_shard_strategies:
raise ValueError(
f"Invalid shard strategy of type {type(shard_strategy)} "
f"{repr(shard_strategy) if len(repr(shard_strategy)) < 100 else repr(shard_strategy)[:100] + '...'}! "
f"Allowed values are: {valid_shard_strategies}."
)
if isinstance(text_tar_filepaths, str):
# Replace '(', '[', '<' and '_OP_' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "{")
# Replace ')', ']', '>' and '_CL_' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "}")
if isinstance(text_tar_filepaths, str):
# Brace expand
text_tar_filepaths = list(braceexpand.braceexpand(text_tar_filepaths))
if shard_strategy == 'scatter':
logging.info("Tarred dataset shards will be scattered evenly across all nodes.")
if len(text_tar_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(text_tar_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size}). "
f"Some shards will not be used ({len(text_tar_filepaths) % world_size})."
)
batches_per_tar = num_batches // len(text_tar_filepaths)
begin_idx = (len(text_tar_filepaths) // world_size) * global_rank
end_idx = begin_idx + (len(text_tar_filepaths) // world_size)
logging.info('Begin Index : %d' % (begin_idx))
logging.info('End Index : %d' % (end_idx))
text_tar_filepaths = text_tar_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
self.length = batches_per_tar * len(text_tar_filepaths) * world_size
elif shard_strategy == 'replicate':
logging.info("All tarred dataset shards will be replicated across all nodes.")
self.length = num_batches
else:
raise ValueError(f"Invalid shard strategy! Allowed values are: {valid_shard_strategies}")
# Put together WebDataset
self._dataset = wd.WebDataset(urls=text_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = self._dataset.rename(pkl='pkl', key='__key__').to_tuple('pkl', 'key').map(f=self._build_sample)
def _build_sample(self, fname):
# Load file
pkl_file, _ = fname
pkl_file = io.BytesIO(pkl_file)
data = pickle.load(pkl_file) # loads np.int64 vector
pkl_file.close()
data = {k: torch.tensor(v) for k, v in data.items()}
return data
def __iter__(self):
return self._dataset.__iter__()
def __len__(self):
return self.length
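# Illustrative values accepted by text_tar_filepaths (the paths and shard counts are hypothetical placeholders):
#   "tarred_data/decoder.batches.{1..32}.tar"              # brace-expandable string
#   "tarred_data/decoder.batches._OP_1..32_CL_.tar"        # SLURM-safe equivalent of the braces
#   ["decoder.batches.1.tar", "decoder.batches.2.tar"]     # explicit list, used as-is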
| NeMo-main | nemo/collections/nlp/data/text_normalization/decoder_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from copy import deepcopy
import regex as re
from tqdm import tqdm
from nemo.collections.nlp.data.text_normalization import constants
__all__ = [
'read_data_file',
'normalize_str',
'flatten',
'convert_fraction',
'convert_superscript',
'add_space_around_dash',
]
def flatten(l):
""" flatten a list of lists """
return [item for sublist in l for item in sublist]
def add_space_around_dash(input: str):
""" adds space around dash between numbers and non-numbers"""
input = re.sub(r"([^\s0-9])-([0-9])", r"\1 - \2", input)
input = re.sub(r"([0-9])-([^\s0-9])", r"\1 - \2", input)
input = re.sub(r"([^\s0-9])-([0-9])", r"\1 - \2", input)
input = re.sub(r"([0-9])-([^\s0-9])", r"\1 - \2", input)
return input
def convert_superscript(written: str):
"""convert superscript to regular character"""
written = re.sub("²", "2", written)
written = re.sub("³", "3", written)
return written
def convert_fraction(written: str):
"""
converts fraction to standard form, e.g "½" -> "1/2", "1 ½" -> "1 1/2"
Args:
written: written form
Returns:
written: modified form
"""
written = re.sub(" ½", " 1/2", written)
written = re.sub(" ⅓", " 1/3", written)
written = re.sub(" ⅔", " 2/3", written)
written = re.sub(" ¼", " 1/4", written)
written = re.sub(" ¾", " 3/4", written)
written = re.sub(" ⅕", " 1/5", written)
written = re.sub(" ⅖", " 2/5", written)
written = re.sub(" ⅗", " 3/5", written)
written = re.sub(" ⅘", " 4/5", written)
written = re.sub(" ⅙", " 1/6", written)
written = re.sub(" ⅚", " 5/6", written)
written = re.sub(" ⅛", " 1/8", written)
written = re.sub(" ⅜", " 3/8", written)
written = re.sub(" ⅝", " 5/8", written)
written = re.sub(" ⅞", " 7/8", written)
written = re.sub("^½", "1/2", written)
written = re.sub("^⅓", "1/3", written)
written = re.sub("^⅔", "2/3", written)
written = re.sub("^¼", "1/4", written)
written = re.sub("^¾", "3/4", written)
written = re.sub("^⅕", "1/5", written)
written = re.sub("^⅖", "2/5", written)
written = re.sub("^⅗", "3/5", written)
written = re.sub("^⅘", "4/5", written)
written = re.sub("^⅙", "1/6", written)
written = re.sub("^⅚", "5/6", written)
written = re.sub("^⅛", "1/8", written)
written = re.sub("^⅜", "3/8", written)
written = re.sub("^⅝", "5/8", written)
written = re.sub("^⅞", "7/8", written)
written = re.sub("-½", "-1/2", written)
written = re.sub("-⅓", "-1/3", written)
written = re.sub("-⅔", "-2/3", written)
written = re.sub("-¼", "-1/4", written)
written = re.sub("-¾", "-3/4", written)
written = re.sub("-⅕", "-1/5", written)
written = re.sub("-⅖", "-2/5", written)
written = re.sub("-⅗", "-3/5", written)
written = re.sub("-⅘", "-4/5", written)
written = re.sub("-⅙", "-1/6", written)
written = re.sub("-⅚", "-5/6", written)
written = re.sub("-⅛", "-1/8", written)
written = re.sub("-⅜", "-3/8", written)
written = re.sub("-⅝", "-5/8", written)
written = re.sub("-⅞", "-7/8", written)
written = re.sub("([0-9])\s?½", "\\1 1/2", written)
written = re.sub("([0-9])\s?⅓", "\\1 1/3", written)
written = re.sub("([0-9])\s?⅔", "\\1 2/3", written)
written = re.sub("([0-9])\s?¼", "\\1 1/4", written)
written = re.sub("([0-9])\s?¾", "\\1 3/4", written)
written = re.sub("([0-9])\s?⅕", "\\1 1/5", written)
written = re.sub("([0-9])\s?⅖", "\\1 2/5", written)
written = re.sub("([0-9])\s?⅗", "\\1 3/5", written)
written = re.sub("([0-9])\s?⅘", "\\1 4/5", written)
written = re.sub("([0-9])\s?⅙", "\\1 1/6", written)
written = re.sub("([0-9])\s?⅚", "\\1 5/6", written)
written = re.sub("([0-9])\s?⅛", "\\1 1/8", written)
written = re.sub("([0-9])\s?⅜", "\\1 3/8", written)
written = re.sub("([0-9])\s?⅝", "\\1 5/8", written)
written = re.sub("([0-9])\s?⅞", "\\1 7/8", written)
return written
def input_preprocessing(sent: str, lang: str):
""" Function for preprocessing the input texts. The function first does
some basic tokenization. For English, it then also processes Greek letters
such as Δ or λ (if any).
Args:
sent: input text.
lang: language
Returns: preprocessed input text.
"""
# Basic Preprocessing and Tokenization
if lang == constants.ENGLISH:
sent = sent.replace('+', ' plus ')
sent = sent.replace('=', ' equals ')
sent = sent.replace('@', ' at ')
sent = sent.replace('*', ' times ')
# Greek letters processing
for jx, tok in enumerate(sent):
if tok in constants.EN_GREEK_TO_SPOKEN:
sent = sent[:jx] + constants.EN_GREEK_TO_SPOKEN[tok] + sent[jx + 1 :]
sent = convert_superscript(sent)
sent = convert_fraction(sent)
sent = add_space_around_dash(sent)
return sent
def read_data_file(fp: str, lang: str, max_insts: int = -1):
""" Reading the raw data from a file of NeMo format
For more info about the data format, refer to the
`text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
Args:
fp: file paths
lang: language
max_insts: Maximum number of instances (-1 means no limit)
Returns:
insts: List of sentences parsed as list of words
"""
insts, w_words, s_words, classes = [], [], [], []
# Read input file
with open(fp, 'r', encoding='utf-8') as f:
for line in tqdm(f):
es = [e.strip() for e in input_preprocessing(line.strip(), lang=lang).split('\t')]
if es[0] == '<eos>':
inst = (deepcopy(classes), deepcopy(w_words), deepcopy(s_words))
insts.append(inst)
# Reset
w_words, s_words, classes = [], [], []
if max_insts > 0 and len(insts) >= max_insts:
break
else:
classes.append(es[0])
w_words.append(es[1])
s_words.append(es[2])
return insts
def normalize_str(input_str):
""" Normalize an input string """
return input_str.strip().lower().replace(" ", " ")
def remove_puncts(input_str):
""" Remove punctuations from an input string """
return input_str.translate(str.maketrans('', '', string.punctuation))
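# Illustrative behaviour of the helpers above (inputs are made up):
#   convert_fraction("1 ½ cups")        -> "1 1/2 cups"
#   add_space_around_dash("covid-19")   -> "covid - 19"
#   normalize_str("  The  Cat ")        -> "the cat"
#   remove_puncts("$5.")                -> "5"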
| NeMo-main | nemo/collections/nlp/data/text_normalization/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from typing import List
from nemo.collections.common.tokenizers.moses_tokenizers import MosesProcessor
from nemo.collections.nlp.data.text_normalization import constants
from nemo.collections.nlp.data.text_normalization.utils import normalize_str, read_data_file, remove_puncts
from nemo.utils import logging
__all__ = ['TextNormalizationTestDataset']
# Test Dataset
class TextNormalizationTestDataset:
"""
Creates dataset to use to do end-to-end inference
Args:
input_file: path to the raw data file (e.g., train.tsv). For more info about the data format, refer to the `text_normalization doc <https://github.com/NVIDIA/NeMo/blob/main/docs/source/nlp/text_normalization.rst>`.
mode: should be one of the values ['tn', 'itn', 'joint']. `tn` mode is for TN only. `itn` mode is for ITN only. `joint` is for training a system that can do both TN and ITN at the same time.
lang: Language of the dataset
"""
def __init__(self, input_file: str, mode: str, lang: str):
self.lang = lang
insts = read_data_file(input_file, lang=lang)
processor = MosesProcessor(lang_id=lang)
# Build inputs and targets
self.directions, self.inputs, self.targets, self.classes, self.nb_spans, self.span_starts, self.span_ends = (
[],
[],
[],
[],
[],
[],
[],
)
for (classes, w_words, s_words) in insts:
# Extract words that are not punctuations
for direction in constants.INST_DIRECTIONS:
if direction == constants.INST_BACKWARD:
if mode == constants.TN_MODE:
continue
# ITN mode
(
processed_w_words,
processed_s_words,
processed_classes,
processed_nb_spans,
processed_s_span_starts,
processed_s_span_ends,
) = ([], [], [], 0, [], [])
s_word_idx = 0
for cls, w_word, s_word in zip(classes, w_words, s_words):
if s_word == constants.SIL_WORD:
continue
elif s_word == constants.SELF_WORD:
processed_s_words.append(w_word)
else:
processed_s_words.append(s_word)
s_word_last = processor.tokenize(processed_s_words.pop()).split()
processed_s_words.append(" ".join(s_word_last))
num_tokens = len(s_word_last)
processed_nb_spans += 1
processed_classes.append(cls)
processed_s_span_starts.append(s_word_idx)
s_word_idx += num_tokens
processed_s_span_ends.append(s_word_idx)
processed_w_words.append(w_word)
self.span_starts.append(processed_s_span_starts)
self.span_ends.append(processed_s_span_ends)
self.classes.append(processed_classes)
self.nb_spans.append(processed_nb_spans)
input_words = ' '.join(processed_s_words)
# Update self.directions, self.inputs, self.targets
self.directions.append(direction)
self.inputs.append(input_words)
self.targets.append(
processed_w_words
) # is list of lists where inner list contains target tokens (not words)
# TN mode
elif direction == constants.INST_FORWARD:
if mode == constants.ITN_MODE:
continue
(
processed_w_words,
processed_s_words,
processed_classes,
processed_nb_spans,
w_span_starts,
w_span_ends,
) = ([], [], [], 0, [], [])
w_word_idx = 0
for cls, w_word, s_word in zip(classes, w_words, s_words):
# TN forward mode
# this is done for cases like `do n't`, this w_word will be treated as 2 tokens
w_word = processor.tokenize(w_word).split()
num_tokens = len(w_word)
if s_word in constants.SPECIAL_WORDS:
processed_s_words.append(" ".join(w_word))
else:
processed_s_words.append(s_word)
w_span_starts.append(w_word_idx)
w_word_idx += num_tokens
w_span_ends.append(w_word_idx)
processed_nb_spans += 1
processed_classes.append(cls)
processed_w_words.extend(w_word)
self.span_starts.append(w_span_starts)
self.span_ends.append(w_span_ends)
self.classes.append(processed_classes)
self.nb_spans.append(processed_nb_spans)
input_words = ' '.join(processed_w_words)
# Update self.directions, self.inputs, self.targets
self.directions.append(direction)
self.inputs.append(input_words)
self.targets.append(
processed_s_words
) # is list of lists where inner list contains target tokens (not words)
self.examples = list(
zip(
self.directions,
self.inputs,
self.targets,
self.classes,
self.nb_spans,
self.span_starts,
self.span_ends,
)
)
def __getitem__(self, idx):
return self.examples[idx]
def __len__(self):
return len(self.inputs)
@staticmethod
def is_same(pred: str, target: str, inst_dir: str):
"""
Function for checking whether the predicted string can be considered
the same as the target string
Args:
pred: Predicted string
target: Target string
inst_dir: Direction of the instance (i.e., INST_BACKWARD or INST_FORWARD).
Return: an int value (0/1) indicating whether pred and target are the same.
"""
if inst_dir == constants.INST_BACKWARD:
pred = remove_puncts(pred)
target = remove_puncts(target)
pred = normalize_str(pred)
target = normalize_str(target)
return int(pred == target)
@staticmethod
def compute_sent_accuracy(preds: List[str], targets: List[str], inst_directions: List[str]):
"""
Compute the sentence accuracy metric.
Args:
preds: List of predicted strings.
targets: List of target strings.
inst_directions: A list of str where each str indicates the direction of the corresponding instance (i.e., INST_BACKWARD or INST_FORWARD).
Return: the sentence accuracy score
"""
assert len(preds) == len(targets)
if len(targets) == 0:
return 'NA'
# Sentence Accuracy
correct_count = 0
for inst_dir, pred, target in zip(inst_directions, preds, targets):
correct_count += TextNormalizationTestDataset.is_same(pred, target, inst_dir)
sent_accuracy = correct_count / len(targets)
return sent_accuracy
@staticmethod
def compute_class_accuracy(
inputs: List[List[str]],
targets: List[List[str]],
tag_preds: List[List[str]],
inst_directions: List[str],
output_spans: List[List[str]],
classes: List[List[str]],
nb_spans: List[int],
span_ends: List[List[int]],
) -> dict:
"""
Compute the class based accuracy metric. This uses model's predicted tags.
Args:
inputs: List of lists where inner list contains words of input text
targets: List of lists where inner list contains target strings grouped by class boundary
tag_preds: List of lists where inner list contains predicted tags for each of the input words
inst_directions: A list of str where each str indicates the direction of the corresponding instance (i.e., INST_BACKWARD or INST_FORWARD).
output_spans: A list of lists where each inner list contains the decoded spans for the corresponding input sentence
classes: A list of lists where inner list contains the class for each semiotic token in input sentence
nb_spans: A list that contains the number of tokens in the input
span_ends: A list of lists where inner list contains the end word index of the current token
Return: the class accuracy scores as dict
"""
if len(targets) == 0:
return 'NA'
class2stats, class2correct = defaultdict(int), defaultdict(int)
for ix, (sent, tags) in enumerate(zip(inputs, tag_preds)):
            if len(sent) != len(tags):
                logging.warning(f"Error: skipping example {ix}")
                continue
cur_words = [[] for _ in range(nb_spans[ix])]
jx, span_idx = 0, 0
cur_spans = output_spans[ix]
class_idx = 0
if classes[ix]:
class2stats[classes[ix][class_idx]] += 1
while jx < len(sent):
tag, word = tags[jx], sent[jx]
while jx >= span_ends[ix][class_idx]:
class_idx += 1
class2stats[classes[ix][class_idx]] += 1
if constants.SAME_TAG in tag:
cur_words[class_idx].append(word)
jx += 1
else:
jx += 1
tmp = cur_spans[span_idx]
cur_words[class_idx].append(tmp)
span_idx += 1
while jx < len(sent) and tags[jx] == constants.I_PREFIX + constants.TRANSFORM_TAG:
while jx >= span_ends[ix][class_idx]:
class_idx += 1
class2stats[classes[ix][class_idx]] += 1
cur_words[class_idx].append(tmp)
jx += 1
target_token_idx = 0
# assert len(cur_words) == len(targets[ix])
for class_idx in range(nb_spans[ix]):
correct = TextNormalizationTestDataset.is_same(
" ".join(cur_words[class_idx]), targets[ix][target_token_idx], inst_directions[ix]
)
class2correct[classes[ix][class_idx]] += correct
target_token_idx += 1
for key in class2stats:
class2stats[key] = (class2correct[key] / class2stats[key], class2correct[key], class2stats[key])
return class2stats
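# Illustrative sketch (added for documentation; not part of the original class): shows how the
# static metric helpers above are meant to be combined. The strings are invented; `is_same`
# lower-cases both sides and, for the ITN (backward) direction, also strips punctuation before
# comparing, so the second pair below still counts as correct.
def _example_sentence_accuracy():
    preds = ["on the twenty second of july", "On July 22 2012."]
    targets = ["on the twenty second of july", "on july 22 2012"]
    directions = [constants.INST_FORWARD, constants.INST_BACKWARD]
    return TextNormalizationTestDataset.compute_sent_accuracy(preds, targets, directions)  # -> 1.0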
| NeMo-main | nemo/collections/nlp/data/text_normalization/test_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.glue_benchmark.glue_benchmark_dataset import GLUEDataset
| NeMo-main | nemo/collections/nlp/data/glue_benchmark/__init__.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Some code of this file was adapted from the HuggingFace library available at
# https://github.com/huggingface/transformers
import os
import pickle
from typing import Dict, List, Optional, Union
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.glue_benchmark.data_processors import (
ColaProcessor,
MnliMismatchedProcessor,
MnliProcessor,
MrpcProcessor,
QnliProcessor,
QqpProcessor,
RteProcessor,
Sst2Processor,
StsbProcessor,
WnliProcessor,
XNLIProcessor,
)
from nemo.core.classes import Dataset
from nemo.core.neural_types import CategoricalValuesType, ChannelType, MaskType, NeuralType, RegressionValuesType
from nemo.utils import logging
__all__ = ['GLUEDataset', 'TextToTextGLUEDataset', 'TextToTextXNLIDataset']
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
"xnli": XNLIProcessor,
}
output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
"xnli": "classification",
}
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
class GLUEDataset(Dataset):
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
"labels": NeuralType(
tuple('B'), RegressionValuesType() if self.task_name == 'sts-b' else CategoricalValuesType()
),
}
def __init__(
self,
file_name: str,
task_name: str,
tokenizer: TokenizerSpec,
max_seq_length: str,
use_cache: bool = True,
compute_features: bool = True,
):
"""
Processes GLUE datasets
Args:
file_name: path to file
task_name: GLUE task name
tokenizer: such as AutoTokenizer
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
use_cache: whether to use data cache
"""
original_file_name = file_name
logging.info(f'Processing {file_name}')
data_dir, file_name = os.path.split(file_name)
file_name = file_name[:-4]
self.tokenizer = tokenizer
evaluate = False if 'train' in file_name else True
if task_name not in processors:
raise ValueError(f'{task_name} not supported. Choose from {processors.keys()}')
if task_name == 'mnli' and 'dev_mismatched' in file_name:
self.task_name = 'mnli-mm'
else:
self.task_name = task_name
processor = processors[self.task_name]()
output_mode = output_modes[self.task_name]
self.label_list = processor.get_labels()
# TODO: use a different variable to decide whether to trust the user provided filename. This is a temporary workaround for T5 GLUE and XNLI.
if not compute_features:
if not os.path.exists(original_file_name):
raise ValueError(f"Could not find file : {original_file_name}")
self.examples = processor.get_examples(original_file_name)
else:
self.examples = (
processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
)
processor_name = type(processor).__name__
vocab_size = getattr(tokenizer, "vocab_size", 0)
if compute_features:
cached_features_file = os.path.join(
data_dir,
"cached_{}_{}_{}_{}_{}".format(
processor_name, file_name, tokenizer.name, str(max_seq_length), str(vocab_size)
),
)
if use_cache and os.path.exists(cached_features_file):
logging.info(f"loading from {cached_features_file}")
with open(cached_features_file, "rb") as reader:
self.features = pickle.load(reader)
else:
token_params = {
'bos_token': None,
'eos_token': tokenizer.eos_token,
'pad_token': tokenizer.pad_token,
'cls_token': tokenizer.cls_token,
'sep_token_extra': tokenizer.eos_token if 'roberta' in tokenizer.name.lower() else None,
}
self.features = self.convert_examples_to_features(
self.examples, self.label_list, max_seq_length, tokenizer, output_mode, **token_params
)
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if master_device:
logging.info(f'Saving train features into {cached_features_file}')
with open(cached_features_file, "wb") as writer:
pickle.dump(self.features, writer)
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
feature = self.features[idx]
return (
np.array(feature.input_ids),
np.array(feature.segment_ids),
np.array(feature.input_mask, dtype=np.long),
np.array(feature.label_id),
)
def convert_examples_to_features(
self,
examples: List[str],
label_list: List[int],
max_seq_length: int,
tokenizer: TokenizerSpec,
output_mode: str,
bos_token: str = None,
eos_token: str = '[SEP]',
pad_token: str = '[PAD]',
cls_token: str = '[CLS]',
sep_token_extra: str = None,
cls_token_at_end: bool = False,
cls_token_segment_id: int = 0,
pad_token_segment_id: int = 0,
pad_on_left: bool = False,
mask_padding_with_zero: bool = True,
sequence_a_segment_id: int = 0,
sequence_b_segment_id: int = 1,
):
"""
Loads a data file into a list of `InputBatch`s.
The `cls_token_at_end` defines the location of the CLS token:
* False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
* True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
The `cls_token_segment_id` defines the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
The convention in BERT is:
a. For sequence pairs:
* tokens: [CLS] is this jack ##ville ? [SEP] no it is not . [SEP]
* type_ids: 0 0 0 0 0 0 0 1 1 1 1 1 1
b. For single sequences:
* tokens: [CLS] the dog is hairy . [SEP]
* type_ids: 0 0 0 0 0 0 0
Where "type_ids" are used to indicate whether this is the first
sequence or the second sequence. The embedding vectors for `type=0`
and `type=1` were learned during pre-training and are added to the
wordpiece embedding vector (and position vector). This is
        not *strictly* necessary since the [SEP] token unambiguously separates
the sequences, but it makes it easier for the model to learn
the concept of sequences.
For classification tasks, the first vector (corresponding to [CLS])
        is used as the "sentence vector". Note that this only makes sense
because the entire model is fine-tuned.
The convention for NMT is:
a. For sequence pairs:
* tokens:<BOS> is this jack ##ville ? <EOS> <BOS> no it is not . <EOS>
* type_ids:0 0 0 0 0 0 0 1 1 1 1 1 1 1
b. For single sequences:
* tokens: <BOS> the dog is hairy . <EOS>
* type_ids: 0 0 0 0 0 0 0
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if example.label == "-": # skip examples without a consensus label (e.g. in SNLI data set)
continue
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d" % (ex_index, len(examples)))
tokens_a = tokenizer.text_to_tokens(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.text_to_tokens(example.text_b)
special_tokens_count = 2 if eos_token else 0
special_tokens_count += 1 if sep_token_extra else 0
special_tokens_count += 2 if bos_token else 0
special_tokens_count += 1 if cls_token else 0
self._truncate_seq_pair(tokens_a, tokens_b, max_seq_length - special_tokens_count)
else:
special_tokens_count = 1 if eos_token else 0
special_tokens_count += 1 if sep_token_extra else 0
special_tokens_count += 1 if bos_token else 0
if len(tokens_a) > max_seq_length - special_tokens_count:
tokens_a = tokens_a[: max_seq_length - special_tokens_count]
# Add special tokens to sequence_a
tokens = tokens_a
if bos_token:
tokens = [bos_token] + tokens
if eos_token:
tokens += [eos_token]
segment_ids = [sequence_a_segment_id] * len(tokens)
# Add sequence separator between sequences
if tokens_b and sep_token_extra:
tokens += [sep_token_extra]
segment_ids += [sequence_a_segment_id]
# Add special tokens to sequence_b
if tokens_b:
if bos_token:
tokens += [bos_token]
segment_ids += [sequence_b_segment_id]
tokens += tokens_b
segment_ids += [sequence_b_segment_id] * (len(tokens_b))
if eos_token:
tokens += [eos_token]
segment_ids += [sequence_b_segment_id]
# Add classification token - for BERT models
if cls_token:
if cls_token_at_end:
tokens += [cls_token]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
pad_token_id = tokenizer.tokens_to_ids([pad_token])[0]
if pad_on_left:
input_ids = ([pad_token_id] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
else:
input_ids = input_ids + ([pad_token_id] * padding_length)
input_mask = input_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
segment_ids = segment_ids + ([pad_token_segment_id] * padding_length)
if len(input_ids) != max_seq_length:
raise ValueError("input_ids must be of length max_seq_length")
if len(input_mask) != max_seq_length:
raise ValueError("input_mask must be of length max_seq_length")
if len(segment_ids) != max_seq_length:
raise ValueError("segment_ids must be of length max_seq_length")
if output_mode == "classification":
label_id = label_map[example.label]
elif output_mode == "regression":
label_id = np.float32(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logging.info("*** Example ***")
logging.info("guid: %s" % (example.guid))
logging.info("tokens: %s" % " ".join(list(map(str, tokens))))
logging.info("input_ids: %s" % " ".join(list(map(str, input_ids))))
logging.info("input_mask: %s" % " ".join(list(map(str, input_mask))))
logging.info("segment_ids: %s" % " ".join(list(map(str, segment_ids))))
logging.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_id=label_id)
)
return features
def _truncate_seq_pair(self, tokens_a: str, tokens_b: str, max_length: int):
"""Truncates a sequence pair in place to the maximum length.
This will always truncate the longer sequence one token at a time.
This makes more sense than truncating an equal percent
of tokens from each, since if one sequence is very short then each token
that's truncated likely contains more information than a longer sequence.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
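# Illustrative sketch (added for documentation; not part of the original class): spells out the
# BERT-style packing described in the `convert_examples_to_features` docstring for a sequence
# pair, without needing a real tokenizer. Token strings are taken from the docstring example.
def _example_bert_packing():
    tokens_a = ["is", "this", "jack", "##ville", "?"]
    tokens_b = ["no", "it", "is", "not", "."]
    tokens = ["[CLS]"] + tokens_a + ["[SEP]"] + tokens_b + ["[SEP]"]
    segment_ids = [0] * (len(tokens_a) + 2) + [1] * (len(tokens_b) + 1)
    input_mask = [1] * len(tokens)  # 1 marks real tokens; padding positions would get 0
    return tokens, segment_ids, input_mask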
class TextToTextGLUEDataset(GLUEDataset):
"""GLUE Dataset in a text-to-text format."""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return
def __init__(
self,
file_name: str,
task_name: str,
tokenizer: TokenizerSpec,
max_seq_length: int,
max_seq_length_decoder: int = 128,
use_cache: bool = True,
prefix_override: str = None,
pad_to_max_length: bool = True,
):
"""
Processes GLUE datasets
Args:
file_name: path to file
task_name: GLUE task name
tokenizer: such as AutoTokenizer
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
use_cache: whether to use data cache
prefix_override: if you want to override default prompt for this task specify this via a string.
pad_to_max_length: If true, pad to the maximum length.
"""
super().__init__(file_name, task_name, tokenizer, max_seq_length, use_cache, compute_features=False)
self.max_seq_length = max_seq_length
self.max_seq_length_decoder = max_seq_length_decoder
self.pad_to_max_length = pad_to_max_length
self.processor = processors[self.task_name]()
self.prefix_override = prefix_override
self.features = self.convert_examples_to_features()
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
enc_query, dec_input, labels = self.features[idx]
return {'text_enc': enc_query, 'text_dec': dec_input, 'labels': labels}
def collate_fn(self, batch):
enc_query = [item['text_enc'] for item in batch]
dec_input = [item['text_dec'] for item in batch]
labels = [item['labels'] for item in batch]
max_enc_query_length = max([len(item) for item in enc_query]) if enc_query else 0
max_dec_input_length = max([len(item) for item in dec_input]) if dec_input else 0
max_label_length = max([len(item) for item in labels]) if labels else 0
if self.pad_to_max_length:
assert max_enc_query_length <= self.max_seq_length
assert max_dec_input_length <= self.max_seq_length_decoder
assert max_label_length <= self.max_seq_length_decoder
max_enc_query_length = self.max_seq_length
max_dec_input_length = self.max_seq_length_decoder
max_label_length = self.max_seq_length_decoder
loss_mask = [([1] * (len(item))) + ([0] * (max_label_length - len(item))) for item in labels]
enc_query = [item + [self.tokenizer.pad_id] * (max_enc_query_length - len(item)) for item in enc_query]
dec_input = [item + [self.tokenizer.pad_id] * (max_dec_input_length - len(item)) for item in dec_input]
labels = [item + [self.tokenizer.pad_id] * (max_label_length - len(item)) for item in labels]
enc_query = torch.LongTensor(enc_query)
dec_input = torch.LongTensor(dec_input)
labels = torch.LongTensor(labels)
loss_mask = torch.LongTensor(loss_mask)
enc_mask = (enc_query != self.tokenizer.pad_id).long()
dec_mask = (dec_input != self.tokenizer.pad_id).long()
return {
'text_enc': enc_query,
'text_dec': dec_input,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
def make_history_mask_3d(self, block):
batch, length = block.shape
arange = np.arange(length)
        history_mask = (arange[None, :] <= arange[:, None])[None, :]
history_mask = np.repeat(history_mask, batch, 0)
return history_mask
def convert_examples_to_features(self):
"""
Converts examples into Text-to-Text batches to be used with a model like T5.
Inputs are prefixed with a text prompt that indicates the task to perform.
"""
features = []
for ex_index, example in enumerate(self.examples):
if ex_index % 10000 == 0:
logging.info(f"Writing example {ex_index} of {len(self.examples)}")
text_to_text_query = self.processor.get_t5_prompted_query(example.text_a, example.text_b)
enc_query = self.tokenizer.text_to_ids(text_to_text_query)
if len(enc_query) > self.max_seq_length:
enc_query = enc_query[: self.max_seq_length]
dec_query = (
[self.tokenizer.bos_id]
+ self.tokenizer.text_to_ids(self.processor.label2string(example.label))
+ [self.tokenizer.eos_id]
)
dec_input = dec_query[:-1]
labels = dec_query[1:]
features.append([enc_query, dec_input, labels])
return features
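# Illustrative sketch (added for documentation; not part of the original class): mirrors what
# `collate_fn` above does to a text-to-text batch - right-pad labels to a common length, build a
# loss mask over the un-padded label positions, and (not shown) derive attention masks from the
# pad id. `pad_id=0` is an assumption made only for this illustration.
def _example_text_to_text_padding(pad_id: int = 0):
    labels = [[5, 6, 7], [8, 9]]
    max_label_length = max(len(item) for item in labels)
    loss_mask = [[1] * len(item) + [0] * (max_label_length - len(item)) for item in labels]
    padded_labels = [item + [pad_id] * (max_label_length - len(item)) for item in labels]
    return padded_labels, loss_mask  # -> ([[5, 6, 7], [8, 9, 0]], [[1, 1, 1], [1, 1, 0]])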
class TextToTextXNLIDataset(TextToTextGLUEDataset):
"""XNLI Dataset in a text-to-text format."""
def __init__(
self,
file_name: str,
task_name: str,
tokenizer: TokenizerSpec,
max_seq_length: int,
max_seq_length_decoder: int = 128,
use_cache: bool = True,
prefix_override: str = None,
lang_list: List[str] = None,
pad_to_max_length: bool = True,
):
self.lang_list = set(lang_list)
super().__init__(
file_name,
task_name,
tokenizer,
max_seq_length,
max_seq_length_decoder,
use_cache,
prefix_override,
pad_to_max_length,
)
        if lang_list is None or len(lang_list) == 0:
raise ValueError(f"Found an empty or None lang_list for {self.task_name}")
self.features = self.convert_xnli_examples_to_features()
def __getitem__(self, idx):
enc_query, dec_input, labels, lang = self.features[idx]
return {'text_enc': enc_query, 'text_dec': dec_input, 'labels': labels, 'lang': lang}
def collate_fn(self, batch):
base_batch = super().collate_fn(batch)
base_batch['lang'] = [item['lang'] for item in batch]
return base_batch
def convert_xnli_examples_to_features(self):
"""
Converts examples into Text-to-Text batches to be used with a model like T5.
Inputs are prefixed with a text prompt that indicates the task to perform.
"""
features = self.features
lang_filtered_features = []
for ex_index, example in enumerate(self.examples):
language = example.guid.split('-')[1]
if language in self.lang_list:
lang_filtered_features.append(features[ex_index] + [language])
return lang_filtered_features
def __len__(self):
return len(self.features)
class InputFeatures(object):
"""A single set of features of data.
Args:
input_ids: input/token ids
input_mask: masks out subword tokens
segment_ids: distinguish one sentence from the other one (if present)
label_ids: label for the current example
"""
def __init__(
self, input_ids: List[int], input_mask: List[int], segment_ids: List[int], label_id: Union[float, int]
):
"""Initialized InputFeatures."""
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
| NeMo-main | nemo/collections/nlp/data/glue_benchmark/glue_benchmark_dataset.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from nemo.collections.nlp.data.data_utils.data_preprocessing import DataProcessor
from nemo.utils import logging
__all__ = [
'ColaProcessor',
'MnliProcessor',
'MnliMismatchedProcessor',
'MrpcProcessor',
'Sst2Processor',
'StsbProcessor',
'QqpProcessor',
'QnliProcessor',
'RteProcessor',
'WnliProcessor',
'XNLIProcessor',
]
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
logging.info(f'LOOKING AT {os.path.join(data_dir, "train.tsv")}')
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"mrpc sentence1: {text_a} sentence2: {text_b}"
def label2string(self, label):
return "equivalent" if label == "1" else "not equivalent"
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"mnli hypothesis: {text_a} premise: {text_b}"
def label2string(self, label):
return label
class XNLIProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[6]
text_b = line[7]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"mnli hypothesis: {text_a} premise: {text_b}"
def label2string(self, label):
return label
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_matched")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
assert text_b is None
return f"cola sentence: {text_a}"
def label2string(self, label):
return "acceptable" if label == "1" else "not acceptable"
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
assert text_b is None
return f"sst2 sentence: {text_a}"
def label2string(self, label):
return "positive" if label == "1" else "negative"
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[7]
text_b = line[8]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"stsb sentence1: {text_a} sentence2: {text_b}"
def label2string(self, label):
return '%.1f' % float(label)
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"qqp question1: {text_a} question2: {text_b}"
def label2string(self, label):
return "duplicate" if label == "1" else "not_duplicate"
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"qnli question: {text_a} sentence: {text_b}"
def label2string(self, label):
return label
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
return f"rte sentence1: {text_a} sentence2: {text_b}"
def label2string(self, label):
return label
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_examples(self, file_path):
return self._create_examples(self._read_tsv(file_path), "example")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_t5_prompted_query(self, text_a, text_b):
raise NotImplementedError("NeMo-Megatron T5 does not support WNLI at the moment.")
def label2string(self, label):
raise NotImplementedError("NeMo-Megatron T5 does not support WNLI at the moment.")
class InputExample(object):
"""A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: The untokenized text of the first sequence.
For single sequence tasks, only this sequence must be specified.
text_b: The untokenized text of the second
sequence. Only must be specified for sequence pair tasks.
        label: The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
def __init__(self, guid: int, text_a: str, text_b: str = None, label: str = None):
"""Constructs a InputExample."""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __repr__(self):
return (
f"InputExample(guid='{self.guid}', text_a='{self.text_a}', text_b='{self.text_b}', label='{self.label}')"
)
| NeMo-main | nemo/collections/nlp/data/glue_benchmark/data_processors.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import random
from typing import Dict, List, Optional
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
fill_class_weights,
get_freq_weights,
get_label_stats,
get_stats,
)
from nemo.collections.nlp.parts.utils_funcs import list2str
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
from nemo.utils.env_var_parsing import get_envint
__all__ = ['TextClassificationDataset', 'calc_class_weights']
class TextClassificationDataset(Dataset):
"""A dataset class that converts from raw data to
a dataset that can be used by DataLayerNM.
Args:
input_file: file to sequence + label.
the first line is header (sentence [tab] label)
each line should be [sentence][tab][label]
tokenizer: tokenizer object such as AutoTokenizer
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
num_samples: number of samples you want to use for the dataset.
If -1, use all dataset. Useful for testing.
shuffle: Shuffles the dataset after loading.
use_cache: Enables caching to use pickle format to store and read data from
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'label': NeuralType(('B',), LabelsType()),
}
def __init__(
self,
tokenizer: TokenizerSpec,
input_file: str = None,
queries: List[str] = None,
max_seq_length: int = -1,
num_samples: int = -1,
shuffle: bool = False,
use_cache: bool = False,
):
if not input_file and not queries:
raise ValueError("Either input_file or queries should be passed to the text classification dataset.")
if input_file and not os.path.exists(input_file):
raise FileNotFoundError(
f'Data file `{input_file}` not found! Each line of the data file should contain text sequences, where '
f'words are separated with spaces and the label separated by [TAB] following this format: '
f'[WORD][SPACE][WORD][SPACE][WORD][TAB][LABEL]'
)
self.input_file = input_file
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.num_samples = num_samples
self.shuffle = shuffle
self.use_cache = use_cache
self.vocab_size = self.tokenizer.vocab_size
self.pad_id = tokenizer.pad_id
self.features = None
labels, all_sents = [], []
if input_file:
data_dir, filename = os.path.split(input_file)
vocab_size = getattr(tokenizer, "vocab_size", 0)
tokenizer_name = tokenizer.name
cached_features_file = os.path.join(
data_dir,
f"cached_{filename}_{tokenizer_name}_{max_seq_length}_{vocab_size}_{num_samples}_{self.pad_id}_{shuffle}.pkl",
)
if get_envint("LOCAL_RANK", 0) == 0:
if use_cache and os.path.exists(cached_features_file):
logging.warning(
f"Processing of {input_file} is skipped as caching is enabled and a cache file "
f"{cached_features_file} already exists."
)
logging.warning(
f"You may need to delete the cache file if any of the processing parameters (eg. tokenizer) or "
f"the data are updated."
)
else:
with open(input_file, "r") as f:
lines = f.readlines()
logging.info(f'Read {len(lines)} examples from {input_file}.')
if num_samples > 0:
lines = lines[:num_samples]
logging.warning(
f"Parameter 'num_samples' is set, so just the first {len(lines)} examples are kept."
)
if shuffle:
random.shuffle(lines)
for index, line in enumerate(lines):
if index % 20000 == 0:
logging.debug(f"Processing line {index}/{len(lines)}")
line_splited = line.strip().split()
try:
label = int(line_splited[-1])
except ValueError:
logging.debug(f"Skipping line {line}")
continue
labels.append(label)
sent_words = line_splited[:-1]
all_sents.append(sent_words)
verbose = True
self.features = self.get_features(
all_sents=all_sents,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
labels=labels,
verbose=verbose,
)
with open(cached_features_file, 'wb') as out_file:
pickle.dump(self.features, out_file, protocol=pickle.HIGHEST_PROTOCOL)
else:
for query in queries:
all_sents.append(query.strip().split())
labels = [-1] * len(all_sents)
verbose = False
self.features = self.get_features(
all_sents=all_sents, tokenizer=tokenizer, max_seq_length=max_seq_length, labels=labels, verbose=verbose
)
# wait until the master process writes to the processed data files
if torch.distributed.is_initialized():
torch.distributed.barrier()
if input_file:
with open(cached_features_file, "rb") as input_file:
self.features = pickle.load(input_file)
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
return self.features[idx]
def _collate_fn(self, batch):
"""collate batch of input_ids, segment_ids, input_mask, and label
Args:
batch: A list of tuples of (input_ids, segment_ids, input_mask, label).
"""
max_length = 0
for input_ids, segment_ids, input_mask, label in batch:
if len(input_ids) > max_length:
max_length = len(input_ids)
padded_input_ids = []
padded_segment_ids = []
padded_input_mask = []
labels = []
for input_ids, segment_ids, input_mask, label in batch:
if len(input_ids) < max_length:
pad_width = max_length - len(input_ids)
padded_input_ids.append(np.pad(input_ids, pad_width=[0, pad_width], constant_values=self.pad_id))
padded_segment_ids.append(np.pad(segment_ids, pad_width=[0, pad_width], constant_values=self.pad_id))
padded_input_mask.append(np.pad(input_mask, pad_width=[0, pad_width], constant_values=self.pad_id))
else:
padded_input_ids.append(input_ids)
padded_segment_ids.append(segment_ids)
padded_input_mask.append(input_mask)
labels.append(label)
return (
torch.LongTensor(padded_input_ids),
torch.LongTensor(padded_segment_ids),
torch.LongTensor(padded_input_mask),
torch.LongTensor(labels),
)
@staticmethod
def get_features(all_sents, tokenizer, max_seq_length, labels=None, verbose=True):
"""Encode a list of sentences into a list of tuples of (input_ids, segment_ids, input_mask, label)."""
features = []
sent_lengths = []
too_long_count = 0
for sent_id, sent in enumerate(all_sents):
if sent_id % 1000 == 0:
logging.debug(f"Encoding sentence {sent_id}/{len(all_sents)}")
sent_subtokens = [tokenizer.cls_token]
for word in sent:
word_tokens = tokenizer.text_to_tokens(word)
sent_subtokens.extend(word_tokens)
if max_seq_length > 0 and len(sent_subtokens) + 1 > max_seq_length:
sent_subtokens = sent_subtokens[: max_seq_length - 1]
too_long_count += 1
sent_subtokens.append(tokenizer.sep_token)
sent_lengths.append(len(sent_subtokens))
input_ids = [tokenizer.tokens_to_ids(t) for t in sent_subtokens]
# The mask has 1 for real tokens and 0 for padding tokens.
# Only real tokens are attended to.
input_mask = [1] * len(input_ids)
segment_ids = [0] * len(input_ids)
if verbose and sent_id < 2:
logging.info("*** Example ***")
logging.info(f"example {sent_id}: {sent}")
logging.info("subtokens: %s" % " ".join(sent_subtokens))
logging.info("input_ids: %s" % list2str(input_ids))
logging.info("segment_ids: %s" % list2str(segment_ids))
logging.info("input_mask: %s" % list2str(input_mask))
logging.info("label: %s" % labels[sent_id] if labels else "**Not Provided**")
label = labels[sent_id] if labels else -1
features.append([np.asarray(input_ids), np.asarray(segment_ids), np.asarray(input_mask), label])
if max_seq_length > -1 and too_long_count > 0:
logging.warning(
f'Found {too_long_count} out of {len(all_sents)} sentences with more than {max_seq_length} subtokens. '
f'Truncated long sentences from the end.'
)
if verbose:
get_stats(sent_lengths)
return features
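# Illustrative sketch (added for documentation; not part of the original class): shows the line
# format the dataset expects and how a line is split into sentence words and an integer label,
# mirroring the parsing done in __init__ above. The sample line is invented.
def _example_parse_classification_line():
    line = "this movie was surprisingly good\t1"
    parts = line.strip().split()
    label = int(parts[-1])  # -> 1
    sent_words = parts[:-1]  # -> ["this", "movie", "was", "surprisingly", "good"]
    return sent_words, label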
def calc_class_weights(file_path: str, num_classes: int):
"""
iterates over a data file and calculate the weights of each class to be used for class_balancing
Args:
file_path: path to the data file
num_classes: number of classes in the dataset
"""
if not os.path.exists(file_path):
raise FileNotFoundError(f"Could not find data file {file_path} to calculate the class weights!")
with open(file_path, 'r') as f:
input_lines = f.readlines()
labels = []
for input_line in input_lines:
parts = input_line.strip().split()
try:
label = int(parts[-1])
except ValueError:
raise ValueError(
f'No numerical labels found for {file_path}. Labels should be integers and separated by [TAB] at the end of each line.'
)
labels.append(label)
logging.info(f'Calculating stats of {file_path}...')
total_sents, sent_label_freq, max_id = get_label_stats(labels, f'{file_path}_sentence_stats.tsv', verbose=False)
if max_id >= num_classes:
raise ValueError(f'Found an invalid label in {file_path}! Labels should be from [0, num_classes-1].')
class_weights_dict = get_freq_weights(sent_label_freq)
logging.info(f'Total Sentence: {total_sents}')
logging.info(f'Sentence class frequencies: {sent_label_freq}')
logging.info(f'Class Weights: {class_weights_dict}')
class_weights = fill_class_weights(weights=class_weights_dict, max_id=num_classes - 1)
return class_weights
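# Illustrative sketch (added for documentation; not part of the original module): conveys the
# weighting idea behind `calc_class_weights` - classes that are rare in the data receive
# proportionally larger weights. The exact normalization used by `get_freq_weights` may differ;
# the frequencies below are invented.
def _example_inverse_frequency_weights():
    label_freq = {0: 80, 1: 20}
    total = sum(label_freq.values())
    num_classes = len(label_freq)
    return {label: total / (num_classes * freq) for label, freq in label_freq.items()}
    # -> {0: 0.625, 1: 2.5}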
| NeMo-main | nemo/collections/nlp/data/text_classification/text_classification_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.text_classification.text_classification_dataset import (
TextClassificationDataset,
calc_class_weights,
)
| NeMo-main | nemo/collections/nlp/data/text_classification/__init__.py |
# Copyright 2022 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from typing import Dict, List, Optional
from nemo.core.classes import Dataset
from nemo.core.neural_types import NeuralType, StringLabel, StringType
__all__ = ['PTuneTextClassificationDataset', 'token_wrapper']
def load_file(filename):
data = []
with open(filename, "r") as f:
for line in f.readlines():
data.append(json.loads(line))
return data
def token_wrapper(token: str) -> str:
    """Prepend the 'Ġ' marker used by GPT-2 style BPE tokenizers to denote a leading space."""
    return 'Ġ' + token
class PTuneTextClassificationDataset(Dataset):
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
return {"sentences": [NeuralType(('T'), StringType())], "labels": [NeuralType(('T'), StringLabel())]}
def __init__(self, input_file: str, queries: List[str] = None, prompt: str = 'Sentiment'):
"""
A dataset class that feed data for P-tuning model
Args:
input_file: loose json data file. The format is {"sentence":"input sentence", "label":"class label"}
queries: list of query input sentences
prompt: the prompt string appended at the end of your input sentence
"""
super().__init__()
if input_file and not os.path.exists(input_file):
raise FileNotFoundError(
f'Data file `{input_file}` not found! Each line of the data file should contain json object'
f'where `sentence` key maps to sentence and `label` key maps to label'
)
if queries is None:
json_data = load_file(input_file)
else:
json_data = []
for line in queries:
json_data.append({'sentence': line + f' {prompt} ', 'label': ''})
self.data = json_data
def __len__(self):
return len(self.data)
def __getitem__(self, i):
return self.data[i]['sentence'], self.data[i]['label']
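# Illustrative sketch (added for documentation; not part of the original class): shows one line
# of the loose-json (JSONL) input format the dataset reads and what __getitem__ returns for it.
# The sample sentence, label and temporary path are invented.
def _example_ptune_record(tmp_path: str = "/tmp/ptune_sample.jsonl"):
    with open(tmp_path, "w") as f:
        f.write(json.dumps({"sentence": "the battery lasts all day", "label": "positive"}) + "\n")
    dataset = PTuneTextClassificationDataset(tmp_path)
    return dataset[0]  # -> ("the battery lasts all day", "positive")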
| NeMo-main | nemo/collections/nlp/data/text_classification/ptune_text_classification_dataset.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from typing import List
import ijson
import numpy as np
from transformers.models.bert.tokenization_bert import BasicTokenizer
from nemo.collections.nlp.data.data_utils import (
DataProcessor,
check_chinese_char,
normalize_answer,
normalize_chinese_answer,
)
from nemo.utils import logging
"""
Utility functions for Question Answering NLP tasks
Some parts of this code were adapted from the HuggingFace library at
https://github.com/huggingface/transformers
"""
TRAINING_MODE = "train"
EVALUATION_MODE = "eval"
INFERENCE_MODE = "infer"
def _get_tokens(s):
"""get normalized tokens for both Chinese and English"""
if not s:
return []
# separate answers to en and ch pieces
ch_seq = ""
en_seq = ""
pos = 0
# Normalize and connect
final_tokens = []
while pos < len(s):
if check_chinese_char(s[pos]):
if en_seq != "":
final_tokens.extend(normalize_answer(en_seq).split())
en_seq = ""
ch_seq += s[pos]
else:
if ch_seq != "":
final_tokens.extend(normalize_chinese_answer(ch_seq))
ch_seq = ""
en_seq += s[pos]
pos += 1
if en_seq != "":
final_tokens.extend(normalize_answer(en_seq).split())
if ch_seq != "":
final_tokens.extend(normalize_chinese_answer(ch_seq))
return final_tokens
def get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
best_indices = np.argsort(logits)[::-1]
return best_indices[:n_best_size]
def get_final_text(pred_text: str, orig_text: str, do_lower_case: bool, verbose_logging: bool = False):
"""Project the tokenized prediction back to the original text.
When we created the data, we kept track of the alignment between original
(whitespace tokenized) tokens and our WordPiece tokenized tokens. So
now `orig_text` contains the span of our original text corresponding to
the span that we predicted.
However, `orig_text` may contain extra characters that we don't want in
our prediction.
For example, let's say:
pred_text = steve smith
orig_text = Steve Smith's
We don't want to return `orig_text` because it contains the extra "'s".
We don't want to return `pred_text` because it's already been normalized
(the SQuAD eval script also does punctuation stripping/lower casing but
our tokenizer does additional normalization like stripping accent
characters).
What we really want to return is "Steve Smith".
Therefore, we have to apply a semi-complicated alignment heuristic
between `pred_text` and `orig_text` to get a character-to-character
alignment. This can fail in certain cases in which case we just return
`orig_text`."""
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return ns_text, ns_to_s_map
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logging.warning("Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logging.warning(
"Length not equal after stripping spaces: '%s' vs '%s'", orig_ns_text, tok_ns_text,
)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logging.warning("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logging.warning("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position : (orig_end_position + 1)]
return output_text
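# Illustrative sketch (added for documentation; not part of the original module): reproduces the
# docstring example above, projecting a normalized prediction back onto the original text.
def _example_get_final_text():
    # Expected to return "Steve Smith" when the alignment heuristic succeeds.
    return get_final_text("steve smith", "Steve Smith's", do_lower_case=True)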
def f1_score(prediction, ground_truth):
"""computes f1 score between prediction and ground truth"""
prediction_tokens = _get_tokens(prediction)
ground_truth_tokens = _get_tokens(ground_truth)
common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)
num_same = sum(common.values())
if len(ground_truth_tokens) == 0 or len(prediction_tokens) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(ground_truth_tokens == prediction_tokens)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def exact_match_score(prediction, ground_truth):
"""computes exact match between prediction and ground truth"""
return int(normalize_answer(prediction) == normalize_answer(ground_truth))
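# Illustrative sketch (added for documentation; not part of the original module): walks through
# the token-level F1 computed by `f1_score` above for a simple English pair.
def _example_f1_score():
    # prediction tokens ["steve", "smith"] vs. ground-truth tokens ["steve", "smith", "jr"]:
    # overlap = 2, precision = 2/2, recall = 2/3, F1 = 0.8
    return f1_score("Steve Smith", "Steve Smith Jr")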
def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
"""Applies no answer threshhold"""
new_scores = {}
for qid, s in scores.items():
pred_na = na_probs[qid] > na_prob_thresh
if pred_na:
new_scores[qid] = float(not qid_to_has_ans[qid])
else:
new_scores[qid] = s
return new_scores
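# Example (minimal sketch): a question whose predicted no-answer probability exceeds the
# threshold is treated as "no answer"; it then scores 1.0 only if it truly has no answer.
#
#     apply_no_ans_threshold(
#         scores={"q1": 1.0, "q2": 1.0},
#         na_probs={"q1": 0.1, "q2": 0.9},
#         qid_to_has_ans={"q1": True, "q2": True},
#         na_prob_thresh=0.5,
#     )
#     # -> {"q1": 1.0, "q2": 0.0}   (q2 is forced to "no answer" although it has one)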
def make_eval_dict(exact_scores, f1_scores, qid_list=None):
"""returns dictionary with formatted evaluation scores"""
if not qid_list:
total = len(exact_scores)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores.values()) / total),
("f1", 100.0 * sum(f1_scores.values()) / total),
("total", total),
]
)
else:
total = len(qid_list)
return collections.OrderedDict(
[
("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
("total", total),
]
)
def merge_eval(main_eval, new_eval, prefix):
"""Merges 2 evaluation dictionaries into the first one by adding prefix as key for name collision handling"""
for k in new_eval:
main_eval["%s_%s" % (prefix, k)] = new_eval[k]
def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
"""
Find the best thresholds that maximize all evaluation metrics.
"""
best_exact, exact_thresh = _find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh = _find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval["best_exact"] = best_exact
main_eval["best_exact_thresh"] = exact_thresh
main_eval["best_f1"] = best_f1
main_eval["best_f1_thresh"] = f1_thresh
def _find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
"""
Find the best threshold that maximizes the evaluation metric
"""
num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
cur_score = num_no_ans
best_score = cur_score
best_thresh = 0.0
qid_list = sorted(na_probs, key=lambda k: na_probs[k])
for _, qid in enumerate(qid_list):
if qid not in scores:
continue
if qid_to_has_ans[qid]:
diff = scores[qid]
else:
if preds[qid]:
diff = -1
else:
diff = 0
cur_score += diff
if cur_score > best_score:
best_score = cur_score
best_thresh = na_probs[qid]
return 100.0 * best_score / len(scores), best_thresh
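# Worked example (sketch): the sweep starts from the score obtained by answering "no answer"
# everywhere, then flips questions to "answered" in order of increasing no-answer probability
# and keeps the threshold at which the running score peaks.
#
#     preds = {"q1": "some span", "q2": ""}
#     scores = {"q1": 1, "q2": 0}                  # e.g. exact-match scores
#     na_probs = {"q1": 0.1, "q2": 0.8}
#     qid_to_has_ans = {"q1": True, "q2": False}
#
#     # base score = 1 (q2 is correctly "no answer"); flipping q1 adds its score (+1),
#     # flipping q2 adds nothing because its prediction is already empty.
#     # _find_best_thresh(preds, scores, na_probs, qid_to_has_ans) -> (100.0, 0.1)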
def _improve_answer_span(
doc_tokens: List[str], input_start: int, input_end: int, tokenizer: object, orig_answer_text: str
):
"""Returns tokenized answer spans that better match the annotated answer."""
tok_answer_text = " ".join(tokenizer.text_to_tokens(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
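# Example of why this search helps (a sketch; the exact sub-word split is an assumption and
# depends on the tokenizer):
#
#     Context: "The leader was John Smith (1895-1943)."   Annotated answer: "1895"
#
#     # The whitespace-tokenized answer span covers the single word "(1895-1943).", but after
#     # sub-word tokenization the document tokens contain "(", "1895", "-", "1943", ")", ".",
#     # so the loop above tightens the span to the token "1895", matching the answer exactly.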
class SquadProcessor(DataProcessor):
"""
Processor for the SQuAD data set, used for both version 1.1 and version 2.0 of SQuAD.
Args:
data_file: data file path
mode: TRAINING_MODE/EVALUATION_MODE/INFERENCE_MODE for creating training/evaluation/inference dataset
"""
def __init__(self, data_file: str, mode: str):
self.data_file = data_file
self.mode = mode
# Memoizes documents to reduce memory use (as the same document is often used for many questions)
self.doc_id = 0
self.context_text_to_doc_id = {}
self.doc_id_to_context_text = {}
def get_examples(self):
"""
Get examples from raw json file
"""
if self.data_file is None:
raise ValueError(f"{self.mode} data file is None.")
# temporary fix: remove this comment and the '_cache' suffix replacement on the line below once no longer needed
with open(self.data_file.replace('_cache', ''), "r", encoding="utf-8") as reader:
input_data = ijson.items(reader, "data.item")
examples = []
len_docs = []
for entry in input_data:
title = entry["title"]
for paragraph in entry["paragraphs"]:
context_text = paragraph["context"]
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
if not question_text:
continue
start_position_character = None
answer_text = None
answers = []
if "is_impossible" in qa:
is_impossible = qa["is_impossible"] or len(qa["answers"]) < 1
else:
is_impossible = False
if not is_impossible:
if self.mode in [TRAINING_MODE, EVALUATION_MODE]:
answer = qa["answers"][0]
answer_text = answer["text"]
start_position_character = answer["answer_start"]
if self.mode == EVALUATION_MODE:
answers = qa["answers"]
if context_text in self.context_text_to_doc_id:
doc_id = self.context_text_to_doc_id[context_text]
else:
doc_id = self.doc_id
self.context_text_to_doc_id[context_text] = doc_id
self.doc_id_to_context_text[doc_id] = context_text
self.doc_id += 1
len_docs.append(len(context_text))
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
context_text=context_text,
context_id=doc_id,
answer_text=answer_text,
start_position_character=start_position_character,
title=title,
is_impossible=is_impossible,
answers=answers,
)
examples.append(example)
logging.info('mean no. of chars in doc: {}'.format(np.mean(len_docs)))
logging.info('max no. of chars in doc: {}'.format(np.max(len_docs)))
return examples
class SquadExample(object):
"""
A single training/test example for the Squad dataset, as loaded from disk.
Args:
qas_id: The example's unique identifier
question_text: The question string
context_text: The context string
context_id: id representing context string
answer_text: The answer string
start_position_character: The character position of the start of
the answer, 0 indexed
title: The title of the example
answers: None by default, this is used during evaluation.
Holds answers as well as their start positions.
is_impossible: False by default, set to True if the example has
no possible answer.
"""
def __init__(
self,
qas_id: str,
question_text: str,
context_text: str,
context_id: int,
answer_text: str,
start_position_character: int,
title: str,
answers: List[str] = [],
is_impossible: bool = False,
):
self.qas_id = qas_id
self.question_text = question_text
self.context_id = context_id
self.answer_text = answer_text
self.title = title
self.is_impossible = is_impossible
self.answers = answers
self.start_position_character = start_position_character
| NeMo-main | nemo/collections/nlp/data/question_answering_squad/qa_squad_processing.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/nlp/data/question_answering_squad/__init__.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import pickle
from functools import lru_cache
from typing import Dict, List, Optional
import numpy as np
import psutil
import torch
from tqdm import trange
from nemo.collections.common.parts.utils import _compute_softmax
from nemo.collections.nlp.data.data_utils import is_whitespace
from nemo.collections.nlp.data.question_answering_squad.qa_squad_processing import (
EVALUATION_MODE,
INFERENCE_MODE,
TRAINING_MODE,
SquadProcessor,
_improve_answer_span,
apply_no_ans_threshold,
exact_match_score,
f1_score,
find_all_best_thresh,
get_best_indexes,
get_final_text,
make_eval_dict,
merge_eval,
normalize_answer,
)
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['SquadDataset', 'InputFeatures', '_check_is_max_context']
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
unique_id: int,
input_ids: List[int],
input_mask: List[int],
segment_ids: List[int],
example_index: int = None,
doc_span_index: int = None,
tokens: List[str] = None,
token_to_orig_map: Dict[int, int] = None,
token_is_max_context: Dict[int, bool] = None,
start_position: Optional[int] = None,
end_position: Optional[int] = None,
is_impossible: Optional[int] = None,
):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token.
Because of the sliding window approach taken to scoring documents,
a single token can appear in multiple document spans.
Example:
Doc: the man went to the store and bought a gallon of milk
Span A: the man went to the
Span B: to the store and bought
Span C: and bought a gallon of
...
Now the word 'bought' will have two scores from spans B and C. We only
want to consider the score with "maximum context", which we define as
the *minimum* of its left and right context (the *sum* of left and
right context will always be the same, of course).
In the example the maximum context for 'bought' would be span C since
it has 1 left context and 3 right context, while span B has 4 left context
and 0 right context.
Code adapted from the code by the Google AI and HuggingFace.
"""
best_span_index = get_best_span_index(doc_spans, position)
return cur_span_index == best_span_index
@lru_cache(maxsize=10000)
def get_best_span_index(doc_spans, position):
"""
For a particular position, identify which doc_span gives the most context around token
Helper function for _check_is_max_context; see _check_is_max_context for more details
"""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return best_span_index
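# Example (sketch, reusing the spans from the _check_is_max_context docstring and treating
# word indices as token positions):
#
#     Doc: the(0) man(1) went(2) to(3) the(4) store(5) and(6) bought(7) a(8) gallon(9) of(10) milk(11)
#     Span B = DocSpan(start=3, length=5)   # "to the store and bought"
#     Span C = DocSpan(start=6, length=5)   # "and bought a gallon of"
#
#     # For position 7 ("bought"):
#     #   Span B: min(left=4, right=0) + 0.01 * 5 = 0.05
#     #   Span C: min(left=1, right=3) + 0.01 * 5 = 1.05  -> Span C is the best span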
class SquadDataset(Dataset):
"""
Creates SQuAD dataset for Question Answering.
Args:
data_file (str): train.*.json, eval.*.json or test.*.json.
tokenizer (obj): Tokenizer object, e.g. AutoTokenizer.
version_2_with_negative (bool): True if training should allow
unanswerable questions.
doc_stride (int): When splitting up a long document into chunks,
how much stride to take between chunks.
max_query_length (int): maximum number of tokens in the question. Questions
longer than this are truncated to `max_query_length` tokens.
max_seq_length (int): maximum total input sequence length after tokenization
(question, context and special tokens). Contexts that do not fit are split
into multiple doc spans.
num_samples: number of samples you want to use for the dataset.
If -1, use all dataset. Useful for testing.
mode (str): Use TRAINING_MODE/EVALUATION_MODE/INFERENCE_MODE to define between
training, evaluation and inference dataset.
use_cache (bool): Caches preprocessed data for future usage
"""
def __init__(
self,
data_file: str,
keep_doc_spans: str,
tokenizer: object,
doc_stride: int,
max_query_length: int,
max_seq_length: int,
version_2_with_negative: bool,
num_samples: int,
mode: str,
use_cache: bool,
):
self.tokenizer = tokenizer
self.version_2_with_negative = version_2_with_negative
self.processor = SquadProcessor(data_file=data_file, mode=mode)
self.mode = mode
self.keep_doc_spans = keep_doc_spans
# hashing to reduce memory use
self.input_mask_id = 0
self.input_mask_id_to_input_mask = {}
self.input_mask_to_input_mask_id = {}
self.segment_mask_id = 0
self.segment_mask_id_to_segment_mask = {}
self.segment_mask_to_segment_mask_id = {}
if mode not in [TRAINING_MODE, EVALUATION_MODE, INFERENCE_MODE]:
raise ValueError(
f"mode should be either {TRAINING_MODE}, {EVALUATION_MODE}, {INFERENCE_MODE} but got {mode}"
)
self.examples = self.processor.get_examples()
vocab_size = getattr(tokenizer, "vocab_size", 0)
cached_features_file = (
data_file
+ '_cache'
+ '_{}_{}_{}_{}_{}_{}_{}'.format(
mode,
tokenizer.name,
str(vocab_size),
str(max_seq_length),
str(doc_stride),
str(max_query_length),
str(num_samples),
)
)
# check number of samples. Should be either -1 not to limit or positive number
if num_samples == 0:
raise ValueError(
f"num_samples has to be positive or -1 (to use the entire dataset), however got {num_samples}."
)
elif num_samples > 0:
self.examples = self.examples[:num_samples]
if use_cache and os.path.exists(cached_features_file):
logging.info(f"loading from {cached_features_file}")
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
del self.examples
del self.processor
with open(cached_features_file, "rb") as reader:
items_to_pickle = pickle.load(reader)
(
self.features,
self.input_mask_id_to_input_mask,
self.input_mask_to_input_mask_id,
self.segment_mask_id_to_segment_mask,
self.segment_mask_to_segment_mask_id,
) = items_to_pickle
items_to_pickle = None
del items_to_pickle
else:
logging.info(f"Preprocessing data.")
self.features = self.convert_examples_to_features(
examples=self.examples,
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
has_groundtruth=mode != INFERENCE_MODE,
)
if use_cache:
master_device = not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0
if master_device:
logging.info(" Saving train features into cached file %s", cached_features_file)
with open(cached_features_file, "wb") as writer:
items_to_pickle = [
self.features,
self.input_mask_id_to_input_mask,
self.input_mask_to_input_mask_id,
self.segment_mask_id_to_segment_mask,
self.segment_mask_to_segment_mask_id,
]
pickle.dump(items_to_pickle, writer)
# delete self.examples during training mode to save memory
if self.mode == TRAINING_MODE:
self.examples = []
del self.processor
logging.info("Converting dict features into object features")
for i in trange(len(self.features)):
self.features[i] = InputFeatures(**self.features[i])
@staticmethod
def get_doc_tokens_and_offset_from_context_id(
context_id, start_position_character, is_impossible, answer_text, context_id_to_context_text
):
start_position, end_position = 0, 0
context_text = context_id_to_context_text[context_id]
doc_tokens, char_to_word_offset = SquadDataset.split_into_words(context_text)
# Start and end positions only have a value when ground-truth answers are available.
if start_position_character is not None and not is_impossible:
# start_position is index of word, end_position inclusive
start_position = char_to_word_offset[start_position_character]
end_position = char_to_word_offset[
min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
]
return doc_tokens, char_to_word_offset, start_position, end_position, context_text
@staticmethod
def split_into_words(context_text):
"""
Split on whitespace so that different tokens
may be attributed to their original position.
ex: context_text = "hi yo"
char_to_word_offset = [0, 0, 0, 1, 1]
doc_tokens = ["hi", "yo"]
"""
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in context_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
return doc_tokens, char_to_word_offset
def __len__(self):
return len(self.features)
def __getitem__(self, idx):
"""Some features are obtained from hashmap to reduce CPU memory use"""
feature = self.features[idx]
if self.mode == INFERENCE_MODE:
return (
np.array(feature.input_ids),
np.array(self.segment_mask_id_to_segment_mask[feature.segment_ids]),
np.array(self.input_mask_id_to_input_mask[feature.input_mask]),
np.array(feature.unique_id),
)
else:
return (
np.array(feature.input_ids),
np.array(self.segment_mask_id_to_segment_mask[feature.segment_ids]),
np.array(self.input_mask_id_to_input_mask[feature.input_mask]),
np.array(feature.unique_id),
np.array(feature.start_position),
np.array(feature.end_position),
)
@staticmethod
def get_docspans(all_doc_tokens, max_tokens_for_doc, doc_stride):
"""
Get docspans which are sliding window spans from a document
Args:
all_doc_tokens: list of all tokens in document
max_tokens_for_doc: maximum number of tokens in each doc span
doc_stride: stride size which sliding window moves with
Returns:
doc_spans: all possible doc_spans from document
"""
_DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
return doc_spans
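# Example (minimal sketch): a 10-token document with max_tokens_for_doc=6 and doc_stride=3
# yields overlapping windows; the last window is shorter and ends at the document boundary.
#
#     SquadDataset.get_docspans(list(range(10)), max_tokens_for_doc=6, doc_stride=3)
#     # -> [DocSpan(start=0, length=6), DocSpan(start=3, length=6), DocSpan(start=6, length=4)]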
@staticmethod
def check_if_sufficient_memory():
"""
Check if there is sufficient memory to prevent the system from becoming unresponsive
Otherwise the system can become unresponsive as memory slowly fills up, possibly leaving it unable to kill the process
Interrupts the run if CPU memory use is more than 75%, to leave some capacity for model loading
"""
percent_memory = psutil.virtual_memory().percent
if percent_memory > 75:
raise ValueError('Please use a device with more CPU ram or a smaller dataset')
@staticmethod
def get_average_dist_to_tok_start_and_end(doc_span, tok_start_position, tok_end_position):
"""
Find distance between doc_span and answer_span to determine if doc_span is likely to be useful for the answer
Helper function to filter out doc_spans that may not be helpful
Args:
doc_span
tok_start_position: start position of answer in document
tok_end_position: end position of answer in document
Returns:
average distance of doc_span to answer
"""
center_answer = (tok_start_position + tok_end_position) // 2
dist_to_start = abs(doc_span.start - center_answer)
dist_to_end = abs(doc_span.start + doc_span.length - 1 - center_answer)
return (dist_to_start + dist_to_end) // 2
@staticmethod
def keep_relevant_docspans(doc_spans, tok_start_position, tok_end_position, mode):
"""
Filters out doc_spans, which might not be relevant to answering question,
which can be helpful when document is extremely long leading to many doc_spans with no answers
Args:
doc_spans: all possible doc_spans
tok_start_position: start position of answer in document
tok_end_position: end position of answer in document
mode:
all: do not filter
only_positive: only keep doc_spans containing the answer
limited_negative: only keep 10 doc_spans that are nearest to answer
Returns:
doc_spans: doc_spans after filtering
"""
if mode == 'all':
return doc_spans
elif mode == 'only_positive':
if tok_start_position in [-1, None] or tok_end_position in [-1, None]:
return []
else:
return [
doc_span
for doc_span in doc_spans
if tok_start_position >= doc_span.start
and tok_end_position <= doc_span.start + doc_span.length - 1
]
elif mode == 'limited_negative':
n_candidates = 10
if tok_start_position in [-1, None] or tok_end_position in [-1, None]:
pass
else:
doc_spans.sort(
key=lambda doc_span: SquadDataset.get_average_dist_to_tok_start_and_end(
doc_span, tok_start_position, tok_end_position
)
)
return doc_spans[:n_candidates]
else:
raise ValueError('mode can only be one of {all, only_positive, limited_negative}')
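# Example (sketch, reusing the doc spans from the get_docspans example) for mode='only_positive'
# with an answer spanning token positions 7-8:
#
#     doc_spans = [DocSpan(start=0, length=6), DocSpan(start=3, length=6), DocSpan(start=6, length=4)]
#     SquadDataset.keep_relevant_docspans(doc_spans, tok_start_position=7, tok_end_position=8, mode='only_positive')
#     # -> [DocSpan(start=3, length=6), DocSpan(start=6, length=4)]   only spans containing the answer are kept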
def convert_examples_to_features(
self,
examples: List[object],
tokenizer: object,
max_seq_length: int,
doc_stride: int,
max_query_length: int,
has_groundtruth: bool,
):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
features = []
text_to_tokens_dict = {}
for example_index in trange(len(examples)):
if example_index % 1000 == 0:
SquadDataset.check_if_sufficient_memory()
example = examples[example_index]
if example.question_text not in text_to_tokens_dict:
text_to_tokens_dict[example.question_text] = tokenizer.text_to_tokens(example.question_text)[
:max_query_length
]
query_tokens = text_to_tokens_dict[example.question_text]
# context: index of token -> index of word
tok_to_orig_index = []
# context: index of word -> index of first token in token list
orig_to_tok_index = []
# context without white spaces after tokenization
all_doc_tokens = []
# doc tokens is word separated context
(
doc_tokens,
char_to_word_offset,
start_position,
end_position,
context_text,
) = SquadDataset.get_doc_tokens_and_offset_from_context_id(
example.context_id,
example.start_position_character,
example.is_impossible,
example.answer_text,
self.processor.doc_id_to_context_text,
)
example.start_position = start_position
example.end_position = end_position
if self.mode != TRAINING_MODE:
example.doc_tokens = doc_tokens
# the text to tokens step is the slowest step
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
if token not in text_to_tokens_dict:
text_to_tokens_dict[token] = tokenizer.text_to_tokens(token)
sub_tokens = text_to_tokens_dict[token]
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# idx of query token start and end in context
tok_start_position = None
tok_end_position = None
if has_groundtruth and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if has_groundtruth and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
)
# The -3 accounts for one tokenizer.cls_token and two tokenizer.sep_token tokens
# doc_spans contains all possible contexts options of given length
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
doc_spans = SquadDataset.get_docspans(all_doc_tokens, max_tokens_for_doc, doc_stride)
doc_spans = SquadDataset.keep_relevant_docspans(
doc_spans, tok_start_position, tok_end_position, self.keep_doc_spans
)
# make compatible for hashing
doc_spans = tuple(doc_spans)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = [tokenizer.cls_token] + query_tokens + [tokenizer.sep_token]
segment_ids = [0 for i in range(len(tokens))]
token_is_max_context = {}
# maps context tokens idx in final input -> word idx in context
token_to_orig_map = {}
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append(tokenizer.sep_token)
segment_ids.append(1)
input_ids = tokenizer.tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens.
# Only real tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(tokenizer.pad_id)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
# calculate start and end position in final array
# of tokens in answer if no answer,
# 0 for both pointing to tokenizer.cls_token
start_position = 0
end_position = 0
if has_groundtruth and not example.is_impossible:
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if has_groundtruth and example.is_impossible:
# impossible examples have no annotated span,
# so both positions point at the [CLS] token
start_position = 0
end_position = 0
if example_index < 1:
logging.info("*** Example ***")
logging.info("unique_id: %s" % (unique_id))
logging.info("example_index: %s" % (example_index))
logging.info("doc_span_index: %s" % (doc_span_index))
logging.info("tokens: %s" % " ".join(tokens))
logging.info(
"token_to_orig_map: %s" % " ".join(["%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()])
)
logging.info(
"token_is_max_context: %s"
% " ".join(["%d:%s" % (x, y) for (x, y) in token_is_max_context.items()])
)
logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if has_groundtruth and example.is_impossible:
logging.info("impossible example")
if has_groundtruth and not example.is_impossible:
answer_text = " ".join(tokens[start_position : (end_position + 1)])
logging.info("start_position: %d" % (start_position))
logging.info("end_position: %d" % (end_position))
logging.info("answer: %s" % (answer_text))
# memoization to save CPU memory for large datasets
input_mask = tuple(input_mask)
if input_mask in self.input_mask_to_input_mask_id:
feature_input_mask_id = self.input_mask_to_input_mask_id[input_mask]
else:
self.input_mask_id_to_input_mask[self.input_mask_id] = input_mask
self.input_mask_to_input_mask_id[input_mask] = self.input_mask_id
feature_input_mask_id = self.input_mask_id
self.input_mask_id += 1
segment_mask = tuple(segment_ids)
if segment_mask in self.segment_mask_to_segment_mask_id:
feature_segment_mask_id = self.segment_mask_to_segment_mask_id[segment_mask]
else:
self.segment_mask_id_to_segment_mask[self.segment_mask_id] = segment_mask
self.segment_mask_to_segment_mask_id[segment_mask] = self.segment_mask_id
feature_segment_mask_id = self.segment_mask_id
self.segment_mask_id += 1
# end memoization
if self.mode == TRAINING_MODE:
input_feature = {
"unique_id": unique_id,
"input_ids": input_ids,
"input_mask": feature_input_mask_id,
"segment_ids": feature_segment_mask_id,
"start_position": start_position,
"end_position": end_position,
}
else:
input_feature = {
"unique_id": unique_id,
"input_ids": input_ids,
"input_mask": feature_input_mask_id,
"segment_ids": feature_segment_mask_id,
"start_position": start_position,
"end_position": end_position,
"example_index": example_index,
"doc_span_index": doc_span_index,
"tokens": tokens,
"token_to_orig_map": token_to_orig_map,
"token_is_max_context": token_is_max_context,
"is_impossible": example.is_impossible,
}
features.append(input_feature)
unique_id += 1
return features
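# Layout of a single feature produced above (sketch; the token strings are assumptions and
# depend on the tokenizer):
#
#     tokens      = [CLS] what  is  nemo  ?  [SEP] nemo  is  a  toolkit ... [SEP] [PAD] ...
#     segment_ids =   0    0    0    0    0    0     1    1   1     1    ...   1     0   ...
#     input_mask  =   1    1    1    1    1    1     1    1   1     1    ...   1     0   ...
#
#     # start_position / end_position index into `tokens`; for unanswerable questions or
#     # out-of-span chunks both point at position 0, i.e. the [CLS] token.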
def get_predictions(
self,
unique_ids: List[int],
start_logits: List[List[float]],
end_logits: List[List[float]],
n_best_size: int,
max_answer_length: int,
do_lower_case: bool,
version_2_with_negative: bool,
null_score_diff_threshold: float,
):
example_index_to_features = collections.defaultdict(list)
unique_id_to_pos = {}
for index, unique_id in enumerate(unique_ids):
unique_id_to_pos[unique_id] = index
for feature in self.features:
example_index_to_features[feature.example_index].append(feature)
_PrelimPrediction = collections.namedtuple(
"PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
)
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(self.examples):
# finish this loop if we went through all batch examples
if example_index >= len(unique_ids):
break
features = example_index_to_features[example_index]
doc_tokens, _, _, _, _ = SquadDataset.get_doc_tokens_and_offset_from_context_id(
example.context_id,
example.start_position_character,
example.is_impossible,
example.answer_text,
self.processor.doc_id_to_context_text,
)
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
# large and positive
score_null = 1000000
# the paragraph slice with min null score
min_null_feature_index = 0
# start logit at the slice with min null score
null_start_logit = 0
# end logit at the slice with min null score
null_end_logit = 0
for (feature_index, feature) in enumerate(features):
pos = unique_id_to_pos[feature.unique_id]
start_indexes = get_best_indexes(start_logits[pos], n_best_size)
end_indexes = get_best_indexes(end_logits[pos], n_best_size)
# if we could have irrelevant answers,
# get the min score of irrelevant
if version_2_with_negative:
feature_null_score = start_logits[pos][0] + end_logits[pos][0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = start_logits[pos][0]
null_end_logit = end_logits[pos][0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions,
# e.g., predict that the start of the span is in the
# question. We throw out all invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=start_logits[pos][start_index],
end_logit=end_logits[pos][end_index],
)
)
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit,
)
)
prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
_NbestPrediction = collections.namedtuple("NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = doc_tokens[orig_doc_start : (orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
# In very rare edge cases we could only
# have single null pred. We just create a nonce prediction
# in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["question"] = example.question_text
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = (
entry.start_logit
if (isinstance(entry.start_logit, float) or isinstance(entry.start_logit, int))
else list(entry.start_logit)
)
output["end_logit"] = (
entry.end_logit
if (isinstance(entry.end_logit, float) or isinstance(entry.end_logit, int))
else list(entry.end_logit)
)
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score -
# the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json, scores_diff_json
def evaluate_predictions(
self,
all_predictions: Dict[str, str],
no_answer_probs: Optional[float] = None,
no_answer_probability_threshold: float = 1.0,
):
qas_id_to_has_answer = {
example.qas_id: bool(example.answers) for example in self.examples[: len(all_predictions)]
}
has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
if no_answer_probs is None:
no_answer_probs = {k: 0.0 for k in all_predictions}
exact, f1 = self.get_raw_scores(all_predictions)
exact_threshold = apply_no_ans_threshold(
exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
)
f1_threshold = apply_no_ans_threshold(
f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
)
evaluation = make_eval_dict(exact_threshold, f1_threshold)
if has_answer_qids:
has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
merge_eval(evaluation, has_ans_eval, "HasAns")
if no_answer_qids:
no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
merge_eval(evaluation, no_ans_eval, "NoAns")
if no_answer_probs:
find_all_best_thresh(evaluation, all_predictions, exact, f1, no_answer_probs, qas_id_to_has_answer)
return evaluation["best_exact"], evaluation["best_f1"]
def get_raw_scores(self, preds: Dict[str, str]):
"""
Computes the exact and f1 scores from the examples
and the model predictions
"""
exact_scores = {}
f1_scores = {}
for example in self.examples:
qas_id = example.qas_id
gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
if not gold_answers:
# For unanswerable questions,
# only correct answer is empty string
gold_answers = [""]
if qas_id not in preds:
logging.warning("Missing prediction for %s" % qas_id)
continue
prediction = preds[qas_id]
exact_scores[qas_id] = max(exact_match_score(a, prediction) for a in gold_answers)
f1_scores[qas_id] = max(f1_score(a, prediction) for a in gold_answers)
return exact_scores, f1_scores
def evaluate(
self,
unique_ids: List[str],
start_logits: List[List[float]],
end_logits: List[List[float]],
n_best_size: int,
max_answer_length: int,
do_lower_case: bool,
version_2_with_negative: bool,
null_score_diff_threshold: float,
):
(all_predictions, all_nbest_json, scores_diff_json) = self.get_predictions(
unique_ids,
start_logits,
end_logits,
n_best_size,
max_answer_length,
do_lower_case,
version_2_with_negative,
null_score_diff_threshold,
)
exact_match, f1 = self.evaluate_predictions(all_predictions)
return exact_match, f1, all_predictions, all_nbest_json
| NeMo-main | nemo/collections/nlp/data/question_answering_squad/qa_dataset.py |
# Copyright 2018 The Google AI Language Team Authors and
# The HuggingFace Inc. team.
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for Token Classification NLP tasks
Some parts of this code were adapted from the HuggingFace library at
https://github.com/huggingface/pytorch-pretrained-BERT
"""
import os
import pickle
import tempfile
import time
from typing import Dict, List, Optional
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import ChannelType, LabelsType, MaskType, NeuralType
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
__all__ = ['BertTokenClassificationDataset', 'BertTokenClassificationInferDataset']
def get_features(
queries: List[str],
tokenizer: TokenizerSpec,
max_seq_length: int = -1,
label_ids: dict = None,
pad_label: str = 'O',
raw_labels: List[str] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = False,
):
"""
Processes the data and returns features.
Args:
queries: text sequences
tokenizer: such as AutoTokenizer
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]; when set to -1, the max length found in the data is used
pad_label: pad value used for labels. By default, it's the neutral label.
raw_labels: list of labels for every word in a sequence
label_ids: dict to map labels to label ids.
Starts with pad_label->0 and then increases in alphabetical order.
Required for training and evaluation, not needed for inference.
ignore_extra_tokens: whether to ignore extra tokens in the loss_mask
ignore_start_end: whether to ignore bos and eos tokens in the loss_mask
"""
all_subtokens = []
all_loss_mask = []
all_subtokens_mask = []
all_segment_ids = []
all_input_ids = []
all_input_mask = []
sent_lengths = []
all_labels = []
with_label = False
if raw_labels is not None:
with_label = True
for i, query in enumerate(queries):
words = query.strip().split()
# add bos token
subtokens = [tokenizer.cls_token]
loss_mask = [1 - ignore_start_end]
subtokens_mask = [0]
if with_label:
pad_id = label_ids[pad_label]
labels = [pad_id]
query_labels = [label_ids[lab] for lab in raw_labels[i]]
for j, word in enumerate(words):
word_tokens = tokenizer.text_to_tokens(word)
# to handle emojis that could be neglected during tokenization
if len(word.strip()) > 0 and len(word_tokens) == 0:
word_tokens = [tokenizer.ids_to_tokens(tokenizer.unk_id)]
subtokens.extend(word_tokens)
loss_mask.append(1)
loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1))
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_tokens) - 1))
if with_label:
labels.extend([query_labels[j]] * len(word_tokens))
# add eos token
subtokens.append(tokenizer.sep_token)
loss_mask.append(1 - ignore_start_end)
subtokens_mask.append(0)
sent_lengths.append(len(subtokens))
all_subtokens.append(subtokens)
all_loss_mask.append(loss_mask)
all_subtokens_mask.append(subtokens_mask)
all_input_mask.append([1] * len(subtokens))
if with_label:
labels.append(pad_id)
all_labels.append(labels)
max_seq_length_data = max(sent_lengths)
max_seq_length = min(max_seq_length, max_seq_length_data) if max_seq_length > 0 else max_seq_length_data
logging.info(f'Setting Max Seq length to: {max_seq_length}')
get_stats(sent_lengths)
too_long_count = 0
for i, subtokens in enumerate(all_subtokens):
if len(subtokens) > max_seq_length:
subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :]
all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :]
all_loss_mask[i] = [int(not ignore_start_end)] + all_loss_mask[i][-max_seq_length + 1 :]
all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :]
if with_label:
all_labels[i] = [pad_id] + all_labels[i][-max_seq_length + 1 :]
too_long_count += 1
all_input_ids.append(tokenizer.tokens_to_ids(subtokens))
if len(subtokens) < max_seq_length:
extra = max_seq_length - len(subtokens)
all_input_ids[i] = all_input_ids[i] + [0] * extra
all_loss_mask[i] = all_loss_mask[i] + [0] * extra
all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra
all_input_mask[i] = all_input_mask[i] + [0] * extra
if with_label:
all_labels[i] = all_labels[i] + [pad_id] * extra
all_segment_ids.append([0] * max_seq_length)
logging.warning(f'{too_long_count} sequences are longer than {max_seq_length} and were truncated')
for i in range(min(len(all_input_ids), 1)):
logging.info("*** Example ***")
logging.info("i: %s", i)
logging.info("subtokens: %s", " ".join(list(map(str, all_subtokens[i]))))
logging.info("loss_mask: %s", " ".join(list(map(str, all_loss_mask[i]))))
logging.info("input_mask: %s", " ".join(list(map(str, all_input_mask[i]))))
logging.info("subtokens_mask: %s", " ".join(list(map(str, all_subtokens_mask[i]))))
if with_label:
logging.info("labels: %s", " ".join(list(map(str, all_labels[i]))))
return (all_input_ids, all_segment_ids, all_input_mask, all_subtokens_mask, all_loss_mask, all_labels)
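# Example (sketch; the sub-word split of "paris" is an assumption and depends on the tokenizer),
# with ignore_extra_tokens=True and ignore_start_end=False:
#
#     query = "i love paris"             raw labels = ["O", "O", "B-LOC"]
#     subtokens      = [CLS]   i   love   pa   ##ris  [SEP]
#     subtokens_mask =   0     1    1      1     0      0     # 1 marks the first sub-token of each word
#     loss_mask      =   1     1    1      1     0      1     # extra sub-tokens are excluded from the loss
#     labels         =  pad    O    O    B-LOC  B-LOC  pad    # word labels are repeated over sub-tokens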
class BertTokenClassificationDataset(Dataset):
"""
Creates dataset to use during training for token classification tasks with a pretrained model.
Converts from raw data to an instance that can be used by Dataloader.
For dataset to use during inference without labels, see BertTokenClassificationInferDataset.
Args:
text_file: file to sequences, each line should a sentence, no header.
label_file: file to labels, each line corresponds to word labels for a sentence in the text_file. No header.
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
tokenizer: such as AutoTokenizer
num_samples: number of samples you want to use for the dataset.
If -1, use all dataset. Useful for testing.
pad_label: pad value used for labels. By default, it's the neutral label.
label_ids (dict): dict to map labels to label ids.
Starts with pad_label->0 and then increases in alphabetical order
For dev set use label_ids generated during training to support
cases when not all labels are present in the dev set.
For training set label_ids should be None.
ignore_extra_tokens: whether to ignore extra tokens in the loss_mask
ignore_start_end: whether to ignore bos and eos tokens in the loss_mask
use_cache: whether to use processed data cache or not
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
text_file: str,
label_file: str,
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
pad_label: str = 'O',
label_ids: Dict[str, int] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = False,
use_cache: bool = True,
):
""" Initializes BertTokenClassificationDataset. """
data_dir = os.path.dirname(text_file)
text_filename = os.path.basename(text_file)
lbl_filename = os.path.basename(label_file)
if not text_filename.endswith('.txt'):
raise ValueError("{text_file} should have extension .txt")
vocab_size = getattr(tokenizer, "vocab_size", 0)
features_pkl = os.path.join(
data_dir,
f"cached__{text_filename}__{lbl_filename}__{tokenizer.name}_{max_seq_length}_{vocab_size}_{num_samples}",
)
master_device = is_global_rank_zero()
features = None
if master_device and (not use_cache or not os.path.exists(features_pkl)):
if num_samples == 0:
raise ValueError("num_samples has to be positive", num_samples)
with open(text_file, 'r') as f:
text_lines = f.readlines()
labels_lines = []
with open(label_file, 'r') as f:
for line in f:
line = line.strip().split()
labels_lines.append(line)
if len(labels_lines) != len(text_lines):
raise ValueError("Labels file should contain labels for every word")
if num_samples > 0:
dataset = list(zip(text_lines, labels_lines))
dataset = dataset[:num_samples]
dataset = list(zip(*dataset))
text_lines = dataset[0]
labels_lines = dataset[1]
features = get_features(
queries=text_lines,
max_seq_length=max_seq_length,
tokenizer=tokenizer,
pad_label=pad_label,
raw_labels=labels_lines,
label_ids=label_ids,
ignore_extra_tokens=ignore_extra_tokens,
ignore_start_end=ignore_start_end,
)
# save features to a temp file first to make sure that non-master processes don't start reading the file
# until the master process is done with writing
ofd, tmp_features_pkl = tempfile.mkstemp(
suffix='.pkl', prefix=os.path.basename(features_pkl), dir=os.path.dirname(features_pkl)
)
with os.fdopen(ofd, 'wb') as temp_f:
pickle.dump(features, temp_f)
os.rename(tmp_features_pkl, features_pkl)
logging.info(f'features saved to {features_pkl}')
# wait until the master process writes to the processed data files
if not master_device:
while features is None and not os.path.exists(features_pkl):
time.sleep(10)
if features is None:
features = pickle.load(open(features_pkl, 'rb'))
logging.info(f'features restored from {features_pkl}')
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_subtokens_mask = features[3]
self.all_loss_mask = features[4]
self.all_labels = features[5]
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.long),
np.array(self.all_subtokens_mask[idx]),
np.array(self.all_loss_mask[idx]),
np.array(self.all_labels[idx]),
)
class BertTokenClassificationInferDataset(Dataset):
"""
Creates dataset to use during inference for token classification tasks with a pretrained model.
For dataset to use during training with labels, see BertTokenClassificationDataset.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports.
"""
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
}
def __init__(
self, queries: List[str], max_seq_length: int, tokenizer: TokenizerSpec,
):
"""
Initializes BertTokenClassificationInferDataset
Args:
queries: text sequences
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
tokenizer: such as AutoTokenizer
"""
features = get_features(queries=queries, max_seq_length=max_seq_length, tokenizer=tokenizer)
self.all_input_ids = features[0]
self.all_segment_ids = features[1]
self.all_input_mask = features[2]
self.all_subtokens_mask = features[3]
def __len__(self):
return len(self.all_input_ids)
def __getitem__(self, idx):
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.long),
np.array(self.all_subtokens_mask[idx]),
)
| NeMo-main | nemo/collections/nlp/data/token_classification/token_classification_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'BertPunctuationCapitalizationDataset',
'LABEL_ID_DIR_FOR_NEMO_CHECKPOINT',
'Progress',
'PunctuationCapitalizationEvalDataConfig',
'PunctuationCapitalizationTrainDataConfig',
'create_label_ids',
'create_masks_and_segment_ids',
'is_legacy_data_config',
'legacy_data_config_to_new_data_config',
'load_label_ids',
'raise_not_equal_labels_error',
'save_label_ids',
]
import itertools
import multiprocessing as mp
import os
import pickle
import tempfile
from dataclasses import dataclass
from math import ceil
from pathlib import Path
from queue import Empty
from time import sleep
from typing import Any, Dict, List, Optional, Set, Tuple, Union
import numpy as np
import torch
from numpy import ndarray
from omegaconf import MISSING, DictConfig, OmegaConf
from torch.nn.utils.rnn import pad_sequence
from tqdm import tqdm
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils.data_preprocessing import get_label_stats, get_stats
from nemo.core.classes import Dataset
from nemo.core.neural_types import AudioSignal, ChannelType, LabelsType, LengthsType, MaskType, NeuralType
from nemo.utils import logging
from nemo.utils.get_rank import is_global_rank_zero
try:
from nemo.collections.asr.parts.preprocessing import AudioSegment
ASR_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
ASR_AVAILABLE = False
MAX_NUM_QUERIES_IN_SPLIT = 10 ** 4
TOKENIZATION_PROGRESS_REPORT_PERIOD = 10 ** 3
BATCH_MARK_UP_PROGRESS_REPORT_PERIOD = 10 ** 4
BATCH_BUILDING_PROGRESS_REPORT_PERIOD = 10 ** 4
LABEL_ID_DIR_FOR_NEMO_CHECKPOINT = "label_id_files_for_nemo_checkpoint"
@dataclass
class PunctuationCapitalizationDataConfigBase:
"""A base class for punctuation and capitalization data configs. This class does not define ``ds_item``
attribute which works differently for train and evaluation data."""
###################################################
# AUDIO DATASET PARAMETERS
###################################################
use_audio: bool = False
"""
Whether to use audio or not. If set to True you should provide ``audio_file``.
"""
audio_file: Optional[str] = None
"""
Path to the file with audio paths one per row.
"""
sample_rate: Optional[int] = 16000
"""
Sample rate of audios to use.
"""
use_bucketing: Optional[bool] = True
"""
Whether to pack samples into ``tokens_in_batch`` or not. Increases GPU utilization but may cause significant RAM consumption if used together with ``use_audio``.
"""
batch_size: Optional[int] = 32
"""
Batch size used if ``use_bucketing`` set to False.
"""
preload_audios: Optional[bool] = True
"""
If set to True, audios will be loaded during the dataset ``__init__`` call. Otherwise they will be loaded during the ``collate_fn`` call.
"""
###################################################
# PARAMETERS COMMON FOR REGULAR AND TARRED DATASETS
###################################################
use_tarred_dataset: bool = MISSING
"""Whether to use tarred dataset. If True, then you should provide ``tar_metadata_file``. Otherwise, you should
provide ``text_file``, ``labels_file``, ``tokens_in_batch``."""
label_info_save_dir: Optional[str] = None
"""A path to a directory where files created during dataset processing are stored. These files include label id
files and label stats files. By default, it is a directory containing ``text_file`` or ``tar_metadata_file``.
You may need this parameter if dataset directory is read-only and thus does not allow saving anything near dataset
files"""
#################################################
# REGULAR DATASET PARAMETERS
#################################################
text_file: Optional[str] = None
"""A path to a file with source text data without punctuation and capitalization."""
labels_file: Optional[str] = None
"""A path to a file with punctuation and capitalization labels in NeMo format. NeMo format is described in
`documentation
<https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format>`_
"""
tokens_in_batch: Optional[int] = None
"""Number of tokens in a batch including paddings and special tokens ([CLS], [SEP], [UNK]). This config does
not have ``batch_size`` parameter."""
max_seq_length: int = 512
"""Max number of tokens in a source sequence. ``max_seq_length`` includes [CLS] and [SEP] tokens. Sequences
which are too long will be clipped by removal of tokens from the end of a sequence."""
num_samples: int = -1
"""A number of samples loaded from ``text_file`` and ``labels_file`` which are used in the dataset. If this
parameter equals ``-1``, then all samples are used."""
use_cache: bool = True
"""Whether to use pickled features. If pickled features file does not exist or ``use_cache=False``, then features
are pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in words),
encoded punctuation and capitalization labels, label ids. Features creation consumes considerable time and this
``use_cache=True`` significantly speeds up training starting. Pickled features are also used for sharing features
between processes if data parallel training is used."""
cache_dir: Optional[str] = None
"""A path to a directory containing cache or directory where newly created cache is saved. By default, it is
a directory containing ``text_file``. You may need this parameter if cache for a dataset is going to be created
and the dataset directory is read-only.
``cache_dir`` and ``label_info_save_dir`` are separate parameters for the case when a cache is ready and this cache
is stored in a read-only directory. In this case you can pass a separate, writable ``label_info_save_dir``.
get_label_frequences: bool = False
"""Whether to show and save label frequencies. Frequencies are showed if ``verbose`` parameter is ``True``. If
``get_label_frequencies=True``, then frequencies are saved into ``label_info_save_dir``"""
verbose: bool = True
"""If ``True`` dataset instance will print progress messages and examples of acquired features."""
n_jobs: Optional[int] = 0
"""Number of workers used for features creation (tokenization, label encoding, and clipping). If 0, then
multiprocessing is not used; if ``None``, then n_jobs is equal to the number of CPU cores.
There can be weird deadlocking errors with some tokenizers (e.g. SentencePiece) if ``n_jobs`` is greater than zero.
"""
#################################################
# TARRED DATASET PARAMETERS
#################################################
tar_metadata_file: Optional[str] = None
"""A path to tarred dataset metadata file. Tarred metadata file and other parts of tarred dataset are usually
created by the script
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_
"""
tar_shuffle_n: int = 1
"""The size of shuffle buffer of `webdataset`. The number of batches which are permuted."""
shard_strategy: Optional[str] = 'scatter'
"""Tarred dataset shard distribution strategy chosen as a str value during ddp. Accepted values are `scatter` and `replicate`.
`scatter`: The default shard strategy applied by WebDataset, where each node gets a unique set of shards, which are permanently
pre-allocated and never changed at runtime. `replicate` is an optional shard strategy, where each node gets the entire set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime. The benefit of replication is that
it allows each node to sample data points from the entire dataset independently of other nodes, and reduces dependence on value of
``tar_shuffle_n``.
.. warning::
Replicated strategy allows every node to sample the entire set of available tar files, and therefore more than one node may sample
the same tarfile, and even sample the same data points! As such, there is no assured guarantee that all samples in the dataset
will be sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific occasions (when the number of
        shards is not divisible by ``world_size``), will not sample the entire dataset. For these reasons it is not advisable to use
tarred datasets as validation or test datasets.
"""
#################################################
# PYTORCH DATALOADER PARAMETERS
#################################################
shuffle: bool = True
"""Shuffle batches every epoch. For regular training datasets, the parameter also activates batch repacking every
epoch. For tarred dataset, it would be only batches permutation."""
drop_last: bool = False
"""In cases when data parallelism is used, ``drop_last`` defines the way data pipeline behaves when some replicas
are out of data and some are not. If ``drop_last`` is ``True``, then epoch ends in the moment when any replica runs
out of data. If ``drop_last`` is ``False``, then the replica will replace missing batch with a batch from a pool of
batches that the replica has already processed. If data parallelism is not used, then parameter ``drop_last`` does
not do anything. For more information see ``torch.utils.data.distributed.DistributedSampler``"""
pin_memory: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
num_workers: int = 8
"""See ``torch.utils.data.DataLoader`` documentation."""
persistent_workers: bool = True
"""See ``torch.utils.data.DataLoader`` documentation."""
@dataclass
class PunctuationCapitalizationTrainDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[str] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay."""
@dataclass
class PunctuationCapitalizationEvalDataConfig(PunctuationCapitalizationDataConfigBase):
ds_item: Optional[Any] = MISSING
"""Path to a directory where `tar_metadata_file` or `text_file` and `labels_file` lay. ``Any`` = ``str`` or
``List[str]``. If a ``List[str]``, then the model is tested or validated on several datasets."""
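# A minimal sketch of constructing a new-style training data config programmatically (the paths below are
# illustrative, not defaults):
#
#     from omegaconf import OmegaConf
#
#     cfg = OmegaConf.structured(
#         PunctuationCapitalizationTrainDataConfig(
#             use_tarred_dataset=False,
#             ds_item='/data/punct',
#             text_file='text_train.txt',
#             labels_file='labels_train.txt',
#         )
#     )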
def is_legacy_data_config(ds_section: DictConfig) -> bool:
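    """Returns ``True`` if ``ds_section`` is an old-style data config, i.e. it lacks the ``use_tarred_dataset``
    parameter."""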
return 'use_tarred_dataset' not in ds_section
def legacy_data_config_to_new_data_config(
ds_section: DictConfig, legacy_dataset_section: DictConfig, train: bool
) -> DictConfig:
"""
    Transform an old-style dataset config into the new format.
    Args:
        ds_section: a dataset section (``train_ds``, ``validation_ds``, or ``test_ds``) from an old-style config.
            Such a section contains the ``batch_size`` parameter.
        legacy_dataset_section: a ``model.dataset`` section. The ``model.dataset`` section contains the ``data_dir``
            parameter.
        train: ``True`` if ``train_ds`` is transformed and ``False`` otherwise
    Returns:
        a new format dataset config based on either ``PunctuationCapitalizationTrainDataConfig`` (``train=True``) or
        ``PunctuationCapitalizationEvalDataConfig`` (``train=False``)
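    A minimal conversion sketch (the config values are illustrative):

    .. code-block:: python

        legacy_ds_section = OmegaConf.create({'text_file': 'text_train.txt', 'labels_file': 'labels_train.txt'})
        legacy_dataset_section = OmegaConf.create({'data_dir': '/data/punct', 'max_seq_length': 128})
        new_cfg = legacy_data_config_to_new_data_config(legacy_ds_section, legacy_dataset_section, train=True)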
"""
if train:
cls = PunctuationCapitalizationTrainDataConfig
ds_item = legacy_dataset_section.get('data_dir')
else:
cls = PunctuationCapitalizationEvalDataConfig
ds_item = ds_section.get('ds_item')
ds_item = legacy_dataset_section.get('data_dir') if ds_item is None else ds_item
if ds_item is None:
raise ValueError(
f"Data directory was not found in legacy config.\nspecific dataset configuration:\n"
f"{OmegaConf.to_yaml(ds_section)}\nmodel.dataset:\n{OmegaConf.to_yaml(legacy_dataset_section)}"
)
new_config = OmegaConf.structured(
cls(
use_tarred_dataset=False,
text_file=ds_section.text_file,
labels_file=ds_section.labels_file,
ds_item=ds_item,
max_seq_length=legacy_dataset_section.get(
'max_seq_length', PunctuationCapitalizationDataConfigBase.max_seq_length
),
)
)
return new_config
def _check_number_of_labels(
words: List[str],
query: str,
qi: int,
split_i: int,
punctuation_labels: List[str],
capitalization_labels: List[str],
) -> None:
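    """
    Checks that the number of punctuation and capitalization labels for a query equals the number of words in the
    query, and raises a ``ValueError`` otherwise. ``qi`` and ``split_i`` are the indices of the query and of the data
    split and are only used in error messages.
    """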
if len(words) != len(punctuation_labels):
raise ValueError(
f"Number of punctuation labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of punctuation labels: "
f"{len(punctuation_labels)}. First 100 characters of the query: '{query[:100]}', punctuation labels: "
f"'{punctuation_labels}'"
)
if len(words) != len(capitalization_labels):
raise ValueError(
f"Number of capitalization labels for a query number {qi} in a split number {split_i} is not equal to "
f"number of words. Number of words: {len(words)}, number of capitalization labels: "
f"{len(capitalization_labels)}. First 100 characters of the query: '{query[:100]}', "
f"capitalization labels: '{capitalization_labels}'"
)
def _show_prog(queues: Tuple[mp.Queue, ...], totals: List[int], descriptions: List[str], units: List[str]) -> None:
"""
Show several ``tqdm`` progress bars.
Args:
queues: a list of queues by which progress is delivered into this function. Each queue is responsible for one
progress bar. ``show_prog`` function extracts integers from ``queues`` elements and adds them to progress
bars. If value extracted from a queue equals ``-1``, then corresponding progress bar is closed. When all
progress bars are closed, this function returns.
        totals: a list of values constituting 100% of progress bars. See more in the description of the ``total`` parameter of
``tqdm.tqdm`` function
descriptions: list of descriptions of progress bars. See more in a description of ``desc`` parameter of
``tqdm.tqdm`` function
units: list of progress bar units. See more in a description of ``unit`` parameter of ``tqdm.tqdm`` function
"""
if not all([len(queues) == len(v) for v in [totals, descriptions, units]]):
raise ValueError(
f"All of parameters `queues`, `total_num_lines`, `descriptions`, `units` have to have equal lengths. "
f"len(queues)={len(queues)}, len(total_num_lines)={len(totals)}, "
f"len(descriptions)={len(descriptions)}, len(units)={len(units)}."
)
prog = [
tqdm(total=tt, desc=dd, unit=uu, unit_scale=True, position=i)
for i, (tt, dd, uu) in enumerate(zip(totals, descriptions, units))
]
finished = [False] * len(queues)
while True:
for i, queue in enumerate(queues):
stop = False
to_add = 0
try:
v = queue.get(block=False)
while v != -1:
to_add += v
v = queue.get(block=False)
stop = True
except Empty:
if to_add == 0 and not stop:
continue
prog[i].n += to_add
prog[i].update(0)
if prog[i].n >= totals[i]:
finished[i] = True
prog[i].close()
if stop:
if prog[i].n < totals[i]:
logging.warning(
f"Progress with description '{descriptions[i]}' terminated before progress bar "
f"reached 100%. prog.n={prog[i].n}, total_num_lines={totals[i]}"
)
finished[i] = True
prog[i].close()
if all(finished):
break
sleep(0.1)
class Progress:
"""
Manages several ``tqdm`` progress bars for multiprocess tasks. This class can be used as context manager.
The class starts separate process which creates and updates progress bars. Information to progress process is
passed via multiprocessing queues. There is a separate queue for every progress bar.
You can use it as context manager:
.. code-block:: python
with Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"]) as progress_queues:
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
Or without context manager:
.. code-block:: python
progress = Progress([10, 20], ["progress bar 1", "progress bar 2"], ["parrot", "frog"])
        progress_queues = progress.get_queues()
num_processes = 10
with multiprocessing.Pool(num_processes) as pool:
data = list(zip(my_data, [progress_queues[0]] * num_processes, [progress_queues[1]] * num_processes))
pool.starmap(worker_func, data)
progress.finish()
    In a worker function you will have to put the number of processed items into the progress queues. For example:
.. code-block:: python
def worker_func(my_datum, parrot_progress_queue, frog_progress_queue):
...
for i in range(10):
parrot_progress_queue.put(1)
frog_progress_queue.put(2)
Progress bars and progress process are closed when ``finish`` or ``__exit__`` methods are called.
"""
def __init__(self, total: Union[int, List[int]], desc: Union[str, List[str]], unit: Union[str, List[str]]) -> None:
"""
Starts progress process and creates queues for passing information to the progress process. Number of progress
bars is equal to the max length of lists ``total``, ``desc``, ``unit``. If none of these parameters is a list,
then 1 progress bar is created.
Args:
total: a list of ``int`` which length is equal to the number of progress bars OR an ``int`` OR a list of
one ``int``. Number which comprises 100% of progress bar. When sum of values passed through the
corresponding queue equals ``total`` corresponding progress bar reaches 100%. If ``total`` is an
``int`` or a list of one element, then all progress bars have equal ``total`` parameter.
desc: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. Description of a progress bar which is showed as a prefix. See more in description of
parameter ``desc`` of function ``tqdm.tqdm``.
unit: a list of ``str`` which length is equal to the number of progress bars OR a ``str`` OR a list of one
``str``. A unit of a progress bar. See more in description of parameter ``unit`` of function
``tqdm.tqdm``.
"""
if not isinstance(total, list):
total = [total]
if not isinstance(desc, list):
desc = [desc]
if not isinstance(unit, list):
unit = [unit]
num_processes = max([len(total), len(desc), len(unit)])
for param in [total, desc, unit]:
if len(param) not in [num_processes, 1]:
raise ValueError(
f"If parameter of `Progress.__init__` method is a list, then it has to be the same length as other "
f"parameters which are lists"
)
if len(param) == 1:
param *= num_processes
manager = mp.Manager()
self.progress_queues = tuple(manager.Queue() for _ in range(num_processes))
self.progress_process = mp.Process(target=_show_prog, args=(self.progress_queues, total, desc, unit))
self.progress_process.start()
def __enter__(self) -> Tuple[mp.Queue, ...]:
return self.get_queues()
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.finish()
def get_queues(self) -> Tuple[mp.Queue, ...]:
return self.progress_queues
def finish(self) -> None:
for q in self.progress_queues:
q.put(-1)
self.progress_process.join()
class TokenizeCreateMasksClipWorker:
"""A worker for tokenization, encoding labels, creating masks for first token in a word, sequence clipping"""
def __init__(
self,
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
pad_label: str,
verbose: bool,
progress_queue: mp.Queue,
) -> None:
"""
Args:
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of
tokens in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence
are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
            pad_label: pad value used for labels. By default, it's the neutral label for punctuation and capitalization.
Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
verbose: whether to report when the worker finishes its job
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
"""
self.max_seq_length = max_seq_length
self.tokenizer = tokenizer
self.punct_label_ids = punct_label_ids
self.capit_label_ids = capit_label_ids
self.pad_label = pad_label
self.verbose = verbose
self.progress_queue = progress_queue
def _maybe_clip(self, values: List[int], append_value: int) -> List[int]:
if len(values) > self.max_seq_length:
return values[: self.max_seq_length - 1] + [append_value]
return values
def __call__(
self,
queries: List[str],
punct_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
capit_label_lines: Optional[Union[List[str], Tuple[str, ...]]],
split_i: int,
audio_queries: Optional[List[str]] = None,
sample_rate: Optional[int] = None,
preload_audios: Optional[bool] = True,
) -> Tuple[
List[ndarray],
List[ndarray],
List[ndarray],
List[ndarray],
Union[List[Any], List[None]],
Union[List[Any], List[None]],
Union[List[Any], List[None]],
]:
"""
Tokenize, clip, encode labels, and create masks of first tokens in words.
Args:
queries: text sequences
punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
            capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
split_i: number of a split which is processed. Used for logging
audio_queries: a list of audio filepaths
sample_rate: target sample rate of audios
preload_audios: whether to preload audios or not
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of the corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in
one word have identical labels
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens
in one word have identical labels
"""
all_input_ids, all_subtokens_mask, punct_all_labels, capit_all_labels = [], [], [], []
dummy = [None] * len(queries) # Needed to avoid code duplication with different values of `self.use_audio`
all_audio_waveforms = [] if preload_audios else dummy
audio_lengths = [] if preload_audios else dummy
audio_filepaths = [] if not preload_audios else dummy
progress_made = 0
queries = zip(queries, audio_queries) if audio_queries else zip(queries, dummy)
for i, (query, audio_query) in enumerate(queries):
words = query.split()
input_ids, subtokens_mask = [self.tokenizer.cls_id], [0]
_check_number_of_labels(words, query, i, split_i, punct_label_lines[i], capit_label_lines[i])
pad_id = self.punct_label_ids[self.pad_label]
punct_labels = [pad_id]
punct_query_labels = [self.punct_label_ids[lab] for lab in punct_label_lines[i]]
capit_labels = [pad_id]
capit_query_labels = [self.capit_label_ids[lab] for lab in capit_label_lines[i]]
for j, word in enumerate(words):
word_ids = self.tokenizer.text_to_ids(word)
if not word_ids and len(word):
word_ids = [self.tokenizer.unk_id]
input_ids.extend(word_ids)
subtokens_mask.append(1)
subtokens_mask.extend([0] * (len(word_ids) - 1))
punct_labels.extend([punct_query_labels[j]] * len(word_ids))
capit_labels.extend([capit_query_labels[j]] * len(word_ids))
# add eos token
input_ids.append(self.tokenizer.sep_id)
subtokens_mask.append(0)
all_input_ids.append(np.array(self._maybe_clip(input_ids, self.tokenizer.sep_id), dtype=np.int32))
all_subtokens_mask.append(np.array(self._maybe_clip(subtokens_mask, 0), dtype=bool))
punct_labels.append(pad_id)
punct_all_labels.append(np.array(self._maybe_clip(punct_labels, pad_id), dtype=np.int32))
capit_labels.append(pad_id)
capit_all_labels.append(np.array(self._maybe_clip(capit_labels, pad_id), dtype=np.int32))
if preload_audios and audio_query:
if ASR_AVAILABLE:
segment = AudioSegment.from_file(audio_query.strip(), target_sr=sample_rate)
all_audio_waveforms.append(segment.samples)
audio_lengths.append(segment.num_samples)
else:
raise ModuleNotFoundError(
'Nemo ASR was not installed, see https://github.com/NVIDIA/NeMo#installation for installation instructions'
)
elif audio_query:
audio_filepaths.append(audio_query.strip())
progress_made += 1
if progress_made >= TOKENIZATION_PROGRESS_REPORT_PERIOD:
self.progress_queue.put(progress_made)
progress_made = 0
self.progress_queue.put(progress_made)
if self.verbose:
logging.info(f"Finished processing data split number {split_i}")
return (
all_input_ids,
all_subtokens_mask,
punct_all_labels,
capit_all_labels,
all_audio_waveforms,
audio_lengths,
audio_filepaths,
)
def _get_features(
queries: Union[List[str], Tuple[str, ...]],
punct_label_lines: Union[List[str], Tuple[str, ...]],
capit_label_lines: Union[List[str], Tuple[str, ...]],
max_seq_length: int,
tokenizer: TokenizerSpec,
punct_label_ids: Dict[str, int] = None,
capit_label_ids: Dict[str, int] = None,
pad_label: str = 'O',
verbose: bool = True,
n_jobs: Optional[int] = 0,
progress_queue: Optional[mp.Queue] = None,
audio_queries: Optional[List[str]] = None,
sample_rate: Optional[int] = None,
preload_audios: Optional[bool] = True,
) -> Tuple[List[Any], List[Any], List[Any], List[Any], List[Any], List[Any], List[Any]]:
"""
Tokenizes data, encodes labels, creates masks of first tokens in words, clips sequences by number of tokens.
Args:
queries: text sequences
max_seq_length: max number of tokens in an input sequence including [CLS] and [SEP] tokens. If number of tokens
in a sequence exceeds ``max_seq_length``, then excess tokens in the end of the sequence are removed
tokenizer: a tokenizer instance which has properties ``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``
punct_label_ids: dict to map punctuation labels to label ids. Starts with pad_label->0.
capit_label_ids: dict to map capitalization labels to label ids. Starts with pad_label->0.
        pad_label: pad value used for labels. By default, it's the neutral label for punctuation and capitalization.
            Its id in ``punct_label_ids`` and ``capit_label_ids`` has to be ``0``
        punct_label_lines: a list or a tuple of labels for every word in a sequence (str)
        capit_label_lines: a list or a tuple of labels for every word in a sequence (str)
verbose: whether to show examples of tokenized data and various progress information
n_jobs: a number of workers used for preparing features. If ``n_jobs <= 0``, then do not use multiprocessing
and run features creation in this process. If not set, number of workers will be equal to the number of
CPUs.
!!WARNING!!
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
progress_queue: a multiprocessing queue used for reporting progress. Useful for creating tarred dataset
audio_queries: a list of audio filepaths
sample_rate: target sample rate of audios
preload_audios: whether to preload audios or not
Returns:
input_ids: a list of 1D int32 arrays. Each array contains token ids of corresponding query
subtokens_mask: a list of 1D boolean arrays. An array element is ``True`` if corresponding token is the
first token in a word
punct_labels: a list of 1D int32 arrays. Encoded punctuation labels for every token in a query. Tokens in one
word have identical labels.
capit_labels: a list of 1D int32 arrays. Encoded capitalization labels for every token in a query. Tokens in
one word have identical labels
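    A minimal usage sketch (``tokenizer`` is assumed to be any ``TokenizerSpec`` instance):

    .. code-block:: python

        input_ids, subtokens_mask, _, _, _, punct_labels, capit_labels = _get_features(
            queries=['hello world'],
            punct_label_lines=[('O', '.')],
            capit_label_lines=[('U', 'O')],
            max_seq_length=128,
            tokenizer=tokenizer,
            punct_label_ids={'O': 0, '.': 1},
            capit_label_ids={'O': 0, 'U': 1},
            verbose=False,
            n_jobs=0,
        )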
"""
if verbose:
logging.info("Start initial tokenization.")
create_progress_process = progress_queue is None
if n_jobs is None:
n_jobs = min(mp.cpu_count(), len(queries))
if verbose:
logging.info(f"Running tokenization with {n_jobs} jobs.")
    # Number of queries in a split; guard against a zero split size when `n_jobs` exceeds the number of queries
    split_size = max(min(len(queries) // max(n_jobs, 1), MAX_NUM_QUERIES_IN_SPLIT), 1)
n_split = len(queries) // split_size
split_queries = [queries[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)] + [
queries[split_size * (n_split - 1) :]
]
split_punct_labels_lines = [
punct_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [punct_label_lines[split_size * (n_split - 1) :]]
split_capit_labels_lines = [
capit_label_lines[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)
] + [capit_label_lines[split_size * (n_split - 1) :]]
args = list(zip(split_queries, split_punct_labels_lines, split_capit_labels_lines, range(n_split)))
if audio_queries:
split_audio_queries = [audio_queries[split_size * i : split_size * (i + 1)] for i in range(n_split - 1)] + [
audio_queries[split_size * (n_split - 1) :]
]
args = list(
zip(
split_queries,
split_punct_labels_lines,
split_capit_labels_lines,
range(n_split),
split_audio_queries,
[sample_rate for _ in range(n_split)],
[preload_audios for _ in range(n_split)],
)
)
if create_progress_process:
progress = Progress(len(queries), "Tokenization", "query")
progress_queue = progress.get_queues()[0]
if n_jobs > 0:
with mp.Pool(n_jobs) as pool:
result = pool.starmap(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue,
),
args,
)
else:
result = []
for x in args:
result.append(
TokenizeCreateMasksClipWorker(
max_seq_length, tokenizer, punct_label_ids, capit_label_ids, pad_label, verbose, progress_queue,
)(*x)
)
if create_progress_process:
progress.finish()
input_ids, subtokens_mask, punct_labels, capit_labels, waveforms, audio_lengths, audio_filepaths = tuple(
list(itertools.chain(*e)) for e in zip(*result)
)
if verbose:
logging.info("Finished initial tokenization.")
get_stats([len(inp) for inp in input_ids])
logging.info(f"Finished clipping and padding.")
for i in range(min(len(input_ids), 5)):
logging.info("*** Example ***")
logging.info("i: %s" % i)
logging.info("subtokens: %s" % " ".join(list(map(str, input_ids[i]))))
logging.info("subtokens_mask: %s" % " ".join(list(map(str, subtokens_mask[i]))))
logging.info("punct_labels: %s" % " ".join(list(map(str, punct_labels[i]))))
logging.info("capit_labels: %s" % " ".join(list(map(str, capit_labels[i]))))
return (
input_ids,
subtokens_mask,
waveforms,
audio_lengths,
audio_filepaths,
punct_labels,
capit_labels,
)
def create_masks_and_segment_ids(
input_ids: np.ndarray,
subtokens_mask: np.ndarray,
pad_id: int,
cls_id: int,
sep_id: int,
ignore_start_end: bool,
ignore_extra_tokens: bool,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Creates segment ids array, input mask, loss mask.
Segment ids array is BERT token type ids in HuggingFace terminology. It is a zeros array for punctuation
and capitalization task.
Input mask element is ``True`` if an element of ``input_ids`` is not padding and ``False`` otherwise.
Loss mask element is ``True`` for the first token in a word. If ``ignore_start_end=False``, then loss mask
element is ``True`` for [CLS] and [SEP] tokens. If ``ignore_extra_tokens=False``, then loss mask element is ``True``
for all word tokens. In all other cases loss mask elements are ``False``.
Args:
input_ids: an integer array of shape ``[Batch, Time]`` containing ids of source token ids
subtokens_mask: a boolean array of shape ``[Batch, Time]`` which elements are ``True`` if they correspond to
the first token of some word
pad_id: an id of padding token
cls_id: an id of [CLS] token
sep_id: an id of [SEP] token
ignore_start_end: whether to compute loss for [CLS] and [SEP] tokens
ignore_extra_tokens: whether to compute loss for not first tokens in words
Returns:
segment_ids: int8 array of shape [Batch, Time]
input_mask: boolean array of shape [Batch, Time]
loss_mask: boolean array of shape [Batch, Time]
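    A small sketch with made-up token ids (``pad_id=0``, ``cls_id=1``, ``sep_id=2``):

    .. code-block:: python

        input_ids = np.array([[1, 7, 8, 9, 2, 0]])
        subtokens_mask = np.array([[False, True, False, True, False, False]])
        segment_ids, input_mask, loss_mask = create_masks_and_segment_ids(
            input_ids, subtokens_mask, pad_id=0, cls_id=1, sep_id=2, ignore_start_end=True, ignore_extra_tokens=True
        )
        # segment_ids is all zeros, input_mask is True for the first five positions, loss_mask equals subtokens_mask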
"""
segment_ids = np.zeros_like(input_ids, dtype=np.int8)
input_mask = np.not_equal(input_ids, pad_id)
    special_mask = np.equal(input_ids, cls_id) | np.equal(input_ids, sep_id)  # True for [CLS] and [SEP] positions
if ignore_start_end:
if ignore_extra_tokens:
loss_mask = subtokens_mask
else:
loss_mask = input_mask & ~special_mask
else:
if ignore_extra_tokens:
loss_mask = subtokens_mask | special_mask
else:
loss_mask = input_mask
return segment_ids, input_mask, loss_mask
def create_label_ids(unique_labels: Set[str], pad_label: str) -> Dict[str, int]:
"""
Returns label ids dictionary. ``pad_label`` always has id ``0``. Other labels are sorted in alphabetical order.
Args:
unique_labels: a set of labels from which label ids dictionary is created. May or may not contain ``pad_label``
pad_label: label used for padding. It is also a neutral label
Returns:
label ids dictionary
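    For example:

    .. code-block:: python

        create_label_ids({'O', ',', '.', '?'}, pad_label='O')  # {'O': 0, ',': 1, '.': 2, '?': 3}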
"""
label_ids = {pad_label: 0}
if pad_label in unique_labels:
unique_labels.remove(pad_label)
for label in sorted(unique_labels):
label_ids[label] = len(label_ids)
return label_ids
def load_label_ids(file_path: Union[str, os.PathLike]) -> Dict[str, int]:
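    """
    Loads a label vocabulary file in which every line contains exactly one label. The id of a label is the index of
    the line containing the label (starting from 0). This is the inverse of :func:`save_label_ids`.
    """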
ids = {}
with open(file_path, encoding='utf_8') as f:
for i, line in enumerate(f):
ids[line.strip()] = i
return ids
def save_label_ids(label_ids: Dict[str, int], file_path: Path) -> None:
"""
    Saves label ids map to a file. Each line of the file contains one label. Labels are saved in order of increasing
    label id.
Args:
label_ids: label id dictionary. Pad label has to have id ``0``
file_path: path to a file where labels will be saved
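    For example:

    .. code-block:: python

        save_label_ids({'O': 0, ',': 1, '.': 2}, Path('punct_label_vocab.csv'))  # file lines: 'O', ',', '.'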
"""
file_path.parent.mkdir(parents=True, exist_ok=True)
with file_path.open('w', encoding='utf_8', newline='\n') as out:
labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))
out.write('\n'.join(labels))
def raise_not_equal_labels_error(
first_labels: Dict[str, int], second_labels: Dict[str, int], first_labels_desc: str, second_labels_desc: str
) -> None:
"""
    A helper function for raising a comprehensible error if labels from 2 sources are different.
Such sources may include:
- labels stored in .nemo checkpoint
- labels stored in tarred dataset
- labels passed in config parameters ``model.common_dataset_parameters.{punct_label_ids,capit_label_ids}``
- labels from files passed in config parameters ``model.class_labels.{punct_labels_file,capit_labels_file}``
- labels in attributes ``PunctuationCapitalizationModel.{punct_label_ids,capit_label_ids}``
- any other source
    This function helps to detect a misconfiguration early and gives error messages that are easy to interpret.
Call this function if ``first_labels != second_labels``.
Args:
first_labels: first dictionary with labels
second_labels: second dictionary with labels
first_labels_desc: a description of first labels
second_labels_desc: a description of second labels
"""
missing_in_first = {k: second_labels[k] for k in set(second_labels) - set(first_labels)}
missing_in_second = {k: first_labels[k] for k in set(first_labels) - set(second_labels)}
not_equal = {
k: {'FIRST LABELS': first_labels[k], 'SECOND LABELS': second_labels[k]}
for k in set(first_labels) & set(second_labels)
if first_labels[k] != second_labels[k]
}
msg = f"{first_labels_desc} (FIRST LABELS) are not equal to {second_labels_desc} (SECOND LABELS)."
if len(missing_in_first) > 0:
msg += f" Number of SECOND LABELS missing in the FIRST LABELS: {len(missing_in_first)}."
if len(missing_in_second) > 0:
msg += f" Number of FIRST LABELS missing in the SECOND LABELS: {len(missing_in_second)}."
if len(not_equal) > 0:
msg += f" Number of labels which are not equal: {len(not_equal)}."
if len(missing_in_first) > 0:
msg += (
f" Several examples of missing SECONDS LABELS in the FIRST LABELS: "
f"{dict(list(missing_in_first.items())[:3])}."
)
if len(missing_in_second) > 0:
msg += (
f" Several examples of missing FIRST LABELS in the SECOND LABELS: "
f"{dict(list(missing_in_second.items())[:3])}."
)
if len(not_equal) > 0:
msg += f" Several examples of labels which are not equal: {dict(list(not_equal.items())[:3])}"
raise ValueError(msg)
def pad(vectors: List[np.ndarray], length: int, value: Union[int, float, bool]) -> np.ndarray:
"""
Pad vectors to length ``length`` and then stack.
Args:
vectors: a list of 1D arrays. Arrays to pad and stack
        length: a length of padded sequences. Has to be greater than or equal to the maximum length of an element of
            ``vectors``.
value: a value used for padding
Returns:
an array of padded vectors
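    For example:

    .. code-block:: python

        pad([np.array([1, 2, 3]), np.array([4, 5])], length=4, value=0)
        # array([[1, 2, 3, 0],
        #        [4, 5, 0, 0]])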
"""
result = []
for v in vectors:
result.append(np.concatenate([v, np.full([length - v.shape[0]], value, dtype=v.dtype)]))
return np.stack(result)
class BertPunctuationCapitalizationDataset(Dataset):
"""
A dataset to use during training for punctuation and capitalization tasks.
For inference, you will need
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_infer_dataset.BertPunctuationCapitalizationInferDataset`.
For huge datasets which cannot be loaded into memory simultaneously use
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Args:
text_file (:obj:`Union[str, os.PathLike]`): a path to a file with sequences, each line should contain a text
without punctuation and capitalization
labels_file (:obj:`Union[str, os.PathLike]`): a path to a file with labels, each line corresponds to word
labels for a sentence in the ``text_file``. Labels have to follow format described in this section of
documentation :ref:`NeMo Data Format<nemo-data-format-label>`.
        max_seq_length (:obj:`int`): max number of tokens in a source sequence. ``max_seq_length`` includes [CLS]
            and [SEP] tokens. Sequences which are too long will be clipped by removal of tokens from the end of the
            sequence.
        tokenizer (:obj:`TokenizerSpec`): a tokenizer instance which has properties ``cls_id``, ``sep_id``,
            ``pad_id``, ``unk_id``.
num_samples (:obj:`int`, `optional`, defaults to :obj:`-1`): a number of samples you want to use for the
dataset. If ``-1``, use all dataset. Useful for testing.
        tokens_in_batch (:obj:`int`, `optional`, defaults to :obj:`5000`): number of tokens in a batch including
            paddings and special tokens ([CLS], [SEP], [UNK]). The :meth:`__getitem__` method of this class returns
            ready batches rather than individual samples. The number of samples in a batch is adjusted for the
            lengths of the input sequences. If input sequences are short, then a batch will contain more samples.
            Before packing into batches, samples are sorted by the number of tokens they contain. Sorting allows
            reducing the number of pad tokens in a batch significantly. Regular PyTorch data loader shuffling will
            only permute batches without changing their content. Proper shuffling is achieved via calling method
            :meth:`repack_batches_with_shuffle` every epoch. If parameter ``number_of_batches_is_multiple_of`` is
            greater than 1, some batches may be split into smaller pieces.
pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): pad value to use for labels. It's also the neutral
label both for punctuation and capitalization.
punct_label_ids (:obj:`Dict[str, int]`, `optional`): dict to map punctuation labels to label ids. For dev set,
use label ids generated during training to support cases when not all labels are present in the dev set.
For training, it is recommended to set ``punct_label_ids`` to ``None`` or load from cache.
        capit_label_ids (:obj:`Dict[str, int]`, `optional`): same as ``punct_label_ids`` but for capitalization labels.
ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to compute loss on
tokens which are not first tokens in a word. For example, assume that word ``'tokenization'`` is tokenized
into ``['token', 'ization']``. If ``ignore_extra_tokens=True``, loss mask for the word is
``[True, False]``, and if ``ignore_extra_tokens=False``, then loss mask is ``[True, True]``.
ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to ignore [CLS] and [SEP] tokens
in the loss_mask.
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to use pickled features already present
in ``cache_dir`` or not. If pickled features file does not exist or ``use_cache=False``, then features are
pickled in ``cache_dir``. Pickled features include input ids, subtokens mask (mask of first tokens in
words), encoded punctuation and capitalization labels, label ids. Features creation consumes considerable
time and this ``use_cache=True`` significantly speeds up training starting. Pickled features are also
used for sharing features between processes if data parallel training is used.
        cache_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where cache (pickled features)
            is stored. By default, the ``text_file`` parent directory is used. This parameter is useful if the dataset
            directory is read-only, and you wish to pickle features. In such a case, specify a path to a writable
            directory in the ``cache_dir`` parameter.
        get_label_frequencies (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to print and save label
            frequencies. Frequencies are shown if the ``verbose`` parameter is ``True``. If
            ``get_label_frequencies=True``, then frequencies are saved into the ``label_info_save_dir`` directory.
label_info_save_dir (:obj:`Union[str, os.PathLike]`, `optional`): a path to a directory where label frequencies
are saved. By default, a ``text_file`` parent directory is used. When method
:meth:`save_labels_and_get_file_paths` is called label ids are saved into ``label_info_save_dir``
directory. This parameter is useful if directory containing ``text_file`` is read-only.
punct_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): a path to a .csv file containing
punctuation label vocabulary. Each line in such a vocabulary file contains exactly one label. The first
            line has to contain `pad_label`, otherwise an error will be raised.
capit_label_vocab_file (:obj:`Union[str, os.PathLike]`, `optional`): same as ``punct_label_vocab_file`` for
capitalization labels.
        add_masks_and_segment_ids_to_batch (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to add
            ``'loss_mask'``, ``'input_mask'``, ``'segment_ids'`` items to a batch. Setting this to ``False`` is only
            useful for tarred dataset creation; for model training and inference these items are required, so the
            parameter has to be ``True``.
verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to show data examples, label stats and
other useful information.
n_jobs (:obj:`int`, `optional`, defaults to :obj:`0`): number of workers used for tokenization, encoding
labels, creating "first token in word" mask, and clipping. If ``n_jobs <= 0`` data preparation is performed
without multiprocessing. By default, ``n_jobs`` is ``0``.
.. warning::
There can be deadlocking problems with some tokenizers (e.g. SentencePiece, HuggingFace AlBERT)
if ``n_jobs > 0``.
        number_of_batches_is_multiple_of (:obj:`int`, `optional`, defaults to :obj:`1`): number of batches in the
            dataset is made divisible by ``number_of_batches_is_multiple_of``. If ``number_of_batches_is_multiple_of``
            is greater than 1, then several batches are split in parts until the number of batches is divisible by
            ``number_of_batches_is_multiple_of``. If there are not enough queries in the dataset to create enough
            batches, then a warning is printed. This parameter is useful for dev and validation datasets if multiple
            GPUs are used. The problem is that if the number of batches is not evenly divisible by the number of
            GPUs, then some queries may be processed several times and metrics will be distorted.
        batch_shuffling_random_seed (:obj:`int`, defaults to :obj:`42`): a random seed used for batches repacking and
            shuffling.
tokenization_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting tokenization
progress. Useful for creation of tarred dataset
batch_mark_up_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
deciding which samples batches will contain. Useful for creation of tarred dataset
batch_building_progress_queue (:obj:`multiprocessing.Queue`, `optional`): a queue for reporting progress in
batch creation (stacking and padding). Useful for creation of tarred dataset
        use_audio (:obj:`bool`, `optional`, defaults to :obj:`False`): if set to ``True``, the dataset will return
            audio as well as text.
        audio_file (:obj:`Union[str, os.PathLike]`, `optional`): a path to a file with audio paths.
        sample_rate (:obj:`int`, `optional`, defaults to :obj:`None`): sample rate of audios. Can be used for
            upsampling or downsampling of audio.
        use_bucketing (:obj:`bool`, `optional`, defaults to :obj:`True`): if set to ``False``, the dataset will return
            batches of ``batch_size`` samples instead of batches with approximately ``tokens_in_batch`` tokens.
        preload_audios (:obj:`bool`, `optional`, defaults to :obj:`True`): if set to ``True``, batches will include
            waveforms; if set to ``False``, batches will store ``audio_filepaths`` instead and audios are loaded
            during the ``collate_fn`` call.
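
    A minimal usage sketch (the tokenizer creation below is an assumption; any ``TokenizerSpec`` implementation with
    the required properties can be passed):

    .. code-block:: python

        from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer

        tokenizer = get_tokenizer("bert-base-uncased")
        dataset = BertPunctuationCapitalizationDataset(
            text_file="text_train.txt",
            labels_file="labels_train.txt",
            max_seq_length=128,
            tokenizer=tokenizer,
            tokens_in_batch=2048,
        )
        batch = dataset[0]  # a ready-made batch, not a single sample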
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports. """
if self.use_audio:
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
'features': NeuralType(('B', 'T'), AudioSignal()),
'features_length': NeuralType(('B', 'T'), LengthsType()),
}
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
max_seq_length: int,
tokenizer: TokenizerSpec,
num_samples: int = -1,
tokens_in_batch: int = 5000,
pad_label: str = 'O',
punct_label_ids: Optional[Union[Dict[str, int], DictConfig]] = None,
capit_label_ids: Optional[Union[Dict[str, int], DictConfig]] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = True,
use_cache: bool = True,
cache_dir: Optional[Union[str, os.PathLike]] = None,
get_label_frequencies: bool = False,
label_info_save_dir: Optional[Union[str, os.PathLike]] = None,
punct_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
capit_label_vocab_file: Optional[Union[str, os.PathLike]] = None,
add_masks_and_segment_ids_to_batch: bool = True,
verbose: bool = True,
n_jobs: Optional[int] = 0,
number_of_batches_is_multiple_of: int = 1,
batch_shuffling_random_seed: int = 42,
tokenization_progress_queue: Optional[mp.Queue] = None,
batch_mark_up_progress_queue: Optional[mp.Queue] = None,
batch_building_progress_queue: Optional[mp.Queue] = None,
use_audio: Optional[bool] = False,
audio_file: Optional[Union[str, os.PathLike]] = None,
sample_rate: Optional[int] = None,
use_bucketing: Optional[bool] = True,
preload_audios: Optional[bool] = True,
) -> None:
""" Initializes BertPunctuationCapitalizationDataset. """
if isinstance(punct_label_ids, DictConfig):
punct_label_ids = OmegaConf.to_container(punct_label_ids)
if isinstance(capit_label_ids, DictConfig):
capit_label_ids = OmegaConf.to_container(capit_label_ids)
self._check_constructor_parameters(
text_file,
labels_file,
punct_label_ids,
capit_label_ids,
punct_label_vocab_file,
capit_label_vocab_file,
num_samples,
use_cache,
number_of_batches_is_multiple_of,
use_audio,
audio_file,
sample_rate,
)
if punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
punct_label_ids = load_label_ids(punct_label_vocab_file)
if capit_label_vocab_file is not None:
capit_label_vocab_file = Path(capit_label_vocab_file).expanduser()
capit_label_ids = load_label_ids(capit_label_vocab_file)
self.text_file, self.labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser()
if label_info_save_dir is None:
self.label_info_save_dir = self.text_file.parent
else:
self.label_info_save_dir = Path(label_info_save_dir).expanduser()
self.tokens_in_batch = tokens_in_batch
self.tokenizer = tokenizer
self.pad_label = pad_label
self.ignore_extra_tokens = ignore_extra_tokens
self.ignore_start_end = ignore_start_end
self.add_masks_and_segment_ids_to_batch = add_masks_and_segment_ids_to_batch
self.verbose = verbose
self.batch_mark_up_progress_queue = batch_mark_up_progress_queue
self.batch_building_progress_queue = batch_building_progress_queue
self.use_audio = use_audio
self.audio_file = audio_file
self.sample_rate = sample_rate
self.use_bucketing = use_bucketing
self.preload_audios = preload_audios
master_device = is_global_rank_zero()
self.features_pkl = self._get_path_to_pkl_features(
self.text_file, self.labels_file, cache_dir, max_seq_length, num_samples
)
features = None
if master_device and not (self.features_pkl.is_file() and use_cache):
if verbose:
logging.info(
f'Processing {self.text_file}' + f' {self.audio_file if self.audio_file else ""} '.rstrip()
)
(
text_lines,
punct_label_lines,
capit_label_lines,
punct_unique_labels,
capit_unique_labels,
audio_lines,
) = self._read_dataset(self.text_file, self.labels_file, num_samples, self.audio_file)
if punct_label_ids:
self._check_label_ids_vs_unique_labels(
punct_label_ids, punct_unique_labels, 'punct', 'punctuation', self.labels_file
)
else:
punct_label_ids = create_label_ids(punct_unique_labels, self.pad_label)
if capit_label_ids:
self._check_label_ids_vs_unique_labels(
capit_label_ids, capit_unique_labels, 'capit', 'capitalization', self.labels_file
)
else:
capit_label_ids = create_label_ids(capit_unique_labels, self.pad_label)
features = _get_features(
text_lines,
punct_label_lines,
capit_label_lines,
max_seq_length,
self.tokenizer,
pad_label=self.pad_label,
punct_label_ids=punct_label_ids,
capit_label_ids=capit_label_ids,
verbose=self.verbose,
progress_queue=tokenization_progress_queue,
n_jobs=n_jobs,
audio_queries=audio_lines if self.use_audio else None,
sample_rate=self.sample_rate,
preload_audios=self.preload_audios,
)
self.features_pkl.parent.mkdir(parents=True, exist_ok=True)
# save features to a temp file first to make sure that non-master processes don't start reading the file
# until the master process is done with writing
ofd, tmp_features_pkl = tempfile.mkstemp(
suffix='.pkl', prefix=os.path.basename(self.features_pkl), dir=os.path.dirname(self.features_pkl)
)
with os.fdopen(ofd, 'wb') as temp_f:
pickle.dump(tuple(list(features) + [punct_label_ids, capit_label_ids]), temp_f)
os.rename(tmp_features_pkl, self.features_pkl)
if self.verbose:
logging.info(f'Features saved to {self.features_pkl}')
# wait until the master process writes to the processed data files
if not master_device:
while features is None and not os.path.exists(self.features_pkl):
sleep(10)
if features is None:
features = pickle.load(self.features_pkl.open('rb'))
li = features[-2:]
self._check_label_ids_loaded_from_pkl(
punct_label_ids, capit_label_ids, *li, punct_label_vocab_file, capit_label_vocab_file
)
punct_label_ids, capit_label_ids = li[-2], li[-1]
if tokenization_progress_queue is not None:
tokenization_progress_queue.put(len(features[0]))
if self.verbose:
logging.info(f'Features restored from {self.features_pkl}')
features = features[:-2]
(
self.input_ids,
self.subtokens_mask,
self.waveforms,
self.waveforms_length,
self.audio_filepaths,
self.punct_labels,
self.capit_labels,
) = features
self.punct_label_ids, self.capit_label_ids = punct_label_ids, capit_label_ids
self.number_of_batches_is_multiple_of = number_of_batches_is_multiple_of
self.batch_shuffling_random_state = np.random.RandomState(batch_shuffling_random_seed)
if get_label_frequencies:
self.punct_label_frequencies = self._calculate_and_save_label_frequencies(self.punct_labels, 'punct')
self.capit_label_frequencies = self._calculate_and_save_label_frequencies(self.capit_labels, 'capit')
if self.use_bucketing:
self.batches = self._pack_into_batches(
input_ids=self.input_ids,
subtokens_mask=self.subtokens_mask,
punct_labels=self.punct_labels,
capit_labels=self.capit_labels,
waveforms=self.waveforms,
audio_lengths=self.waveforms_length,
audio_filepaths=self.audio_filepaths,
)
else:
self.batches = self._form_batches(
input_ids=self.input_ids,
subtokens_mask=self.subtokens_mask,
punct_labels=self.punct_labels,
capit_labels=self.capit_labels,
waveforms=self.waveforms,
audio_lengths=self.waveforms_length,
audio_filepaths=self.audio_filepaths,
)
def _get_path_to_pkl_features(
self,
text_file: Path,
labels_file: Path,
cache_dir: Optional[Union[str, os.PathLike]],
max_seq_length: int,
num_samples: int,
) -> Path:
if cache_dir is None:
cache_dir = text_file.parent
else:
cache_dir = Path(cache_dir).expanduser()
vocab_size = getattr(self.tokenizer, "vocab_size", 0)
features_pkl = cache_dir / "cached.{}.{}.max_seq_length{}.vocab{}.{}.punctuation_capitalization.pkl".format(
'__' + text_file.name + '__' + labels_file.name + '__',
self.tokenizer.name,
max_seq_length,
vocab_size,
f'num_samples{num_samples}' if num_samples > 0 else 'all_samples',
)
return features_pkl
@staticmethod
def _check_constructor_parameters(
text_file: Union[str, os.PathLike],
labels_file: Union[str, os.PathLike],
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
punct_label_vocab_file: Union[str, os.PathLike],
capit_label_vocab_file: Union[str, os.PathLike],
num_samples: int,
use_cache: bool,
number_of_batches_is_multiple_of: int,
use_audio: bool = False,
audio_file: Optional[Union[str, os.PathLike]] = None,
sample_rate: Optional[int] = None,
) -> None:
if torch.distributed.is_initialized() and torch.distributed.get_world_size() > 1 and not use_cache:
raise ValueError(
f"If you already created process group and the world size is greater than 1, then `use_cache` "
f"parameter has to be `True`. Only master process prepares features and if `use_cache=False`, then "
f"other processes will not be able to obtain features. Alternatively, you may set `use_cache=False` "
f"and set up data before spawning processes. Use `cache_dir` dataset directory with "
f"`text_file` and `labels_file` is read-only."
)
if not (os.path.exists(text_file) and os.path.exists(labels_file)):
raise FileNotFoundError(
f'{text_file} or {labels_file} not found. The data should be split into 2 files: text.txt and '
f'labels.txt. Each line of the text.txt file contains text sequences, where words are separated with '
f'spaces. The labels.txt file contains corresponding labels for each word in text.txt, the labels are '
f'separated with spaces. Each line of the files should follow the format:\n'
f' [WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and '
f' [LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).'
)
if not use_audio and audio_file:
raise ValueError(f"Audio file {audio_file} was passed but use_audio was set to False")
if use_audio and audio_file and not os.path.exists(audio_file):
raise FileNotFoundError(
f'use_audio was set to True but {audio_file} not found. Audio data should be listed in .txt file with one path per line'
)
if punct_label_ids is not None and punct_label_vocab_file is not None:
punct_label_vocab_file = Path(punct_label_vocab_file).expanduser()
file_punct_label_ids = load_label_ids(punct_label_vocab_file)
if file_punct_label_ids != punct_label_ids:
raise_not_equal_labels_error(
first_labels=punct_label_ids,
second_labels=file_punct_label_ids,
first_labels_desc='Punctuation labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `punct_label_ids`',
second_labels_desc=f'Punctuation labels loaded from file {punct_label_vocab_file} path to which '
f'is passed in parameter `punct_label_vocab_file`',
)
if capit_label_ids is not None and capit_label_vocab_file is not None:
capit_vocab_file = Path(capit_label_vocab_file).expanduser()
file_capit_label_ids = load_label_ids(capit_vocab_file)
if file_capit_label_ids != capit_label_ids:
raise_not_equal_labels_error(
first_labels=capit_label_ids,
second_labels=file_capit_label_ids,
first_labels_desc='Capitalization labels passed to the `PunctuationCapitalizationDataset` '
'constructor in parameter `capit_label_ids`',
second_labels_desc=f'Capitalization labels loaded from file {capit_label_vocab_file} path to '
f'which is passed in parameter `capit_label_vocab_file`',
)
if num_samples == 0:
raise ValueError(
f"Parameter `num_samples` has to be positive or negative whereas `num_samples={num_samples}`. "
f"Negative `num_samples` is for using all samples in a dataset."
)
if number_of_batches_is_multiple_of < 1 or not isinstance(number_of_batches_is_multiple_of, int):
raise ValueError(
f"Parameter `number_of_batches_is_multiple_of` has to be positive integer whereas "
f"{number_of_batches_is_multiple_of} is given."
)
if use_audio and not isinstance(sample_rate, int):
raise TypeError(f'use_audio was set to True but sample_rate was not set')
if use_audio and sample_rate < 1:
raise ValueError(f'sample_rate set to {sample_rate} but it cannot be less than 1')
def _check_label_ids_loaded_from_pkl(
self,
parameter_punct_label_ids: Dict[str, int],
parameter_capit_label_ids: Dict[str, int],
pkl_punct_label_ids: Any,
pkl_capit_label_ids: Any,
punct_label_vocab_file: Optional[Path],
capit_label_vocab_file: Optional[Path],
) -> None:
if not isinstance(pkl_punct_label_ids, dict):
raise ValueError(
f"Punctuation label ids loaded from features file {self.features_pkl} have wrong type "
f"{type(pkl_punct_label_ids)}"
)
if parameter_punct_label_ids is not None:
if parameter_punct_label_ids != pkl_punct_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_punct_label_ids,
second_labels=pkl_punct_label_ids,
first_labels_desc="Punctuation labels passed in parameter `punct_label_ids`"
if punct_label_vocab_file is None
else f"Punctuation labels loaded from file {punct_label_vocab_file}",
second_labels_desc=f"Punctuation label ids loaded from features file {self.features_pkl}",
)
if not isinstance(pkl_capit_label_ids, dict):
raise ValueError(
f"Capitalization label ids loaded from features file {self.features_pkl} has wrong type "
f"{type(pkl_capit_label_ids)}"
)
if parameter_capit_label_ids is not None:
if parameter_capit_label_ids != pkl_capit_label_ids:
raise_not_equal_labels_error(
first_labels=parameter_capit_label_ids,
second_labels=pkl_capit_label_ids,
first_labels_desc="Capitalization labels passed in parameter `capit_label_ids`"
if capit_label_vocab_file is None
else f"Capitalization labels loaded from file {capit_label_vocab_file}",
second_labels_desc=f"Capitalization label ids loaded from features file {self.features_pkl}",
)
@staticmethod
def _check_label_ids_vs_unique_labels(
label_ids: Dict[str, int], unique_labels: Set[str], label_type: str, task: str, label_file: Path
) -> None:
if unique_labels - set(label_ids):
not_present_labels = list(unique_labels - set(label_ids))
raise ValueError(
f"{len(not_present_labels)} {task} labels found in {label_file} are not present in "
f"`{label_type}_label_ids`. Examples of unexpected labels from {label_file}: {not_present_labels[:3]}"
)
@staticmethod
def _read_dataset(
text_file: Path, labels_file: Path, num_samples: int, audio_file: Optional[Path] = None
) -> Union[Tuple[Any, Any, Any, Set[Any], Set[Any], Any], Tuple[Any, Any, Any, Set[Any], Set[Any]]]:
with open(text_file, 'r', encoding='utf_8') as f:
text_lines = f.readlines()
punct_unique_labels, capit_unique_labels = set(), set()
punct_labels_lines, capit_labels_lines = [], []
with labels_file.open(encoding='utf_8') as f:
for i, line in enumerate(f):
pairs = line.split()
if not all([len(p) == 2 for p in pairs]):
raise ValueError(
f"Some label pairs are not pairs but have wrong length (!= 2) in line {i} in label file "
f"{labels_file}"
)
words = text_lines[i].split()
if len(pairs) != len(words):
raise ValueError(
f"In line {i} in text file {text_file} number of words {len(words)} is not equal to the "
f"number of labels {len(pairs)} in labels file {labels_file}."
)
punct_line, capit_line = zip(*pairs)
punct_labels_lines.append(punct_line)
capit_labels_lines.append(capit_line)
punct_unique_labels.update(punct_line)
capit_unique_labels.update(capit_line)
if len(punct_labels_lines) != len(text_lines):
raise ValueError(
f"Number of text lines {len(text_lines)} in text file {text_file} is not equal to the number of lines "
f"{len(punct_labels_lines)} in labels file {labels_file}."
)
if audio_file:
with open(audio_file, 'r') as f:
audio_lines = f.readlines()
if len(audio_lines) != len(text_lines):
raise ValueError(
f'Number of lines in {audio_file} equals {len(audio_lines)} which is not equal to '
f'number of lines in {text_file} which is {len(text_lines)}'
)
dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines, audio_lines))
else:
dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines))
if len(dataset) == 0:
raise ValueError(f"Dataset loaded from files {text_file} and {labels_file} is empty.")
if num_samples > 0:
dataset = dataset[:num_samples]
if audio_file:
text_lines, punct_labels_lines, capit_labels_lines, audio_lines = zip(*dataset)
return (
text_lines,
punct_labels_lines,
capit_labels_lines,
punct_unique_labels,
capit_unique_labels,
audio_lines,
)
else:
text_lines, punct_labels_lines, capit_labels_lines = zip(*dataset)
return text_lines, punct_labels_lines, capit_labels_lines, punct_unique_labels, capit_unique_labels, None
@staticmethod
def calc_batch_seq_length(queries: List[np.ndarray], length_is_multiple_of: int) -> int:
return ceil(max([len(elem) for elem in queries]) / length_is_multiple_of) * length_is_multiple_of
def _adjust_number_of_batches(
self,
input_ids: List[np.ndarray],
batch_beginnings: List[int],
batch_sizes: List[int],
batch_seq_lengths: List[int],
) -> Tuple[List[int], List[int], List[int]]:
"""
If length of ``batch_sizes`` list is not divisible by ``self.number_of_batches_is_multiple_of``, then
one or several batches are split into parts until number of batches is divisible by
``self.number_of_batches_is_multiple_of``.
The method selects a batch and tries to slice smaller batches with 8 elements each from the batch. If
the batch cannot be sliced any further and there are still not enough batches, then the next batch from dataset
is selected.
If slicing batches of size 8 is not enough, then batches of size 1 are created.
If dataset is too small to create enough batches, then a warning is shown.
Args:
input_ids: tokenized queries of the dataset. `input_ids` are expected to be sorted by length in ascending
order.
batch_beginnings: indices of first elements of batches created inside :meth:`_mark_up_batches` method.
Expected to be sorted in ascending order.
batch_sizes: sizes of batches created inside :meth:`_mark_up_batches` method.
batch_seq_lengths: lengths of elements in batch after padding created inside :meth:`_mark_up_batches`
method.
Returns:
batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
batch_sizes: a list of numbers of samples in batches
batch_seq_lengths: a list of sequence lengths after padding for every batch
"""
batch_beginnings, batch_sizes = batch_beginnings.copy(), batch_sizes.copy()
batch_seq_lengths = batch_seq_lengths.copy()
num_missing_batches = (
self.number_of_batches_is_multiple_of - len(batch_sizes) % self.number_of_batches_is_multiple_of
)
if num_missing_batches == 0:
return batch_beginnings, batch_sizes, batch_seq_lengths
if sum(batch_sizes) - len(batch_sizes) < num_missing_batches:
logging.warning(
f"Unable to achieve number of batches multiple of {self.number_of_batches_is_multiple_of} because "
f"dataset in files '{self.text_file}' and '{self.labels_file}' contains not enough queries "
f"({sum(batch_sizes)}) or queries in the dataset are too long. Dataset will have "
f"{len(batch_sizes)} batches instead. For validation or test dataset if multiple GPUs are used "
f"this will lead to distorted metrics because some batches will be processed several times. "
f"To fix this problem you may try to tweak (increase) parameter `tokens_in_batch`, though result is "
f"not guaranteed."
)
return batch_beginnings, batch_sizes, batch_seq_lengths
num_cut = 0
for ss in [8, 1]: # ss - split_size
old_num_batches = len(batch_sizes)
            # Starting from the last batch because its size is likely not a multiple of 8. Thus, the number of
            # batches whose size is not a multiple of 8 can be reduced by 1.
original_batch_index = old_num_batches - 1
while original_batch_index >= 0 and num_cut < num_missing_batches:
bs, bb = batch_sizes[original_batch_index], batch_beginnings[original_batch_index]
rb = 0 # an index of sliced first element of sliced batch in original batch (relative beginning)
if rb < bs - ss:
while rb < bs - ss and num_cut < num_missing_batches:
batch_sizes.append(ss)
batch_beginnings.append(bb + rb)
batch_seq_lengths.append(
self.calc_batch_seq_length(input_ids[bb + rb : bb + rb + ss], length_is_multiple_of=8)
)
rb += ss
num_cut += 1
assert len(input_ids[bb + rb : bb + bs]) > 0
batch_sizes[original_batch_index] = bs - rb
batch_beginnings[original_batch_index] = bb + rb
batch_seq_lengths[original_batch_index] = self.calc_batch_seq_length(
input_ids[bb + rb : bb + bs], length_is_multiple_of=8
)
original_batch_index -= 1
# Keeping order of batches.
batch_beginnings, batch_sizes, batch_seq_lengths = map(
list, zip(*sorted(zip(batch_beginnings, batch_sizes, batch_seq_lengths), key=lambda x: x[0]))
)
assert len(batch_beginnings) % self.number_of_batches_is_multiple_of == 0
assert len(batch_sizes) % self.number_of_batches_is_multiple_of == 0
assert len(batch_seq_lengths) % self.number_of_batches_is_multiple_of == 0
return batch_beginnings, batch_sizes, batch_seq_lengths
def _mark_up_batches(self, input_ids: List[np.ndarray]) -> Tuple[List[int], List[int], List[int]]:
"""
Computes indices of first samples in batch, batch sizes, seq lengths for batches. ``input_ids`` has to be
sorted by number of tokens in ascending order.
Batches are marked up with respect to the following conditions:
- total number of tokens in a batch including paddings is less than or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- seq length (elements of the third returned object) is evenly divisible by 8
If ``self.batch_mark_up_progress_queue`` is not None, then mark-up progress is reported via
``self.batch_mark_up_progress_queue``. Otherwise, a ``tqdm`` instance is created in this function.
Args:
input_ids: a list of 1D int32 arrays. Elements of ``input_ids`` have to be sorted by length in ascending
order
Returns:
batch_beginnings: a list of indices in ``input_ids`` of first samples of every batch
batch_sizes: a list of numbers of samples in batches
batch_seq_lengths: a list of sequence lengths after padding for every batch
"""
batch_beginnings, batch_sizes, batch_seq_lengths = [], [], []
current_max_length = 0
start = 0
if self.batch_mark_up_progress_queue is None:
inp_iterator = tqdm(enumerate(input_ids), total=len(input_ids), desc="Batch mark up", unit="query")
else:
inp_iterator = enumerate(input_ids)
progress_made = 0
for i, inp in inp_iterator:
current_max_length = max(current_max_length, ceil(len(inp) / 8) * 8)
if current_max_length * (i + 1 - start) > self.tokens_in_batch:
batch_size = (i - start) // 8 * 8
if batch_size == 0:
if i > start:
batch_size = i - start
logging.warning(
f"Could not create a batch whose size is a multiple of 8. Probably, there is a sequence in the dataset "
f"which is too long, or the parameter `tokens_in_batch` is too small. Current length of sequences "
f"in the batch is {current_max_length}. Batch size will be reduced to {batch_size}. "
f"tokens_in_batch={self.tokens_in_batch}. The batch includes sequences from "
f"{start} to {i - 1}."
)
else:
logging.warning(
f"Input sequence number {i - 1} is too long. Could not fit it into batch with "
f"{self.tokens_in_batch} tokens. Sequence number {i - 1} will not be added to batches."
)
start = i
current_max_length = ceil(len(inp) / 8) * 8
continue
seq_length = self.calc_batch_seq_length(input_ids[start : start + batch_size], length_is_multiple_of=8)
batch_beginnings.append(start)
batch_sizes.append(batch_size)
batch_seq_lengths.append(seq_length)
start += batch_size
current_max_length = self.calc_batch_seq_length(input_ids[start : i + 1], length_is_multiple_of=8)
if self.batch_mark_up_progress_queue is not None:
progress_made += 1
if progress_made >= BATCH_MARK_UP_PROGRESS_REPORT_PERIOD:
self.batch_mark_up_progress_queue.put(progress_made)
progress_made = 0
if start < len(input_ids):
seq_length = self.calc_batch_seq_length(input_ids[start:], length_is_multiple_of=8)
batch_beginnings.append(start)
batch_sizes.append(len(input_ids) - start)
batch_seq_lengths.append(seq_length)
if self.batch_mark_up_progress_queue is not None:
self.batch_mark_up_progress_queue.put(progress_made)
if len(batch_beginnings) % self.number_of_batches_is_multiple_of:
batch_beginnings, batch_sizes, batch_seq_lengths = self._adjust_number_of_batches(
input_ids, batch_beginnings, batch_sizes, batch_seq_lengths
)
assert sum(batch_sizes) == len(input_ids)
for i in range(len(batch_beginnings) - 1):
assert batch_beginnings[i] + batch_sizes[i] == batch_beginnings[i + 1]
assert batch_seq_lengths[i] >= max(
[len(inp) for inp in input_ids[batch_beginnings[i] : batch_beginnings[i] + batch_sizes[i]]]
)
return batch_beginnings, batch_sizes, batch_seq_lengths
def _form_batches(
self,
input_ids: List[np.ndarray],
subtokens_mask: List[np.ndarray],
punct_labels: List[np.ndarray],
capit_labels: List[np.ndarray],
waveforms: Optional[List[np.ndarray]] = None,
audio_lengths: Optional[List[np.ndarray]] = None,
audio_filepaths: Optional[List[str]] = None,
) -> List[Dict[str, np.ndarray]]:
"""
Args:
input_ids: a list of 1D int32 arrays which contain token ids of dataset source
subtokens_mask: a list of 1D boolean arrays whose elements are ``True`` if the corresponding token is the
first token in some word
punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels
capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels
waveforms: a list of 1D float arrays which contain raw audio waveforms
audio_lengths: a list of 1D int32 arrays which contain the length of the corresponding audio from ``waveforms``
audio_filepaths: a list of strings which contain paths to audio files
Returns:
a list of batches. Each batch is a dictionary with items:
- ``'input_ids'``: a ``np.int32`` numpy array;
- ``'subtokens_mask'``: a boolean numpy array;
- ``'punct_labels'``: a ``np.int32`` numpy array;
- ``'capit_labels'``: a ``np.int32`` numpy array.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contains the items
- ``'segment_ids'``: a ``np.int8`` numpy array;
- ``'input_mask'``: a boolean numpy array;
- ``'loss_mask'``: a boolean numpy array.
If ``waveforms`` is not ``None``, then a batch also contains the items
- ``'features'``: a ``np.float`` numpy array;
- ``'features_length'``: a ``np.int32`` numpy array.
If ``audio_filepaths`` is not ``None``, then a batch also contains the item
- ``'audio_filepaths'``: a list of strings.
The values of a batch dictionary are numpy arrays of identical shape.
"""
batches = []
dummy = [None] * len(input_ids)
zipped = list(
zip(
input_ids,
subtokens_mask,
punct_labels,
capit_labels,
waveforms if waveforms else dummy,
audio_lengths if audio_lengths else dummy,
audio_filepaths if audio_filepaths else dummy,
)
)
for item in zipped:
batch = {
"input_ids": item[0],
"subtokens_mask": item[1],
"punct_labels": item[2].astype(np.int64),
"capit_labels": item[3].astype(np.int64),
}
if self.use_audio and self.preload_audios:
batch['features'] = item[4].astype(np.float64)
batch['features_length'] = item[5]
elif self.use_audio and not self.preload_audios:
batch['audio_filepaths'] = item[6]
batches.append(batch)
return batches
def _pack_into_batches(
self,
input_ids: List[np.ndarray],
subtokens_mask: List[np.ndarray],
punct_labels: List[np.ndarray],
capit_labels: List[np.ndarray],
waveforms: Optional[List[np.ndarray]] = None,
audio_lengths: Optional[List[np.ndarray]] = None,
audio_filepaths: Optional[List[str]] = None,
) -> List[Dict[str, np.ndarray]]:
"""
Shuffles input sequences, sorts them by number of tokens, pads, and packs them into batches which satisfy the
following conditions:
- total number of tokens in a batch including paddings is less than or equal to ``self.tokens_in_batch``
- batch size is evenly divisible by 8 (except for the last batch)
- the padded sequence length in every batch is evenly divisible by 8
Created batches are shuffled before returning.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then ``'segment_ids'``, ``'loss_mask'``, and
``'input_mask'`` are added to the batch.
If ``self.batch_building_progress_queue`` is not ``None``, then padding progress is reported to
``self.batch_building_progress_queue``. Otherwise, a new ``tqdm`` instance is created in the
``_pack_into_batches`` method.
Args:
input_ids: a list of 1D int32 arrays which contain token ids of dataset source
subtokens_mask: a list of 1D boolean arrays whose elements are ``True`` if the corresponding token is the
first token in some word
punct_labels: a list of 1D int32 arrays which contain encoded punctuation labels
capit_labels: a list of 1D int32 arrays which contain encoded capitalization labels
waveforms: a list of 1D float arrays which contain raw audio waveforms
audio_lengths: a list of 1D int32 arrays which contain the length of the corresponding audio from ``waveforms``
audio_filepaths: a list of strings which contain paths to audio files
Returns:
a list of batches. Each batch is a dictionary with items:
- ``'input_ids'``: a ``np.int32`` numpy array;
- ``'subtokens_mask'``: a boolean numpy array;
- ``'punct_labels'``: a ``np.int32`` numpy array;
- ``'capit_labels'``: a ``np.int32`` numpy array.
If ``self.add_masks_and_segment_ids_to_batch`` is ``True``, then a batch also contains the items
- ``'segment_ids'``: a ``np.int8`` numpy array;
- ``'input_mask'``: a boolean numpy array;
- ``'loss_mask'``: a boolean numpy array.
If ``waveforms`` is not ``None``, then a batch also contains the items
- ``'features'``: a ``np.float`` numpy array;
- ``'features_length'``: a ``np.int32`` numpy array.
If ``audio_filepaths`` is not ``None``, then a batch also contains the item
- ``'audio_filepaths'``: a list of strings.
The values of a batch dictionary are numpy arrays of identical shape.
"""
dummy = [None] * len(input_ids)
zipped = list(
zip(
input_ids,
subtokens_mask,
punct_labels,
capit_labels,
waveforms if waveforms else dummy,
audio_lengths if audio_lengths else dummy,
audio_filepaths if audio_filepaths else dummy,
)
)
self.batch_shuffling_random_state.shuffle(zipped)
dim_sort = 4 if self.use_audio and self.preload_audios else 0
input_ids, subtokens_mask, punct_labels, capit_labels, waveforms, audio_lengths, audio_filepaths = zip(
*sorted(zipped, key=lambda x: x[dim_sort].shape[0])
)
batch_beginnings, batch_sizes, batch_seq_lengths = self._mark_up_batches(input_ids)
batches = []
if self.batch_building_progress_queue is None:
inp_iterator = tqdm(
zip(batch_beginnings, batch_sizes, batch_seq_lengths),
total=len(batch_beginnings),
desc="Batch building",
unit="batch",
)
else:
# In this case we report number of queries not number of batches
inp_iterator = zip(batch_beginnings, batch_sizes, batch_seq_lengths)
progress_made = 0
for start, size, length in inp_iterator:
batch_input_ids = pad(input_ids[start : start + size], length, self.tokenizer.pad_id)
batch_subtokens_mask = pad(subtokens_mask[start : start + size], length, False)
batch = {
"input_ids": batch_input_ids,
"subtokens_mask": batch_subtokens_mask,
"punct_labels": pad(
punct_labels[start : start + size], length, self.punct_label_ids[self.pad_label]
).astype(np.int64),
"capit_labels": pad(
capit_labels[start : start + size], length, self.capit_label_ids[self.pad_label]
).astype(np.int64),
}
if self.use_audio and self.preload_audios:
batch['features'] = pad(
waveforms[start : start + size], max(audio_lengths[start : start + size]), 0.0
).astype(np.float64)
batch['features_length'] = audio_lengths[start : start + size]
elif self.use_audio and not self.preload_audios:
batch['audio_filepaths'] = audio_filepaths[start : start + size]
if self.add_masks_and_segment_ids_to_batch:
batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
batch_input_ids,
batch_subtokens_mask,
self.tokenizer.pad_id,
self.tokenizer.cls_id,
self.tokenizer.sep_id,
self.ignore_start_end,
self.ignore_extra_tokens,
)
batch['segment_ids'] = batch_segment_ids
batch['input_mask'] = batch_input_mask
batch['loss_mask'] = batch_loss_mask
batches.append(batch)
if self.batch_building_progress_queue is not None:
progress_made += size
if progress_made >= BATCH_BUILDING_PROGRESS_REPORT_PERIOD:
self.batch_building_progress_queue.put(progress_made)
progress_made = 0
if self.batch_building_progress_queue is not None:
self.batch_building_progress_queue.put(progress_made)
self.batch_shuffling_random_state.shuffle(batches)
return batches
def repack_batches_with_shuffle(self) -> None:
"""A function for proper shuffling of a dataset. Pytorch data loader shuffling will only permute batches."""
if not self.use_bucketing:
return
logging.info("Shuffling training dataset")
self.batches = self._pack_into_batches(
self.input_ids,
self.subtokens_mask,
self.punct_labels,
self.capit_labels,
self.waveforms,
self.waveforms_length,
self.audio_filepaths,
)
def _calculate_and_save_label_frequencies(self, all_labels: List[np.ndarray], name: str) -> Dict[str, float]:
"""Calculates and saves labels frequencies in :attr:`label_info_save_dir`."""
merged_labels = itertools.chain.from_iterable(all_labels)
if self.verbose:
logging.info('Three most popular labels')
self.label_info_save_dir.mkdir(parents=True, exist_ok=True)
_, label_frequencies, _ = get_label_stats(
merged_labels, str(self.label_info_save_dir / f'label_count_{name}.tsv')
)
return label_frequencies
def save_labels_and_get_file_paths(
self, punct_labels_file_name: str, capit_labels_file_name: str
) -> Tuple[Path, Path]:
"""
Saves label ids into files located in ``self.label_info_save_dir``. Saved label ids are usually used for
``.nemo`` checkpoint creation.
The signatures of this method and the signature of the method
:meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationTarredDataset.save_labels_and_get_file_paths`
must be identical.
Args:
punct_labels_file_name (:obj:`str`): a name of a punctuation labels file
capit_labels_file_name (:obj:`str`): a name of a capitalization labels file
Returns:
:obj:`Tuple[pathlib.Path, pathlib.Path]`: a tuple containing:
- :obj:`pathlib.Path`: a path to the saved punctuation labels file
- :obj:`pathlib.Path`: a path to the saved capitalization labels file
"""
nemo_dir = self.label_info_save_dir / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
punct_labels_file = nemo_dir / punct_labels_file_name
capit_labels_file = nemo_dir / capit_labels_file_name
save_label_ids(self.punct_label_ids, punct_labels_file)
save_label_ids(self.capit_label_ids, capit_labels_file)
return punct_labels_file, capit_labels_file
def __len__(self) -> int:
return len(self.batches)
def collate_fn(self, batches: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""
If ``self.use_bucketing`` is set to ``True``, returns the zeroth batch from the ``batches`` list passed for collating and casts ``'segment_ids'``, ``'punct_labels'``,
``'capit_labels'`` to types supported by
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`
or :class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationLexicalAudioModel` if ``self.use_audio`` is set to ``True``.
All output tensors have shape ``[Batch, Time]``.
.. warning::
The ``batch_size`` parameter of a PyTorch data loader and sampler has to be ``1`` if ``self.use_bucketing`` is set to ``True``.
Args:
batches (:obj:`List[Dict[str, np.ndarray]]`): a list containing 1 batch passed for collating
Returns:
:obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch
items see method :meth:`__getitem__`):
- ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor.
- ``'features'`` (:obj:`torch.Tensor`): :obj:`torch.float` tensor.
- ``'features_length'`` (:obj:`torch.Tensor`): :obj:`torch.long` tensor.
"""
if self.use_bucketing:
batch = {k: torch.as_tensor(v) for k, v in batches[0].items() if k != 'audio_filepaths'}
batch['segment_ids'] = batch['segment_ids'].int()
batch['punct_labels'] = batch['punct_labels'].long()
batch['capit_labels'] = batch['capit_labels'].long()
if self.use_audio and self.preload_audios:
batch['features'] = batch['features'].to(torch.float32)
return batch
else:
for batch in batches:
batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
batch['input_ids'],
batch['subtokens_mask'],
self.tokenizer.pad_id,
self.tokenizer.cls_id,
self.tokenizer.sep_id,
self.ignore_start_end,
self.ignore_extra_tokens,
)
batch['segment_ids'] = torch.as_tensor(batch_segment_ids, dtype=torch.int)
batch['input_mask'] = torch.as_tensor(batch_input_mask)
batch['loss_mask'] = torch.as_tensor(batch_loss_mask)
batch['input_ids'] = torch.as_tensor(batch['input_ids'], dtype=torch.int)
batch['subtokens_mask'] = torch.as_tensor(batch['subtokens_mask'])
batch['punct_labels'] = torch.as_tensor(batch['punct_labels'], dtype=torch.long)
batch['capit_labels'] = torch.as_tensor(batch['capit_labels'], dtype=torch.long)
if 'features' in batch:
batch['features'] = torch.as_tensor(batch['features'], dtype=torch.float)
batch['features_length'] = torch.as_tensor(batch['features_length'], dtype=torch.long)
elif self.use_audio:
if ASR_AVAILABLE:
waveform = AudioSegment.from_file(batch['audio_filepaths'], target_sr=self.sample_rate)
batch['features'] = torch.as_tensor(waveform.samples, dtype=torch.float)
batch['features_length'] = torch.as_tensor(waveform.num_samples, dtype=torch.long)
else:
raise ModuleNotFoundError(
'Nemo ASR was not installed, see https://github.com/NVIDIA/NeMo#installation for installation instructions'
)
segment_ids = pad_sequence([batch['segment_ids'] for batch in batches])
input_mask = pad_sequence([batch['input_mask'] for batch in batches])
loss_mask = pad_sequence([batch['loss_mask'] for batch in batches])
input_ids = pad_sequence([batch['input_ids'] for batch in batches], padding_value=self.tokenizer.pad_id)
subtokens_mask = pad_sequence([batch['subtokens_mask'] for batch in batches], padding_value=False)
punct_labels = pad_sequence([batch['punct_labels'] for batch in batches], padding_value=0)
capit_labels = pad_sequence([batch['capit_labels'] for batch in batches], padding_value=0)
features = pad_sequence([batch['features'] for batch in batches], padding_value=0.0)
features_length = torch.tensor([batch['features_length'] for batch in batches])
return {
'input_ids': input_ids.T,
'subtokens_mask': subtokens_mask.T,
'punct_labels': punct_labels.T,
'capit_labels': capit_labels.T,
'features': features.T,
'features_length': features_length,
'segment_ids': segment_ids.T,
'input_mask': input_mask.T,
'loss_mask': loss_mask.T,
}
def __getitem__(self, idx: int) -> Dict[str, np.ndarray]:
"""
Return a batch with index ``idx``. The values of a batch dictionary are numpy arrays of identical shapes
``[Batch, Time]``. Labels are identical for all tokens in a word. For example, if
- word ``'Tokenization'`` is tokenized into tokens ``['token', 'ization']``,
- it is followed by a comma,
then punctuation labels are ``[',', ',']`` and capitalization labels are ``['U', 'U']`` (``'U'`` is a label
for words which start with upper case character).
Args:
idx: an index of returned batch
Returns:
:obj:`Dict[str, np.ndarray]`: a dictionary with items:
- ``'input_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded tokens,
- ``'subtokens_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array whose elements are ``True`` if they
correspond to the first token in a word,
- ``'punct_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded punctuation
labels,
- ``'capit_labels'`` (:obj:`numpy.ndarray`): :obj:`numpy.int32` array containing encoded capitalization
labels.
- ``'segment_ids'`` (:obj:`numpy.ndarray`): :obj:`numpy.int8` array filled with zeros (BERT token types
in HuggingFace terminology) (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
item is missing),
- ``'input_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array whose elements are ``True`` if the corresponding
token is not a padding token (if ``self.add_masks_and_segment_ids_to_batch`` is ``False``, then this
item is missing),
- ``'loss_mask'`` (:obj:`numpy.ndarray`): :obj:`bool` array whose elements are ``True`` if loss is
computed for the corresponding token. See more in the description of constructor parameters
``ignore_start_end``, ``ignore_extra_tokens`` (if ``self.add_masks_and_segment_ids_to_batch`` is
``False``, then this item is missing),
- ``'features'`` (:obj:`numpy.ndarray`): :obj:`np.float` array of audio waveforms if ``self.preload_audios`` is set to ``True``, else empty,
- ``'features_length'`` (:obj:`numpy.ndarray`): :obj:`np.long` array of the number of samples per audio,
- ``'audio_filepaths'`` (:obj:`List[str]`): paths to audio files if ``self.preload_audios`` is set to ``False``.
"""
return self.batches[idx]
| NeMo-main | nemo/collections/nlp/data/token_classification/punctuation_capitalization_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| NeMo-main | nemo/collections/nlp/data/token_classification/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import json
import multiprocessing as mp
import os
import pickle
import re
import shutil
import tempfile
from collections import deque
from pathlib import Path
from typing import Any, Callable, Dict, Iterator, List, Optional, Set, Tuple, Type, Union
import numpy as np
import torch
import webdataset as wds
from joblib import Parallel, delayed
from omegaconf import DictConfig
from torch.utils.data import IterableDataset
from nemo.collections.common.tokenizers import TokenizerSpec
from nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset import (
LABEL_ID_DIR_FOR_NEMO_CHECKPOINT,
BertPunctuationCapitalizationDataset,
Progress,
create_label_ids,
create_masks_and_segment_ids,
load_label_ids,
raise_not_equal_labels_error,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_tokenizer
from nemo.core.neural_types import AudioSignal, ChannelType, LabelsType, LengthsType, MaskType, NeuralType
from nemo.utils import logging
NUMBER_RE = "(0|[1-9][0-9]*)"
TAR_FRAGMENT_TMPL_IN_PROGRESS = "fragment{fragment_idx}.{file_idx}.tar"
TAR_FRAGMENT_TMPL_FINISHED = "fragment{fragment_idx}.num_batches{num_batches}.{file_idx}.tar"
TAR_FRAGMENT_TMPL_TO_REPACK = "fragment{fragment_idx}.num_batches{num_batches}.{file_idx}.tar.to_repack"
TAR_FRAGMENT_PATTERN_IN_PROGRESS = re.compile(f"fragment{NUMBER_RE}.{NUMBER_RE}.tar$")
TAR_FRAGMENT_PATTERN_FINISHED = re.compile(f"fragment{NUMBER_RE}.num_batches{NUMBER_RE}.{NUMBER_RE}.tar$")
TAR_FRAGMENT_PATTERN_TO_REPACK = re.compile(f"fragment{NUMBER_RE}.num_batches{NUMBER_RE}.{NUMBER_RE}.tar.to_repack$")
NOT_ALLOWED_CHARACTERS_IN_FILE_NAME = re.compile(f"[^a-zA-Z0-9_.-]")
REPLACE_NOT_ALLOWED_CHARACTERS_IN_FILE_NAME = re.compile(f"-*[^a-zA-Z0-9_.-]+-*")
DATASET_PARAMETERS_TMPL = "{prefix}.tokens{tokens_in_batch}.max_seq_length{max_seq_length}.{tokenizer}"
TAR_FINAL_TMPL = ".batches{num_batches}.{ctr}.tar"
PROGRESS_REPORT_PERIOD = 10 ** 4
METADATA_PUNCT_LABEL_VOCAB_KEY = 'punct_label_vocab_file'
METADATA_CAPIT_LABEL_VOCAB_KEY = 'capit_label_vocab_file'
DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME = 'punct_label_vocab.csv'
DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME = 'capit_label_vocab.csv'
def count_lines_and_get_fragment_starting_positions(
file_name: Path, lines_per_dataset_fragment: int
) -> Tuple[int, List[int]]:
"""
Returns number of lines in a file and indices of fragment starting bytes.
Args:
file_name: a path to a text or label file
lines_per_dataset_fragment: number of lines in a dataset fragment. The last fragment can contain fewer lines
Returns:
num_lines: number of lines in a file
start_bytes: indices of fragment starting bytes
"""
pos = [0]
with file_name.open() as f:
i = 0
line = f.readline()
while line:
i += 1
if i % lines_per_dataset_fragment == 0:
pos.append(f.tell())
line = f.readline()
return i, pos[:-1] if i % lines_per_dataset_fragment == 0 else pos
def get_fragment_start_bytes(
text_file: Path, labels_file: Path, lines_per_dataset_fragment: int, audio_file: Path = None
) -> Union[Tuple[Any, Any, Any, Any], Tuple[Any, Any, Any]]:
"""
A function for calculating borders of dataset fragments. The function is used to split ``text_file`` and
``labels_file`` for processing them in parallel.
Args:
text_file: a path to a dataset source file
labels_file: a path to a dataset label file
lines_per_dataset_fragment: a number of lines in one fragment
audio_file: a path to a dataset audio file if one is needed
Returns:
num_lines: total number of elements in the dataset (number of lines in ``text_file`` and ``labels_file``)
text_start_bytes: indices of the first bytes of fragments in ``text_file``
label_start_bytes: indices of the first bytes of fragments in ``labels_file``
audio_start_bytes: indices of the first bytes of fragments in ``audio_file`` (returned only if ``audio_file`` is provided)
"""
logging.info(
f"Counting lines in files {text_file} and {labels_file} and creating segment borders. This may take "
f"considerable time. 86GB, 1.27b lines file was processed in 7 minutes."
)
if audio_file:
result = Parallel(n_jobs=3)(
delayed(count_lines_and_get_fragment_starting_positions)(file_name, lines_per_dataset_fragment)
for file_name in [text_file, labels_file, audio_file]
)
num_lines = result[0][0]
if result[0][0] != result[1][0]:
raise ValueError(
f"Text file {text_file} and label file {labels_file} contain different number of lines. Number of lines "
f"in text file: {result[0][0]}, number of lines in label file: {result[1][0]}."
)
text_start_bytes, label_start_bytes, manifest_start_bytes = result[0][1], result[1][1], result[2][1]
assert len(text_start_bytes) == len(label_start_bytes) == len(manifest_start_bytes)
return num_lines, text_start_bytes, label_start_bytes, manifest_start_bytes
else:
result = Parallel(n_jobs=2)(
delayed(count_lines_and_get_fragment_starting_positions)(file_name, lines_per_dataset_fragment)
for file_name in [text_file, labels_file]
)
num_lines = result[0][0]
if result[0][0] != result[1][0]:
raise ValueError(
f"Text file {text_file} and label file {labels_file} contain different number of lines. Number of lines "
f"in text file: {result[0][0]}, number of lines in label file: {result[1][0]}."
)
text_start_bytes, label_start_bytes = result[0][1], result[1][1]
assert len(text_start_bytes) == len(label_start_bytes)
return num_lines, text_start_bytes, label_start_bytes
def process_fragment(
text_file: Path,
labels_file: Path,
output_dir: Path,
text_start_pos: int,
label_start_pos: int,
lines_per_dataset_fragment: int,
max_seq_length: int,
tokens_in_batch: int,
num_batches_per_tarfile: int,
tokenizer_name: str,
tokenizer_model: Optional[Path],
vocab_file: Optional[Path],
merges_file: Optional[Path],
special_tokens: Dict[str, str],
use_fast_tokenizer: Optional[bool],
pad_label: str,
punct_label_ids: Dict[str, int],
capit_label_ids: Dict[str, int],
fragment_idx: int,
tokenization_progress_queue: mp.Queue,
batch_mark_up_progress_queue: mp.Queue,
batch_building_progress_queue: mp.Queue,
writing_to_tar_progress_queue: mp.Queue,
audio_file: Path = None,
sample_rate: int = None,
audio_file_start_pos: int = None,
use_audio: bool = False,
) -> None:
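# Tokenizes one dataset fragment, builds a temporary ``BertPunctuationCapitalizationDataset`` from it, and
# writes the resulting batches into tar files of ``num_batches_per_tarfile`` batches each. Batches which do not
# fill a whole tar file are written into a ``*.to_repack`` file that is merged with others later by
# ``repack_tar_files_with_not_enough_batches``.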
tokenizer = get_tokenizer(
tokenizer_name,
tokenizer_model=None if tokenizer_model is None else str(tokenizer_model),
vocab_file=None if vocab_file is None else str(vocab_file),
merges_file=None if merges_file is None else str(merges_file),
special_tokens=special_tokens,
use_fast=use_fast_tokenizer,
)
tmp_text: Optional[str] = None
tmp_labels: Optional[str] = None
tmp_audio: Optional[str] = None
try:
otfd, tmp_text = tempfile.mkstemp(suffix='.txt', prefix=f'text_{fragment_idx}_', dir=output_dir, text=True)
olfd, tmp_labels = tempfile.mkstemp(suffix='.txt', prefix=f'labels_{fragment_idx}_', dir=output_dir, text=True)
if use_audio:
oafd, tmp_audio = tempfile.mkstemp(
suffix='.txt', prefix=f'audio_{fragment_idx}_', dir=output_dir, text=True
)
with text_file.open() as tf, labels_file.open() as lf, os.fdopen(otfd, 'w') as otf, os.fdopen(
olfd, 'w'
) as olf: # handle audio manifest
if use_audio:
mf = audio_file.open()
mf.seek(audio_file_start_pos)
oaf = os.fdopen(oafd, 'w')
tf.seek(text_start_pos)
lf.seek(label_start_pos)
for _ in range(lines_per_dataset_fragment):
text_line = tf.readline()
if not text_line:
break
otf.write(text_line)
olf.write(lf.readline())
if use_audio:
oaf.write(mf.readline())
if use_audio:
mf.close()
oaf.close()
dataset = BertPunctuationCapitalizationDataset(
tmp_text,
tmp_labels,
max_seq_length,
tokenizer,
tokens_in_batch=tokens_in_batch,
pad_label=pad_label,
punct_label_ids=punct_label_ids,
capit_label_ids=capit_label_ids,
n_jobs=0,
use_cache=False,
add_masks_and_segment_ids_to_batch=False,
verbose=False,
tokenization_progress_queue=tokenization_progress_queue,
batch_mark_up_progress_queue=batch_mark_up_progress_queue,
batch_building_progress_queue=batch_building_progress_queue,
audio_file=tmp_audio,
sample_rate=sample_rate,
use_audio=use_audio,
use_bucketing=True,
preload_audios=use_audio,
)
finally:
if tmp_text is not None and os.path.exists(tmp_text):
os.remove(tmp_text)
if tmp_labels is not None and os.path.exists(tmp_labels):
os.remove(tmp_labels)
if tmp_audio is not None and os.path.exists(tmp_audio):
os.remove(tmp_audio)
dataset.features_pkl.unlink()
tar_ctr = 0
current_file_name = output_dir / TAR_FRAGMENT_TMPL_IN_PROGRESS.format(fragment_idx=fragment_idx, file_idx=tar_ctr)
current_num_batches = 0
sink = wds.TarWriter(str(current_file_name))
progress_made = 0
for batch_i, batch in enumerate(dataset):
sink.write({"__key__": f"fragment-{fragment_idx}-batch-{batch_i}", "batch.pyd": batch})
current_num_batches += 1
progress_made += len(batch['input_ids'])
if current_num_batches % num_batches_per_tarfile == 0:
sink.close()
current_file_name.rename(
output_dir
/ TAR_FRAGMENT_TMPL_FINISHED.format(
fragment_idx=fragment_idx, num_batches=current_num_batches, file_idx=tar_ctr
)
)
writing_to_tar_progress_queue.put(progress_made)
progress_made = 0
tar_ctr += 1
current_file_name = output_dir / TAR_FRAGMENT_TMPL_IN_PROGRESS.format(
fragment_idx=fragment_idx, file_idx=tar_ctr
)
current_num_batches = 0
sink = wds.TarWriter(str(current_file_name))
sink.close()
writing_to_tar_progress_queue.put(progress_made)
if progress_made > 0:
new_file_name = output_dir / TAR_FRAGMENT_TMPL_TO_REPACK.format(
fragment_idx=fragment_idx, num_batches=current_num_batches, file_idx=tar_ctr
)
current_file_name.rename(new_file_name)
else:
current_file_name.unlink()
if fragment_idx == 0:
punct_label_ids_file, capit_label_ids_file = dataset.save_labels_and_get_file_paths(
DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME, DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME
)
punct_label_ids_file.rename(output_dir / DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME)
capit_label_ids_file.rename(output_dir / DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME)
shutil.rmtree(punct_label_ids_file.parent)
def remove_unexpected_files_and_dirs(output_dir: Path, output_file_tmpl: str, metadata_file_name: Path) -> None:
"""
This function removes all files with names which may be used in the dataset creation.
Args:
output_dir: a path to directory where removal is performed
output_file_tmpl: a format string for a name of final tar file. Must include fields ``ctr`` for number of the
file and ``num_batches`` for number of batches in the file.
metadata_file_name: a metadata file name
"""
if not output_dir.is_dir():
return
tar_final_pattern = re.compile(output_file_tmpl.format(ctr=NUMBER_RE, num_batches=NUMBER_RE))
unexpected_tar_files = [
path
for path in output_dir.iterdir()
if any(
[
p.match(path.name) is not None
for p in [
TAR_FRAGMENT_PATTERN_IN_PROGRESS,
TAR_FRAGMENT_PATTERN_FINISHED,
TAR_FRAGMENT_PATTERN_TO_REPACK,
tar_final_pattern,
]
]
)
]
if unexpected_tar_files:
logging.warning(
f"Found {len(unexpected_tar_files)} unexpected tar files in the output directory {output_dir}. "
f"All of them are going to be removed. The files match one of 3 patterns: "
f"'{TAR_FRAGMENT_PATTERN_IN_PROGRESS.pattern}', '{TAR_FRAGMENT_PATTERN_FINISHED.pattern}', "
f"'{tar_final_pattern.pattern}'. The first unexpected files: "
f"{', '.join([str(f) for f in unexpected_tar_files[:3]])}."
)
for fn in unexpected_tar_files:
fn.unlink()
if metadata_file_name.exists():
logging.warning(f"Found metadata file {metadata_file_name}. It is going to be removed.")
metadata_file_name.unlink()
punct_label_ids = output_dir / DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME
capit_label_ids = output_dir / DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME
if punct_label_ids.exists():
logging.warning(f"Found unexpected punctuation label file {punct_label_ids}. It is going to be removed.")
punct_label_ids.unlink()
if capit_label_ids.exists():
logging.warning(f"Found unexpected capitalization label file {capit_label_ids}. It is going to be removed.")
capit_label_ids.unlink()
def collect_unique_labels_from_fragment(
labels_file: Path, start_pos: int, lines_per_dataset_fragment: int, progress_queue: mp.Queue, fragment_idx: int
) -> Tuple[Set[str], Set[str]]:
"""
Returns a set of unique punctuation labels and a set of unique capitalization labels.
Args:
labels_file: a path to a file with labels
start_pos: an index of the first byte of a fragment in ``labels_file``
lines_per_dataset_fragment: number of lines in a dataset fragment. The last fragment can contain fewer lines.
progress_queue: a queue for reporting number of processed lines
fragment_idx: a processed fragment index
Returns:
unique_punct: a set of unique punctuation labels
unique_capit: a set of unique capitalization labels
"""
unique_punct, unique_capit = set(), set()
with labels_file.open() as f:
f.seek(start_pos)
progress_report = 0
for i in range(lines_per_dataset_fragment):
line = f.readline()
if not line:
break
pairs = line.split()
if not all([len(p) == 2 for p in pairs]):
broken_pairs = [i for i, p in enumerate(pairs) if len(p) != 2]
raise ValueError(
f"Found broken labels line in number {fragment_idx * lines_per_dataset_fragment + i} in file "
f"{labels_file}. Indices of broken pairs of labels: {broken_pairs}"
)
punct, capit = zip(*pairs)
unique_punct.update(punct)
unique_capit.update(capit)
progress_report += 1
if progress_report >= PROGRESS_REPORT_PERIOD:
progress_queue.put(progress_report)
progress_report = 0
progress_queue.put(progress_report)
return unique_punct, unique_capit
def create_label_dictionaries(
labels_file: Path,
text_start_bytes: List[int],
num_lines: int,
lines_per_dataset_fragment: int,
pad_label: str,
n_jobs: int,
) -> Tuple[Dict[str, int], Dict[str, int]]:
"""
Creates punctuation and capitalization label ids dictionaries based on labels present in ``labels_file``.
Args:
labels_file: a path to file with labels
text_start_bytes: indices of first bytes of fragments in ``labels_file``
num_lines: total number of lines in ``labels_file``
lines_per_dataset_fragment: number of lines in dataset fragments. The last fragment can have fewer lines
pad_label: a label used for padding and for absence of punctuation and capitalization
n_jobs: a number of fragments processed in parallel
Returns:
punct_label_ids: a dictionary with punctuation label ids
capit_label_ids: a dictionary with capitalization label ids
"""
with Progress(num_lines, "Creating label dictionary", "line") as progress_queues:
result = Parallel(n_jobs=min(n_jobs, len(text_start_bytes)))(
delayed(collect_unique_labels_from_fragment)(
labels_file, start_pos, lines_per_dataset_fragment, *progress_queues, fragment_idx
)
for fragment_idx, start_pos in enumerate(text_start_bytes)
)
unique_punct, unique_capit = zip(*result)
unique_punct = set().union(*unique_punct)
unique_capit = set().union(*unique_capit)
return create_label_ids(unique_punct, pad_label), create_label_ids(unique_capit, pad_label)
def check_label_ids(pad_label: str, punct_label_ids: Dict[str, int], capit_label_ids: Dict[str, int]) -> None:
"""
A function for checking that the pad label has the zeroth id in the ``punct_label_ids`` and ``capit_label_ids`` dictionaries.
Args:
pad_label: a pad label
punct_label_ids: a dictionary with punctuation label ids
capit_label_ids: a dictionary with capitalization label ids
"""
msg = "Parameter `pad_label` has to have id 0 in dictionary `{param_name}` whereas it has id {id_}." + (
'' if len(pad_label) > 10 else f" pad_label='{pad_label}'"
)
if punct_label_ids is not None:
if punct_label_ids[pad_label] != 0:
raise ValueError(msg.format(param_name='punct_label_ids', id_=punct_label_ids[pad_label]))
if capit_label_ids is not None:
if capit_label_ids[pad_label] != 0:
raise ValueError(msg.format(param_name='capit_label_ids', id_=capit_label_ids[pad_label]))
def process_error(msg: str, error_class_or_function: Union[Type[Exception], Callable[[str], Any]]) -> None:
if inspect.isclass(error_class_or_function) and issubclass(error_class_or_function, Exception):
raise error_class_or_function(msg)
if callable(error_class_or_function):
error_class_or_function(msg)
raise ValueError(
f"Parameter `error_class_or_function` has to be a subclass of `Exception` or a function."
f"Given {type(error_class_or_function)}"
)
def check_labels_for_being_unique_before_building_label_ids(
pad_label: str,
other_labels: List[str],
pad_label_name: str,
other_labels_name: str,
error_class_or_function: Union[Type[Exception], Callable[[str], Any]],
) -> None:
"""
A function for checking that all labels are unique.
Args:
pad_label: a pad label
other_labels: a list of labels except for the pad label
pad_label_name: a name of the pad label used in error message
other_labels_name: a name of other labels used in error message
error_class_or_function: a class of an exception which is raised if there is a problem with labels.
Alternatively it can be a function for handling exceptions, for example ``argparse.ArgumentParser.error``.
Such a function has to take one argument -- error message.
"""
for i, lbl in enumerate(other_labels):
if lbl == pad_label:
msg = f"Label number {i} in parameter `{other_labels_name}` is equal to `{pad_label_name}`."
process_error(msg, error_class_or_function)
for i in range(len(other_labels) - 1):
for lbl in other_labels[i + 1 :]:
if lbl == other_labels[i]:
msg = f"Label number {i} occurs at least 2 times in parameter `{other_labels_name}`."
process_error(msg, error_class_or_function)
def build_label_ids_from_list_of_labels(pad_label: str, other_labels: List[str]) -> Dict[str, int]:
"""
Builds label ids dictionary from pad label and list of other labels. Used for parsing command line arguments.
Args:
pad_label: a pad label
other_labels: list of labels except for the pad label
Returns:
a dictionary with label ids
"""
check_labels_for_being_unique_before_building_label_ids(
pad_label, other_labels, 'pad_label', 'other_labels', ValueError
)
ids = {pad_label: 0}
for lbl in other_labels:
ids[lbl] = len(ids)
return ids
def get_label_dictionaries(
labels_file: Path,
start_bytes: List[int],
num_lines: int,
lines_per_dataset_fragment: int,
pad_label: str,
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
punct_label_vocab_file: Optional[Path],
capit_label_vocab_file: Optional[Path],
n_jobs: int,
) -> Tuple[Dict[str, int], Dict[str, int]]:
"""
Return label ids if the label ids are present in parameters ``punct_label_ids``, ``capit_label_ids``,
``punct_label_vocab_file``, ``capit_label_vocab_file``. Otherwise, label ids are created using ``labels_file``.
Args:
labels_file: a path to file with labels. Labels have to be given in the format described in
https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/nlp/punctuation_and_capitalization.html#nemo-data-format
start_bytes: a list of positions in ``labels_file`` at which fragments start. Parameter ``start_bytes`` is used
for creating labels for several fragments in parallel
num_lines: total number of lines in ``labels_file``. Parameter ``num_lines`` is used for showing progress of
label ids collection
lines_per_dataset_fragment: number of lines in a dataset fragment
pad_label: a label used for padding and also neutral label showing there is no punctuation and capitalization.
Label ``pad_label`` has to have id ``0`` in parameters ``punct_label_ids``, ``capit_label_ids``,
``punct_label_vocab_file``, ``capit_label_vocab_file`` if these parameters are provided.
punct_label_ids: a dictionary with punctuation label ids. Pad label has to have id ``0``. No more than 1 of
parameters ``punct_label_ids`` and ``punct_label_vocab_file`` can be provided.
capit_label_ids: a dictionary with capitalization label ids. Pad label has to have id ``0``. No more than 1 of
parameters ``capit_label_ids`` and ``capit_label_vocab_file`` can be provided.
punct_label_vocab_file: a text file with punctuation labels. Every line in the file contains 1 label. Pad label
has to be in the first line. No more than 1 of parameters ``punct_label_ids`` and
``punct_label_vocab_file`` can be provided.
capit_label_vocab_file: a text file with capitalization labels. Every line in the file contains 1 label. Pad
label has to be in the first line. No more than 1 of parameters ``capit_label_ids`` and
``capit_label_vocab_file`` can be provided.
n_jobs: a number of fragments processed in parallel
Returns:
punct_label_ids: a dictionary with punctuation label ids
capit_label_ids: a dictionary with capitalization label ids
"""
if punct_label_ids is not None and punct_label_vocab_file is not None:
raise ValueError("You can provide at most one of parameters `punct_label_ids` and `punct_label_vocab_file`.")
if capit_label_ids is not None and capit_label_vocab_file is not None:
raise ValueError("You can provide at most one of parameters `capit_label_ids` and `capit_label_vocab_file`.")
if punct_label_ids is None and punct_label_vocab_file is not None:
punct_label_ids = load_label_ids(punct_label_vocab_file)
if capit_label_ids is None and capit_label_vocab_file is not None:
capit_label_ids = load_label_ids(capit_label_vocab_file)
check_label_ids(pad_label, punct_label_ids, capit_label_ids)
if punct_label_ids is None or capit_label_ids is None:
_punct_label_ids, _capit_label_ids = create_label_dictionaries(
labels_file, start_bytes, num_lines, lines_per_dataset_fragment, pad_label, n_jobs
)
if punct_label_ids is None:
punct_label_ids = _punct_label_ids
if capit_label_ids is None:
capit_label_ids = _capit_label_ids
return punct_label_ids, capit_label_ids
def decode_pyd(key: str, value: bytes) -> Any:
"""
Used for decoding batch loaded by ``webdataset`` from tar files.
Args:
key: name of a batch
value: pickled batch
Returns:
decoded batch
"""
return pickle.loads(value)
def repack_tar_files_with_not_enough_batches(output_dir: Path, num_batches_per_tarfile: int) -> None:
f"""
It is possible that number of batches in a fragment is not evenly divisible by ``num_batches_per_tarfile``.
In such a case excess batches are put in a tar file which matches a pattern
``fragment(0|[1-9][0-9]*).num_batches(0|[1-9][0-9]*).(0|[1-9][0-9]*).tar.to_repack``. Such files are repacked by
the ``repack_tar_files_with_not_enough_batches`` function into tar files with exactly ``num_batches_per_tarfile``
batches each. If there are not enough batches in the repacked files, then up to ``num_batches_per_tarfile - 1``
remaining batches may be discarded.
Args:
output_dir: a path to the output directory which contains files to repack and where new files are saved
num_batches_per_tarfile: a number of batches in 1 tar file. If number of batches in files matching a pattern
``fragment(0|[1-9][0-9]*).num_batches(0|[1-9][0-9]*).(0|[1-9][0-9]*).tar.to_repack`` is not evenly
divisible by ``num_batches_per_tarfile`` excess batches are discarded.
"""
files_to_repack_with_matches = [
(path, TAR_FRAGMENT_PATTERN_TO_REPACK.match(path.name))
for path in output_dir.iterdir()
if TAR_FRAGMENT_PATTERN_TO_REPACK.match(path.name) is not None
]
files_to_repack_with_matches = sorted(files_to_repack_with_matches, key=lambda x: int(x[1].group(3)))
logging.info(f"Found {len(files_to_repack_with_matches)} files for repacking.")
files_to_repack_with_matches = deque(files_to_repack_with_matches)
total_batches_in_repacked_files = 0
initial_number_of_files_to_repack = len(files_to_repack_with_matches)
pop_file_ds = None
new_file_sink = None
new_file_num_batches = 0
while files_to_repack_with_matches:
assert pop_file_ds is None or new_file_sink is None
if new_file_sink is None:
# `append_file` is a file which content will serve as a start for new tar file. `append_file` content is
# copied into a `new_file` and then content of other files needing repacking is appended to content of
# `new_file`.
append_file, match = files_to_repack_with_matches.popleft()
new_file = append_file.parent / TAR_FRAGMENT_TMPL_FINISHED.format(
fragment_idx=match.group(1), num_batches=num_batches_per_tarfile, file_idx=match.group(3)
)
new_file_sink = wds.TarWriter(str(new_file))
append_ds_to_rewrite = (
wds.WebDataset(urls=[str(append_file)], nodesplitter=None)
.decode(wds.handle_extension('.pyd', decode_pyd))
.to_tuple('__key__', 'batch.pyd')
)
for key, batch in iter(append_ds_to_rewrite):
new_file_sink.write({"__key__": key, "batch.pyd": batch})
new_file_num_batches += 1
total_batches_in_repacked_files += 1
assert total_batches_in_repacked_files < initial_number_of_files_to_repack * num_batches_per_tarfile
assert new_file_num_batches == int(match.group(2)), (
f"Number of batches {new_file_num_batches} in {append_file} is different from number of batches "
f"{match.group(2)} in repacked tar file with name {append_file}."
)
append_file.unlink()
if files_to_repack_with_matches and pop_file_ds is None:
pop_file, _ = files_to_repack_with_matches.pop()
pop_file_ds = (
wds.WebDataset(urls=[str(pop_file)], nodesplitter=None)
.decode(wds.handle_extension('.pyd', decode_pyd))
.to_tuple('__key__', 'batch.pyd')
)
pop_file_ds = iter(pop_file_ds)
if pop_file_ds is not None and new_file_sink is not None:
while new_file_num_batches < num_batches_per_tarfile:
try:
key, batch = next(pop_file_ds)
except StopIteration:
pop_file_ds = None
pop_file.unlink()
break
new_file_sink.write({"__key__": key, "batch.pyd": batch})
total_batches_in_repacked_files += 1
assert total_batches_in_repacked_files < initial_number_of_files_to_repack * num_batches_per_tarfile
new_file_num_batches += 1
if new_file_num_batches >= num_batches_per_tarfile:
assert new_file_num_batches == num_batches_per_tarfile
new_file_sink.close()
new_file_sink = None
new_file_num_batches = 0
if new_file_sink is not None:
new_file_sink.close()
new_file.unlink()
logging.info(f"Discarded {new_file_num_batches} batches.")
if pop_file_ds is not None:
pop_file.unlink()
logging.info(f"Repacked {total_batches_in_repacked_files} batches from short tar files")
def create_metadata_file(
output_dir: Path, output_file_tmpl: str, metadata_file_name: Path, num_batches_per_tarfile: int
) -> None:
"""
Rename tar files according to template ``output_file_tmpl`` and save metadata file.
Args:
output_dir: a path to directory which contains initial tar files and where renamed tar files are saved
output_file_tmpl: a template of a new tar file name
metadata_file_name: a path to a file into which metadata is going to be saved
num_batches_per_tarfile: a required number of batches in tar files. Used for checking that present tar files
have correct number of batches
"""
metadata = {"num_batches": 0, "tar_files": []}
for i, fn in enumerate([fn for fn in output_dir.iterdir() if TAR_FRAGMENT_PATTERN_FINISHED.match(fn.name)]):
nb = int(TAR_FRAGMENT_PATTERN_FINISHED.match(fn.name).group(2))
assert nb == num_batches_per_tarfile
new_name = output_dir / output_file_tmpl.format(ctr=i, num_batches=nb)
fn.rename(new_name)
metadata['tar_files'].append(new_name.name)
metadata["num_batches"] += nb
metadata[METADATA_PUNCT_LABEL_VOCAB_KEY] = DEFAULT_PUNCT_LABEL_VOCAB_FILE_NAME
metadata[METADATA_CAPIT_LABEL_VOCAB_KEY] = DEFAULT_CAPIT_LABEL_VOCAB_FILE_NAME
logging.info(f"{metadata['num_batches']} batches are in tarred dataset with metadata file {metadata_file_name}")
with metadata_file_name.open('w') as f:
json.dump(metadata, f, indent=2)
def check_tar_file_prefix(
tar_file_prefix: str, error_class_or_function: Union[Type[Exception], Callable[[str], Any]], var_name: str
) -> None:
not_allowed_characters_in_prefix = NOT_ALLOWED_CHARACTERS_IN_FILE_NAME.findall(tar_file_prefix)
if not_allowed_characters_in_prefix:
not_allowed_characters_in_prefix = set(not_allowed_characters_in_prefix)
msg = (
f"Found {len(not_allowed_characters_in_prefix)} not allowed characters in `{var_name}`. Only 'A-Z', "
f"'a-z', '0-9', '_', '-', '.' characters are allowed. Examples of not allowed characters: "
f"{list(not_allowed_characters_in_prefix)[:10]}. `{var_name}`[:30]={repr(tar_file_prefix)[:30]}."
)
process_error(msg, error_class_or_function)
def create_tarred_dataset(
text_file: Union[os.PathLike, str],
labels_file: Union[os.PathLike, str],
output_dir: Union[os.PathLike, str],
max_seq_length: int,
tokens_in_batch: int,
lines_per_dataset_fragment: int,
num_batches_per_tarfile: int,
tokenizer_name: str,
tokenizer_model: Optional[Union[os.PathLike, str]] = None,
vocab_file: Optional[Union[os.PathLike, str]] = None,
merges_file: Optional[Union[os.PathLike, str]] = None,
special_tokens: Optional[Dict[str, str]] = None,
use_fast_tokenizer: Optional[bool] = False,
pad_label: str = 'O',
punct_label_ids: Optional[Dict[str, int]] = None,
capit_label_ids: Optional[Dict[str, int]] = None,
punct_label_vocab_file: Optional[Union[os.PathLike, str]] = None,
capit_label_vocab_file: Optional[Union[os.PathLike, str]] = None,
tar_file_prefix: Optional[str] = 'punctuation_capitalization',
n_jobs: Optional[int] = None,
audio_file: Optional[Path] = None,
use_audio: Optional[bool] = False,
sample_rate: Optional[int] = 16000,
) -> None:
"""
Creates a tarred dataset from ``text_file`` and ``labels_file``. A tarred dataset allows training on large amounts
of data without storing it all in memory simultaneously. You may use this function directly or try the script
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_.
A tarred dataset is a directory which contains a metadata file, tar files with batches, and the
``punct_label_vocab.csv`` and ``capit_label_vocab.csv`` files.
The metadata file is a JSON file with 4 items: ``'num_batches'``, ``'tar_files'``, ``'punct_label_vocab_file'``,
``'capit_label_vocab_file'``. The item ``'num_batches'`` (``int``) is a total number of batches in tarred dataset.
``'tar_files'`` is a list of paths to tar files relative to directory containing the metadata file. The items
``'punct_label_vocab_file'`` and ``'capit_label_vocab_file'`` are correspondingly paths to punctuation and
capitalization label vocabulary files. These paths are relative to directory containing the metadata file.
Every tar file contains objects written using ``webdataset.TarWriter``. Each object is a dictionary with two items:
``'__key__'`` and ``'batch.pyd'``. ``'__key__'`` is a name of a batch and ``'batch.pyd'`` is a pickled dictionary
which contains ``'input_ids'``, ``'subtokens_mask'``, ``'punct_labels'``, ``'capit_labels'``. ``'input_ids'`` is an
array containing ids of source tokens, ``'subtokens_mask'`` is a boolean array showing first tokens in words,
``'punct_labels'`` and ``'capit_labels'`` are arrays with ids of labels.
Metadata file should be passed to constructor of :class:`BertPunctuationCapitalizationTarredDataset` and the
instance of the class will handle iteration and constructing masks and token types for BERT model.
Args:
text_file (:obj:`Union[os.PathLike, str]`): a path to a file with dataset source. Dataset source is lowercase
text without punctuation. Number of lines in ``text_file`` has to be equal to the number of lines in
``labels_file``.
labels_file (:obj:`Union[os.PathLike, str]`): a path to a file with labels. Labels are given in the format
described in :ref:`NeMo Data Format<nemo-data-format-label>`.
output_dir (:obj:`Union[os.PathLike, str]`): a path to a directory where metadata file, tar files and
``'punct_label_ids.csv'`` and ``'capit_label_ids.csv'`` files are saved.
max_seq_length (:obj:`int`): Maximum number of subtokens in an input sequence. A source sequence which contains
too many subtokens is clipped to ``max_seq_length - 2`` subtokens and then [CLS] token is prepended to the
clipped sequence and [SEP] token is appended to the clipped sequence. The clipping is performed via removal
of subtokens in the end of a source sequence.
tokens_in_batch (:obj:`int`): maximum number of tokens in a batch including [CLS], [SEP], [UNK], and [PAD]
tokens. Before packing into batches source sequences are sorted by number of tokens in order to reduce
number of pad tokens. So the number of samples in a batch may vary.
lines_per_dataset_fragment (:obj:`int`): a number of lines processed by one worker during creation of tarred
dataset. A worker tokenizes ``lines_per_dataset_fragment`` lines and keeps the tokenized text and labels in RAM
before packing them into batches. Reducing ``lines_per_dataset_fragment`` reduces the amount of memory used by
this function.
num_batches_per_tarfile (:obj:`int`): a number of batches saved in a tar file. If you increase
``num_batches_per_tarfile``, then there will be less tar files in the dataset. There cannot be less than
``num_batches_per_tarfile`` batches in a tar file, and all excess batches are removed. Maximum number of
discarded batches is ``num_batches_per_tarfile - 1``.
tokenizer_name (:obj:`str`): a name of the tokenizer used for tokenization of source sequences. Possible
options are ``'sentencepiece'``, ``'word'``, ``'char'``, HuggingFace tokenizers. For more options see
function ``nemo.collections.nlp.modules.common.get_tokenizer``. The tokenizer must have properties
``cls_id``, ``pad_id``, ``sep_id``, ``unk_id``.
tokenizer_model (:obj:`Union[os.PathLike, str]`, `optional`): a path to a tokenizer model required for
``'sentencepiece'`` tokenizer.
vocab_file (:obj:`Union[os.PathLike, str]`, `optional`): a path to a vocabulary file which can be used in
``'word'``, ``'char'``, and HuggingFace tokenizers.
merges_file (:obj:`Union[os.PathLike, str]`, `optional`): a path to merges file which can be used in
HuggingFace tokenizers.
special_tokens (:obj:`Dict[str, str]`, `optional`): a dictionary with special tokens passed to constructors of
``'char'``, ``'word'``, ``'sentencepiece'``, and various HuggingFace tokenizers.
use_fast_tokenizer (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to use fast HuggingFace
tokenizer.
pad_label (:obj:`str`, `optional`, defaults to :obj:`'O'`): a pad label both for punctuation and
capitalization. This label is also a neutral label (used for marking words which do not need punctuation
and capitalization).
punct_label_ids (:obj:`Dict[str, int]`, `optional`): a dictionary which keys are punctuation labels and values
are label ids. The pad label ``pad_label`` has to have id ``0``. You can provide at most one of parameters
``punct_label_ids`` and ``punct_label_vocab_file``. If none of parameters ``punct_label_ids`` and
``punct_label_vocab_file`` is provided, then punctuation label ids will be inferred from ``labels_file``
file.
capit_label_ids (:obj:`Dict[str, int]`, `optional`): same as ``punct_label_ids`` for capitalization labels.
punct_label_vocab_file (:obj:`Union[os.PathLike, str]`, `optional`): a path to a file with punctuation labels.
These labels include pad label. The pad label has to be the first label in the file. Each label is written
on a separate line. Alternatively you can use ``punct_labels_ids`` parameter. If none of parameters
``punct_labels_ids`` and ``punct_label_vocab_file`` is provided, then punctuation label ids will be
inferred from ``labels_file`` file.
capit_label_vocab_file (:obj:`Union[os.PathLike, str]`, `optional`): same as ``punct_label_vocab_file`` for
capitalization labels.
tar_file_prefix (:obj:`str`, `optional`, defaults :obj:`'punctuation_capitalization'`): a string from which tar
file names start. The string can contain only characters ``A-Z``, ``a-z``, ``0-9``, ``_``, ``-``, ``.``.
n_jobs (:obj:`int`, `optional`): a number of workers for creating tarred dataset. If ``None``, then ``n_jobs``
is equal to number of CPUs.
audio_file (:obj:`Optional[Union[os.PathLike, str]]`, defaults to :obj:`None`): a path to a file with audio file paths, one path per line, if the dataset is lexical and audio.
use_audio (:obj:`bool`, `optional`, defaults to :obj:`False`): if set to ``True``, the dataset becomes lexical and audio rather than lexical only.
sample_rate (:obj:`int`, `optional`, defaults to :obj:`16000`): the target sample rate of audio files if ``use_audio`` is set to ``True``.
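Example (a minimal sketch; the file names and numeric values below are illustrative only, and
``'bert-base-uncased'`` is just one possible HuggingFace tokenizer)::
create_tarred_dataset(
text_file='train_text.txt',
labels_file='train_labels.txt',
output_dir='train_tarred',
max_seq_length=512,
tokens_in_batch=2048,
lines_per_dataset_fragment=10000,
num_batches_per_tarfile=100,
tokenizer_name='bert-base-uncased',
)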
"""
check_tar_file_prefix(tar_file_prefix, ValueError, 'tar_file_prefix')
if n_jobs is None:
n_jobs = mp.cpu_count()
text_file, labels_file = Path(text_file).expanduser(), Path(labels_file).expanduser()
output_dir = Path(output_dir).expanduser()
ds_params_str = DATASET_PARAMETERS_TMPL.format(
prefix=tar_file_prefix,
tokens_in_batch=tokens_in_batch,
max_seq_length=max_seq_length,
tokenizer=REPLACE_NOT_ALLOWED_CHARACTERS_IN_FILE_NAME.sub('-', tokenizer_name),
)
output_file_tmpl = ds_params_str + TAR_FINAL_TMPL
metadata_file_name = output_dir / ('metadata.' + ds_params_str + '.json')
remove_unexpected_files_and_dirs(output_dir, output_file_tmpl, metadata_file_name)
audio_start_bytes = None
if use_audio:
num_lines, text_start_bytes, label_start_bytes, audio_start_bytes = get_fragment_start_bytes(
text_file, labels_file, lines_per_dataset_fragment, audio_file
)
else:
num_lines, text_start_bytes, label_start_bytes = get_fragment_start_bytes(
text_file, labels_file, lines_per_dataset_fragment
)
if text_start_bytes:
output_dir.mkdir(parents=True, exist_ok=True)
else:
raise ValueError(f"Both {labels_file} and {text_file} are empty. Tarred dataset cannot be created.")
punct_label_ids, capit_label_ids = get_label_dictionaries(
labels_file,
label_start_bytes,
num_lines,
lines_per_dataset_fragment,
pad_label,
punct_label_ids,
capit_label_ids,
punct_label_vocab_file,
capit_label_vocab_file,
n_jobs,
)
with Progress(
num_lines, ["Tokenization", "Batch mark up", "Batch building", "Writing tarred dataset"], "query"
) as progress_queues:
Parallel(n_jobs=min(n_jobs, len(text_start_bytes)))(
delayed(process_fragment)(
text_file,
labels_file,
output_dir,
text_start_pos,
label_start_pos,
lines_per_dataset_fragment,
max_seq_length,
tokens_in_batch,
num_batches_per_tarfile,
tokenizer_name,
None if tokenizer_model is None else Path(tokenizer_model).expanduser(),
None if vocab_file is None else Path(vocab_file).expanduser(),
None if merges_file is None else Path(merges_file).expanduser(),
special_tokens,
use_fast_tokenizer,
pad_label,
punct_label_ids,
capit_label_ids,
fragment_idx,
*progress_queues,
audio_file,
sample_rate,
audio_file_start_pos,
use_audio,
)
for fragment_idx, (text_start_pos, label_start_pos, audio_file_start_pos) in enumerate(
zip(
text_start_bytes,
label_start_bytes,
audio_start_bytes if use_audio else [None for _ in range(len(text_start_bytes))],
)
)
)
repack_tar_files_with_not_enough_batches(output_dir, num_batches_per_tarfile)
create_metadata_file(output_dir, output_file_tmpl, metadata_file_name, num_batches_per_tarfile)
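# A minimal usage sketch, not part of the library: it assumes the enclosing function in this
# module is ``create_tarred_dataset`` (as referenced in the class docstring below) and that it
# accepts the keyword arguments documented above. All file paths are placeholders.
def _example_create_tarred_dataset():
    create_tarred_dataset(
        text_file='train_text.txt',
        labels_file='train_labels.txt',
        output_dir='tarred_dataset',
        max_seq_length=512,
        tokens_in_batch=2048,
        lines_per_dataset_fragment=10000,
        num_batches_per_tarfile=5,
        tokenizer_name='bert-base-uncased',
    )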
class BertPunctuationCapitalizationTarredDataset(IterableDataset):
"""
Punctuation capitalization dataset which allows not to load all data in memory simultaneously. A tarred dataset
is created from text and label files using script
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_
or function
:func:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.create_tarred_dataset`.
Args:
metadata_file (:obj:`Union[os.PathLike, str]`): a path to tarred dataset metadata file. Metadata file and files
referenced in metadata file are created by
`examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py
<https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/token_classification/data/create_punctuation_capitalization_tarred_dataset.py>`_.
Metadata file is a JSON file which contains ``'num_batches'``, ``'tar_files'``,
``'punct_label_vocab_file'``, ``'capit_label_vocab_file'`` items. The first item is total number of batches
in a dataset, the second is a list of paths to tar files relative to directory containing
``metadata_file``. Items ``'punct_label_vocab_file'`` and ``'capit_label_vocab_file'`` are paths to
``.csv`` files which contain unique punctuation and capitalization label vocabularies. Vocabulary file paths
are relative to directory containing the ``metadata_file``. Each line in ``'punct_label_vocab_file'`` and
``'capit_label_vocab_file'`` contains 1 label. The first lines in ``'punct_label_vocab_file'`` and
``'capit_label_vocab_file'`` files are neutral labels which also serve as pad labels. Neutral labels for
punctuation and capitalization must be equal to the ``pad_label`` parameter.
tokenizer (:obj:`TokenizerSpec`): a tokenizer instance used for tokenization of dataset source. A tokenizer
instance is used for getting ids of [CLS], [PAD], and [SEP] tokens which are used for masks creation.
pad_label (:obj:`str`): a label that is used for padding and for absence of punctuation or
capitalization. Used for checking items ``'punct_label_vocab_file'`` and ``'capit_label_vocab_file'`` of the
dictionary in ``metadata_file``.
label_info_save_dir (:obj:`Union[os.PathLike, str]`, `optional`): a path to a directory where label
vocabularies are copied when method :meth:`save_labels_and_get_file_paths` is called. This parameter is
useful if tarred dataset directory is read-only.
ignore_extra_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): whether to use only first token in a
word for loss computation and training. If set to ``True``, then loss will be computed only for the first
tokens of words.
ignore_start_end (:obj:`bool`, `optional`, defaults to :obj:`True`): whether to compute loss for [CLS] and
[SEP] tokens. If set to ``True``, then loss will not be computed for [CLS] and [SEP] tokens.
world_size (:obj:`int`, `optional`, defaults to :obj:`1`): a number of processes used for model training. It is
used together with a ``global_rank`` parameter to decide which tar files will be used in the current
process.
global_rank (:obj:`int`, `optional`, defaults to :obj:`0`): a number of current process in the pool of workers
used for model training. It is used together with ``world_size`` parameter to decide which tar files will
be used in the current process.
shuffle_n (:obj:`int`, `optional`, defaults to :obj:`1`): a number of shuffled batches in a buffer.
``shuffle_n`` batches are loaded into memory, shuffled, and then yielded by a dataset instance.
shard_strategy (:obj:`str`, defaults to :obj:``'scatter'``): Tarred dataset shard distribution strategy chosen as
a str value during ddp.
- ``'scatter'``: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- ``'replicate'``: Optional shard strategy, where each node gets the entire set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of :param:`shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tar files,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns definitions of module output ports. """
if self.use_audio:
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
'features': NeuralType(('B', 'T'), AudioSignal()),
'features_length': NeuralType(('B', 'T'), LengthsType()),
}
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'loss_mask': NeuralType(('B', 'T'), MaskType()),
'punct_labels': NeuralType(('B', 'T'), LabelsType()),
'capit_labels': NeuralType(('B', 'T'), LabelsType()),
}
def __init__(
self,
metadata_file: Union[os.PathLike, str],
tokenizer: TokenizerSpec,
pad_label: str,
label_info_save_dir: Optional[Union[os.PathLike, str]] = None,
ignore_extra_tokens: bool = False,
ignore_start_end: bool = True,
world_size: int = 1,
global_rank: int = 0,
shuffle_n: int = 1,
shard_strategy: str = "scatter",
use_audio: bool = False,
) -> None:
super().__init__()
valid_shard_strategies = ['scatter', 'replicate']
if shard_strategy not in valid_shard_strategies:
raise ValueError(
f"Invalid shard strategy of type {type(shard_strategy)} "
f"{repr(shard_strategy) if len(repr(shard_strategy)) < 100 else repr(shard_strategy)[:100] + '...'}! "
f"Allowed values are: {valid_shard_strategies}."
)
self.tokenizer = tokenizer
self.metadata_file = Path(metadata_file).expanduser()
if label_info_save_dir is None:
self.for_nemo_ckpt = self.metadata_file.parent / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
else:
self.for_nemo_ckpt = Path(label_info_save_dir).expanduser() / LABEL_ID_DIR_FOR_NEMO_CHECKPOINT
with open(self.metadata_file) as f:
self.metadata = json.load(f)
self.ignore_extra_tokens = ignore_extra_tokens
self.ignore_start_end = ignore_start_end
self.tar_files = []
for file_path in self.metadata['tar_files']:
file_path = Path(file_path).expanduser()
if file_path.is_absolute():
self.tar_files.append(str(file_path))
else:
self.tar_files.append(str(self.metadata_file.parent / file_path))
self.punct_label_vocab_file = self.metadata_file.parent / self.metadata[METADATA_PUNCT_LABEL_VOCAB_KEY]
self.capit_label_vocab_file = self.metadata_file.parent / self.metadata[METADATA_CAPIT_LABEL_VOCAB_KEY]
self.punct_label_ids = load_label_ids(self.punct_label_vocab_file)
self.capit_label_ids = load_label_ids(self.capit_label_vocab_file)
self.pad_label = pad_label
self._check_pad_label()
if shard_strategy == 'scatter':
logging.info("Tarred dataset shards will be scattered evenly across all nodes.")
if len(self.tar_files) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(self.tar_files)}) is not divisible "
f"by number of distributed workers ({world_size}). "
f"Some shards will not be used ({len(self.tar_files) % world_size})."
)
begin_idx = (len(self.tar_files) // world_size) * global_rank
end_idx = begin_idx + (len(self.tar_files) // world_size)
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
batches_per_tar = self.metadata['num_batches'] // len(self.tar_files)
self.tar_files = self.tar_files[begin_idx:end_idx]
self.length = batches_per_tar * len(self.tar_files) * world_size
elif shard_strategy == 'replicate':
logging.info("All tarred dataset shards will be replicated across all nodes.")
self.length = self.metadata['num_batches']
else:
raise ValueError(f"Invalid shard strategy! Allowed values are: {valid_shard_strategies}")
self._dataset = wds.WebDataset(urls=self.tar_files, nodesplitter=None).decode(
wds.handle_extension('.pyd', decode_pyd)
)
if shuffle_n > 0:
self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = self._dataset.to_tuple('__key__', 'batch.pyd').map(f=self._build_sample)
self.use_audio = use_audio
def _check_pad_label(self) -> None:
"""
Checks the condition that ``pad_label`` passed to this class constructor has ``0`` id in
``self.punct_label_ids`` and ``self.capit_label_ids`` loaded from tarred dataset.
"""
for label_ids, labels_file, task in [
(self.punct_label_ids, self.metadata[METADATA_PUNCT_LABEL_VOCAB_KEY], "punctuation"),
(self.capit_label_ids, self.metadata[METADATA_CAPIT_LABEL_VOCAB_KEY], "capitalization"),
]:
if label_ids[self.pad_label] != 0:
raise ValueError(
f"Pad label '{self.pad_label}' has non zero id {label_ids[self.pad_label]} in {task} "
f"ids dictionary loaded from {labels_file}."
)
def check_for_label_consistency_with_model_config(
self,
punct_label_ids: Optional[Dict[str, int]],
capit_label_ids: Optional[Dict[str, int]],
class_labels: DictConfig,
common_dataset_parameters_config: DictConfig,
) -> None:
"""
Checks that label ids loaded from tarred dataset are identical to those provided in
``model.common_dataset_parameters`` :ref:`config<common-dataset-parameters-config-label>` item. In addition,
this method checks that label ids set in attributes ``punct_label_ids`` and ``capit_label_ids`` of an instance
of
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`
are identical to label ids loaded from tarred dataset.
Args:
punct_label_ids: a content of ``punct_label_ids`` attribute of an instance of
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`
in which this tarred dataset is used.
capit_label_ids: a content of ``capit_label_ids`` attribute of an instance of
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`
in which this tarred dataset is used.
class_labels: a config item ``model.class_labels``. See more in description of
:ref:`class labels' config<class-labels-config-label>`.
common_dataset_parameters_config: a config item ``model.common_dataset_parameters``. See more in
of :ref:`common dataset parameters config<common-dataset-parameters-config-label>`.
"""
tarred_dataset_label_desc_tmpl = (
f'{{label_type}} labels loaded from tarred dataset with metadata file {self.metadata_file}'
)
if punct_label_ids is not None:
if punct_label_ids != self.punct_label_ids:
raise_not_equal_labels_error(
first_labels=self.punct_label_ids,
second_labels=punct_label_ids,
first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Punctuation'),
second_labels_desc="Punctuation labels stored in an attribute "
"`PunctuationCapitalizationModel.punct_label_ids`",
)
if capit_label_ids is not None:
if capit_label_ids != self.capit_label_ids:
raise_not_equal_labels_error(
first_labels=self.capit_label_ids,
second_labels=capit_label_ids,
first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Capitalization'),
second_labels_desc="Capitalization labels stored in an attribute"
"`PunctuationCapitalizationModel.capit_label_ids`",
)
if common_dataset_parameters_config.punct_label_ids is not None:
cfg_punct_label_ids = dict(common_dataset_parameters_config.punct_label_ids)
if cfg_punct_label_ids != self.punct_label_ids:
raise_not_equal_labels_error(
first_labels=self.punct_label_ids,
second_labels=cfg_punct_label_ids,
first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Punctuation'),
second_labels_desc='Punctuation labels stored in a config field '
'`model.common_dataset_parameters.punct_label_ids`',
)
if common_dataset_parameters_config.capit_label_ids is not None:
cfg_capit_label_ids = dict(common_dataset_parameters_config.capit_label_ids)
if cfg_capit_label_ids != self.capit_label_ids:
raise_not_equal_labels_error(
first_labels=self.capit_label_ids,
second_labels=cfg_capit_label_ids,
first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Capitalization'),
second_labels_desc='Capitalization labels stored in a config field '
'`model.common_dataset_parameters.capit_label_ids`',
)
if common_dataset_parameters_config.label_vocab_dir is not None:
label_vocab_dir = Path(common_dataset_parameters_config.label_vocab_dir).expanduser()
punct_label_vocab_file = label_vocab_dir / class_labels.punct_labels_file
file_punct_vocab = load_label_ids(punct_label_vocab_file)
if file_punct_vocab != self.punct_label_ids:
raise_not_equal_labels_error(
first_labels=self.punct_label_ids,
second_labels=file_punct_vocab,
first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Punctuation'),
second_labels_desc=f'labels stored in file {punct_label_vocab_file} passed in '
f'`model.common_dataset_parameters.punct_label_vocab_file`',
)
capit_label_vocab_file = label_vocab_dir / class_labels.capit_labels_file
file_capit_vocab = load_label_ids(capit_label_vocab_file)
if file_capit_vocab != self.capit_label_ids:
raise_not_equal_labels_error(
first_labels=self.capit_label_ids,
second_labels=file_capit_vocab,
first_labels_desc=tarred_dataset_label_desc_tmpl.format(label_type='Capitalization'),
second_labels_desc=f'labels stored in file {capit_label_vocab_file} passed in '
f'`model.common_dataset_parameters.capit_label_vocab_file`',
)
def save_labels_and_get_file_paths(
self, punct_labels_file_name: str, capit_labels_file_name: str
) -> Tuple[Path, Path]:
"""
Copies label vocabulary files for punctuation and capitalization into directory passed in the constructor
parameter ``label_info_save_dir``. The names of new
files are ``punct_labels_file_name`` and ``capit_labels_file_name``.
The signatures of this method and the signature of the method
:meth:`~nemo.collections.nlp.data.token_classification.BertPunctuationCapitalizationDataset.save_labels_and_get_file_paths`
must be identical.
Args:
punct_labels_file_name (:obj:`str`): a name of punctuation labels file
capit_labels_file_name (:obj:`str`): a name of capitalization labels file
Returns:
:obj:`Tuple[Path, Path]`: a tuple of 2 elements
- :obj:`pathlib.Path`: a path to the new punctuation label ids file
- :obj:`pathlib.Path`: a path to the new capitalization label ids file
"""
self.for_nemo_ckpt.mkdir(parents=True, exist_ok=True)
punct_label_ids_file = self.for_nemo_ckpt / punct_labels_file_name
capit_label_ids_file = self.for_nemo_ckpt / capit_labels_file_name
shutil.copy(str(self.punct_label_vocab_file), str(punct_label_ids_file))
shutil.copy(str(self.capit_label_vocab_file), str(capit_label_ids_file))
return punct_label_ids_file, capit_label_ids_file
def _build_sample(self, batch: Tuple[str, Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]:
"""
Takes batch loaded from tarred dataset and transforms it for passing to the model. Adds ``'segment_ids'``,
``'input_mask'``, ``'loss_mask'`` items to the batch.
Args:
batch: a tuple of 2 elements: batch name and a dictionary with ``'input_ids'``, ``'subtokens_mask'``,
``'punct_labels'``, ``'capit_labels'``. Batch name is not needed for training and inference and
is discarded.
Returns:
a batch in the form of a dictionary with items:
- ``'input_ids'``: a ``np.int32`` numpy array of shape ``[Batch, Time]``;
- ``'subtokens_mask'``: a boolean numpy array of shape ``[Batch, Time]``;
- ``'punct_labels'``: a ``np.int32`` numpy array of shape ``[Batch, Time]``;
- ``'capit_labels'``: a ``np.int32`` numpy array of shape ``[Batch, Time]``;
- ``'segment_ids'``: a ``np.int8`` numpy array of shape ``[Batch, Time]``;
- ``'input_mask'``: a boolean numpy array of shape ``[Batch, Time]``;
- ``'loss_mask'``: a boolean numpy array of shape ``[Batch, Time]``.
"""
_, batch = batch
batch_segment_ids, batch_input_mask, batch_loss_mask = create_masks_and_segment_ids(
batch['input_ids'],
batch['subtokens_mask'],
self.tokenizer.pad_id,
self.tokenizer.cls_id,
self.tokenizer.sep_id,
self.ignore_start_end,
self.ignore_extra_tokens,
)
batch['segment_ids'] = batch_segment_ids
batch['input_mask'] = batch_input_mask
batch['loss_mask'] = batch_loss_mask
return batch
def __iter__(self) -> Iterator[Dict[str, np.ndarray]]:
"""
Constructs an iterator of batches. The values of one batch dictionary are numpy arrays of identical shapes
``[Batch, Time]``.
Returns:
:obj:`Iterator[Dict[str, np.ndarray]]`: an iterator of batches with items:
- ``'input_ids'``: ``np.int32`` array containing encoded tokens,
- ``'subtokens_mask'``: ``bool`` array which elements are ``True`` if they correspond to first token in
a word,
- ``'punct_labels'``: ``np.int32`` array with encoded punctuation labels,
- ``'capit_labels'``: ``np.int32`` array with encoded capitalization labels,
- ``'segment_ids'``: ``np.int8`` array filled with zeros (BERT token types in HuggingFace terminology),
- ``'input_mask'``: ``bool`` array which elements are ``True`` if corresponding token is not a padding
token,
- ``'loss_mask'``: ``bool`` array which elements are ``True`` if loss is computed for corresponding
token. See more in description of constructor parameters ``ignore_start_end``, ``ignore_extra_tokens``.
"""
return self._dataset.__iter__()
def __len__(self) -> int:
return self.length
def collate_fn(self, batches: List[Dict[str, np.ndarray]]) -> Dict[str, torch.Tensor]:
"""
Returns the zeroth batch of the ``batches`` list passed for collating and casts ``'segment_ids'``, ``'punct_labels'``,
``'capit_labels'`` to types supported by
:class:`~nemo.collections.nlp.models.token_classification.punctuation_capitalization_model.PunctuationCapitalizationModel`.
All output tensors have shape ``[Batch, Time]``.
.. warning::
``batch size`` parameter of a PyTorch data loader and sampler has to be ``1``.
Args:
batches (:obj:`List[Dict[str, np.ndarray]]`): a list of batches passed for collating
Returns:
:obj:`Dict[str, torch.Tensor]`: a batch dictionary with following items (for detailed description of batch
items see method :meth:`__getitem__`):
- ``'input_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'subtokens_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'punct_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'capit_labels'`` (:obj:`torch.Tensor`): :obj:`torch.int64` tensor,
- ``'segment_ids'`` (:obj:`torch.Tensor`): :obj:`torch.int32` tensor,
- ``'input_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor,
- ``'loss_mask'`` (:obj:`torch.Tensor`): :obj:`torch.bool` tensor.
"""
batch = {k: torch.as_tensor(v) for k, v in batches[0].items()}
batch['segment_ids'] = batch['segment_ids'].int()
batch['punct_labels'] = batch['punct_labels'].long()
batch['capit_labels'] = batch['capit_labels'].long()
if self.use_audio:
batch['features'] = batch['features'].to(torch.float32)
return batch
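# A usage sketch, not part of the library: it illustrates the ``batch_size=1`` requirement
# stated in ``collate_fn`` above. The ``tokenizer`` argument is assumed to be any trained
# ``TokenizerSpec`` with ``cls_id``, ``sep_id``, and ``pad_id``; the metadata path is a
# placeholder produced by ``create_tarred_dataset``.
def _example_tarred_dataset_loader(tokenizer):
    from torch.utils.data import DataLoader

    dataset = BertPunctuationCapitalizationTarredDataset(
        metadata_file='tarred_dataset/metadata.json',
        tokenizer=tokenizer,
        pad_label='O',
        shuffle_n=64,
        world_size=1,
        global_rank=0,
    )
    # Batches are pre-built inside the tar files, so the loader must use batch_size=1
    # together with the dataset's collate_fn.
    return DataLoader(dataset, batch_size=1, collate_fn=dataset.collate_fn)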
| NeMo-main | nemo/collections/nlp/data/token_classification/punctuation_capitalization_tarred_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import re
import string
from typing import Dict
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
fill_class_weights,
get_freq_weights,
get_label_stats,
)
from nemo.utils import logging
__all__ = ['get_label_ids', 'create_text_and_labels']
def remove_punctuation(word: str):
"""
Removes all punctuation marks from a word except for the apostrophe ('),
which is often part of a word: don't, it's, and so on.
"""
all_punct_marks = string.punctuation.replace("'", '')
return re.sub('[' + all_punct_marks + ']', '', word)
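# A tiny illustration, not part of the library: the apostrophe is preserved while other
# punctuation marks are stripped.
def _example_remove_punctuation():
    assert remove_punctuation("don't,") == "don't"
    assert remove_punctuation("hello, world!") == 'hello world'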
def create_text_and_labels(output_dir: str, file_path: str, punct_marks: str = ',.?'):
"""
Create datasets for training and evaluation.
Args:
output_dir: path to the output data directory
file_path: path to file name
punct_marks: supported punctuation marks
The data will be split into 2 files: text.txt and labels.txt. \
Each line of the text.txt file contains text sequences, where words\
are separated with spaces. The labels.txt file contains \
corresponding labels for each word in text.txt, the labels are \
separated with spaces. Each line of the files should follow the \
format: \
[WORD] [SPACE] [WORD] [SPACE] [WORD] (for text.txt) and \
[LABEL] [SPACE] [LABEL] [SPACE] [LABEL] (for labels.txt).
"""
if not os.path.exists(file_path):
raise ValueError(f'{file_path} not found')
os.makedirs(output_dir, exist_ok=True)
base_name = os.path.basename(file_path)
labels_file = os.path.join(output_dir, 'labels_' + base_name)
text_file = os.path.join(output_dir, 'text_' + base_name)
with open(file_path, 'r') as f:
with open(text_file, 'w') as text_f:
with open(labels_file, 'w') as labels_f:
for line in f:
line = line.split()
text = ''
labels = ''
for word in line:
label = word[-1] if word[-1] in punct_marks else 'O'
word = remove_punctuation(word)
if len(word) > 0:
if word[0].isupper():
label += 'U'
else:
label += 'O'
word = word.lower()
text += word + ' '
labels += label + ' '
text_f.write(text.strip() + '\n')
labels_f.write(labels.strip() + '\n')
print(f'{text_file} and {labels_file} created from {file_path}.')
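# A worked example, not part of the library: for an input line "Hello, my friend." the text
# file receives "hello my friend" and the labels file receives ",U OO .O": the first
# character of each label is the punctuation mark following the word ('O' for none), the
# second is 'U' for a capitalized word and 'O' otherwise.
def _example_create_text_and_labels():
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        source = os.path.join(tmp_dir, 'dev.txt')
        with open(source, 'w') as f:
            f.write('Hello, my friend.\n')
        create_text_and_labels(tmp_dir, source)
        # Creates tmp_dir/text_dev.txt containing "hello my friend"
        # and tmp_dir/labels_dev.txt containing ",U OO .O".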
def get_label_ids(
label_file: str,
is_training: bool = False,
pad_label: str = 'O',
label_ids_dict: Dict[str, int] = None,
get_weights: bool = True,
class_labels_file_artifact='label_ids.csv',
):
"""
Generates a str-to-int label mapping for training data, or checks the correctness of the provided
label_ids_dict mapping for non-training files or when label_ids_dict is specified.
Args:
label_file: the path of the label file to process
is_training: indicates whether the label_file is used for training
pad_label: token used for padding
label_ids_dict: str label name to int ids mapping. Required for non-training data.
If specified, the check that all labels from label_file are present in label_ids_dict will be performed.
For training data, if label_ids_dict is None, a new mapping will be generated from label_file.
get_weights: set to True to calculate class weights, required for Weighted Loss.
class_labels_file_artifact: name of the file to save in .nemo
"""
if not os.path.exists(label_file):
raise ValueError(f'File {label_file} was not found.')
logging.info(f'Processing {label_file}')
if not is_training and label_ids_dict is None:
raise ValueError(
f'For non training data, label_ids_dict created during preprocessing of the training data '
f'should be provided'
)
# collect all labels from the label_file
data_dir = os.path.dirname(label_file)
unique_labels = {pad_label}  # set literal keeps a multi-character pad label intact
all_labels = []
with open(label_file, 'r') as f:
for line in f:
line = line.strip().split()
all_labels.extend(line)
unique_labels.update(line)
# check that all labels from label_file are present in the specified label_ids_dict
# or generate label_ids_dict from data (for training only)
if label_ids_dict:
logging.info(f'Using provided labels mapping {label_ids_dict}')
for name in unique_labels:
if name not in label_ids_dict:
raise ValueError(f'{name} class from {label_file} not found in the provided mapping: {label_ids_dict}')
else:
label_ids_dict = {pad_label: 0}
if pad_label in unique_labels:
unique_labels.remove(pad_label)
for label in sorted(unique_labels):
label_ids_dict[label] = len(label_ids_dict)
label_ids_filename = os.path.join(data_dir, class_labels_file_artifact)
if is_training:
with open(label_ids_filename, 'w') as f:
labels, _ = zip(*sorted(label_ids_dict.items(), key=lambda x: x[1]))
f.write('\n'.join(labels))
logging.info(f'Labels mapping {label_ids_dict} saved to : {label_ids_filename}')
# calculate label statistics
base_name = os.path.splitext(os.path.basename(label_file))[0]
stats_file = os.path.join(data_dir, f'{base_name}_label_stats.tsv')
if os.path.exists(stats_file) and not is_training and not get_weights:
logging.info(f'{stats_file} found, skipping stats calculation.')
else:
all_labels = [label_ids_dict[label] for label in all_labels]
logging.info(f'Three most popular labels in {label_file}:')
total_labels, label_frequencies, max_id = get_label_stats(all_labels, stats_file)
logging.info(f'Total labels: {total_labels}. Label frequencies - {label_frequencies}')
if get_weights:
class_weights_pkl = os.path.join(data_dir, f'{base_name}_weights.p')
if os.path.exists(class_weights_pkl):
class_weights = pickle.load(open(class_weights_pkl, 'rb'))
logging.info(f'Class weights restored from {class_weights_pkl}')
else:
class_weights_dict = get_freq_weights(label_frequencies)
logging.info(f'Class Weights: {class_weights_dict}')
class_weights = fill_class_weights(class_weights_dict, max_id)
pickle.dump(class_weights, open(class_weights_pkl, "wb"))
logging.info(f'Class weights saved to {class_weights_pkl}')
else:
class_weights = None
return label_ids_dict, label_ids_filename, class_weights
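# A usage sketch, not part of the library: build the label mapping from a training labels file
# and reuse it for evaluation data. File paths are placeholders.
def _example_get_label_ids():
    train_ids, train_ids_file, class_weights = get_label_ids(
        'data/labels_train.txt', is_training=True, pad_label='O', get_weights=True
    )
    # For non-training data the mapping created on the training data must be provided.
    dev_ids, _, _ = get_label_ids(
        'data/labels_dev.txt', is_training=False, pad_label='O', label_ids_dict=train_ids, get_weights=False
    )
    return train_ids, dev_ids, class_weights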
| NeMo-main | nemo/collections/nlp/data/token_classification/token_classification_utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import itertools
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import torch
from numpy import ndarray
from torch import Tensor
from torch.nn.utils.rnn import pad_sequence
from nemo.collections.common.tokenizers import TokenizerSpec
from nemo.collections.nlp.data import get_stats
from nemo.core import Dataset
from nemo.core.neural_types import ChannelType, Index, MaskType, NeuralType
from nemo.core.neural_types.elements import AudioSignal, BoolType, LengthsType
from nemo.utils import logging
try:
from nemo.collections.asr.parts.preprocessing import AudioSegment
ASR_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
ASR_AVAILABLE = False
def get_features_infer(
queries: List[str],
tokenizer: TokenizerSpec,
max_seq_length: int = 64,
step: Optional[int] = 8,
margin: Optional[int] = 16,
audio_queries: Optional[Union[List[bytes], List[str]]] = None,
target_sr: Optional[int] = None,
) -> Tuple[
List[List[int]],
List[List[int]],
List[List[int]],
List[List[int]],
List[int],
List[int],
List[bool],
List[bool],
Optional[List[float]],
Optional[List[int]],
]:
"""
Processes the data and returns features.
Args:
queries: text sequences
tokenizer: such as AutoTokenizer
max_seq_length: max sequence length minus 2 for [CLS] and [SEP]
step: relative shift of consequent segments into which long queries are split. Long queries are split into
segments which can overlap. Parameter ``step`` controls such overlapping. Imagine that queries are
tokenized into characters, ``max_seq_length=5``, and ``step=2``. In such a case query "hello" is
tokenized into segments ``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``.
margin: number of subtokens near edges of segments which are not used for punctuation and capitalization
prediction. The first segment does not have left margin and the last segment does not have right
margin. For example, if input sequence is tokenized into characters, ``max_seq_length=5``,
``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments
``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'],
['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions
computation, margins are removed. In the next list, subtokens which logits are not used for final
predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*],
['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``.
audio_queries (:obj:`List[str]`, `optional`): paths to audio files.
target_sr (:obj:`int`, `optional`): target sample rate for audios.
Returns:
all_input_ids: list of input ids of all segments
all_segment_ids: token type ids of all segments
all_input_mask: attention mask to use for BERT model
all_subtokens_mask: masks out all subwords besides the first one
all_quantities_of_preceding_words: number of words in query preceding a segment. Used for joining
predictions from overlapping segments.
all_query_ids: index of a query to which segment belongs
all_is_first: is segment first segment in a query
all_is_last: is segment last segment in a query
"""
st = []
stm = []
sent_lengths = []
audios = []
audio_queries = audio_queries if audio_queries else [None] * len(queries) # Dummy if no `audio_queries` passed
for i, (query, audio_query) in enumerate(zip(queries, audio_queries)):
subtokens, subtokens_mask = _get_subtokens_and_subtokens_mask(query, tokenizer)
sent_lengths.append(len(subtokens))
st.append(subtokens)
stm.append(subtokens_mask)
if audio_query:
if ASR_AVAILABLE:
if isinstance(audio_query, bytes):
audios.append(AudioSegment.from_file(io.BytesIO(audio_query), target_sr=target_sr))
elif isinstance(audio_query, str):
audios.append(AudioSegment.from_file(audio_query.strip(), target_sr=target_sr))
else:
raise ModuleNotFoundError(
'Nemo ASR was not installed, see https://github.com/NVIDIA/NeMo#installation for installation instructions'
)
audios = audios if len(audios) else [None] * len(st)
_check_max_seq_length_and_margin_and_step(max_seq_length, margin, step)
if max_seq_length > max(sent_lengths) + 2:
max_seq_length = max(sent_lengths) + 2
# If `max_seq_length` is greater than the maximum length of the input queries, parameters ``margin`` and ``step``
# will not be used.
step = 1
# Maximum number of word subtokens in a segment. The first and the last tokens in a segment are [CLS] and [SEP]
length = max_seq_length - 2
else:
# Maximum number of word subtokens in a segment. The first and the last tokens in a segment are [CLS] and [SEP]
length = max_seq_length - 2
step = min(length - margin * 2, step)
logging.info(f'Max length: {max_seq_length}')
get_stats(sent_lengths)
all_input_ids, all_segment_ids, all_subtokens_mask, all_input_mask = [], [], [], []
all_quantities_of_preceding_words, all_query_ids, all_is_first, all_is_last = [], [], [], []
all_audio_queries, all_audio_lengths = [], []
for q_i, (query_st, query_audio) in enumerate(zip(st, audios)):
q_inp_ids, q_segment_ids, q_subtokens_mask, q_inp_mask, q_quantities_of_preceding_words = [], [], [], [], []
q_audio_queries, q_audio_lengths = [], []
if query_audio and length < len(query_st):
logging.info(f'Ignoring query with id {q_i}')
continue
for i in range(0, max(len(query_st), length) - length + step, step):
subtokens = [tokenizer.cls_token] + query_st[i : i + length] + [tokenizer.sep_token]
q_inp_ids.append(tokenizer.tokens_to_ids(subtokens))
q_segment_ids.append([0] * len(subtokens))
q_subtokens_mask.append([False] + stm[q_i][i : i + length] + [False])
q_inp_mask.append([True] * len(subtokens))
q_quantities_of_preceding_words.append(np.count_nonzero(stm[q_i][:i]))
if query_audio:
samples = query_audio.samples
q_audio_queries.append(samples)
q_audio_lengths.append(len(samples))
all_input_ids.append(q_inp_ids)
all_segment_ids.append(q_segment_ids)
all_subtokens_mask.append(q_subtokens_mask)
all_input_mask.append(q_inp_mask)
all_quantities_of_preceding_words.append(q_quantities_of_preceding_words)
all_query_ids.append([q_i] * len(q_inp_ids))
all_is_first.append([True] + [False] * (len(q_inp_ids) - 1))
all_is_last.append([False] * (len(q_inp_ids) - 1) + [True])
if query_audio:
all_audio_queries.append(q_audio_queries)
all_audio_lengths.append(q_audio_lengths)
return (
list(itertools.chain(*all_input_ids)),
list(itertools.chain(*all_segment_ids)),
list(itertools.chain(*all_input_mask)),
list(itertools.chain(*all_subtokens_mask)),
list(itertools.chain(*all_quantities_of_preceding_words)),
list(itertools.chain(*all_query_ids)),
list(itertools.chain(*all_is_first)),
list(itertools.chain(*all_is_last)),
list(itertools.chain(*all_audio_queries)),
list(itertools.chain(*all_audio_lengths)),
)
def _check_max_seq_length_and_margin_and_step(max_seq_length: int, margin: int, step: int):
"""
Checks values of ``max_seq_length``, ``margin``, and ``step``.
Args:
max_seq_length: a segment length with ``[CLS]`` and ``[SEP]`` tokens
margin: a number of input tokens near edges of segments which are not used in punctuation and capitalization
prediction.
step: offset of consequent segments.
Returns:
None
"""
if max_seq_length < 3:
raise ValueError(
f"Parameter `max_seq_length={max_seq_length}` cannot be less than 3 because `max_seq_length` is a length "
f"of a segment with [CLS] and [SEP] tokens."
)
if margin < 0 or (margin > 0 and margin >= (max_seq_length - 2) // 2):
raise ValueError(
f"Parameter `margin` has to be not negative and less than `(max_seq_length - 2) // 2`. Don't forget about "
f"CLS and EOS tokens in the beginning and the end of segment. margin={margin}, "
f"max_seq_length={max_seq_length}"
)
if step <= 0:
raise ValueError(f"Parameter `step` has to be positive whereas step={step}")
if step > max_seq_length - 2 - 2 * margin:
logging.warning(
f"Parameter step={step} is too big. It will be reduced to `min(max_seq_length, <maximum query length> + 2) "
f"- 2 - 2 * margin`."
)
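# A small sanity sketch, not part of the library: with max_seq_length=5 at most margin=0 is
# accepted, because a valid margin must be smaller than (5 - 2) // 2 == 1.
def _example_check_segment_parameters():
    _check_max_seq_length_and_margin_and_step(max_seq_length=5, margin=0, step=2)  # passes
    try:
        _check_max_seq_length_and_margin_and_step(max_seq_length=5, margin=1, step=2)
    except ValueError:
        pass  # margin=1 >= (5 - 2) // 2, so the check rejects it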
def _get_subtokens_and_subtokens_mask(query: str, tokenizer: TokenizerSpec) -> Tuple[List[str], List[bool]]:
"""
Tokenizes an input query into subtokens and creates a subtokens mask. The subtokens mask is an array of the same
length as the subtokens array; an element of the mask is ``True`` if the corresponding subtoken is the first
subtoken of a word.
Args:
query: a string that will be tokenized
tokenizer: an instance of tokenizer
Returns:
subtokens: list of subtokens
subtokens_mask: list of bools; ``True`` marks the first subtoken of a word
"""
words = query.strip().split()
subtokens = []
subtokens_mask = []
for j, word in enumerate(words):
word_tokens = tokenizer.text_to_tokens(word)
subtokens.extend(word_tokens)
subtokens_mask.append(True)
subtokens_mask.extend([False] * (len(word_tokens) - 1))
return subtokens, subtokens_mask
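# An illustration, not part of the library: with a toy tokenizer that splits every word into
# single characters, the mask marks only the first character of each word. Real usage passes
# a trained ``TokenizerSpec`` instance instead of the toy class below.
def _example_subtokens_mask():
    class _ToyCharTokenizer:
        def text_to_tokens(self, text):
            return list(text)

    subtokens, mask = _get_subtokens_and_subtokens_mask('hi you', _ToyCharTokenizer())
    # subtokens == ['h', 'i', 'y', 'o', 'u']
    # mask == [True, False, True, False, False]
    return subtokens, mask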
class BertPunctuationCapitalizationInferDataset(Dataset):
"""
Creates dataset to use during inference for punctuation and capitalization tasks with a pretrained model.
For dataset to use during training with labels, see
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_dataset.BertPunctuationCapitalizationDataset`
and
:class:`~nemo.collections.nlp.data.token_classification.punctuation_capitalization_tarred_dataset.BertPunctuationCapitalizationTarredDataset`.
Parameters ``max_seq_length``, ``step``, ``margin`` are for controlling the way queries are split into segments
which then processed by the model. Parameter ``max_seq_length`` is a length of a segment after tokenization
including special tokens [CLS] in the beginning and [SEP] in the end of a segment. Parameter ``step`` is shift
between consequent segments. Parameter ``margin`` is used to exclude negative effect of subtokens near
borders of segments which have only one side context.
Args:
queries (:obj:`List[str]`): list of sequences.
tokenizer (:obj:`TokenizerSpec`): a tokenizer which was used for model training. It should have properties
``cls_id``, ``sep_id``, ``unk_id``, ``pad_id``.
max_seq_length (:obj:`int`, `optional`, defaults to :obj:`128`): max sequence length which includes [CLS] and
[SEP] tokens
step (:obj:`int`, `optional`, defaults to :obj:`8`): relative shift of consequent segments into which long
queries are split. Long queries are split into segments which can overlap. Parameter ``step`` controls such
overlapping. Imagine that queries are tokenized into characters, ``max_seq_length=5``, and ``step=2``. In
such a case query "hello" is tokenized into segments
``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'l', 'l', 'o', '[SEP]']]``.
margin (:obj:`int`, `optional`, defaults to :obj:`16`): number of subtokens in the beginning and the end of
segments which are not used for prediction computation. The first segment does not have left margin and the
last segment does not have right margin. For example, if input sequence is tokenized into characters,
``max_seq_length=5``, ``step=1``, and ``margin=1``, then query "hello" will be tokenized into segments
``[['[CLS]', 'h', 'e', 'l', '[SEP]'], ['[CLS]', 'e', 'l', 'l', '[SEP]'],
['[CLS]', 'l', 'l', 'o', '[SEP]']]``. These segments are passed to the model. Before final predictions
computation, margins are removed. In the next list, subtokens which logits are not used for final
predictions computation are marked with asterisk: ``[['[CLS]'*, 'h', 'e', 'l'*, '[SEP]'*],
['[CLS]'*, 'e'*, 'l', 'l'*, '[SEP]'*], ['[CLS]'*, 'l'*, 'l', 'o', '[SEP]'*]]``.
"""
@property
def output_types(self) -> Optional[Dict[str, NeuralType]]:
"""Returns neural types of :meth:`collate_fn` output."""
if self.use_audio:
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'quantities_of_preceding_words': NeuralType(('B',), Index()),
'query_ids': NeuralType(('B',), Index()),
'is_first': NeuralType(('B',), BoolType()),
'is_last': NeuralType(('B',), BoolType()),
'features': NeuralType(('B', 'T'), AudioSignal()),
'features_length': NeuralType(('B', 'T'), LengthsType()),
}
return {
'input_ids': NeuralType(('B', 'T'), ChannelType()),
'segment_ids': NeuralType(('B', 'T'), ChannelType()),
'input_mask': NeuralType(('B', 'T'), MaskType()),
'subtokens_mask': NeuralType(('B', 'T'), MaskType()),
'quantities_of_preceding_words': NeuralType(('B',), Index()),
'query_ids': NeuralType(('B',), Index()),
'is_first': NeuralType(('B',), BoolType()),
'is_last': NeuralType(('B',), BoolType()),
}
def __init__(
self,
queries: List[str],
tokenizer: TokenizerSpec,
max_seq_length: int = 64,
step: int = 8,
margin: int = 16,
audio_queries: Optional[Union[List[bytes], List[str]]] = None,
target_sr: Optional[int] = None,
):
features = get_features_infer(
queries=queries,
max_seq_length=max_seq_length,
tokenizer=tokenizer,
step=step,
margin=margin,
audio_queries=audio_queries,
target_sr=target_sr,
)
self.all_input_ids: List[List[int]] = features[0]
self.all_segment_ids: List[List[int]] = features[1]
self.all_input_mask: List[List[int]] = features[2]
self.all_subtokens_mask: List[List[int]] = features[3]
self.all_quantities_of_preceding_words: List[int] = features[4]
self.all_query_ids: List[int] = features[5]
self.all_is_first: List[bool] = features[6]
self.all_is_last: List[bool] = features[7]
self.all_audio_queries: Optional[List[List[float]]] = features[8]
self.all_audio_lengths: Optional[List[List[int]]] = features[9]
self.use_audio = audio_queries is not None
def __len__(self) -> int:
return len(self.all_input_ids)
def collate_fn(
self,
batch: List[
Tuple[
np.ndarray,
np.ndarray,
np.ndarray,
np.ndarray,
int,
int,
bool,
bool,
Optional[np.ndarray],
Optional[np.ndarray],
]
],
) -> Union[
Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any],
Tuple[Tensor, Tensor, Tensor, Tensor, Any, Any, Any, Any, Any, Any],
]:
"""
Collates samples into batches.
Args:
batch (:obj:`List[tuple]`): a list of samples returned by :meth:`__getitem__` method.
Returns:
:obj:`Tuple[torch.Tensor (x4), Tuple[int, ...] (x2), Tuple[bool, ...] (x2)]`: a tuple containing 8
elements:
- ``input_ids`` (:obj:`torch.Tensor`): an integer tensor of shape ``[Batch, Time]`` containing encoded
input text.
- ``segment_ids`` (:obj:`torch.Tensor`): an integer tensor of shape ``[Batch, Time]`` filled with zeros.
- ``input_mask`` (:obj:`torch.Tensor`): a boolean tensor of shape ``[Batch, Time]`` which elements are
``True`` if corresponding token is not a padding token.
- ``subtokens_mask`` (:obj:`torch.Tensor`): a boolean tensor of shape ``[Batch, Time]`` which elements
are ``True`` if the corresponding token is the first token in a word.
- ``quantities_of_preceding_words`` (:obj:`Tuple[int, ...]`): a tuple containing number of words in
a query preceding current segment.
- ``query_ids`` (:obj:`Tuple[int, ...]`): a tuple containing indices of queries to which segments belong.
- ``is_first`` (:obj:`Tuple[bool, ...]`): a tuple of booleans which elements are ``True`` if corresponding
segment is the first segment in a query.
- ``is_last`` (:obj:`Tuple[bool, ...]`): a tuple of booleans which elements are ``True`` if corresponding
segment is the last segment in a query.
"""
if not self.use_audio:
inp_ids, segment_ids, inp_mask, st_mask, n_preceding, query_ids, is_first, is_last = zip(*batch)
return (
pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),
pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),
pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),
pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),
n_preceding,
query_ids,
is_first,
is_last,
)
(
inp_ids,
segment_ids,
inp_mask,
st_mask,
n_preceding,
query_ids,
is_first,
is_last,
features,
features_length,
) = zip(*batch)
return (
pad_sequence([torch.tensor(x) for x in inp_ids], batch_first=True, padding_value=0),
pad_sequence([torch.tensor(x) for x in segment_ids], batch_first=True, padding_value=0),
pad_sequence([torch.tensor(x) for x in inp_mask], batch_first=True, padding_value=0),
pad_sequence([torch.tensor(x) for x in st_mask], batch_first=True, padding_value=0),
n_preceding,
query_ids,
is_first,
is_last,
pad_sequence([torch.tensor(x) for x in features], batch_first=True, padding_value=0).float(),
torch.tensor(features_length, dtype=torch.long),
)
def __getitem__(
self, idx: int
) -> Union[
Tuple[ndarray, ndarray, ndarray, ndarray, int, int, bool, bool],
Tuple[ndarray, ndarray, ndarray, ndarray, int, int, bool, bool, ndarray, List[int]],
]:
"""
Returns batch used for punctuation and capitalization inference.
Args:
idx (:obj:`int`): a batch index
Returns:
:obj:`Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int, bool, bool]`: a tuple containing:
- ``input_ids`` (:obj:`np.ndarray`): an integer numpy array of shape ``[Time]``. Ids of word
subtokens encoded using tokenizer passed in constructor ``tokenizer`` parameter.
- ``segment_ids`` (:obj:`np.ndarray`): an integer zeros numpy array of shape ``[Time]``. Indices
of segments for BERT model (token types in HuggingFace terminology).
- ``input_mask`` (:obj:`np.ndarray`): a boolean numpy array of shape ``[Time]``. An element of
this array is ``True`` if corresponding token is not padding token.
- ``subtokens_mask`` (:obj:`np.ndarray`): a boolean numpy array of shape ``[Time]``. An element
equals ``True`` if corresponding token is the first token in a word and ``False`` otherwise. For
example, if input query ``"language processing"`` is tokenized into
``["[CLS]", "language", "process", "ing", "SEP"]``, then ``subtokens_mask`` will be
``[False, True, True, False, False]``.
- ``quantities_of_preceding_words`` (:obj:`int`): a number of words preceding current segment in the
query to which the segment belongs. This parameter is used for uniting predictions from adjacent
segments.
- ``query_ids`` (:obj:`int`): an index of query to which the segment belongs
- ``is_first`` (:obj:`bool`): whether a segment is the first segment in a query. The left margin of
the first segment in a query is not removed.
- ``is_last`` (:obj:`bool`): whether a segment is the last segment in a query. The right margin of the last
segment in a query is not removed.
"""
if not self.use_audio:
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.float32),
np.array(self.all_subtokens_mask[idx]),
self.all_quantities_of_preceding_words[idx],
self.all_query_ids[idx],
self.all_is_first[idx],
self.all_is_last[idx],
)
return (
np.array(self.all_input_ids[idx]),
np.array(self.all_segment_ids[idx]),
np.array(self.all_input_mask[idx], dtype=np.float32),
np.array(self.all_subtokens_mask[idx]),
self.all_quantities_of_preceding_words[idx],
self.all_query_ids[idx],
self.all_is_first[idx],
self.all_is_last[idx],
np.array(self.all_audio_queries[idx], dtype=float),
self.all_audio_lengths[idx],
)
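# A minimal inference sketch, not part of the library: queries are split into overlapping
# segments according to ``max_seq_length``, ``step``, and ``margin``. Unlike the tarred
# training dataset, items here are single segments, so any batch size may be used with this
# dataset's ``collate_fn``. The ``tokenizer`` argument is assumed to be a trained
# ``TokenizerSpec`` instance.
def _example_infer_dataloader(tokenizer):
    from torch.utils.data import DataLoader

    dataset = BertPunctuationCapitalizationInferDataset(
        queries=['what can i do for you today', 'how are you'],
        tokenizer=tokenizer,
        max_seq_length=64,
        step=8,
        margin=16,
    )
    return DataLoader(dataset, batch_size=32, collate_fn=dataset.collate_fn)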
| NeMo-main | nemo/collections/nlp/data/token_classification/punctuation_capitalization_infer_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
from typing import Optional
import braceexpand
import numpy as np
import webdataset as wd
from torch.utils.data import Dataset, IterableDataset
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.data_utils import dataset_to_ids
from nemo.utils import logging
__all__ = ['L2RLanguageModelingDataset', 'TarredL2RLanguageModelingDataset']
class L2RLanguageModelingDataset(Dataset):
"""
Dataset for training and evaluating left-to-right language models.
Args:
tokenizer: tokenizer, such as WordTokenizer or CharTokenizer
dataset: path to data
max_seq_length: maximum sequence length (in tokens) of input tensors
batch_step: distance (in tokens) between two successive sequences of
the text. By default, it is equal to max_seq_length which corresponds
to splitting text into disjoint segments covering full dataset
use_cache: bool value, defaults to False. Determines whether the preprocessed,
tokenized dataset should be cached into a pickle file. If true, cache is saved
at the path provided in `dataset`.
"""
def __init__(
self,
tokenizer: TokenizerSpec,
dataset: str,
max_seq_length: Optional[int] = 512,
batch_step: Optional[int] = None,
use_cache: bool = False,
):
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.batch_step = batch_step or self.max_seq_length
ids = dataset_to_ids(dataset, tokenizer, cache_ids=use_cache, add_bos_eos=False)
self.ids = np.array([j for i in ids for j in i])
def __len__(self):
return (len(self.ids) - self.max_seq_length) // self.batch_step
def __getitem__(self, idx):
left = idx * self.batch_step
right = left + self.max_seq_length
src_ids = self.ids[left:right]
labels = self.ids[left + 1 : right + 1]
src_mask = (src_ids != self.tokenizer.pad_id).astype(np.float32)
return src_ids, src_mask, labels
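# A worked example of the windowing above, not part of the library: with a token stream of
# length 10, max_seq_length=4, and batch_step=2, __len__ is (10 - 4) // 2 == 3, and item 1
# takes source tokens ids[2:6] with labels ids[3:7], i.e. the labels are the sources shifted
# one position to the right.
def _example_l2r_windowing():
    ids = np.arange(10)
    max_seq_length, batch_step, idx = 4, 2, 1
    left = idx * batch_step
    src_ids = ids[left : left + max_seq_length]          # [2, 3, 4, 5]
    labels = ids[left + 1 : left + max_seq_length + 1]   # [3, 4, 5, 6]
    return src_ids, labels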
class TarredL2RLanguageModelingDataset(IterableDataset):
"""
A similar Dataset to the L2RLanguageModelingDataset, but which loads tarred tokenized numpy files.
Accepts a single JSON metadata manifest file as well as the path(s) to the tarball(s) containing the tokenized numpy files.
The manifest should contain information such as the number of shards, the number of tokens in the corpus,
and the number of tokens contained within each shard of the tarfile(s).
Valid formats for the text_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/text.tar' or 'path/to/text_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['text_1.tar', 'text_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
Additionally, please note that the len() of this DataLayer is assumed to be the number of tokens
of the text data. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
text_tar_filepaths: Either a list of tokenized text tarball filepaths, or a
string (can be brace-expandable).
metadata_path (str): Path to the metadata manifest.
tokenizer: tokenizer, such as WordTokenizer or CharTokenizer
dataset: path to data
max_seq_length: maximum sequence length (in tokens) of input tensors
batch_step: distance (in tokens) between two successive sequences of
the text. By default, it is equal to max_seq_length which corresponds
to splitting text into disjoint segments covering full dataset
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets the entire set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
dataset independently of other nodes, and reduces dependence on value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
data points! As such, there is no assured guarantee that all samples in the dataset will be
sampled at least once during 1 epoch. Scattered strategy, on the other hand, on specific
occasions (when the number of shards is not divisible with ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
"""
def __init__(
self,
text_tar_filepaths: str,
metadata_path: str,
tokenizer,
max_seq_length: int = 512,
batch_step: int = None,
shuffle_n: int = 1,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 0,
):
super(TarredL2RLanguageModelingDataset, self).__init__()
self.tokenizer = tokenizer
self.max_seq_length = max_seq_length
self.batch_step = batch_step or self.max_seq_length
valid_shard_strategies = ['scatter', 'replicate']
if shard_strategy not in valid_shard_strategies:
raise ValueError(
f"Invalid shard strategy of type {type(shard_strategy)} "
f"{repr(shard_strategy) if len(repr(shard_strategy)) < 100 else repr(shard_strategy)[:100] + '...'}! "
f"Allowed values are: {valid_shard_strategies}."
)
with open(metadata_path, 'r') as f:
metadata = json.load(f)
self.metadata = metadata
if isinstance(text_tar_filepaths, str):
# Replace '(', '[', '<' and '_OP_' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "{")
# Replace ')', ']', '>' and '_CL_' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "}")
if isinstance(text_tar_filepaths, str):
# Brace expand
text_tar_filepaths = list(braceexpand.braceexpand(text_tar_filepaths))
if shard_strategy == 'scatter':
logging.info("All tarred dataset shards will be scattered evenly across all nodes.")
if len(text_tar_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(text_tar_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size})."
)
begin_idx = (len(text_tar_filepaths) // world_size) * global_rank
end_idx = begin_idx + (len(text_tar_filepaths) // world_size)
text_tar_filepaths = text_tar_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
elif shard_strategy == 'replicate':
logging.info("All tarred dataset shards will be replicated across all nodes.")
else:
raise ValueError(f"Invalid shard strategy ! Allowed values are : {valid_shard_strategies}")
self.tarpath = text_tar_filepaths
# Put together WebDataset
self._dataset = wd.WebDataset(urls=text_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = self._dataset.rename(npy='npy', key='__key__').to_tuple('npy', 'key').map(f=self._build_sample)
def _build_sample(self, tup):
# Load file
npy, filepath = tup
npy = io.BytesIO(npy)
data = np.load(npy) # loads np.int64 vector
npy.close()
# Select random contiguous subsegment
idx = np.random.randint(0, (len(data) - self.max_seq_length) // self.batch_step)
# Slice of data chunk
left = idx * self.batch_step
right = left + self.max_seq_length
data = data[left : right + 1]
# Create batch
src_ids = data[:-1]
labels = data[1:]
src_mask = (src_ids != self.tokenizer.pad_id).astype(np.float32)
return src_ids, src_mask, labels
def __iter__(self):
# We need to wrap an infinite generator since the actual files
        # within the tar files contain large chunks of contiguous data.
# This prevents PTL from early exiting the train loop after exhausting
# all of the files in one iteration (though the actual dataset is many
# times larger due to each file containing a large chunk of data).
dl_iter = iter(self._dataset)
while True:
try:
batch = next(dl_iter)
yield batch
except StopIteration:
dl_iter = iter(self._dataset)
continue
def __len__(self):
return (self.metadata['num_text'] - self.max_seq_length) // self.batch_step
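# Illustrative sketch (not part of the original module): how `_build_sample` above turns a
# long token vector into a (src_ids, src_mask, labels) triple for next-token prediction.
# `PAD_ID`, `MAX_SEQ_LENGTH` and `BATCH_STEP` are made-up values used only for this demo.
if __name__ == "__main__":
    PAD_ID, MAX_SEQ_LENGTH, BATCH_STEP = 0, 8, 4
    data = np.arange(1, 33, dtype=np.int64)  # stand-in for one tokenized document
    # pick a window start aligned to BATCH_STEP, as in `_build_sample`
    idx = np.random.randint(0, (len(data) - MAX_SEQ_LENGTH) // BATCH_STEP)
    chunk = data[idx * BATCH_STEP : idx * BATCH_STEP + MAX_SEQ_LENGTH + 1]
    src_ids, labels = chunk[:-1], chunk[1:]  # labels are the inputs shifted by one token
    src_mask = (src_ids != PAD_ID).astype(np.float32)
    print(src_ids, labels, src_mask)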
| NeMo-main | nemo/collections/nlp/data/language_modeling/l2r_lm_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import array
import os
import pickle
import random
from typing import Dict, List, Optional
import h5py
import numpy as np
from torch.utils.data import DataLoader, DistributedSampler
from tqdm import tqdm
from nemo.collections.nlp.data.data_utils.data_preprocessing import find_newlines, load_data_indices
from nemo.core.classes import Dataset
__all__ = ['BertPretrainingDataset', 'BertPretrainingPreprocessedDataloader']
def load_h5(input_file: str):
return h5py.File(input_file, "r")
class BertPretrainingDataset(Dataset):
"""
    Dataset for BERT pretraining when using on-the-fly data preprocessing, including tokenization
"""
def __init__(
self,
tokenizer: object,
data_file: str,
max_seq_length: Optional[int] = 128,
mask_prob: Optional[float] = 0.15,
short_seq_prob: Optional[float] = 0.1,
seq_a_ratio: Optional[float] = 0.6,
sentence_idx_file: Optional[str] = None,
):
"""
Args:
tokenizer: tokenizer
data_file: path to data
max_seq_length: maximum sequence length of input tensors
            mask_prob: probability to mask a token
short_seq_prob: probability to create a sequence shorter than max_seq_length
seq_a_ratio: ratio between lengths of first and second sequence
sentence_idx_file: sentence indices file for caching
"""
self.tokenizer = tokenizer
# Loading enormous datasets into RAM isn't always feasible -- for
# example, the pubmed corpus is 200+ GB, which doesn't fit into RAM on
# most computers. To get around this, we store the indices of newlines
# in each file so we can seek to and retrieve sentences immediately
# from main memory when needed during training.
# Try and load sentence indices file if already exists
sentence_indices, sentence_idx_file, data_dir = load_data_indices(
sentence_idx_file, data_file, "sentence_indices"
)
        # If the sentence indices file doesn't exist, generate and store sentence indices
if sentence_indices is None:
sentence_indices = {}
filenames = [data_file]
for filename in tqdm(filenames):
with open(filename, "rb") as f:
contents = f.read()
newline_indices = find_newlines(contents)
if os.path.isdir(data_dir):
# Only keep the parts of the filepath that are invariant to
# the dataset's location on disk
filename = os.path.basename(filename)
# In python, arrays are much more space-efficient than lists
sentence_indices[filename] = array.array("I", newline_indices)
# Save sentence indices so we don't have to do this again
with open(sentence_idx_file, "wb") as f:
pickle.dump(sentence_indices, f)
corpus_size = 0
empty_files = []
# Find total number of newlines across entire corpus and remove files
# without any newlines
for filename in sentence_indices:
if len(sentence_indices[filename]) <= 1:
empty_files.append(filename)
else:
corpus_size += len(sentence_indices[filename])
for filename in empty_files:
del sentence_indices[filename]
self.corpus_size = corpus_size
self.dataset = data_dir
self.filenames = list(sentence_indices.keys())
self.mask_probability = mask_prob
self.max_seq_length = max_seq_length
self.sentence_indices = sentence_indices
self.vocab_size = self.tokenizer.vocab_size
self.short_seq_prob = short_seq_prob
self.seq_a_ratio = seq_a_ratio
def __len__(self):
return self.corpus_size
def __getitem__(self, idx: int, min_doc_length: Optional[int] = 16):
# Each sequence has three special tokens, as follows:
# tokenizer.cls_token <document a> tokenizer.sep_token <document b> tokenizer.eos_token
num_special_tokens = 3
max_num_tokens = self.max_seq_length - num_special_tokens
target_seq_length = max_num_tokens
if random.random() < self.short_seq_prob:
# TODO: maybe introduce an argument to control this.
target_seq_length = random.randint(2, max_num_tokens)
# prefer the seq_a to be slightly longer than seq_b, 0.6 by default
target_seq_length_a = int(round(target_seq_length * self.seq_a_ratio))
target_seq_length_b = target_seq_length - target_seq_length_a
def get_document(filepath, offset):
# Retrieve a specific line from a file and return as a document
if os.path.isdir(self.dataset):
filepath = os.path.join(self.dataset, filepath)
with open(filepath, "rb") as f:
f.seek(offset)
doc_text = f.readline()[:-1].decode("utf-8", errors="ignore")
document = self.tokenizer.text_to_ids(doc_text)
return document
def match_target_seq_length(
document: str, target_seq_length: int, filename: str, line_idx: int, sentence_indices: Dict[str, dict]
):
# If document is shorter than target sequence length,
# append the next line or take a random line as replacement.
num_lines = len(sentence_indices[filename])
while len(document) < target_seq_length:
if line_idx < (num_lines - 1):
# append the next line
line_idx += 1
else:
# current line is the last line, take a random one
line_idx = random.randrange(num_lines)
document = []
offset = sentence_indices[filename][line_idx]
document += get_document(filename, offset)
return document, line_idx
# Take sequence A from a random file and a random line
a_filename = random.choice(self.filenames)
a_line_idx = random.randrange(len(self.sentence_indices[a_filename]))
a_line_offset = self.sentence_indices[a_filename][a_line_idx]
a_document = get_document(a_filename, a_line_offset)
a_document, a_line_idx = match_target_seq_length(
a_document, target_seq_length_a, a_filename, a_line_idx, self.sentence_indices
)
is_last_line = a_line_idx >= (len(self.sentence_indices[a_filename]) - 1)
# About 50% of the time, B is a random sentence from the corpus
take_random_b = (random.random() < 0.5) or is_last_line
if take_random_b:
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
b_filename = random.choice(self.filenames)
b_line_idx = random.choice(range(len(self.sentence_indices[b_filename])))
if b_filename != a_filename:
break
else:
# Take another line from the same file
b_line_pos = self.sentence_indices[b_filename][b_line_idx]
a_line_pos = self.sentence_indices[a_filename][a_line_idx]
# TODO unclear about the following check
if abs(b_line_pos - a_line_pos) > max_num_tokens:
break
else:
pass
else:
b_filename = a_filename
b_line_idx = a_line_idx + 1
is_next = int(not take_random_b)
b_line_pos = self.sentence_indices[b_filename][b_line_idx]
b_document = get_document(b_filename, b_line_pos)
b_document, b_line_idx = match_target_seq_length(
b_document, target_seq_length_b, b_filename, b_line_idx, self.sentence_indices
)
def truncate_seq_pair(a, b, max_num_tokens):
# Truncates a pair of sequences to a maximum sequence length
while (len(a) + len(b)) > max_num_tokens:
# Truncate the longer sequence
if len(a) > len(b):
trunc_document = a
else:
trunc_document = b
if len(trunc_document) <= 1:
raise ValueError(
"Input text corpora probably too small. "
"Failed to truncate sequence pair to "
"maximum sequence legnth."
)
# Randomly truncate from the front or the back
if random.random() < 0.5:
del trunc_document[0]
else:
trunc_document.pop()
truncate_seq_pair(a_document, b_document, max_num_tokens)
output_ids = (
[self.tokenizer.cls_id] + a_document + [self.tokenizer.sep_id] + b_document + [self.tokenizer.eos_id]
)
input_ids, output_mask = self.mask_ids(output_ids)
        input_mask = np.zeros(self.max_seq_length, dtype=np.int64)
        input_mask[: len(input_ids)] = 1
        input_type_ids = np.zeros(self.max_seq_length, dtype=np.int64)
input_type_ids[len(a_document) + 2 : len(output_ids) + 1] = 1
padding_length = max(0, self.max_seq_length - len(input_ids))
if padding_length > 0:
input_ids.extend([self.tokenizer.pad_id] * padding_length)
output_ids.extend([self.tokenizer.pad_id] * padding_length)
output_mask.extend([0] * padding_length)
# TODO: wrap the return value with () for consistent style.
return (
np.array(input_ids),
input_type_ids,
            np.array(input_mask, dtype=np.int64),
np.array(output_ids),
np.array(output_mask, dtype=np.float32),
is_next,
)
def mask_ids(self, ids: List[int]):
"""
Args:
ids: list of token ids representing a chunk of text
Returns:
masked_ids: list of input tokens with some of the entries masked
according to the following protocol from the original BERT paper:
each token is masked with a probability of 15% and is replaced with
1) the [MASK] token 80% of the time,
2) random token 10% of the time,
3) the same token 10% of the time.
            output_mask: list of binary variables which indicate which tokens have
                been masked (to calculate the loss function for these tokens only)
"""
# Whole-word masking by default, as it gives better performance.
cand_indexes = [[ids[0]]]
for tid in ids[1:]:
token = self.tokenizer.ids_to_tokens([tid])[0]
is_suffix = token.startswith('\u2581')
if is_suffix:
# group together with its previous token to form a whole-word
cand_indexes[-1].append(tid)
else:
cand_indexes.append([tid])
masked_ids, output_mask = [], []
mask_id = self.tokenizer.token_to_id("[MASK]")
for word_ids in cand_indexes:
is_special = (word_ids[0] == self.tokenizer.cls_id) or (word_ids[0] == self.tokenizer.sep_id)
if is_special or (random.random() > self.mask_probability):
output_mask.extend([0] * len(word_ids))
masked_ids.extend(word_ids)
else:
output_mask.extend([1] * len(word_ids))
p = random.random()
# for 80%, replace with mask
if p < 0.8:
masked_ids.extend([mask_id] * len(word_ids))
# for 10%, replace by a random token
elif p < 0.9:
for _ in word_ids:
# randomly select a valid word
random_word = random.randrange(self.vocab_size)
while random_word in (self.tokenizer.cls_id, self.tokenizer.sep_id):
random_word = random.randrange(self.vocab_size)
masked_ids.append(random_word)
# for 10%, use same token
else:
masked_ids.extend(word_ids)
return masked_ids, output_mask
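# Illustrative sketch (not part of the original module): the 80/10/10 replacement rule that
# `mask_ids` above applies to each selected (whole-word) group of token ids. The `mask_id`,
# `vocab_size` and `mask_prob` defaults are made-up values used only for this demo.
def _sketch_mask_token(token_id, mask_id=103, vocab_size=1000, mask_prob=0.15):
    if random.random() > mask_prob:
        return token_id, 0  # token left untouched, excluded from the MLM loss
    p = random.random()
    if p < 0.8:  # 80%: replace with [MASK]
        return mask_id, 1
    elif p < 0.9:  # 10%: replace with a random vocabulary token
        return random.randrange(vocab_size), 1
    else:  # 10%: keep the original token, but still predict it
        return token_id, 1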
class BertPretrainingPreprocessedDataset(Dataset):
"""
Dataset for already preprocessed data.
"""
def __init__(self, input_file: str, max_predictions_per_seq: int):
"""
Args:
input_file: data file in hdf5 format with preprocessed data in array format
            max_predictions_per_seq: maximum number of masked tokens per sequence. Needs to be consistent with the data in the input file.
"""
self.input_file = input_file
self.max_predictions_per_seq = max_predictions_per_seq
f = load_h5(input_file)
keys = [
'input_ids',
'input_mask',
'segment_ids',
'masked_lm_positions',
'masked_lm_ids',
'next_sentence_labels',
]
self.inputs = [np.asarray(f[key][:]) for key in keys]
f.close()
def __len__(self):
'Denotes the total number of samples'
return len(self.inputs[0])
def __getitem__(self, index: int):
[input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, next_sentence_labels] = [
input[index].astype(np.int64) for input in self.inputs
]
output_mask = np.zeros_like(input_ids)
output_ids = input_ids.copy()
index = self.max_predictions_per_seq
padded_mask_indices = (masked_lm_positions == 0).nonzero()
if len(padded_mask_indices[0]) != 0:
index = padded_mask_indices[0][0]
output_mask[masked_lm_positions[:index]] = 1.0
output_ids[masked_lm_positions[:index]] = masked_lm_ids[:index]
# input_mask = np.asarray(input_mask, dtype=np.float32)
# output_mask = np.asarray(output_mask, dtype=np.float32)
return (input_ids, segment_ids, input_mask, output_ids, output_mask, next_sentence_labels)
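# Illustrative sketch (not part of the original module): how `__getitem__` above expands the
# zero-padded `masked_lm_positions` / `masked_lm_ids` arrays into dense `output_ids` /
# `output_mask` targets. All array values below are made up for the demo.
def _sketch_unpack_masked_lm_targets():
    input_ids = np.array([101, 7, 8, 9, 10, 102], dtype=np.int64)
    masked_lm_positions = np.array([2, 4, 0, 0], dtype=np.int64)  # zero-padded positions
    masked_lm_ids = np.array([80, 90, 0, 0], dtype=np.int64)  # original ids at those positions
    output_mask = np.zeros_like(input_ids)
    output_ids = input_ids.copy()
    index = len(masked_lm_positions)  # max_predictions_per_seq
    padded = (masked_lm_positions == 0).nonzero()
    if len(padded[0]) != 0:
        index = padded[0][0]  # first padded slot marks the number of real predictions
    output_mask[masked_lm_positions[:index]] = 1
    output_ids[masked_lm_positions[:index]] = masked_lm_ids[:index]
    return output_ids, output_mask  # -> [101, 7, 80, 9, 90, 102], [0, 0, 1, 0, 1, 0]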
class BertPretrainingPreprocessedDataloader(DataLoader):
"""
    Dataloader for preprocessed data in hdf5 files that is already in the format expected by the BERT model.
"""
def __init__(self, data_files: List[str], max_predictions_per_seq: int, batch_size: int, seed: Optional[int] = 42):
"""
Args:
data_files: list of data files in hdf5 format with preprocessed data in array format
            max_predictions_per_seq: maximum number of masked tokens per sequence. Needs to be consistent with the data in the input file.
batch_size: batch size per gpu per forward pass
seed: seed to ensure each gpu process opens the same data file in each iteration
"""
super().__init__(None, batch_size=batch_size)
self.random = random.Random(seed)
self.data_files = data_files
self.max_predictions_per_seq = max_predictions_per_seq
# def __len__(self):
# return sum([len(load_h5(data_file)['input_ids']) for data_file in self.data_files])//(self.batch_size)
def __iter__(self):
self.random.shuffle(self.data_files)
for data_file in self.data_files:
train_data = BertPretrainingPreprocessedDataset(
input_file=data_file, max_predictions_per_seq=self.max_predictions_per_seq
)
train_sampler = DistributedSampler(train_data)
# print("---")
# print(os.getpid(), train_sampler.rank, train_sampler.num_replicas, train_sampler.num_samples)
# print("---")
train_dataloader = DataLoader(
dataset=train_data, sampler=train_sampler, batch_size=self.batch_size, shuffle=False,
)
for x in train_dataloader:
yield x
| NeMo-main | nemo/collections/nlp/data/language_modeling/lm_bert_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.language_modeling.l2r_lm_dataset import L2RLanguageModelingDataset
from nemo.collections.nlp.data.language_modeling.lm_bert_dataset import (
BertPretrainingDataset,
BertPretrainingPreprocessedDataloader,
)
from nemo.collections.nlp.data.language_modeling.sentence_dataset import SentenceDataset, TarredSentenceDataset
| NeMo-main | nemo/collections/nlp/data/language_modeling/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import get_samples_mapping
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import JSONLMemMapDataset, TextMemMapDataset
from nemo.core.classes import Dataset
class T0Dataset(Dataset):
def __init__(
self,
file_path: str,
tokenizer: TokenizerSpec,
max_src_seq_length: int = 512,
max_tgt_seq_length: int = 512,
replace_bos_with_pad: bool = False,
add_bos_to_input: bool = False,
add_eos_to_input: bool = False,
max_num_samples: int = None,
seed: int = 1234,
):
"""
        file_path: Path to a JSONL T0 dataset file.
tokenizer: Tokenizer for the dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
max_src_seq_length: Maximum length of the source sequences. Lines above this length will be truncated.
max_tgt_seq_length: Maximum length of the target sequences. Lines above this length will be truncated.
replace_bos_with_pad: Whether the decoder starts with a pad token. This is needed for Google's T5 models that may be converted from HF.
add_bos_to_input: Whether to add the bos_id to the input sequence.
add_eos_to_input: Whether to add the eos_id to the input sequence.
seed: Random seed for data shuffling.
max_num_samples: Maximum number of samples to load. This can be > dataset length if you want to oversample data. If None, all samples will be loaded.
"""
self.tokenizer = tokenizer
self.file_path = file_path
self.max_src_seq_length = max_src_seq_length
self.max_tgt_seq_length = max_tgt_seq_length
self.replace_bos_with_pad = replace_bos_with_pad
self.add_bos_to_input = add_bos_to_input
self.add_eos_to_input = add_eos_to_input
self.max_num_samples = max_num_samples
self.seed = seed
self.indexed_dataset = JSONLMemMapDataset(dataset_paths=[file_path], tokenizer=None, header_lines=0)
# Will be None after this call if `max_num_samples` is None
self._build_samples_mapping()
def _build_samples_mapping(self):
if self.max_num_samples is not None:
# This means max src and max tgt sequence length need to be the same
if self.max_src_seq_length != self.max_tgt_seq_length:
raise ValueError(
f"max_src_seq_length ({self.max_src_seq_length}) != max_tgt_seq_length ({self.max_tgt_seq_length}). This is needed for max_samples based training for now."
)
self.samples_mapping = get_samples_mapping(
indexed_dataset=self.indexed_dataset,
data_prefix=self.file_path,
num_epochs=None,
max_num_samples=self.max_num_samples,
max_seq_length=self.max_src_seq_length - 2,
short_seq_prob=0,
seed=self.seed,
name=self.file_path.split('/')[-1],
binary_head=False,
)
else:
self.samples_mapping = None
def __len__(self):
if self.max_num_samples is None:
return len(self.indexed_dataset)
else:
return len(self.samples_mapping)
def __getitem__(self, idx):
if isinstance(idx, np.int64):
idx = idx.item()
if self.samples_mapping is not None:
assert idx < len(self.samples_mapping)
idx, _, _ = self.samples_mapping[idx]
if isinstance(idx, np.uint32):
idx = idx.item()
assert idx < len(self.indexed_dataset)
example = self.indexed_dataset[idx]
return self._process_example(example)
def _process_example(self, example):
"""
Process a single example from the dataset into IDs and other T0-related metadata.
"""
tokenized_input = self.tokenizer.text_to_ids(example['input'])
tokenized_output = self.tokenizer.text_to_ids(example['output'])
offset = 0
if self.add_bos_to_input:
offset += 1
if self.add_eos_to_input:
offset += 1
if len(tokenized_input) > self.max_src_seq_length - offset:
tokenized_input = tokenized_input[: self.max_src_seq_length - offset]
if len(tokenized_output) > self.max_tgt_seq_length - 2:
tokenized_output = tokenized_output[: self.max_tgt_seq_length - 2]
bos_id = self.tokenizer.pad_id if self.replace_bos_with_pad else self.tokenizer.bos_id
if self.add_bos_to_input:
tokenized_input = [bos_id] + tokenized_input
if self.add_eos_to_input:
tokenized_input = tokenized_input + [self.tokenizer.eos_id]
target = [bos_id] + tokenized_output + [self.tokenizer.eos_id]
processed_example = {
'text_enc': tokenized_input,
'text_dec': target[:-1],
'labels': target[1:],
}
# Process optional args:
if 'chunked_idx' in example:
original = ""
template = ""
for item in example['chunked_idx'].split(', '):
item = item.split('-')
if item[0] == "original_text":
original += example['input'][int(item[1]) : int(item[2])]
elif item[0] == "template":
template += example['input'][int(item[1]) : int(item[2])]
else:
raise ValueError(f"Unknown chunk type: {item[0]}")
additional_args = {
'original': self.tokenizer.text_to_ids(original),
'template': self.tokenizer.text_to_ids(template),
'prompt': self.tokenizer.text_to_ids(example['prompt']),
}
processed_example.update(additional_args)
if 'choices' in example:
additional_args = {
'choices': [self.tokenizer.text_to_ids(choice) for choice in example['choices']],
}
processed_example.update(additional_args)
if 'task_name_with_prompt' in example:
additional_args = {
'task_name_with_prompt': self.tokenizer.text_to_ids(example['task_name_with_prompt']),
}
processed_example.update(additional_args)
return processed_example
def _maybe_cast_to_list(self, x):
if isinstance(x, np.ndarray):
return [item.tolist() for item in x]
return x
def _collate_item(self, item):
item = self._maybe_cast_to_list(item)
max_length = max([len(x) for x in item]) if item else 0
item = [x + [self.tokenizer.pad_id] * (max_length - len(x)) for x in item]
return item
def collate_fn(self, batch):
enc_query = [item['text_enc'] for item in batch]
dec_input = [item['text_dec'] for item in batch]
labels = [item['labels'] for item in batch]
enc_query = torch.LongTensor(self._collate_item(enc_query))
dec_input = torch.LongTensor(self._collate_item(dec_input))
loss_mask = torch.LongTensor(
[([1] * (len(item))) + ([0] * (dec_input.size(1) - len(item))) for item in labels]
)
labels = torch.LongTensor(self._collate_item(labels))
enc_mask = (enc_query != self.tokenizer.pad_id).long()
dec_mask = (dec_input != self.tokenizer.pad_id).long()
processed_example = {
'text_enc': enc_query,
'text_dec': dec_input,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
# Collate additional args if present in the batch.
if 'original' in batch[0]:
original = self._collate_item([item['original'] for item in batch])
processed_example['original'] = torch.LongTensor(original)
if 'template' in batch[0]:
template = self._collate_item([item['template'] for item in batch])
processed_example['template'] = torch.LongTensor(template)
if 'prompt' in batch[0]:
prompt = self._collate_item([item['prompt'] for item in batch])
processed_example['prompt'] = torch.LongTensor(prompt)
if 'task_name_with_prompt' in batch[0]:
task_name_with_prompt = self._collate_item([item['task_name_with_prompt'] for item in batch])
processed_example['task_name_with_prompt'] = torch.LongTensor(task_name_with_prompt)
return processed_example
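# Illustrative sketch (not part of the original module): the padding that `_collate_item`
# above applies -- every sequence in a batch is right-padded with the pad id up to the length
# of the longest sequence. `pad_id` and the example ids are made up for the demo.
def _sketch_collate_item(batch_ids, pad_id=0):
    max_length = max(len(x) for x in batch_ids) if batch_ids else 0
    return [x + [pad_id] * (max_length - len(x)) for x in batch_ids]
# e.g. _sketch_collate_item([[1, 2, 3], [4]]) -> [[1, 2, 3], [4, 0, 0]]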
| NeMo-main | nemo/collections/nlp/data/language_modeling/t0_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import multiprocessing as mp
import os
import pickle
import time
from functools import partial
from typing import Callable, List, Optional, Type
import numpy as np
import torch
from nemo.core import Dataset
from nemo.utils import AppState, logging
__all__ = ["TextMemMapDataset", "CSVMemMapDataset", "build_index_files"]
__idx_version__ = "0.2" # index file version
__idx_suffix__ = "idx" # index file suffix
def _build_index_from_memdata(fn, newline_int):
"""
Build index of delimiter positions between samples in memmap.
Can be provided externally.
Returns a 1D array of ints.
"""
# use memmap to read file
mdata = np.memmap(fn, dtype=np.uint8, mode="r")
# find newline positions
midx = np.where(mdata == newline_int)[0]
midx_dtype = midx.dtype
# make sure to account for all data
midx = midx.tolist()
# add last item in case there is no new-line at the end of the file
if (len(midx) == 0) or (midx[-1] + 1 != len(mdata)):
midx = midx + [len(mdata) + 1]
# remove empty lines from end of file
while len(midx) > 1 and (midx[-1] - midx[-2]) < 2:
midx.pop(-1)
midx = np.asarray(midx, dtype=midx_dtype)
# free memmap
mdata._mmap.close()
del mdata
return midx
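# Illustrative sketch (not part of the original module): the newline index produced by
# `_build_index_from_memdata` above, shown on a small in-memory buffer instead of a
# file-backed memmap. The text content is made up for the demo.
def _sketch_newline_index():
    raw = b"first line\nsecond line\nlast line"  # note: no trailing newline
    mdata = np.frombuffer(raw, dtype=np.uint8)
    midx = np.where(mdata == 10)[0].tolist()  # byte offsets of b"\n"
    if (len(midx) == 0) or (midx[-1] + 1 != len(mdata)):
        midx = midx + [len(mdata) + 1]  # account for a last line without a newline
    return midx  # -> [10, 22, 33]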
class TextMemMapDataset(Dataset):
"""
Allow per-line lazy access to multiple text files using numpy memmap.
"""
def __init__(
self,
dataset_paths: List[str],
newline_int: Optional[int] = 10,
header_lines: Optional[int] = 0,
workers: Optional[int] = None,
tokenizer: Optional[Type["TokenizerSpec"]] = None,
build_index_fn: Optional[Callable[[str, Optional[int]], bool]] = _build_index_from_memdata,
sort_dataset_paths: Optional[bool] = True,
index_mapping_dir: Optional[str] = None,
):
"""
Args:
dataset_paths: list of JSONL file paths.
newline_int: ASCII code to use to interpret newlines in file.
header_lines: number of header lines in JSON files.
workers: number of workers to use for creating index files.
tokenizer: tokenizer to use to convert text to tokens.
build_index_fn: a callable build_index_fn(fn, newline_int) -> midx [np.array]
that returns the index of newlines in a file fn must be pickleable
(to be used in multiprocessing.Pool.map).
sort_dataset_paths: whether to sort datasets by paths.
index_mapping_dir: directory to save the index mapping to.
If None, will write to the same folder as the dataset.
"""
super().__init__()
self.mdata_midx_list = []
# Make a single string into a list
if isinstance(dataset_paths, str):
dataset_paths = [dataset_paths]
if len(dataset_paths) < 1:
raise ValueError("files_list must contain at leat one file name")
self._newline_int = newline_int
# skip first N lines
self._header_lines = header_lines
self._files_list = dataset_paths
self._worker = workers
self.tokenizer = tokenizer
self._sort_dataset_paths = sort_dataset_paths
if sort_dataset_paths:
self._files_list = sorted(self._files_list)
logging.info(f"Building data files")
# load all files into memmap
is_distributed = torch.distributed.is_available() and torch.distributed.is_initialized()
if not is_distributed or (is_distributed and torch.distributed.get_rank() == 0):
# Create index files on global rank 0.
build_index_files(
dataset_paths,
newline_int,
workers=self._worker,
build_index_fn=build_index_fn,
index_mapping_dir=index_mapping_dir,
)
if is_distributed:
torch.distributed.barrier()
if is_distributed and AppState().local_rank == 0:
# If we are in a distributed multi-node set-up and index files are not stored on
# a shared filesystem, then the index files created on global rank 0 are only
# accessible to the workers on that node.
#
# Two cases may occur here:
#
# 1. case of a shared filesystem, or global_rank==0: the index files are present in
# the locally available filesystem, calling build_index_files() again is a no-op.
# 2. case of a non-shared filesystem, and global_rank>0: the index files are not
# present in the locally available filesystem, calling build_index_files() again
# will create them.
#
# Outcome in all cases: all nodes have access to the index files in their filesystem.
build_index_files(
dataset_paths,
newline_int,
workers=self._worker,
build_index_fn=build_index_fn,
index_mapping_dir=index_mapping_dir,
)
if is_distributed:
torch.distributed.barrier()
logging.info(f"Loading data files")
start_time = time.time()
mdata_midx_list = [self.load_file(fn, index_mapping_dir) for fn in self._files_list]
logging.info(
f"Time loading {len(mdata_midx_list)} mem-mapped files: {datetime.timedelta(seconds=time.time() - start_time)}"
)
logging.info("Computing global indices")
midx_bins = np.cumsum([(len(midx) - header_lines) for _, midx in mdata_midx_list])
self.midx_bins = midx_bins
self.mdata_midx_list = mdata_midx_list
# figure out size of the dataset
self._size = self.midx_bins[-1]
def __del__(self):
if self.mdata_midx_list:
for mdata, midx in self.mdata_midx_list:
mdata._mmap.close()
def __len__(self):
return self._size
def __getitem__(self, idx):
"""
Return a string from binary memmap
"""
if (idx >= len(self)) or (idx < 0):
raise IndexError(f"Index {idx} if out of dataset range with {len(self)} samples")
# Identify the file containing the record
file_id = np.digitize(idx, self.midx_bins, right=False)
base_idx = self.midx_bins[file_id - 1] if file_id > 0 else 0
file_idx = idx - base_idx + self._header_lines
mdata, midx = self.mdata_midx_list[file_id]
# load sample
if file_idx == 0:
i = 0
j = midx[0]
else:
i = midx[file_idx - 1] + 1 # ignore newline
j = midx[file_idx]
# fetch sample from memmap
try:
sample = self._fetch_sample_from_memmap(mdata, i, j)
except Exception as e:
logging.error(f"Error while fetching sample from memmap: {e}")
logging.error(f"file_id: {file_id}, file_idx: {file_idx}, i: {i}, j: {j}")
raise e
# parse raw text (e.g., tokenize)
try:
data = self._build_data_from_text(sample)
except Exception as e:
logging.error(
f"Error while building data from text, possible issue with sample expected format (see offending sample below): {e}"
)
logging.error(f"sample: {sample}, file_id: {file_id}, file_idx: {file_idx}, i: {i}, j: {j}")
raise e
return data
def _fetch_sample_from_memmap(self, mdata, i, j):
"""Fetchs the text sample. Can be overriden by child-classes to support loading of partial samples and alternative decode methods"""
# load text sample by slicing memmap data[i:j]
text = mdata[i:j].tobytes().decode("utf-8")
return text
def _build_data_from_text(self, text):
"""Allows child-classes to modify the parsing of raw text, prior to tokenization"""
# tokenize text if tokenizer is given
if self.tokenizer is not None:
data = self.tokenizer.text_to_ids(text)
else:
data = text
return data
def load_file(self, fn, index_mapping_dir: Optional[str] = None):
"""
        Loads a text file as np.uint8.
        Returns:
            mdata - memory map of np.uint8
midx - indices pointing to the end-of-line (or end of file) position
size - number of lines in file
"""
logging.info(f"Loading {fn}")
idx_fn = _index_fn(fn, index_mapping_dir)
# create data map
mdata = np.memmap(fn, dtype=np.uint8, mode="r")
if _index_file_exists(idx_fn):
# load index file into memory map
midx = np.load(idx_fn + ".npy", allow_pickle=True, mmap_mode="r")
# test for header
if len(midx) < self._header_lines:
raise RuntimeError(f"Missing header, expected {self._header_lines} header lines")
# load meta info
idx_info_dict = pickle.load(open(idx_fn + ".info", "rb"))
# test for mismatch in expected newline_int
if "newline_int" in idx_info_dict:
newline_int = idx_info_dict["newline_int"]
if self._newline_int != newline_int:
logging.warning(
f"Mismatch in newline_int, expected = {self._newline_int} but loaded {newline_int}"
)
# test for version mismatch (useful to force recreation of index files)
idx_version = idx_info_dict.get("version", "0.0")
if __idx_version__ != idx_version:
raise RuntimeError(
f"Version mismatch: Please delete existing '.{__idx_suffix__}' files. Expected version = {__idx_version__}, but file version = {idx_version}. File path = {idx_fn}"
)
else:
raise ValueError(
f"Memory Map for {fn} is not found, missing one or more of files: {idx_fn}.{{.npy,.info}}"
)
return (mdata, midx)
class CSVMemMapDataset(TextMemMapDataset):
"""
Allow per-line lazy access to multiple text files using numpy memmap.
"""
def __init__(
self,
dataset_paths: List[str],
newline_int: Optional[int] = 10,
header_lines: Optional[int] = 0,
workers: Optional[int] = None,
tokenizer: Optional[Type["TokenizerSpec"]] = None,
sort_dataset_paths: Optional[bool] = True,
data_col=1,
data_sep=",",
index_mapping_dir: Optional[str] = None,
):
"""
Args:
dataset_paths: list of JSONL file paths.
newline_int: ASCII code to use to interpret newlines in file.
header_lines: number of header lines in JSON files.
workers: number of workers to use for creating index files.
tokenizer: tokenizer to use to convert text to tokens.
sort_dataset_paths: whether to sort datasets by paths.
data_col: index of data column.
data_sep: data separator.
index_mapping_dir: directory to save the index mapping to.
If None, will write to the same folder as the dataset.
"""
super().__init__(
dataset_paths=dataset_paths,
newline_int=newline_int,
header_lines=header_lines,
workers=workers,
tokenizer=tokenizer,
sort_dataset_paths=sort_dataset_paths,
index_mapping_dir=index_mapping_dir,
)
self._data_col = data_col
self._data_sep = data_sep
def _build_data_from_text(self, text):
"""Return a CSV field from text"""
# get CSV field
text = text.split(self._data_sep)[self._data_col]
# tokenize
return super()._build_data_from_text(text)
class CSVFieldsMemmapDataset(TextMemMapDataset):
"""
Allow per-line lazy access to multiple csv files using numpy memmap.
Returns a dictionary with multiple fields.
"""
def __init__(
self,
dataset_paths,
newline_int=10,
header_lines=1,
workers=None,
tokenizer=None,
sort_dataset_paths=True,
data_sep=',',
data_fields={"data": 0},
index_mapping_dir: Optional[str] = None,
):
"""
Args:
dataset_paths: list of csv file paths to read data from
newline_int: ASCII code to use to interpret newlines in file.
header_lines: number of header lines in csv files.
workers: number of workers to use for creating index files.
tokenizer: tokenizer to use to convert text to tokens.
sort_dataset_paths: whether to sort datasets by paths.
data_sep: data separator.
data_fields: dict of field names and their corresponding column indices
index_mapping_dir: directory to save the index mapping to.
If None, will write to the same folder as the dataset.
"""
super().__init__(
dataset_paths=dataset_paths,
newline_int=newline_int,
header_lines=header_lines,
workers=workers,
tokenizer=tokenizer,
sort_dataset_paths=sort_dataset_paths,
index_mapping_dir=index_mapping_dir,
)
self._data_fields = data_fields
self._data_sep = data_sep
def _build_data_from_text(self, text: str):
"""
"""
_build_data_from_text = super()._build_data_from_text
data = {}
text_fields = text.split(self._data_sep)
for field_name, field_idx in self._data_fields.items():
data[field_name] = _build_data_from_text(text_fields[field_idx])
return data
class JSONLMemMapDataset(TextMemMapDataset):
"""
Memory-mapped iteration over a JSONL file.
"""
def __init__(
self,
dataset_paths: List[str],
newline_int: Optional[int] = 10,
header_lines: Optional[int] = 0,
workers: Optional[int] = None,
tokenizer: Optional[Type["TokenizerSpec"]] = None,
sort_dataset_paths: Optional[bool] = True,
index_mapping_dir: Optional[str] = None,
):
"""
Args:
dataset_paths: list of JSONL file paths.
newline_int: ASCII code to use to interpret newlines in file.
header_lines: number of header lines in JSON files.
workers: number of workers to use for creating index files.
tokenizer: tokenizer to use to convert text to tokens.
sort_dataset_paths: whether to sort datasets by paths.
index_mapping_dir: directory to save the index mapping to.
If None, will write to the same folder as the dataset.
"""
super().__init__(
dataset_paths=dataset_paths,
newline_int=newline_int,
header_lines=header_lines,
workers=workers,
tokenizer=tokenizer,
sort_dataset_paths=sort_dataset_paths,
index_mapping_dir=index_mapping_dir,
)
def _build_data_from_text(self, text):
"""Return a dictionary of data based on a single JSON line."""
try:
record = json.loads(text)
except Exception as e:
logging.error(f"Exception: {e}")
logging.error(f"datapoint: {text}")
raise e
return record
def _index_file_exists(idx_fn):
"""Helper function to test if index file exists"""
if os.path.exists(idx_fn + ".npy") and os.path.exists(idx_fn + ".info"):
return True
else:
return False
def _index_fn(fn: str, index_mapping_dir: str) -> str:
"""Return base file name of index files.
This returns the base file name associated with specified index
files. This base name is the base on top of which suffixes
like .npy or .info are added.
The parent directory is created if it does not already exist.
fn may be specified in multiple ways:
1. file name: data.jsonl,
2. relative path to a file: relative/path/to/data.jsonl,
3. absolute path to a file: /absolute/path/to/data.jsonl.
This function returns paths in the pattern of:
    1. /path/to/index_mapping_dir/data.jsonl.idx
    2. /path/to/index_mapping_dir/relative/path/to/data.jsonl.idx
    3. /path/to/index_mapping_dir/absolute/path/to/data.jsonl.idx
Args:
fn: filename to get base name for.
index_mapping_dir: directory to save the index mapping to.
If None, will write to the same folder as the dataset.
"""
if index_mapping_dir:
# Remove leading "/" and "..".
while fn.startswith(("/", "..")):
if fn.startswith(".."):
fn = fn.lstrip("..")
if fn.startswith("/"):
fn = fn.lstrip("/")
idx_fn = f"{os.path.join(index_mapping_dir, fn)}.{__idx_suffix__}"
# Create parent directory if needed.
os.makedirs(os.path.dirname(idx_fn), exist_ok=True)
else:
idx_fn = f"{fn}.{__idx_suffix__}"
return idx_fn
def _build_memmap_index_files(newline_int, build_index_fn, fn, index_mapping_dir: str):
"""Helper function to build an index file"""
idx_fn = _index_fn(fn, index_mapping_dir)
# create data map
if _index_file_exists(idx_fn):
return False
else:
logging.info(f"Building indexing for fn = {fn}")
# find all newline positions
midx = build_index_fn(fn, newline_int)
# validate midx
midx = np.asarray(midx)
if not np.issubdtype(midx.dtype, np.integer):
raise TypeError(f"midx must be an integer array, but got type = {midx.dtype}")
        # create metadata file
data = dict(newline_int=newline_int, version=__idx_version__)
# save index as numpy array to enable memmap reading
logging.info(f"Saving idx file = {idx_fn}.npy")
np.save(idx_fn + ".npy", midx, allow_pickle=True)
logging.info(f"Saving metadata file = {idx_fn}.info")
pickle.dump(data, open(idx_fn + ".info", "wb"))
return True
def build_index_files(
dataset_paths, newline_int, workers=None, build_index_fn=_build_index_from_memdata, index_mapping_dir: str = None,
):
"""Auxiliary method to build multiple index files"""
if len(dataset_paths) < 1:
raise ValueError("files_list must contain at leat one file name")
if workers is None:
workers = max(1, os.cpu_count() // 2)
logging.info(f"Processing {len(dataset_paths)} data files using {workers} workers")
# load all files into memmap
start_time = time.time()
with mp.Pool(workers) as p:
build_status = p.map(
partial(_build_memmap_index_files, newline_int, build_index_fn, index_mapping_dir=index_mapping_dir,),
dataset_paths,
)
logging.info(
f"Time building {sum(build_status)} / {len(build_status)} mem-mapped files: {datetime.timedelta(seconds=time.time() - start_time)}"
)
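# Illustrative sketch (not part of the original module): minimal end-to-end use of
# `TextMemMapDataset` on a throw-away file. The file name and contents are made up;
# index files (*.idx.npy / *.idx.info) are written next to the text file on first use.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        fn = os.path.join(tmp_dir, "data.txt")
        with open(fn, "w") as f:
            f.write("first line\nsecond line\nthird line\n")
        dataset = TextMemMapDataset(dataset_paths=[fn], workers=1)
        print(len(dataset), dataset[0], dataset[2])  # -> 3 first line third line
        del dataset  # close memmaps before the temporary directory is removed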
| NeMo-main | nemo/collections/nlp/data/language_modeling/text_memmap_dataset.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pytorch Dataset with sentences packed into batches by length."""
import io
import json
import logging
import pickle
from collections import OrderedDict
from typing import Any
import braceexpand
import numpy as np
import webdataset as wd
from torch.utils.data import IterableDataset
from nemo.collections.nlp.data.data_utils.data_preprocessing import dataset_to_ids
from nemo.core import Dataset
__all__ = ['SentenceDataset', 'TarredSentenceDataset']
class SentenceDataset(Dataset):
def __init__(
self,
tokenizer: Any,
dataset: Any,
tokens_in_batch: int = 1024,
clean: bool = False,
cache_ids: bool = False,
max_seq_length: int = 512,
min_seq_length: int = 1,
):
self.tokenizer = tokenizer
self.tokens_in_batch = tokens_in_batch
ids = dataset_to_ids(dataset, tokenizer, cache_ids=cache_ids)
if clean:
ids = self.clean(ids, max_tokens=max_seq_length, min_tokens=min_seq_length)
self.batch_sent_ids, self.batch_elem_lengths = self.pack_data_into_batches(ids)
self.batches = self.pad_batches(ids)
def __len__(self):
return len(self.batches)
def __getitem__(self, idx):
ids = self.batches[idx]
mask = (ids != self.tokenizer.pad_id).astype(np.int32)
return ids, mask
def pad_batches(self, ids):
"""
Augments source and target ids in the batches with padding symbol
to make the lengths of all sentences in the batches equal.
"""
batches = []
for batch_elem_len, batch_sent_ids in zip(self.batch_elem_lengths, self.batch_sent_ids):
            batch = self.tokenizer.pad_id * np.ones((len(batch_sent_ids), batch_elem_len), dtype=np.int64)
for i, sentence_idx in enumerate(batch_sent_ids):
batch[i][: len(ids[sentence_idx])] = ids[sentence_idx]
batches.append(batch)
return batches
def pack_data_into_batches(self, ids):
"""
Takes two lists of source and target sentences, sorts them, and packs
into batches to minimize the use of padding tokens. Returns a list of
batches where each batch contains indices of sentences included into it
"""
# create buckets sorted by the number of src tokens
# each bucket is also sorted by the number of tgt tokens
buckets = {}
for i, line_ids in enumerate(ids):
len_ = len(line_ids)
if len_ not in buckets:
buckets[len_] = [i]
else:
buckets[len_].append(i)
for b_idx in buckets:
buckets[b_idx] = sorted(buckets[b_idx])
buckets = OrderedDict(sorted(buckets.items()))
batches = []
batch_elem_lengths = []
curr_batch = []
len_of_longest_sent = 0
for sent_len, bucket in buckets.items():
for sent_i in bucket:
if sent_len * (len(curr_batch) + 1) > self.tokens_in_batch:
if not curr_batch:
raise ValueError(
f"The limitation on number of tokens in batch {self.tokens_in_batch} is too strong."
f"Several sentences contain {sent_len} tokens."
)
batches.append(curr_batch)
batch_elem_lengths.append(sent_len)
curr_batch = []
curr_batch.append(sent_i)
len_of_longest_sent = sent_len
if curr_batch:
batches.append(curr_batch)
batch_elem_lengths.append(len_of_longest_sent)
return batches, batch_elem_lengths
def clean(self, ids, max_tokens=None, min_tokens=None):
"""
        Removes noisy sentences from the dataset.
        Specifically, a sentence is removed if it is
        -- longer than *max_tokens*
        -- shorter than *min_tokens*
"""
ids_ = []
for i in range(len(ids)):
len_ = len(ids[i])
if (max_tokens is not None and len_ > max_tokens) or (min_tokens is not None and len_ < min_tokens):
continue
ids_.append(ids[i])
return ids_
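# Illustrative sketch (not part of the original module): the token-budget packing idea used
# by `pack_data_into_batches` above -- sentences are bucketed by length and grouped until one
# more sentence would push the batch past `tokens_in_batch`. The lengths are made up.
def _sketch_pack_by_token_budget(lengths, tokens_in_batch):
    buckets = {}
    for i, length in enumerate(lengths):
        buckets.setdefault(length, []).append(i)
    batches, curr_batch = [], []
    for sent_len, bucket in OrderedDict(sorted(buckets.items())).items():
        for sent_i in bucket:
            if sent_len * (len(curr_batch) + 1) > tokens_in_batch and curr_batch:
                batches.append(curr_batch)
                curr_batch = []
            curr_batch.append(sent_i)
    if curr_batch:
        batches.append(curr_batch)
    return batches
# e.g. _sketch_pack_by_token_budget([4, 4, 4, 8, 8], tokens_in_batch=8) -> [[0, 1], [2], [3], [4]]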
class TarredSentenceDataset(IterableDataset):
"""
A similar Dataset to the SentenceDataset, but which loads tarred tokenized pickle files.
Accepts a single JSON metadata file containing the total number of batches
    as well as the path(s) to the tarball(s) containing the pickled batch files.
Valid formats for the text_tar_filepaths argument include:
(1) a single string that can be brace-expanded, e.g. 'path/to/text.tar' or 'path/to/text_{1..100}.tar.gz', or
(2) a list of file paths that will not be brace-expanded, e.g. ['text_1.tar', 'text_2.tar', ...].
Note: For brace expansion in (1), there may be cases where `{x..y}` syntax cannot be used due to shell interference.
This occurs most commonly inside SLURM scripts. Therefore we provide a few equivalent replacements.
Supported opening braces - { <=> (, [, < and the special tag _OP_.
Supported closing braces - } <=> ), ], > and the special tag _CL_.
For SLURM based tasks, we suggest the use of the special tags for ease of use.
See the WebDataset documentation for more information about accepted data and input formats.
If using multiple processes the number of shards should be divisible by the number of workers to ensure an
even split among workers. If it is not divisible, logging will give a warning but training will proceed.
    Additionally, please note that the len() of this DataLayer is the number of batches
    in the tarred text data. An incorrect manifest length may lead to some DataLoader issues down the line.
Args:
text_tar_filepaths: Either a list of tokenized text tarball filepaths, or a
string (can be brace-expandable).
metadata_path (str): Path to the metadata manifest.
        tokenizer: wrapped BPE tokenizer model, such as YTTM
shuffle_n (int): How many samples to look ahead and load to be shuffled.
See WebDataset documentation for more details.
Defaults to 0.
shard_strategy (str): Tarred dataset shard distribution strategy chosen as a str value during ddp.
- `scatter`: The default shard strategy applied by WebDataset, where each node gets
a unique set of shards, which are permanently pre-allocated and never changed at runtime.
- `replicate`: Optional shard strategy, where each node gets all of the set of shards
available in the tarred dataset, which are permanently pre-allocated and never changed at runtime.
The benefit of replication is that it allows each node to sample data points from the entire
            dataset independently of other nodes, and reduces dependence on the value of `shuffle_n`.
.. warning::
Replicated strategy allows every node to sample the entire set of available tarfiles,
and therefore more than one node may sample the same tarfile, and even sample the same
            data points! As such, there is no guarantee that all samples in the dataset will be
            sampled at least once during 1 epoch. The scattered strategy, on the other hand, on specific
            occasions (when the number of shards is not divisible by ``world_size``), will not sample
the entire dataset. For these reasons it is not advisable to use tarred datasets as validation
or test datasets.
global_rank (int): Worker rank, used for partitioning shards. Defaults to 0.
world_size (int): Total number of processes, used for partitioning shards. Defaults to 0.
"""
def __init__(
self,
text_tar_filepaths: str,
metadata_path: str,
tokenizer: str,
shuffle_n: int = 1,
shard_strategy: str = "scatter",
global_rank: int = 0,
world_size: int = 0,
):
super(TarredSentenceDataset, self).__init__()
self.tokenizer = tokenizer
self.pad_id = tokenizer.pad_id
valid_shard_strategies = ['scatter', 'replicate']
if shard_strategy not in valid_shard_strategies:
raise ValueError(
f"Invalid shard strategy of type {type(shard_strategy)} "
f"{repr(shard_strategy) if len(repr(shard_strategy)) < 100 else repr(shard_strategy)[:100] + '...'}! "
f"Allowed values are: {valid_shard_strategies}."
)
with open(metadata_path, 'r') as f:
metadata = json.load(f)
self.metadata = metadata
if isinstance(text_tar_filepaths, str):
# Replace '(', '[', '<' and '_OP_' with '{'
brace_keys_open = ['(', '[', '<', '_OP_']
for bkey in brace_keys_open:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "{")
# Replace ')', ']', '>' and '_CL_' with '}'
brace_keys_close = [')', ']', '>', '_CL_']
for bkey in brace_keys_close:
if bkey in text_tar_filepaths:
text_tar_filepaths = text_tar_filepaths.replace(bkey, "}")
if isinstance(text_tar_filepaths, str):
# Brace expand
text_tar_filepaths = list(braceexpand.braceexpand(text_tar_filepaths))
if shard_strategy == 'scatter':
logging.info("Tarred dataset shards will be scattered evenly across all nodes.")
if len(text_tar_filepaths) % world_size != 0:
logging.warning(
f"Number of shards in tarred dataset ({len(text_tar_filepaths)}) is not divisible "
f"by number of distributed workers ({world_size}). "
f"Some shards will not be used ({len(text_tar_filepaths) % world_size})."
)
batches_per_tar = self.metadata['num_batches'] // len(text_tar_filepaths)
begin_idx = (len(text_tar_filepaths) // world_size) * global_rank
end_idx = begin_idx + (len(text_tar_filepaths) // world_size)
logging.info('Begin Index : %d' % (begin_idx))
logging.info('End Index : %d' % (end_idx))
text_tar_filepaths = text_tar_filepaths[begin_idx:end_idx]
logging.info(
"Partitioning tarred dataset: process (%d) taking shards [%d, %d)", global_rank, begin_idx, end_idx
)
self.length = batches_per_tar * len(text_tar_filepaths) * world_size
elif shard_strategy == 'replicate':
logging.info("All tarred dataset shards will be replicated across all nodes.")
self.length = self.metadata['num_batches']
else:
raise ValueError(f"Invalid shard strategy ! Allowed values are : {valid_shard_strategies}")
self.tarpath = text_tar_filepaths
# Put together WebDataset
self._dataset = wd.WebDataset(urls=text_tar_filepaths, nodesplitter=None)
if shuffle_n > 0:
self._dataset = self._dataset.shuffle(shuffle_n)
else:
logging.info("WebDataset will not shuffle files within the tar files.")
self._dataset = self._dataset.rename(pkl='pkl', key='__key__').to_tuple('pkl', 'key').map(f=self._build_sample)
def _build_sample(self, fname):
# Load file
pkl_file, _ = fname
pkl_file = io.BytesIO(pkl_file)
data = pickle.load(pkl_file) # loads np.int64 vector
pkl_file.close()
ids = data["src"]
mask = (ids != self.pad_id).astype(np.int32)
return ids, mask
def __iter__(self):
return self._dataset.__iter__()
def __len__(self):
return self.length
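# Illustrative sketch (not part of the original module): how the bracket substitution and
# brace expansion performed in __init__ above resolve a SLURM-safe path pattern into a
# concrete shard list. The file names in the pattern are made up for the demo.
def _sketch_expand_tar_filepaths(pattern="path/to/text__OP_1..4_CL_.tar"):
    for bkey in ['(', '[', '<', '_OP_']:
        pattern = pattern.replace(bkey, "{")
    for bkey in [')', ']', '>', '_CL_']:
        pattern = pattern.replace(bkey, "}")
    return list(braceexpand.braceexpand(pattern))
# -> ['path/to/text_1.tar', 'path/to/text_2.tar', 'path/to/text_3.tar', 'path/to/text_4.tar']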
| NeMo-main | nemo/collections/nlp/data/language_modeling/sentence_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import shutil
import struct
from functools import lru_cache
from itertools import accumulate
from typing import List
import numpy as np
import torch
from nemo.utils import logging
__all__ = ["KNNIndex", "MMapRetrievalIndexedDataset", "MMapRetrievalIndexedDatasetBuilder"]
dtypes = {1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float64, 7: np.double, 8: np.uint16}
def code(dtype):
for k in dtypes.keys():
if dtypes[k] == dtype:
return k
raise ValueError(dtype)
def index_file_path(prefix_path):
return prefix_path + '.idx'
def data_file_path(prefix_path):
return prefix_path + '.bin'
def _warmup_mmap_file(path):
with open(path, 'rb') as stream:
while stream.read(100 * 1024 * 1024):
pass
class KNNIndex(object):
"""
Index file for fast KNN mapping.
It is built by `build_knn_map_index.py` script.
It contains a big matrix of shape (chunk_id, K neighbors)
where `chunk_id` are all the chunk ids in the RETRO training data.
    E.g. the KNN neighbor chunk ids in the retrieval data for the i-th chunk id in the training data
    are given by self.knn_map[i].
    This index can hold partial maps that are used for building a sharded index.
"""
_HDR_MAGIC = b'KNNRETM\x00\x00'
@classmethod
def writer(cls, path, K, offset=0):
"""
path: file path of the index
K: number of neighbors for a chunk
offset: start chunk_id for shard index
"""
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
self._file.write(struct.pack('<Q', 1))
self._file.write(struct.pack('<Q', K))
# reserve the space for total number of chunks
self._file.write(struct.pack('<Q', 0))
# chunk start
self._file.write(struct.pack('<Q', offset))
self.K = K
self.count_chunks = 0
self.path = path
return self
def write(self, chunk_knn: np.array):
assert chunk_knn.dtype == np.int64
assert chunk_knn.shape[1] == self.K
self._file.write(chunk_knn.tobytes(order='C'))
self.count_chunks += chunk_knn.shape[0]
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
# Update the chunk size, Since total number of chunks is determined in the end
_bin_buffer_mmap = np.memmap(self.path, mode='r+', order='C', shape=(9 + 8 + 8 + 8),)
buffer = memoryview(_bin_buffer_mmap)
len_array = np.frombuffer(buffer, dtype=np.int64, count=1, offset=9 + 8 + 8)
len_array[0] = self.count_chunks
_bin_buffer_mmap.flush()
_bin_buffer_mmap._mmap.close()
return _Writer()
def __init__(self, path, skip_warmup=True):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, 'Index file doesn\'t match expected format. '
version = struct.unpack('<Q', stream.read(8))
assert (1,) == version
self.K = struct.unpack('<Q', stream.read(8))[0]
self.len = struct.unpack('<Q', stream.read(8))[0]
self.chunk_start_id = struct.unpack('<Q', stream.read(8))[0]
self.chunk_end_id = self.chunk_start_id + self.len
offset = stream.tell()
if not skip_warmup:
logging.info(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
logging.info(" reading KNN map")
self.knn_map = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self.len * self.K, offset=offset).reshape(
self.len, self.K
)
def get_KNN_chunk_ids(self, chunk_id):
""" get the chunk address (in bytes) from chunk id
"""
if not (self.chunk_start_id <= chunk_id < self.chunk_end_id):
            raise ValueError(f'chunk {chunk_id} is outside the range [{self.chunk_start_id}, {self.chunk_end_id})')
return self.knn_map[chunk_id - self.chunk_start_id]
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
def __len__(self):
"""
total number of chunks in the data
"""
return self.len
def merge_knn_files(knn_files: List[KNNIndex], output_file: str):
"""
Merge a list of knn sharding index files into one.
"""
files = [KNNIndex(f) for f in knn_files]
sorted_files = sorted(files, key=lambda x: x.chunk_start_id)
# consistence check
start_id = sorted_files[0].chunk_start_id
previous_end = sorted_files[0].chunk_end_id
K = sorted_files[0].K
for i in sorted_files[1:]:
assert previous_end == i.chunk_start_id
assert K == i.K
previous_end = i.chunk_end_id
with KNNIndex.writer(output_file, K, offset=start_id) as w:
for i in sorted_files:
w.write(i.knn_map)
f = KNNIndex(output_file)
logging.info(f'{output_file} index starts at {f.chunk_start_id}')
logging.info(f'{output_file} index ends at {f.chunk_end_id}')
logging.info(f'total len {f.len}')
assert f.len == (f.chunk_end_id - f.chunk_start_id)
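# Illustrative sketch (not part of the original module): writing a tiny KNNIndex shard and
# reading the neighbors back. The output path, K and the neighbor ids are all made up.
def _sketch_knn_index_roundtrip(path="/tmp/example_knn_map.idx"):
    K = 2
    neighbors = np.arange(6, dtype=np.int64).reshape(3, K)  # 3 training chunks, K=2 neighbors each
    with KNNIndex.writer(path, K, offset=0) as w:
        w.write(neighbors)
    index = KNNIndex(path)
    return index.get_KNN_chunk_ids(1)  # -> array([2, 3])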
class MMapRetrievalIndexedDataset(torch.utils.data.Dataset):
"""
Memory Map Index and Binary file for RETRO DATA.
It provides `chunks` to the original MMap data so data can be fetched at both document and chunk level.
It can be used both for training data and Retrieval Data.
Retrieval Dataset adds an extra `chunk_size` padded tokens at the end of each document. '
`self._index.retrieval_db` is indicating whether it is retrieval dataset or not.
It is built by `preprocess_data_for_megatron.py` script.
"""
class Index(object):
_HDR_MAGIC = b'MMIDRET\x00\x00'
@classmethod
def writer(cls, path, dtype, retrieval_db):
class _Writer(object):
def __enter__(self):
self._file = open(path, 'wb')
self._file.write(cls._HDR_MAGIC)
# write index file version
self._file.write(struct.pack('<L', 1))
return self
@staticmethod
def _get_pointers(sizes, chunk_size):
dtype_size = dtype().itemsize
address = 0
pointers = []
for size in sizes:
pointers.append(address)
address += size * dtype_size
if retrieval_db:
                            # if it is a retrieval db, the last chunk is reserved for padding
address += chunk_size * dtype_size
return pointers
@staticmethod
def _get_chunk_id_and_address(sizes, chunk_size, stride):
if chunk_size % stride != 0:
raise ValueError(f"the chunk size {chunk_size} should be the multiple of {stride}")
dtype_size = dtype().itemsize
chunk_ids = []
last_id = 0
address = 0
pointers = []
for size in sizes:
chunk_ids.append(last_id)
num_of_chunks = len(range(0, size - chunk_size + 1, stride))
if size % chunk_size != 0:
raise ValueError(f"the document size {size} should be the multiple of {chunk_size}")
for i in range(0, size - chunk_size + 1, stride):
pointers.append(address)
if i == size - chunk_size:
address += chunk_size * dtype_size
else:
address += stride * dtype_size
if retrieval_db:
                            # if it is a retrieval db, the last chunk is reserved for padding
address += chunk_size * dtype_size
last_id += num_of_chunks
return chunk_ids, pointers
def write(self, sizes, chunk_size, stride=64):
pointers = self._get_pointers(sizes, chunk_size)
chunk_ids, chunk_address = self._get_chunk_id_and_address(sizes, chunk_size, stride)
# write index chunk stride step
self._file.write(struct.pack('<L', stride))
self._file.write(struct.pack('<B', code(dtype)))
self._file.write(struct.pack('<Q', len(sizes)))
self._file.write(struct.pack('<Q', chunk_size))
self._file.write(struct.pack('<Q', len(chunk_address)))
self._file.write(struct.pack('<B', int(retrieval_db)))
sizes = np.array(sizes, dtype=np.int32)
self._file.write(sizes.tobytes(order='C'))
del sizes
pointers = np.array(pointers, dtype=np.int64)
self._file.write(pointers.tobytes(order='C'))
del pointers
chunk_ids = np.array(chunk_ids, dtype=np.int64)
self._file.write(chunk_ids.tobytes(order='C'))
del chunk_ids
chunk_address = np.array(chunk_address, dtype=np.int64)
self._file.write(chunk_address.tobytes(order='C'))
def __exit__(self, exc_type, exc_val, exc_tb):
self._file.close()
return _Writer()
def __init__(self, path, skip_warmup=True):
with open(path, 'rb') as stream:
magic_test = stream.read(9)
assert self._HDR_MAGIC == magic_test, (
'Index file doesn\'t match expected format. '
'Make sure that --dataset-impl is configured properly.'
)
version = struct.unpack('<L', stream.read(4))
assert (1,) == version
# load the stride size
(self.stride,) = struct.unpack('<L', stream.read(4))
# for legacy compatibility
if self.stride == 0:
self.stride = 64
(dtype_code,) = struct.unpack('<B', stream.read(1))
self._dtype = dtypes[dtype_code]
self._dtype_size = self._dtype().itemsize
self._len = struct.unpack('<Q', stream.read(8))[0]
self.chunk_size = struct.unpack('<Q', stream.read(8))[0]
self.num_chunks = struct.unpack('<Q', stream.read(8))[0]
self.retrieval_db = bool(struct.unpack('<B', stream.read(1))[0])
# self.chunk_size = struct.unpack('<Q', stream.read(8))[0]
# self.num_chunks = struct.unpack('<Q', stream.read(8))[0]
offset = stream.tell()
if not skip_warmup:
logging.info(" warming up index mmap file...")
_warmup_mmap_file(path)
self._bin_buffer_mmap = np.memmap(path, mode='r', order='C')
self._bin_buffer = memoryview(self._bin_buffer_mmap)
logging.info(" reading document sizes...")
self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset)
logging.info(" reading document pointers...")
self._pointers = np.frombuffer(
self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes
)
logging.info(" reading document chunk offset...")
self._chunk_id_start = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self._len,
offset=offset + self._sizes.nbytes + self._pointers.nbytes,
)
logging.info(" reading chunk address...")
self._chunk_address = np.frombuffer(
self._bin_buffer,
dtype=np.int64,
count=self.num_chunks,
offset=offset + self._sizes.nbytes + self._pointers.nbytes + self._chunk_id_start.nbytes,
)
def get_chunk_address(self, chunk_id):
""" get the chunk address from chunk id
"""
return self._chunk_address[chunk_id]
def get_chunk_id(self, sentence_id, position):
""" get the chunk id from sentence idx and offset position.
"""
chunk_offset = position // self.stride
size = self._sizes[sentence_id]
if chunk_offset * self.stride >= size:
raise ValueError('offset is too large')
return (self._chunk_id_start[sentence_id] + chunk_offset).item()
def from_chunk_id_to_doc_id(self, chunk_id):
""" from chunk_id, calculate the document id
"""
if chunk_id >= self.num_chunks:
raise ValueError('chunk_id is out of bound')
doc_id = np.searchsorted(self._chunk_id_start, chunk_id, side='right')
return doc_id - 1
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
@property
def dtype(self):
"""
Token id integer type
"""
return self._dtype
@property
def sizes(self):
"""
number of tokens for each of the documents
"""
return self._sizes
@lru_cache(maxsize=8)
def __getitem__(self, i):
"""
            return a single document starting address (in bytes) and number of tokens
"""
return self._pointers[i], self._sizes[i]
def __len__(self):
return self._len
def __init__(self, path, skip_warmup=True):
super().__init__()
self._path = None
self._index = None
self._bin_buffer = None
self._do_init(path, skip_warmup)
def __getstate__(self):
return self._path
# def __setstate__(self, state):
# self._do_init(state)
def _do_init(self, path, skip_warmup):
self._path = path
self._index = self.Index(index_file_path(self._path), skip_warmup)
if not skip_warmup:
logging.info(" warming up data mmap file...")
_warmup_mmap_file(data_file_path(self._path))
logging.info(" creating numpy buffer of mmap...")
self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C')
logging.info(" creating memory view of numpy buffer...")
self._bin_buffer = memoryview(self._bin_buffer_mmap)
def __del__(self):
self._bin_buffer_mmap._mmap.close()
del self._bin_buffer_mmap
del self._index
def __len__(self):
"""
Total number of documents
"""
return len(self._index)
# @lru_cache(maxsize=8)
def __getitem__(self, idx):
"""
return a single document or a slice of documents, excluding the paddings for the retrieval db
"""
if isinstance(idx, int):
# no need to handle retrieval_db since size exclude the paddings
ptr, size = self._index[idx]
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
return np_array
elif isinstance(idx, slice):
start, stop, step = idx.indices(len(self))
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
ptr = self._index._pointers[start]
if self._index.retrieval_db:
# for retrieval db, need to add the padding of chunk_size at the end of each document
sizes = self._index._sizes[idx] + self._index.chunk_size
else:
sizes = self._index._sizes[idx]
# offsets get the number of tokens for each document including the paddings
offsets = list(accumulate(sizes))
total_size = sum(sizes)
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
sents = np.split(np_array, offsets[:-1])
if self._index.retrieval_db:
# remove the paddings
sents = [sent[: -self._index.chunk_size] for sent in sents]
return sents
def get(self, idx, offset=0, length=None):
""" Retrieves a single item from the dataset with the option to only
return a portion of the item.
get(idx) is the same as [idx] but get() does not support slicing.
"""
# no need to handle retrieval_db since size exclude the paddings
ptr, size = self._index[idx]
if length is None:
length = size - offset
ptr += offset * np.dtype(self._index.dtype).itemsize
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr)
return np_array
def get_chunk_id(self, idx, offset=0):
""" get the chunk id from document idx and offset position.
"""
# make sure offset is a multiple of chunk_size
assert offset % self._index.chunk_size == 0
return self._index.get_chunk_id(idx, offset)
def from_chunk_id_to_doc_id(self, chunk_id):
""" from chunk_id, calculate the document id
"""
return self._index.from_chunk_id_to_doc_id(chunk_id)
def get_chunk(self, chunk_id, force_no_cont_ids=False):
""" Retrieves a single chunk item from the dataset.
It will get chunk_size tokens for training data
or 2*chunk_size tokens for retrieval data.
If force_no_cont_ids=True, it will always get chunk_size tokens
"""
if isinstance(chunk_id, (int, np.int64, np.int32)):
ptr = self._index.get_chunk_address(chunk_id)
if self._index.retrieval_db and (not force_no_cont_ids):
size = self._index.chunk_size * 2
else:
size = self._index.chunk_size
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr)
return np_array
elif isinstance(chunk_id, slice):
start, stop, step = chunk_id.indices(self.chunks)
if step != 1:
raise ValueError("Slices into indexed_dataset must be contiguous")
if self._index.retrieval_db and (not force_no_cont_ids):
chunk_size = self._index.chunk_size * 2
else:
chunk_size = self._index.chunk_size
ptr = self._index.get_chunk_address(start)
end_address = self._index.get_chunk_address(stop - 1) + chunk_size * self._index._dtype_size
address = self._index._chunk_address[chunk_id]
starting_pos = address // self._index._dtype_size
total_size = (end_address - ptr) // self._index._dtype_size
np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr)
sents = [np_array[pos : pos + chunk_size] for pos in starting_pos - starting_pos[0]]
return sents
@property
def sizes(self):
"""
Number of tokens for each of the documents
"""
return self._index.sizes
@property
def chunks(self):
"""
Total number of chunks
"""
return self._index.num_chunks
@property
def chunk_size(self):
"""
Number of tokens per chunk
"""
return self._index.chunk_size
@property
def supports_prefetch(self):
return False
@staticmethod
def exists(path):
return os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))
class MMapRetrievalIndexedDatasetBuilder(object):
def __init__(self, out_file, chunk_size, pad_id, retrieval_db=False, dtype=np.int64, stride=64):
self._data_file = open(out_file, 'wb')
self._dtype = dtype
self.chunk_size = chunk_size
self._sizes = []
self.retrieval_db = retrieval_db
self.pad_id = pad_id
self.stride = stride
def add_item(self, tensor):
"""
Add one document to the indexed dataset.
        It will pad the tokens to be a multiple of chunk_size.
If it is retrieval dataset, it will pad extra chunk_size tokens at the end of the document.
"""
np_array = np.array(tensor.numpy(), dtype=self._dtype)
padded_size = self.chunk_size - (len(np_array) % self.chunk_size)
data_size = np_array.size + padded_size
if self.retrieval_db:
            # for the retrieval database, add one more chunk at the end as padding
padded_size += self.chunk_size
np_array = np.pad(np_array, (0, padded_size), 'constant', constant_values=self.pad_id)
self._data_file.write(np_array.tobytes(order='C'))
self._sizes.append(data_size)
def end_document(self):
"""
Do nothing. Since each item is one document
"""
pass
def merge_file_(self, another_file):
# Concatenate index
index = MMapRetrievalIndexedDataset.Index(index_file_path(another_file))
assert index.dtype == self._dtype
for size in index.sizes:
self._sizes.append(size)
# Concatenate data
with open(data_file_path(another_file), 'rb') as f:
shutil.copyfileobj(f, self._data_file)
def finalize(self, index_file):
"""
Last step of creating the indexed dataset.
Flush and close the binary file.
Finalizing the index file by using the aggregated document size information.
"""
self._data_file.close()
with MMapRetrievalIndexedDataset.Index.writer(index_file, self._dtype, self.retrieval_db) as index:
index.write(self._sizes, self.chunk_size, stride=self.stride)
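# Illustrative usage sketch (not part of the original module): building a tiny retrieval
# database with the builder above and reading a chunk back. The prefix path and token values
# are placeholders; in practice `preprocess_data_for_megatron.py` drives this builder.
def _example_build_and_read_retrieval_db(prefix='/tmp/retro_example/toy_db'):
    os.makedirs(os.path.dirname(prefix), exist_ok=True)
    chunk_size = 64
    builder = MMapRetrievalIndexedDatasetBuilder(
        data_file_path(prefix), chunk_size=chunk_size, pad_id=0, retrieval_db=True, dtype=np.int32
    )
    # two toy "documents"; each gets padded to a multiple of chunk_size plus one extra
    # chunk of padding because retrieval_db=True
    for doc in (torch.arange(1, 100), torch.arange(1, 70)):
        builder.add_item(doc)
        builder.end_document()
    builder.finalize(index_file_path(prefix))
    ds = MMapRetrievalIndexedDataset(prefix)
    first_chunk_id = ds.get_chunk_id(0, 0)
    # for a retrieval db, get_chunk returns the chunk plus its continuation (2 * chunk_size tokens)
    retrieved = ds.get_chunk(first_chunk_id)
    return ds.sizes, retrieved.shape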
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/indexed_retrieval_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import warnings
from typing import Tuple
import torch
from nemo.utils.decorators import experimental
__all__ = [
"MegatronPretrainingBatchSampler",
"MegatronPretrainingRandomBatchSampler",
]
class BaseMegatronBatchSampler:
"""Megatron style BatchSampler.
Let mbs, gbs, tp, pp, and dp stand for "micro batch size", "global batch size",
"tensor model parallel world size", "pipeline model parallel world size", and
"data parallel world size", the number of micro batches (hereafter, nmb) is defined as
:math:`nmb = gbs \\div (mbs \\times dp)`.
See `apex/transformer/microbatches.py#L91-L98 <https://github.com/NVIDIA/apex/blob/
44c3043685b6115e7b81b3458a6c76601b1e55b4/apex/transformer/microbatches.py#L91-L98>`_
for the initial settings of the number of micro batches and
`apex/transformer/microbatches.py#L160-L177 <https://github.com/NVIDIA/apex/blob/
    44c3043685b6115e7b81b3458a6c76601b1e55b4/apex/transformer/microbatches.py#L160-L177>`_
for warming up of global batch size.
e.g.) `(mbs, gbs, tp, pp, dp) = (1, 16, 1, 1, 2)`, then the number of micro batches is
:math:`gbs \\div (mbs \\times dp) = 16 \\div (1 \\times 2) = 8`.
    In this case, an instance of Megatron Batch Sampler on each data parallel rank is expected
    to return :math:`nmb \\times mbs = 8` indices.
"""
_global_batch_size: int
_num_micro_batches: int
_global_batch_size_on_this_data_parallel_rank: int
def __init__(
self,
total_samples: int,
consumed_samples: int,
micro_batch_size: int,
global_batch_size: int,
data_parallel_rank: int,
data_parallel_size: int,
drop_last: bool,
pad_samples_to_global_batch_size=False,
) -> None:
"""Constructor of Megatron-LM style Batch Sampler.
Args:
total_samples: The size of dataset.
consumed_samples: The number of samples that have been used.
micro_batch_size: The size of each micro batch.
global_batch_size: The size of global batch.
data_parallel_rank: The value you can obtain via
`parallel_state.get_data_parallel_rank()` of megatron.core.
data_parallel_size: The value you can obtain via
`parallel_state.get_data_parallel_world_size()` of megatron.core.
"""
# Sanity checks.
if total_samples <= 0:
raise RuntimeError("no sample to consume: {}".format(total_samples))
if consumed_samples >= total_samples:
raise RuntimeError("no samples left to consume: {}, {}".format(consumed_samples, total_samples))
if micro_batch_size <= 0:
raise RuntimeError(f"micro_batch_size size must be greater than 0, but {micro_batch_size}")
if data_parallel_size <= 0:
raise RuntimeError(f"data parallel size must be greater than 0, but {data_parallel_size}")
if data_parallel_rank >= data_parallel_size:
raise RuntimeError(
"data_parallel_rank should be smaller than data size, but {} >= {}".format(
data_parallel_rank, data_parallel_size
)
)
# Keep a copy of input params for later use.
self.total_samples: int = total_samples
self.consumed_samples: int = consumed_samples
self.micro_batch_size: int = micro_batch_size
self.data_parallel_rank: int = data_parallel_rank
self.data_parallel_size: int = data_parallel_size
self.drop_last: bool = drop_last
self.pad_samples_to_global_batch_size = pad_samples_to_global_batch_size
self.update_global_batch_size(global_batch_size)
def update_global_batch_size(self, new_global_batch_size: int) -> None:
"""Update the global batch size."""
self._global_batch_size = new_global_batch_size
if self._global_batch_size % (self.micro_batch_size * self.data_parallel_size) != 0:
raise RuntimeError(
f"`global_batch_size` ({self._global_batch_size}) is not divisible by "
f"`micro_batch_size ({self.micro_batch_size}) x data_parallel_size "
f"({self.data_parallel_size})`"
)
self._num_micro_batches = self._global_batch_size // (self.micro_batch_size * self.data_parallel_size)
self._global_batch_size_on_this_data_parallel_rank = self._num_micro_batches * self.micro_batch_size
@property
def global_batch_size(self) -> int:
return self._global_batch_size
@global_batch_size.setter
def global_batch_size(self, new_global_batch_size: int) -> None:
warnings.warn("`self.update_global_batch_size(new_global_batch_size)` is recommended.")
self.update_global_batch_size(new_global_batch_size=new_global_batch_size)
def __len__(self) -> int:
"""Length of Batch Sampler.
..note::
            When `rampup_batch_size` is enabled, the return value may not be exact.
"""
num_available_samples: int = self.total_samples - self.consumed_samples
if self.drop_last:
return num_available_samples // self.global_batch_size
else:
return (num_available_samples + self.global_batch_size - 1) // self.global_batch_size
@abc.abstractmethod
def __iter__(self):
...
class MegatronPretrainingBatchSampler(BaseMegatronBatchSampler):
def get_start_end_idx(self) -> Tuple[int, int]:
start_idx = self.data_parallel_rank * self._global_batch_size_on_this_data_parallel_rank
end_idx = start_idx + self._global_batch_size_on_this_data_parallel_rank
return start_idx, end_idx
def __iter__(self):
batch = []
        # The last incomplete batch will be dropped unless drop_last is set to False
for idx in range(self.consumed_samples, self.total_samples):
batch.append(idx)
if len(batch) == self._global_batch_size:
# start_idx, end_idx = self.get_start_end_idx()
indices = [
batch[i] for i in range(self.data_parallel_rank, self._global_batch_size, self.data_parallel_size,)
]
assert len(indices) == self._global_batch_size_on_this_data_parallel_rank
yield indices
# yield batch[start_idx:end_idx]
batch = []
        # Yield the last partial batch if drop_last is not set
if len(batch) > 0 and not self.drop_last:
# start_idx, end_idx = self.get_start_end_idx()
indices = [batch[i] for i in range(self.data_parallel_rank, len(batch), self.data_parallel_size)]
if self.pad_samples_to_global_batch_size:
num_pad = self._global_batch_size // self.data_parallel_size - len(indices)
indices = indices + [-1] * num_pad
yield indices
@experimental
class MegatronPretrainingRandomBatchSampler(BaseMegatronBatchSampler):
# NOTE (mkozuki): [[Argument of `dataset` and `data_sharding`]]
# From the commit below, it seems like `dataset` argument and `data_sharding` argument
# are necessary for ViT training. However, to keep this simple,
# I omit those two arguments.
# commit: https://github.com/NVIDIA/Megatron-LM/commit/7a77abd9b6267dc0020a60b424b4748fc22790bb
def __init__(
self,
total_samples: int,
consumed_samples: int,
micro_batch_size: int,
global_batch_size: int,
data_parallel_rank: int,
data_parallel_size: int,
drop_last: bool,
) -> None:
super().__init__(
total_samples=total_samples,
consumed_samples=consumed_samples,
micro_batch_size=micro_batch_size,
global_batch_size=global_batch_size,
data_parallel_rank=data_parallel_rank,
data_parallel_size=data_parallel_size,
drop_last=drop_last,
)
self.last_batch_size = self.total_samples % self._global_batch_size
def __iter__(self):
active_total_samples = self.total_samples - self.last_batch_size
self.epoch = self.consumed_samples // active_total_samples
current_epoch_samples = self.consumed_samples % active_total_samples
assert current_epoch_samples % (self.micro_batch_size * self.data_parallel_size) == 0
# data sharding and random sampling
bucket_size = (self.total_samples // (self.micro_batch_size * self.data_parallel_size)) * self.micro_batch_size
bucket_offset = current_epoch_samples // self.data_parallel_size
start_idx = self.data_parallel_rank * bucket_size
g = torch.Generator()
g.manual_seed(self.epoch)
random_idx = torch.randperm(bucket_size, generator=g).tolist()
idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
batch = []
# Last batch if not complete will be dropped.
for idx in idx_range:
batch.append(idx)
if len(batch) == self._global_batch_size_on_this_data_parallel_rank:
self.consumed_samples += self._global_batch_size
yield batch
batch = []
        # Yield the last partial batch if drop_last is not set
if len(batch) > 0 and not self.drop_last:
yield batch
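# Illustrative usage sketch (not part of the original module): plugging the sampler into a
# torch DataLoader with toy sizes. In NeMo, data_parallel_rank / data_parallel_size come from
# megatron.core parallel_state rather than being hard-coded as below.
def _example_pretraining_batch_sampler():
    dataset = torch.utils.data.TensorDataset(torch.arange(100))
    batch_sampler = MegatronPretrainingBatchSampler(
        total_samples=len(dataset),
        consumed_samples=0,
        micro_batch_size=2,
        global_batch_size=8,
        data_parallel_rank=0,
        data_parallel_size=2,
        drop_last=True,
    )
    # every iteration yields gbs / dp = 8 / 2 = 4 sample indices for this data-parallel rank
    loader = torch.utils.data.DataLoader(dataset, batch_sampler=batch_sampler)
    return next(iter(loader))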
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/megatron_batch_samplers.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from random import sample
import numpy as np
import torch
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import get_samples_mapping
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import JSONLMemMapDataset
from nemo.core import Dataset
from nemo.utils import logging
__all__ = ['RetroQAFineTuneDataset']
class RetroQAFineTuneDataset(Dataset):
"""
    The dataset class for fine-tuning RETRO models.
Args:
        data (str): path to a .jsonl or .json file with the QA fine-tuning examples
        tokenizer (tokenizer): Tokenizer from frozen language model
        answer_only_loss (bool): Whether to compute the loss only on the answer tokens
        pad_token_id (int): ID of pad token from tokenizer
        max_seq_length (int): maximum sequence length for each dataset example. Examples will either be truncated to fit this length or dropped if they cannot be truncated.
        add_bos (bool): Whether to add a beginning of sentence token to each data example
        add_eos (bool): Whether to add an end of sentence token to each data example
        max_num_samples (int): Maximum number of samples drawn via the samples mapping; if None the dataset is used as-is
        seed (int): Random seed used to build the samples mapping
        neighbors (int): Number of retrieved neighbor contexts to keep for each example
"""
def __init__(
self,
data,
tokenizer,
answer_only_loss: bool,
pad_token_id: int,
max_seq_length: int,
add_bos: bool = False,
add_eos: bool = True,
max_num_samples: int = None,
seed: int = 1234,
neighbors: int = 20,
):
self.tokenizer = tokenizer
self.pad_token_id = pad_token_id
self.max_seq_length = max_seq_length
self.add_bos = add_bos
self.add_eos = add_eos
self.answer_only_loss = answer_only_loss
self.max_num_samples = max_num_samples
self.seed = seed
self.neighbors = neighbors
assert self.max_seq_length > 0, "Max sequence length should be greater than 0"
logging.info("Loading and tokenizing dataset ... ")
self.indexed_dataset = JSONLMemMapDataset(dataset_paths=[data], tokenizer=None, header_lines=0, workers=12)
# Will be None after this call if `max_num_samples` is None
self._build_samples_mapping(data)
def _build_samples_mapping(self, file_path):
if self.max_num_samples is not None:
self.samples_mapping = get_samples_mapping(
indexed_dataset=self.indexed_dataset,
data_prefix=file_path,
num_epochs=None,
max_num_samples=self.max_num_samples,
max_seq_length=self.max_seq_length - 2,
short_seq_prob=0,
seed=self.seed,
name=file_path.split('/')[-1],
binary_head=False,
)
else:
self.samples_mapping = None
def __len__(self):
if self.max_num_samples is None:
return len(self.indexed_dataset)
else:
return len(self.samples_mapping)
def __getitem__(self, idx):
if isinstance(idx, np.int64):
idx = idx.item()
if self.samples_mapping is not None:
assert idx < len(self.samples_mapping)
idx, _, _ = self.samples_mapping[idx]
if isinstance(idx, np.uint32):
idx = idx.item()
assert idx < len(self.indexed_dataset)
example = self.indexed_dataset[idx]
return self._process_example(example)
def _process_example(self, example):
"""
Process a single example from the dataset into IDs and other T0-related metadata.
"""
question = example['question'].strip()
tokenized_input = self.tokenizer.text_to_ids(f"question: {question}\n")
# add a space between input and output
if 'answers' in example:
# sample one answer from answers
answer = sample(example['answers'], 1)[0].strip()
tokenized_output = self.tokenizer.text_to_ids(f"answer: {answer}")
else:
tokenized_output = self.tokenizer.text_to_ids('answer: ')
bos_id = self.tokenizer.bos_id
if self.add_bos:
tokenized_input = [bos_id] + tokenized_input
if self.add_eos:
target = tokenized_output + [self.tokenizer.eos_id]
else:
target = tokenized_output
# pad the question so 'answer:' coincides with the end of the first chunk of 64
if len(tokenized_input) < 64:
padding_length = 64 - len(tokenized_input)
tokenized_input = [self.pad_token_id] * padding_length + tokenized_input
if len(tokenized_input) + len(target) > self.max_seq_length:
cut_tokens = len(tokenized_input) + len(target) - self.max_seq_length
if len(tokenized_input) - cut_tokens > 0:
# cut the input by default
tokenized_input = tokenized_input[: len(tokenized_input) - cut_tokens]
elif len(target) - cut_tokens > 0:
# cut the output
target = target[: len(target) - cut_tokens]
else:
# cut both the input and output
cut_input_tokens = len(tokenized_input) - 1 # retain at least one token
cut_output_tokens = cut_tokens - cut_input_tokens
tokenized_input = tokenized_input[: len(tokenized_input) - cut_input_tokens]
target = target[: len(target) - cut_output_tokens]
chunks = []
contexts = example['ctxs']
assert self.neighbors <= len(
contexts
), f"specify {self.neighbors}, but only provide {len(contexts)} neighbors in the dataset"
for neighbor in contexts[: self.neighbors]:
tokens = self.tokenizer.text_to_ids(neighbor)
tokens = tokens[:128]
if len(tokens) < 128:
tokens = tokens + [self.pad_token_id] * (128 - len(tokens))
chunks.append(tokens)
answer_start_idx = len(tokenized_input)
input_ids = tokenized_input + target
assert len(input_ids) <= 128, "cannot handle more than two chunks yet"
chunks = np.array(chunks).reshape(1, self.neighbors, -1).astype(np.int64)
results = (input_ids, answer_start_idx, chunks)
return results
def collate_fn(self, batch, tp_workers=0):
""" Prepares input_ids, labels, loss mask, attention_mask, and position ids for global batch """
input_ids, answer_starts, chunks = zip(*batch)
# convert chunks into torch tensors
chunks = torch.tensor(chunks)
# Get max sequence length of batch
batch_max = max(len(ids) for ids in input_ids)
if tp_workers > 1:
            # make sure the sequence length is a multiple of the number of tp_workers, needed for sequence parallelism.
resi_padding = (tp_workers - (batch_max - 1) % tp_workers) % tp_workers
else:
resi_padding = 0
batch_max += resi_padding
input_ids, loss_mask = self.pad_batch_and_build_loss_mask(input_ids, batch_max, answer_starts)
# Should be a label for every token in batch, label is the next token
labels = input_ids[:, 1:].contiguous()
input_ids = input_ids[:, :-1].contiguous()
batch_max -= 1
# Loss mask should align with labels
loss_mask = loss_mask[:, 1:].contiguous()
hidden_mask = input_ids != self.pad_token_id
context_mask = chunks != self.pad_token_id
# Using causal attention mask for whole input
return {
'tokens': input_ids,
'labels': labels,
'tokens_mask': hidden_mask,
'loss_mask': loss_mask,
'retrieved_emb_mask': context_mask,
'retrieved_ids': chunks,
}
def pad_batch_and_build_loss_mask(self, input_ids, batch_max, answer_starts):
""" Pad input_ids in batch to max batch length while building loss mask """
batch_loss_masks = []
padded_input_ids = []
for ids, answer_start_idx in zip(input_ids, answer_starts):
if self.answer_only_loss and answer_start_idx is not None:
# Loss mask where answer tokens are 1.0 and all other tokens are 0.0
loss_mask = [float(idx >= answer_start_idx) for idx in range(len(ids))]
else:
                # Loss mask where all tokens are 1.0, so every token contributes to the loss
loss_mask = [1.0] * len(ids)
# Pad to max length
input_length = len(ids)
padding_length = batch_max - input_length
ids = ids + [self.pad_token_id] * padding_length
padded_input_ids.append(ids)
# Account for padding in loss mask
loss_mask.extend([0.0] * padding_length)
batch_loss_masks.append(torch.tensor(loss_mask, dtype=torch.float))
# Make into torch tensors
padded_input_ids = torch.tensor(padded_input_ids, dtype=torch.long)
batch_loss_masks = torch.stack(batch_loss_masks)
return padded_input_ids, batch_loss_masks
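# Illustrative usage sketch (not part of the original module): the expected .jsonl record
# layout and a minimal construction call. `tokenizer` stands in for any NeMo tokenizer that
# exposes text_to_ids / bos_id / eos_id / pad_id; 'train.jsonl' is a placeholder path.
# Each line of the .jsonl file is expected to look like:
#   {"question": "who wrote hamlet?",
#    "answers": ["William Shakespeare"],
#    "ctxs": ["Hamlet is a tragedy written by ...", "..."]}   # at least `neighbors` contexts
def _example_build_retro_qa_dataset(tokenizer, path='train.jsonl'):
    dataset = RetroQAFineTuneDataset(
        path,
        tokenizer=tokenizer,
        answer_only_loss=True,
        pad_token_id=tokenizer.pad_id,
        max_seq_length=128,
        add_bos=False,
        add_eos=True,
        neighbors=2,
    )
    # collate_fn turns (input_ids, answer_start_idx, chunks) tuples into the batch dict
    # consumed by the RETRO fine-tuning model
    batch = dataset.collate_fn([dataset[0], dataset[1]])
    return batch['tokens'].shape, batch['retrieved_ids'].shape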
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/retro_fine_tune_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from nemo.collections.nlp.modules.common import VirtualPromptSource
from nemo.core import Dataset
from nemo.utils import logging
__all__ = ['BasePromptLearningDataset']
class BasePromptLearningDataset(Dataset):
"""
The base dataset class for prompt-tuning or p-tuning.
TODO: (@adithyare) should be merged into GPTPromptLearningDataset
"""
def __init__(
self,
datasets,
tokenizer,
virtual_prompt_source: VirtualPromptSource,
task_templates: dict,
pseudo_tokens,
pad_token_id: str,
max_seq_length: int,
min_seq_length: int = 1,
add_bos: bool = False,
add_eos: bool = True,
for_train: bool = True,
):
self.tokenizer = tokenizer
self.virtual_prompt_source = virtual_prompt_source
self.task_templates = task_templates
self.pseudo_tokens = pseudo_tokens
self.pseudo_token_ids = set(self.tokenizer.tokens_to_ids(self.pseudo_tokens))
self.pad_token_id = pad_token_id
self.max_seq_length = max_seq_length
self.min_seq_length = min_seq_length
self.add_bos = add_bos
self.add_eos = add_eos
self.for_train = for_train
self.examples = []
assert self.min_seq_length <= max_seq_length, "Min sequence length should be less than or equal to max"
assert self.max_seq_length > 0, "Max sequence length should be greater than 0"
logging.info("Loading and tokenizing dataset ... ")
# Datasets is just a list of json dicts
if isinstance(datasets[0], dict):
self.load_data(datasets)
# Datasets are a list of file path strings to .json or .jsonl files
elif isinstance(datasets[0], str):
for path in datasets:
dataset = open(path, 'r', encoding='utf-8')
self.load_data(dataset)
else:
raise ValueError("Datasets must be a list of dicts or a list of filepath strings")
def _insert_virtual_token_placeholders(self, input_example, virtual_token_splits):
""" Insert the correct number of pseudo tokens at the <|VIRTUAL_PROMPT_n|> markers """
total_inserted_tokens = 0
for idx in range(len(virtual_token_splits)):
split_start = total_inserted_tokens
split_end = total_inserted_tokens + virtual_token_splits[idx]
pseudo_tokens_for_split = "".join(self.pseudo_tokens[split_start:split_end])
input_example = input_example.replace(f'<|VIRTUAL_PROMPT_{idx}|>', pseudo_tokens_for_split)
total_inserted_tokens = split_end
return input_example
def _truncate_input(self, truncation_field, input_ids, taskname, doc, total_virtual_tokens=0):
""" Try to truncate input text to fit into the max sequence length """
logging.info(
f"Input greater than max sequence length. Attempting to truncate: '{truncation_field}' in task: '{taskname}'"
)
# Truncate the text ids in this part of input to try and fit max sequence length
if truncation_field is not None and truncation_field in doc.keys():
truncation_length = len(input_ids) - self.max_seq_length
field_text = doc[truncation_field]
field_text = self._add_leading_space(taskname, truncation_field, field_text)
# Truncate field text
field_text_ids = self.tokenizer.text_to_ids(field_text)
truncated_text_ids = field_text_ids[: -min(truncation_length, len(field_text_ids))]
# Replace original text ids with truncated text ids
field_start, field_end = find_subsequence_location(input_ids, field_text_ids)
input_ids = input_ids[:field_start] + truncated_text_ids + input_ids[field_end + 1 :]
else:
if not self.for_train:
# Hack alert! Slash and burn
# @TODO (@adithyare) need a more graceful truncation here, we should not skip examples in test
input_ids = (
input_ids[:total_virtual_tokens]
+ input_ids[total_virtual_tokens:][-self.max_seq_length + total_virtual_tokens :]
)
return input_ids
def _add_leading_space(self, taskname, field_name, field_text):
""" Add leading space to text if there is a space before it in the template """
prompt_template = self.task_templates[taskname]["prompt_template"]
field_text_start = prompt_template.find("{" + field_name + "}")
if field_text_start != 0 and prompt_template[field_text_start - 1] == " ":
field_text = " " + field_text
return field_text
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def _input_sanity_checks(
self,
total_virtual_tokens,
virtual_token_splits,
prompt_template,
prompt_template_fields,
truncation_field,
answer_field,
doc,
answer_only_loss=None,
):
# Sanity check amount of virtual token
assert (
total_virtual_tokens < self.max_seq_length
), "virtual prompt tokens should not exceed max sequence length"
# Make sure virtual token splits add up to the total number of virtual tokens
assert (
sum(virtual_token_splits) == total_virtual_tokens
), "Sum of prompt token split values must equal total number of prompt tokens"
# Make sure number of virtual prompt locations match the number of virtual prompt splits
assert prompt_template.count('<|VIRTUAL_PROMPT_') == len(
virtual_token_splits
), "The number of '<|VIRTUAL_PROMPT_n|>' markers and the number of prompt token splits must match"
# Check if input example has fields not present in template
keys_not_in_template = list(set(doc.keys()) - set(prompt_template_fields) - set(['taskname']))
assert (
len(keys_not_in_template) == 0
), f"Examples in your dataset contain the fields: {keys_not_in_template} that are not in the task template."
# Check answer field
if self.for_train:
assert answer_field is not None, "An answer_field must be given"
assert answer_field in doc.keys(), f"The given answer_field '{answer_field}' is not in data json"
assert truncation_field != answer_field, "Answer field and truncation field should not match"
answer_placeholder = "{" + answer_field + "}"
answer_placeholder_len = len(answer_placeholder)
placeholder_start = len(prompt_template) - answer_placeholder_len
assert prompt_template[placeholder_start:] == answer_placeholder, "Answer field must be at prompt end"
def pad_taskname_ids(self, taskname_ids):
# Pad taskname_ids to be the same length for the prompt encoder
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
max_taskname_length = max(len(ids) for ids in taskname_ids)
taskname_ids = [ids + [self.pad_token_id] * (max_taskname_length - len(ids)) for ids in taskname_ids]
taskname_ids = torch.tensor(taskname_ids)
# Task ids are just used for a look up embeddings for prompt-table
elif self.virtual_prompt_source == VirtualPromptSource.NO_PROMPT:
taskname_ids = torch.tensor(taskname_ids)
return taskname_ids
def find_subsequence_location(sequence, subsequence):
""" Finds the start and end index of the first occurance
of a given subsequence within a larger list. Returns
the two indices corresponding to the postition of
the first and last token of the subseqeunce.
Assumes subsequence is known to be in sequence.
"""
assert len(sequence) >= len(subsequence), "subsequence too long"
start_idx = None
next_subseq_token = subsequence[0]
next_subsequence_idx = 1
for seq_idx, token in enumerate(sequence):
if token == next_subseq_token:
if start_idx is None:
start_idx = seq_idx
if next_subsequence_idx == len(subsequence):
end_idx = seq_idx
return start_idx, end_idx
else:
next_subseq_token = subsequence[next_subsequence_idx]
next_subsequence_idx += 1
else:
start_idx = None
next_subseq_token = subsequence[0]
next_subsequence_idx = 1
raise ValueError("Subsequence not found in sequence")
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/base_prompt_learning_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RETRO Style dataset."""
import os
from typing import List
import numpy as np
import torch
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
get_train_valid_test_split_,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import (
_build_index_mappings,
get_indexed_dataset_,
)
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
)
from nemo.core import Dataset
from nemo.utils import logging
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
__all__ = [
"RETRODataset",
"build_train_valid_test_datasets",
"MockRETRODataset",
"build_mock_train_valid_test_datasets",
]
class RETRODataset(Dataset):
"""
Dataset for RETRO model.
    It constructs a single data record from the training/retrieval indexed retrieval dataset and the KNN index file.
    The KNN index file maps each data chunk id to the chunk ids of its K nearest neighbors in the retrieval dataset.
    First, it loads a long sequence (e.g. 2048 tokens) from the training dataset. Then, for each chunk in the sequence, it finds the KNN
    chunks from the retrieval dataset using the KNN index. Lastly, it computes the masks based on the pad id.
"""
def __init__(
self,
cfg,
trainer,
tokenizer,
name: str,
data_prefix: str,
documents, # document ids in the indexed_dataset used for this dataset
indexed_dataset: MMapRetrievalIndexedDataset,
num_samples: int, # number of data samples, max_steps * global_batch_size
seq_length: int, # input seq length
seed: int,
knn_index: KNNIndex,
retrieval_index: MMapRetrievalIndexedDataset,
):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__()
self.name = name
self.indexed_dataset: MMapRetrievalIndexedDataset = indexed_dataset
self.knn_index: KNNIndex = knn_index
self.retrieval_index: MMapRetrievalIndexedDataset = retrieval_index
self.chunk_size = self.indexed_dataset.chunk_size
# make sure seq_length is a multiple of chunk_size
assert seq_length % self.chunk_size == 0
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
self.eos_id = tokenizer.eos_id
self.pad_id = tokenizer.pad_id
assert self.retrieval_index._index.retrieval_db
self._validate_pad_id()
# save index mappings to a configurable dir
self.index_mapping_dir = cfg.data.get('index_mapping_dir', None)
self.neighbors = cfg.data.get('neighbors', self.knn_index.K)
# the number of neighbors cannot exceed the max number of neighbors in the index
assert self.neighbors <= self.knn_index.K
# create index_mapping_dir on rank 0
if torch.distributed.is_available() and torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.index_mapping_dir is not None and not os.path.isdir(self.index_mapping_dir):
os.makedirs(self.index_mapping_dir)
torch.distributed.barrier()
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
self.name,
data_prefix,
documents,
self.indexed_dataset.sizes,
num_samples,
seq_length,
seed,
index_mapping_dir=self.index_mapping_dir,
)
if len(self.doc_idx) > np.iinfo('int32').max:
raise "number of epochs exceeds the maximum number for int32 used by sample_idx"
self.padding_context = np.ones(2 * self.chunk_size, dtype=self.retrieval_index._index.dtype) * self.pad_id
def _validate_pad_id(self):
# validate the pad_id matches the dataset pad_id
ptr, size = self.retrieval_index._index[0]
ptr += size * np.dtype(self.retrieval_index._index.dtype).itemsize
# padded chunk_size of pad_ids at the end of the doc
retrieval_paddings = np.frombuffer(
self.retrieval_index._bin_buffer,
dtype=self.retrieval_index._index.dtype,
count=self.chunk_size,
offset=ptr,
)
assert (retrieval_paddings == self.pad_id).all()
ptr, size = self.indexed_dataset._index[0]
ptr += (size - 1) * np.dtype(self.indexed_dataset._index.dtype).itemsize
data_paddings = np.frombuffer(
self.indexed_dataset._bin_buffer, dtype=self.indexed_dataset._index.dtype, count=1, offset=ptr
)
# the last element is either a padding or an eos
assert (data_paddings == self.pad_id).all() or (data_paddings == self.eos_id).all()
def __len__(self):
        # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def _get_chunks(self, chunk_id: int, num_chunks: int, chunks: List):
"""
starting from chunk_id, loop for num_chunks, get the
KNN chunk ids from retrieval dataset, and get the chunk token ids,
put them into the chunks list
"""
for i in range(chunk_id, chunk_id + num_chunks):
knn = self.knn_index.get_KNN_chunk_ids(i)
for rid in knn[: self.neighbors]:
if rid < 0:
# no neighbor, just pad it
one_chunk = self.padding_context
else:
one_chunk = self.retrieval_index.get_chunk(rid)
chunks.append(one_chunk)
def _get_text(self, idx: int) -> np.ndarray:
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(
self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + 1
)
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[doc_index_f], offset_f)
num_chunks = (offset_l - offset_f) // self.chunk_size
chunks = []
self._get_chunks(chunk_id, num_chunks, chunks)
chunks = np.stack(chunks, axis=0).reshape(num_chunks, self.neighbors, -1).astype(np.int64)
else:
# Otherwise, get the rest of the initial document.
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
num_chunks = (self.indexed_dataset._index.sizes[self.doc_idx[doc_index_f]] - offset_f) // self.chunk_size
total_chunks = num_chunks
chunks = []
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[doc_index_f], offset_f)
self._get_chunks(chunk_id, num_chunks, chunks)
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[i], 0)
num_chunks = self.indexed_dataset._index.sizes[self.doc_idx[i]] // self.chunk_size
total_chunks += num_chunks
self._get_chunks(chunk_id, num_chunks, chunks)
# And finally add the relevant portion of last document.
chunk_id = self.indexed_dataset.get_chunk_id(self.doc_idx[doc_index_l], 0)
num_chunks = (offset_l) // self.chunk_size
total_chunks += num_chunks
self._get_chunks(chunk_id, num_chunks, chunks)
sample_list.append(self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + 1))
sample = np.concatenate(sample_list)
chunks = np.stack(chunks, axis=0).reshape(total_chunks, self.neighbors, -1).astype(np.int64)
return sample.astype(np.int64), chunks
def __getitem__(self, idx):
text, retrieved = self._get_text(idx)
text = torch.from_numpy(text)
retrieved = torch.from_numpy(retrieved)
tokens = text[:-1].contiguous()
labels = text[1:].contiguous()
hidden_mask = tokens != self.pad_id
context_mask = retrieved != self.pad_id
return {
'tokens': tokens,
'labels': labels,
'tokens_mask': hidden_mask,
'loss_mask': hidden_mask,
'retrieved_emb_mask': context_mask,
'retrieved_ids': retrieved,
}
def build_train_valid_test_datasets(
cfg,
trainer,
data_prefix: List[str],
data_impl: str,
splits_string: str,
train_valid_test_num_samples,
seq_length: int,
seed: int,
skip_warmup: bool,
tokenizer,
retrieval_prefix: str,
knn_map_path: List[str],
):
"""Build train, valid, and test RETRO datasets.
There is one to one mapping between data_prefix and knn_map_path.
Currently only supports one retrieval dataset.
"""
# make sure there is one to one mapping between data_prefix and knn_map_path
assert len(data_prefix) == len(knn_map_path)
# Single dataset.
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix[0],
data_impl,
splits_string,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
tokenizer,
retrieval_prefix,
knn_map_path[0],
)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
train_n, valid_n, test_n = map(sum, zip(*datasets_train_valid_test_num_samples))
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
cfg,
trainer,
prefixes[i],
data_impl,
splits_string,
datasets_train_valid_test_num_samples[i],
seq_length,
seed,
skip_warmup,
tokenizer,
retrieval_prefix,
knn_map_path[i],
)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights, train_n)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_n)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights, test_n)
return (blending_train_dataset, blending_valid_dataset, blending_test_dataset)
def _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix: str,
data_impl: str,
splits_string: str,
train_valid_test_num_samples,
seq_length: int,
seed: int,
skip_warmup: bool,
tokenizer,
retrieval_prefix: str,
knn_map_path: str,
):
"""Build train, valid, and test datasets."""
# Indexed dataset.
indexed_dataset: MMapRetrievalIndexedDataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup)
knn_index: KNNIndex = KNNIndex(knn_map_path, skip_warmup)
retrieval_index: MMapRetrievalIndexedDataset = get_indexed_dataset_(retrieval_prefix, data_impl, skip_warmup)
total_num_of_documents = indexed_dataset.sizes.shape[0]
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
logging.info(' > dataset split:')
def print_split_stats(name, index):
logging.info(' {}:'.format(name))
logging.info(
' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])
)
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
documents = np.arange(start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32)
dataset = RETRODataset(
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
train_valid_test_num_samples[index],
seq_length,
seed,
knn_index,
retrieval_index,
)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
class MockRETRODataset(torch.utils.data.Dataset):
def __init__(self, cfg, trainer, tokenizer, name, size):
super().__init__()
self.name = name
self.tokenizer = tokenizer
self._cfg = cfg
self.size = size
seed_val = parallel_state.get_data_parallel_rank() * 131 + 97
torch.manual_seed(seed_val)
def __len__(self):
return self.size
def __getitem__(self, idx):
vocab_size = self.tokenizer.vocab_size
neighbors = self._cfg.data.neighbors
input_length = self._cfg.data.seq_length
chunks = input_length // self._cfg.chunk_size
chunk_size = self._cfg.chunk_size
pad_id = self.tokenizer.pad_id
all_tokens = torch.randint(0, vocab_size, (input_length + 1,))
# make sure the eod happens at the end of each chunk, can add paddings to it
# e.g. [..., id, id, pad, pad, pad, eod] each has chunk_size, each sentence
# has length of multiple of chunk_size
hidden = all_tokens[:-1]
labels = all_tokens[1:]
hidden_mask = hidden != pad_id
# to mask out the token ids [id, id, eod, id, pad, eod, id, id]
# so attention is not across eod, mask should be:
# [false, true, true, true, true, true, true, true]
# [false, false, true, true, true, true, true, true]
# [false, false, false,true, true, true, true, true]
# [true, true, true, false, true, true, true, true]
# [true, true, true, true, true, true, true, true]
# [true, true, true, false, true, false, true, true]
# [true, true, true, true, true, true, false, true]
# [true, true, true, true, true, true, false, false]
retrieved = torch.randint(0, vocab_size, (chunks, neighbors, 2 * chunk_size))
context_mask = retrieved != pad_id
return {
'tokens': hidden,
'labels': labels,
'tokens_mask': hidden_mask,
'loss_mask': hidden_mask,
'retrieved_emb_mask': context_mask,
'retrieved_ids': retrieved,
}
def build_mock_train_valid_test_datasets(
cfg, trainer, splits_string, tokenizer, mock_data_size,
):
"""Build train, valid, and test datasets."""
splits = get_train_valid_test_split_(splits_string, mock_data_size)
# Print stats about the splits.
logging.info(' > dataset split:')
def print_split_stats(name, index):
logging.info(' {}:'.format(name))
logging.info(
' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])
)
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
dataset = MockRETRODataset(cfg, trainer, tokenizer, name, splits[index + 1] - splits[index],)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
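# Illustrative sketch (not part of the original module): the per-sample layout produced by
# RETRODataset.__getitem__ and MockRETRODataset.__getitem__. The sizes are example values
# (seq_length=2048, chunk_size=64, neighbors=2), not defaults enforced by this module.
def _example_check_retro_sample_shapes(sample, seq_length=2048, chunk_size=64, neighbors=2):
    num_chunks = seq_length // chunk_size
    assert sample['tokens'].shape == (seq_length,)
    assert sample['labels'].shape == (seq_length,)
    assert sample['tokens_mask'].shape == (seq_length,)
    # each input chunk carries `neighbors` retrieved chunks of 2 * chunk_size tokens:
    # the retrieved chunk itself plus its continuation from the retrieval database
    assert sample['retrieved_ids'].shape == (num_chunks, neighbors, 2 * chunk_size)
    assert sample['retrieved_emb_mask'].shape == sample['retrieved_ids'].shape
    return num_chunks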
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/retro_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Blendable dataset."""
import time
import numpy as np
import torch
from nemo.utils import logging
from nemo.utils.app_state import AppState
class BlendableDataset(torch.utils.data.Dataset):
def __init__(self, datasets, weights, size):
self.datasets = datasets
num_datasets = len(datasets)
assert num_datasets == len(weights)
self.size = size
# Normalize weights.
weights = np.array(weights, dtype=np.float64)
sum_weights = np.sum(weights)
assert sum_weights > 0.0
weights /= sum_weights
        # Build indices.
start_time = time.time()
assert num_datasets < 255
self.dataset_index = np.zeros(self.size, dtype=np.uint8)
self.dataset_sample_index = np.zeros(self.size, dtype=np.int64)
app_state = AppState()
try:
if app_state.local_rank == 0:
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import compile_helper
compile_helper()
torch.distributed.barrier()
from nemo.collections.nlp.data.language_modeling.megatron import helpers
except ImportError:
raise ImportError(
f'Could not compile megatron dataset C++ helper functions and therefore cannot import helpers python file.'
)
helpers.build_blending_indices(
self.dataset_index,
self.dataset_sample_index,
weights,
num_datasets,
self.size,
torch.distributed.get_rank() == 0,
)
logging.info(
'> elapsed time for building blendable dataset indices: ' '{:.2f} (sec)'.format(time.time() - start_time)
)
def __len__(self):
return self.size
def __getitem__(self, idx):
dataset_idx = self.dataset_index[idx]
sample_idx = self.dataset_sample_index[idx]
return self.datasets[dataset_idx][sample_idx]
def create_data_mmap(self):
for dataset in self.datasets:
dataset.create_data_mmap()
class MemoryEfficientBlendableDataset(torch.utils.data.Dataset):
"""
A BlendableDataset implementation that uses less memory than the original implementation.
Indices are computed algorithmically instead of storing them in memory.
To test call: MemoryEfficientBlendableDataset.test_index_blending()
"""
def __init__(self, datasets, weights, size, weight_bins=100):
self.datasets = datasets
num_datasets = len(datasets)
assert num_datasets == len(weights)
weight_bins = min(weight_bins, size)
self.size = size
self.weight_bins = weight_bins
# Normalize weights.
weights = np.array(weights, dtype=np.float64)
assert (weights > 0.0).all()
sum_weights = np.sum(weights)
assert sum_weights > 0.0
self.weights = weights / sum_weights
# create ds index based on weights
ds_index = []
ds_bias = []
for i, w in enumerate(self.weights):
n = int(w * weight_bins)
ds_index.extend([i] * n)
ds_bias.extend(range(n))
# make sure arrays have length of weight_bins
n = weight_bins - len(ds_index)
ds_index.extend([i] * n)
ds_bias.extend(range(ds_bias[-1], ds_bias[-1] + n))
self.ds_index = np.array(ds_index, dtype=np.uint32)
self.ds_index_size = np.array([(self.ds_index == i).sum() for i in range(num_datasets)], dtype=np.uint32)
assert (
self.ds_index_size > 0
).all(), f"Some datasets have no samples in the blendable dataset, increase weight_bins or the offending weight. ds_index_size = {self.ds_index_size}"
self.ds_bias = np.array(ds_bias, dtype=np.uint32)
self.ds_size = np.array([len(ds) for ds in datasets], dtype=np.uint32)
def get_ds_sample_idx(self, idx):
"""Returns ds index and sample index (within the ds) for the given index in the blendable dataset."""
bin = idx % self.weight_bins
ds_idx = self.ds_index[bin]
sample_idx = (self.ds_bias[bin] + (idx // self.weight_bins) * self.ds_index_size[ds_idx]) % self.ds_size[
ds_idx
]
return ds_idx, sample_idx
def __len__(self):
return self.size
def __getitem__(self, idx):
ds_idx, sample_idx = self.get_ds_sample_idx(idx)
return self.datasets[ds_idx][sample_idx]
@classmethod
def test_index_blending(cls):
"""Visualize indices of blended dataset"""
import matplotlib.pyplot as plt
plt.ion()
class DS(torch.utils.data.Dataset):
def __init__(self, size, data):
self.size = size
self.data = data
def __len__(self):
return self.size
def __getitem__(self, idx):
return self.data[idx]
for weight_bins in [10, 100]:
blend_ds = MemoryEfficientBlendableDataset(
[DS(10, "a"), DS(10, "b"), DS(10, "c")], [0.5, 0.3, 0.2], 50, weight_bins=weight_bins
)
ds_sample_idx_list = [blend_ds.get_ds_sample_idx(i) for i in range(50)]
ds_list = list(zip(*ds_sample_idx_list))[0]
sample_list = list(zip(*ds_sample_idx_list))[1]
plt.figure()
plt.plot(ds_list, label="ds idx")
plt.plot(sample_list, label="sample")
plt.legend()
plt.grid()
plt.title(f"weight_bins={weight_bins}")
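# Illustrative usage sketch (not part of the original module): blending three toy datasets
# with weights 0.5 / 0.3 / 0.2 using the memory-efficient variant, which needs no compiled
# C++ helpers. The inline _ToyDS class exists only for this demonstration.
def _example_memory_efficient_blending():
    class _ToyDS(torch.utils.data.Dataset):
        def __init__(self, label, size=10):
            self.label = label
            self.size = size
        def __len__(self):
            return self.size
        def __getitem__(self, idx):
            return f'{self.label}{idx}'
    blend = MemoryEfficientBlendableDataset(
        [_ToyDS('a'), _ToyDS('b'), _ToyDS('c')], weights=[0.5, 0.3, 0.2], size=20, weight_bins=10
    )
    # roughly half of the 20 blended samples are drawn from dataset 'a'
    return [blend[i] for i in range(20)]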
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/blendable_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import GPTDataset
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
class T5LMAdaptedDataset(GPTDataset):
"""
Dataset for unlearning span corruption (https://arxiv.org/abs/2104.08691) in T5 models.
Corresponds to the prefix-LM objective in the T5 paper (Table 3 in https://arxiv.org/abs/1910.10683).
"""
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
num_samples,
seed,
max_seq_length_encoder,
max_seq_length_decoder,
**kwargs,
):
self.max_seq_length_encoder = max_seq_length_encoder
self.max_seq_length_decoder = max_seq_length_decoder
self.seed = seed
self.tokenizer = tokenizer
super().__init__(
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
num_samples,
self.max_seq_length_encoder
+ self.max_seq_length_decoder
+ 1, # +1 because the decoder sequence gets truncated by one due to shifting for teacher forcing.
seed,
)
@classmethod
def get_prefix_lm_sample(
cls,
sample,
max_seq_length_encoder,
max_seq_length_decoder,
np_rng,
tokenizer,
pivot_mean=0.25,
pivot_distribution=LengthDistribution.uniform,
add_eos=False,
):
# get random split index
if pivot_distribution == LengthDistribution.truncated_normal and (pivot_mean < 0.0 or pivot_mean > 1.0):
raise ValueError(
f"Invalid pivot_mean: {pivot_mean}. Must be in [0.0, 1.0]. It is a fraction of the encoder sequence length."
)
# 1) If the sample is larger than max encoder sequence length, use max encoder sequence length
# 2) Otherwise use sample length - 1 so that there is at least one token on the decoder.
max_split_idx = min(len(sample) - 1, max_seq_length_encoder)
if pivot_distribution == LengthDistribution.uniform:
split_idx = np_rng.randint(0, max_split_idx)
elif pivot_distribution == LengthDistribution.truncated_normal:
loc = pivot_mean * max_split_idx
split_idx = np.clip(int(np_rng.normal(loc=loc, scale=loc)), 0, max_split_idx,)
else:
raise ValueError(f"Invalid pivot_distribution: {pivot_distribution}")
# Encoder inputs get truncated based on the split index
tokens_enc = np.concatenate(
[sample[:split_idx], [tokenizer.pad_id] * (max_seq_length_encoder - split_idx)]
).astype(np.int64)
# The decoder sequence is never truncated and is always of max decoder length.
offset = 1 if add_eos else 0
tokens_dec = sample[split_idx : split_idx + max_seq_length_decoder - offset]
# NOTE: Add bos only and not eos because the model will always generate till max seq length.
example = np.concatenate([[tokenizer.bos_id], tokens_dec])
if add_eos:
example = np.concatenate([example, [tokenizer.eos_id]])
# Example can be + 1 over sequence length at this point since we'll be shifting by 1 to create the inputs and outputs to the decoder.
assert len(example) <= max_seq_length_decoder + 1
tokens_dec = np.concatenate(
[example, [tokenizer.pad_id] * (max_seq_length_decoder - len(example) + 1)]
).astype(np.int64)
# Shift sequences for teacher forcing
tokens_dec_in = tokens_dec[:-1]
labels = tokens_dec[1:]
# Create attention masks
enc_mask = (tokens_enc != tokenizer.pad_id).astype(np.int64)
dec_mask = (tokens_dec_in != tokenizer.pad_id).astype(np.int64)
loss_mask = dec_mask
train_sample = {
'text_enc': tokens_enc,
'text_dec': tokens_dec_in,
'labels': labels,
'loss_mask': loss_mask,
'enc_mask': enc_mask,
'dec_mask': dec_mask,
}
return train_sample
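# Minimal worked example (illustrative only; token values are made up): with
# sample=[11, 12, 13, 14, 15, 16], max_seq_length_encoder=4, max_seq_length_decoder=4,
# pad_id=0, bos_id=1, add_eos=False and a drawn split_idx of 3:
#   tokens_enc    = [11, 12, 13, 0]   # context, padded to the encoder length
#   tokens_dec_in = [1, 14, 15, 16]   # <bos> + continuation, shifted for teacher forcing
#   labels        = [14, 15, 16, 0]   # next-token targets, padded with pad_id
#   enc_mask      = [1, 1, 1, 0]
#   dec_mask = loss_mask = [1, 1, 1, 1]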
def __getitem__(self, idx):
text = super()._get_text(idx)
np_rng = np.random.RandomState(seed=(self.seed + idx))
sample = T5LMAdaptedDataset.get_prefix_lm_sample(
sample=text,
max_seq_length_encoder=self.max_seq_length_encoder,
max_seq_length_decoder=self.max_seq_length_decoder,
np_rng=np_rng,
tokenizer=self.tokenizer,
pivot_distribution=LengthDistribution.uniform,
)
return sample
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/lm_adapted_t5_dataset.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pickle
import torch
from tqdm.auto import tqdm
from nemo.collections.nlp.modules.common import VirtualPromptSource
from nemo.collections.nlp.modules.common.megatron.utils import build_position_ids
from nemo.core import Dataset
from nemo.utils import AppState, logging
__all__ = ['GPTPromptLearningDataset']
class GPTPromptLearningDataset(Dataset):
"""
The dataset class for prompt-tuning or p-tuning pretrained GPT models.
Args:
data (list[strings], list[dicts]): (1) paths to .jsonl or .json files, (2) dict objects corresponding to each input example
tokenizer (tokenizer): Tokenizer from frozen language model
virtual_prompt_source (Enum): Either VirtualPromptSource.NO_PROMPT or VirtualPromptSource.PROMPT_ENCODER
task_templates (dict): Dictionary containing all task template information needed to format prompts. Created in the GPTPromptLearningModel class.
pseudo_tokens (list[strings]): A list of virtual prompt token placeholders e.g [<prompt_1>, <prompt_2>, ...] up to max num virtual tokens
pad_token_id (int): ID of pad token from tokenizer
max_seq_length (int): maximum sequence length for each dataset examples. Examples will either be truncated to fit this length or dropped if they cannot be truncated.
min_seq_length (int): min length of each data example in the dataset. Data examples will be dropped if they do not meet the min length requirements.
add_bos (bool): Whether to add a beginning of sentence token to each data example
add_eos (bool): Whether to add an end of sentence token to each data example
for_train (bool): Whether you're creating a dataset for training or inference
tokens_to_generate (int): (inference only) Number of tokens to generate during inference
"""
def __init__(
self,
data,
tokenizer,
virtual_prompt_source: VirtualPromptSource,
task_templates: dict,
pseudo_tokens,
pad_token_id: int,
max_seq_length: int,
min_seq_length: int = 1,
add_bos: bool = False,
add_eos: bool = True,
for_train: bool = True,
tokens_to_generate=None,
cache_data_path: str = None, # the cache file
load_cache: bool = True, # whether to load from the cache if it is available
):
self.tokenizer = tokenizer
self.virtual_prompt_source = virtual_prompt_source
self.task_templates = task_templates
self.pseudo_tokens = pseudo_tokens
self.pseudo_token_ids = set(self.tokenizer.tokens_to_ids(self.pseudo_tokens))
self.pad_token_id = pad_token_id
self.max_seq_length = max_seq_length
self.min_seq_length = min_seq_length
self.add_bos = add_bos
self.add_eos = add_eos
self.for_train = for_train
self.examples = []
if not self.for_train:
self.tokens_to_generate = tokens_to_generate
assert self.min_seq_length <= max_seq_length, "Min sequence length should be less than or equal to max"
assert self.max_seq_length > 0, "Max sequence length should be greater than 0"
logging.info("Loading and tokenizing dataset ... ")
if load_cache and cache_data_path is not None and os.path.exists(cache_data_path):
# load it from the cache
logging.info(f'load the data from the cache file {cache_data_path}')
with open(cache_data_path, 'rb') as f:
self.examples = pickle.load(f)
else:
# Data is just a list of dicts already loaded from a json file or passed in directly as a dict
if isinstance(data[0], dict):
self.load_data(data)
# Datasets are a list of file path strings to .json or .jsonl files
elif isinstance(data[0], str):
for path in data:
dataset = open(path, 'r', encoding='utf-8')
self.load_data(dataset)
else:
raise ValueError("Datasets must be a list of filepath strings or a list of data example dicts")
if cache_data_path is not None:
# the first worker save the results into the cache file
app_state = AppState()
if app_state._global_rank == 0:
with open(cache_data_path, 'wb') as f:
pickle.dump(self.examples, f)
logging.info(f'save the data to the cache file {cache_data_path}')
def load_data(self, dataset):
"""
Loads a dataset by filling in the task templates specified in the config file
with the information from each training/inference example. Converts all input
text into token ids. Also replaces the <|VIRTUAL_PROMPT_#|> placeholders in
the task templates with the actual virtual prompt token ids.
params:
dataset: A list of json objects or a dictionary objects each
containing the information needed for a training example
"""
skipped = 0
for json_line in tqdm(dataset):
# Read example dict or load the information for a single example from .json file
if type(json_line) == dict:
doc = json_line
else:
doc = json.loads(json_line)
taskname = doc["taskname"]
prompt_template = self.task_templates[taskname]["prompt_template"]
prompt_template_fields = self.task_templates[taskname]["prompt_template_fields"]
total_virtual_tokens = self.task_templates[taskname]["total_virtual_tokens"]
virtual_token_splits = self.task_templates[taskname]["virtual_token_splits"]
truncation_field = self.task_templates[taskname]['truncate_field']
answer_only_loss = self.task_templates[taskname]["answer_only_loss"]
answer_field = self.task_templates[taskname]["answer_field"]
input_example = prompt_template
self._input_sanity_checks(
total_virtual_tokens,
virtual_token_splits,
prompt_template,
prompt_template_fields,
truncation_field,
answer_only_loss,
answer_field,
doc,
)
# Format the input example according to the template
input_example = self._insert_text_in_template(input_example, prompt_template_fields, doc)
input_example = self._insert_virtual_token_placeholders(input_example, virtual_token_splits)
input_ids = self.tokenizer.text_to_ids(input_example)
# Add BOS/EOS if desired, adds EOS by default
if self.add_bos:
input_ids = [self.tokenizer.bos_id] + input_ids
if self.add_eos:
input_ids = input_ids + [self.tokenizer.eos_id]
# Try to truncate input text to fit into the max sequence length
if len(input_ids) > self.max_seq_length:
input_ids = self._truncate_input(
truncation_field,
input_ids,
taskname,
doc,
prompt_template,
prompt_template_fields,
virtual_token_splits,
)
# Skip example if the final length doesn't fit length requirements even after truncation
if self.min_seq_length <= len(input_ids) <= self.max_seq_length:
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
taskname_id = self.tokenizer.text_to_ids(taskname)
elif self.virtual_prompt_source == VirtualPromptSource.NO_PROMPT:
taskname_id = -1
else:
raise ValueError("Invalid virtual prompt source specified")
# Find answer field indices if training and answer_only_loss is True
answer_start_idx = None
if answer_only_loss and self.for_train:
answer_start_idx = self._find_answer_start(taskname, input_ids, answer_field, doc)
self.examples.append((taskname_id, input_ids, answer_start_idx))
else:
skipped += 1
logging.info(f'Skipped {skipped} sentences, sequence length too short or too long even after truncation')
def _input_sanity_checks(
self,
total_virtual_tokens,
virtual_token_splits,
prompt_template,
prompt_template_fields,
truncation_field,
answer_only_loss,
answer_field,
doc,
):
# Sanity check the number of virtual tokens
assert (
total_virtual_tokens < self.max_seq_length
), "virtual prompt tokens should not exceed max sequence length"
# Make sure virtual token splits add up to the total number of virtual tokens
assert (
sum(virtual_token_splits) == total_virtual_tokens
), "Sum of prompt token split values must equal total number of prompt tokens"
# Make sure number of virtual prompt locations match the number of virtual prompt splits
assert prompt_template.count('<|VIRTUAL_PROMPT_') == len(
virtual_token_splits
), "The number of '<|VIRTUAL_PROMPT_n|>' markers and the number of prompt token splits must match"
# Check if input example has fields not present in template
keys_not_in_template = list(set(doc.keys()) - set(prompt_template_fields) - set(['taskname']))
assert (
len(keys_not_in_template) == 0
), f"Examples in your dataset contain the fields: {keys_not_in_template} that are not in the task template."
# Answer field checks
if answer_only_loss and self.for_train:
assert answer_field is not None, "If answer_only_loss=True, an answer_field must be given"
assert (
answer_field in doc.keys()
), f"answer_only_loss=True but the given answer_field '{answer_field}' is not in data json"
assert truncation_field != answer_field, "Answer field and truncation field should not match"
answer_placeholder = "{" + answer_field + "}"
answer_placeholder_len = len(answer_placeholder)
placeholder_start = len(prompt_template) - answer_placeholder_len
assert prompt_template[placeholder_start:] == answer_placeholder, "Answer field must be at prompt end"
def _insert_text_in_template(self, input_example, prompt_template_fields, doc):
""" Format the input example according to the template """
for field in prompt_template_fields:
if field in doc.keys():
field_text = doc[field]
input_example = input_example.replace('{' + field + '}', field_text)
# If some fields from the template aren't present, e.g. {answer} during inference
# just remove that field from the template, leaving the space blank
else:
input_example = input_example.replace('{' + field + '}', "")
return input_example.strip(" ")
def _insert_virtual_token_placeholders(self, input_example, virtual_token_splits):
""" Insert the correct number of pseudo tokens at the <|VIRTUAL_PROMPT_n|> markers """
total_inserted_tokens = 0
for idx in range(len(virtual_token_splits)):
split_start = total_inserted_tokens
split_end = total_inserted_tokens + virtual_token_splits[idx]
pseudo_tokens_for_split = "".join(self.pseudo_tokens[split_start:split_end])
input_example = input_example.replace(f'<|VIRTUAL_PROMPT_{idx}|>', pseudo_tokens_for_split)
total_inserted_tokens = split_end
return input_example
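# Small example (illustrative pseudo-token names): with pseudo_tokens=['<p0>', '<p1>', '<p2>']
# and virtual_token_splits=[2, 1], the template
#   "<|VIRTUAL_PROMPT_0|> Question: {q} <|VIRTUAL_PROMPT_1|> Answer:"
# becomes
#   "<p0><p1> Question: {q} <p2> Answer:"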
def _truncate_input(
self, truncation_field, input_ids, taskname, doc, prompt_template, prompt_template_fields, virtual_token_splits
):
""" Try to truncate input text to fit into the max sequence length """
logging.info(
f"Input greater than max sequence length. Attempting to truncate: '{truncation_field}' in task: '{taskname}'"
)
# Truncate the text ids in this part of input to try and fit max sequence length
if truncation_field is not None and truncation_field in doc.keys():
truncation_length = (len(input_ids) - self.max_seq_length) + 1
field_text = doc[truncation_field]
# Truncate field text
field_text_ids = self.tokenizer.text_to_ids(field_text)
truncated_text_ids = field_text_ids[: -min(truncation_length, len(field_text_ids))]
truncated_field_text = self.tokenizer.ids_to_text(truncated_text_ids)
doc[truncation_field] = truncated_field_text
# Re-insert the truncated text string into the text prompt
input_example = prompt_template
input_example = self._insert_text_in_template(input_example, prompt_template_fields, doc)
input_example = self._insert_virtual_token_placeholders(input_example, virtual_token_splits)
# Re-tokenize the whole prompt
input_ids = self.tokenizer.text_to_ids(input_example)
return input_ids
def _find_answer_start(self, taskname, input_ids, answer_field, doc):
""" Find the token ids corresponding to the answer start, for loss masking purposes.
Assumes the answer is always at the end of the prompt.
"""
answer_text = doc[answer_field]
answer_text = self._add_leading_space(taskname, answer_field, answer_text)
answer_text_ids = self.tokenizer.text_to_ids(answer_text)
num_answer_text_ids = len(answer_text_ids)
if self.add_eos:
num_answer_text_ids += 1
answer_start_idx = len(input_ids) - num_answer_text_ids
return answer_start_idx
def _add_leading_space(self, taskname, field_name, field_text):
""" Add leading space to text if there is a space before it in the template """
prompt_template = self.task_templates[taskname]["prompt_template"]
field_text_start = prompt_template.find("{" + field_name + "}")
if field_text_start != 0 and prompt_template[field_text_start - 1] == " ":
field_text = " " + field_text
return field_text
def __len__(self):
return len(self.examples)
def __getitem__(self, idx):
return self.examples[idx]
def _ceil_to_nearest(self, n, m):
return (n + m - 1) // m * m
def collate_fn(self, batch, tp_workers=0):
""" Prepares input_ids, labels, loss mask, attention_mask, and position ids for global batch """
taskname_ids, input_ids, answer_starts = zip(*batch)
# Pad taskname_ids to be the same length for the prompt encoder
if self.virtual_prompt_source == VirtualPromptSource.PROMPT_ENCODER:
max_taskname_length = max(len(ids) for ids in taskname_ids)
taskname_ids = [ids + [self.pad_token_id] * (max_taskname_length - len(ids)) for ids in taskname_ids]
taskname_ids = torch.tensor(taskname_ids)
# Task ids are just used to look up embeddings in the prompt table
elif self.virtual_prompt_source == VirtualPromptSource.NO_PROMPT:
taskname_ids = torch.tensor(taskname_ids)
# Get max sequence length of batch
batch_max = max(len(ids) for ids in input_ids)
if tp_workers > 1:
# make sure the sequence length is a multiple of tp_workers, needed for sequence parallelism.
resi_padding = (tp_workers - (batch_max - 1) % tp_workers) % tp_workers
else:
resi_padding = 0
batch_max += resi_padding
ceil_batch_max = self._ceil_to_nearest(
batch_max, 8
) # @adithyare this padding does not conflict with the tp_workers padding above
# since tp_workers is always a multiple of 2. The padding to a multiple of 8 ensures a memory-optimized softmax is used.
batch_max = ceil_batch_max + 1
input_ids, loss_mask = self.pad_batch_and_build_loss_mask(input_ids, batch_max, answer_starts)
# Should be a label for every token in batch, label is the next token
labels = input_ids[:, 1:].contiguous()
input_ids = input_ids[:, :-1].contiguous()
batch_max -= 1 # @adithyare I *think* this subtraction accounts for the two lines above, which remove one item from the input_ids sequence.
# Loss mask should align with labels
loss_mask = loss_mask[:, 1:].contiguous()
# Using causal attention mask for whole input
batch_size = len(input_ids)
attention_mask = torch.tril(torch.ones((batch_size, batch_max, batch_max))).view(
batch_size, 1, batch_max, batch_max
)
# Convert attention mask from float to bool
attention_mask = attention_mask < 0.5
position_ids = build_position_ids(input_ids)
return input_ids, labels, loss_mask, position_ids, attention_mask, taskname_ids
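# Padding arithmetic sketch (illustrative numbers): for a batch whose longest example has
# 17 tokens, with tp_workers=2 and padding to a multiple of 8:
#   resi_padding   = (2 - (17 - 1) % 2) % 2 = 0   -> batch_max = 17
#   ceil_batch_max = _ceil_to_nearest(17, 8) = 24 -> batch_max = 24 + 1 = 25
#   after the shift that builds inputs/labels, sequences are 24 tokens long.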
def pad_batch_and_build_loss_mask(self, input_ids, batch_max, answer_starts):
""" Pad input_ids in batch to max batch length while building loss mask """
batch_loss_masks = []
padded_input_ids = []
for ids, answer_start_idx in zip(input_ids, answer_starts):
if answer_start_idx is not None:
# Loss mask where answer tokens are 1.0 and all other tokens are 0.0
loss_mask = [float(idx >= answer_start_idx) for idx in range(len(ids))]
else:
# Loss mask where virtual tokens are 0.0 and all other tokens are 1.0
loss_mask = [float(token_id not in self.pseudo_token_ids) for token_id in ids]
# Pad to max length
input_length = len(ids)
padding_length = batch_max - input_length
pad_extend = [self.pad_token_id] * padding_length
ids = ids + pad_extend
padded_input_ids.append(ids)
# Account for padding in loss mask
loss_mask.extend([0.0] * padding_length)
batch_loss_masks.append(torch.tensor(loss_mask, dtype=torch.float))
# Make into torch tensors
padded_input_ids = torch.tensor(padded_input_ids, dtype=torch.long)
batch_loss_masks = torch.stack(batch_loss_masks)
return padded_input_ids, batch_loss_masks
def inference_collate_fn(self, batch):
"""
Used for loading inference data.
"""
task_id_nums, input_ids, answer_starts = zip(*batch)
input_lengths = torch.cuda.LongTensor([len(inputs) for inputs in input_ids])
task_id_nums = torch.cuda.LongTensor(task_id_nums)
batch_max = input_lengths.max().item()
batch_max += self.tokens_to_generate
input_ids, _ = self.pad_batch_and_build_loss_mask(input_ids, batch_max, answer_starts)
input_ids = input_ids.cuda()
input_ids = torch.cuda.LongTensor(input_ids)
return task_id_nums, (input_ids, input_lengths)
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/gpt_prompt_learning_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT Style dataset."""
import os
from typing import Any, Optional
import numpy as np
import torch
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import (
create_masked_lm_predictions,
create_tokens_and_tokentypes,
get_a_and_b_segments,
get_samples_mapping,
truncate_segments,
)
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import MMapIndexedDataset
class BertDataset(torch.utils.data.Dataset):
def __init__(
self,
cfg: dict,
name: str,
indexed_dataset: MMapIndexedDataset,
data_prefix: str,
num_epochs: Optional[int],
max_num_samples: int,
masked_lm_prob: float,
max_seq_length: int,
short_seq_prob: float,
seed: int,
binary_head: bool,
tokenizer: Any,
):
# Params to store.
self.name = name
self.seed = seed
self.masked_lm_prob = masked_lm_prob
self.max_seq_length = max_seq_length
self.binary_head = binary_head
# Dataset.
self.indexed_dataset = indexed_dataset
# save index mappings to a configurable dir
self.index_mapping_dir = cfg.data.get('index_mapping_dir', None)
# create index_mapping_dir on rank 0
if torch.distributed.is_available() and torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.index_mapping_dir is not None and not os.path.isdir(self.index_mapping_dir):
os.makedirs(self.index_mapping_dir)
torch.distributed.barrier()
# Build the samples mapping.
self.samples_mapping = get_samples_mapping(
self.indexed_dataset,
data_prefix,
num_epochs,
max_num_samples,
self.max_seq_length - 3, # account for added tokens
short_seq_prob,
self.seed,
self.name,
self.binary_head,
index_mapping_dir=self.index_mapping_dir,
)
# Vocab stuff.
self.vocab_id_list = list(tokenizer.ids_to_tokens.keys())
self.vocab_id_to_token_dict = tokenizer.ids_to_tokens
self.cls_id = tokenizer.cls_token_id
self.sep_id = tokenizer.sep_token_id
self.mask_id = tokenizer.mask_token_id
self.pad_id = tokenizer.pad_token_id
def __len__(self):
return self.samples_mapping.shape[0]
def __getitem__(self, idx):
start_idx, end_idx, seq_length = self.samples_mapping[idx]
sample = [self.indexed_dataset[i] for i in range(start_idx, end_idx)]
# Note that this rng state should be numpy and not python since
# python randint is inclusive whereas the numpy one is exclusive.
# We % 2**32 since numpy requires the seed to be between 0 and 2**32 - 1
np_rng = np.random.RandomState(seed=((self.seed + idx) % 2 ** 32))
return build_training_sample(
sample,
seq_length,
self.max_seq_length, # needed for padding
self.vocab_id_list,
self.vocab_id_to_token_dict,
self.cls_id,
self.sep_id,
self.mask_id,
self.pad_id,
self.masked_lm_prob,
np_rng,
self.binary_head,
)
def build_training_sample(
sample,
target_seq_length,
max_seq_length,
vocab_id_list,
vocab_id_to_token_dict,
cls_id,
sep_id,
mask_id,
pad_id,
masked_lm_prob,
np_rng,
binary_head,
whole_word_masking=True,
skip_masking_id=None,
):
"""Biuld training sample.
Arguments:
sample: A list of sentences in which each sentence is a list token ids.
target_seq_length: Desired sequence length.
max_seq_length: Maximum length of the sequence. All values are padded to
this length.
vocab_id_list: List of vocabulary ids. Used to pick a random id.
vocab_id_to_token_dict: A dictionary from vocab ids to text tokens.
cls_id: Start of example id.
sep_id: Separator id.
mask_id: Mask token id.
pad_id: Padding token id.
masked_lm_prob: Probability to mask tokens.
np_rng: Random number generator. Note that this rng state should be
numpy and not python since python randint is inclusive for
the upper bound whereas the numpy one is exclusive.
whole_word_masking: Whether to mask only whole words instead of independent subwords.
skip_masking_id: ID of a token that should not be masked. #TODO: make this a list of tokens.
"""
if binary_head:
# We assume that we have at least two sentences in the sample
assert len(sample) > 1
assert target_seq_length <= max_seq_length
# Divide sample into two segments (A and B).
if binary_head:
tokens_a, tokens_b, is_next_random = get_a_and_b_segments(sample, np_rng)
else:
tokens_a = []
for j in range(len(sample)):
tokens_a.extend(sample[j])
tokens_b = []
is_next_random = False
# Truncate to `target_seq_length`.
max_num_tokens = target_seq_length
truncated = truncate_segments(tokens_a, tokens_b, len(tokens_a), len(tokens_b), max_num_tokens, np_rng)
# Build tokens and tokentypes.
tokens, tokentypes = create_tokens_and_tokentypes(tokens_a, tokens_b, cls_id, sep_id)
# Masking.
max_predictions_per_seq = masked_lm_prob * max_num_tokens
(tokens, masked_positions, masked_labels, _, _) = create_masked_lm_predictions(
tokens,
vocab_id_list,
vocab_id_to_token_dict,
masked_lm_prob,
cls_id,
sep_id,
mask_id,
max_predictions_per_seq,
np_rng,
whole_word_masking=whole_word_masking,
skip_masking_id=skip_masking_id,
)
# Padding.
tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np = pad_and_convert_to_numpy(
tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length
)
train_sample = {
'text': tokens_np,
'types': tokentypes_np,
'labels': labels_np,
'is_random': int(is_next_random),
'loss_mask': loss_mask_np,
'padding_mask': padding_mask_np,
'truncated': int(truncated),
}
return train_sample
def pad_and_convert_to_numpy(tokens, tokentypes, masked_positions, masked_labels, pad_id, max_seq_length):
"""Pad sequences and convert them to numpy."""
# Some checks.
num_tokens = len(tokens)
padding_length = max_seq_length - num_tokens
assert padding_length >= 0
assert len(tokentypes) == num_tokens
assert len(masked_positions) == len(masked_labels)
# Tokens and token types.
filler = [pad_id] * padding_length
tokens_np = np.array(tokens + filler, dtype=np.int64)
tokentypes_np = np.array(tokentypes + filler, dtype=np.int64)
# Padding mask.
padding_mask_np = np.array([1] * num_tokens + [0] * padding_length, dtype=np.int64)
# Labels and loss mask.
labels = [-1] * max_seq_length
loss_mask = [0] * max_seq_length
for i in range(len(masked_positions)):
assert masked_positions[i] < num_tokens
labels[masked_positions[i]] = masked_labels[i]
loss_mask[masked_positions[i]] = 1
labels_np = np.array(labels, dtype=np.int64)
loss_mask_np = np.array(loss_mask, dtype=np.int64)
return tokens_np, tokentypes_np, labels_np, padding_mask_np, loss_mask_np
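# Worked example (illustrative token ids): tokens=[101, 5, 103, 7], tokentypes=[0, 0, 0, 0],
# masked_positions=[2], masked_labels=[42], pad_id=0, max_seq_length=6 gives
#   tokens_np       = [101, 5, 103, 7, 0, 0]
#   padding_mask_np = [1, 1, 1, 1, 0, 0]
#   labels_np       = [-1, -1, 42, -1, -1, -1]
#   loss_mask_np    = [0, 0, 1, 0, 0, 0]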
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/bert_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from typing import List, Optional
import numpy as np
import torch
from datasets import load_dataset
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import get_samples_mapping
from nemo.collections.nlp.data.language_modeling.text_memmap_dataset import JSONLMemMapDataset
from nemo.core.classes import Dataset
from nemo.utils import logging
__all__ = ['GPTSFTDataset']
class GPTSFTDataset(Dataset):
def __init__(
self,
file_path: str,
tokenizer: TokenizerSpec,
max_seq_length: int = 1024,
min_seq_length: int = 1,
add_bos: bool = False,
add_eos: bool = True,
add_sep: bool = False,
sep_id: int = None,
max_num_samples: int = None,
seed: int = 1234,
label_key: str = "answer",
answer_only_loss: bool = True,
truncation_field: str = "text",
pad_to_max_length: bool = False, # (@adithyare) allows for much faster training especially in PEFT settings.
index_mapping_dir: str = None,
prompt_template: str = None,
virtual_tokens: int = 0,
tokens_to_generate: int = 0,
memmap_workers: Optional[int] = None,
hf_dataset: bool = False,
truncation_method: str = 'right',
):
"""
file_path: Path to a JSONL GPT supervised fine-tuning dataset. Data is formatted as multiple JSON lines with each line formatted as follows. {'input': 'John von Neumann\nVon Neumann made fundamental contributions .... Q: What did the math of artificial viscosity do?', 'output': 'smoothed the shock transition without sacrificing basic physics'}
tokenizer: Tokenizer for the dataset. Instance of a class that inherits TokenizerSpec (ex: YTTM, SentencePiece).
max_seq_length (int): maximum sequence length for each dataset examples. Examples will either be truncated to fit this length or dropped if they cannot be truncated.
min_seq_length (int): min length of each data example in the dataset. Data examples will be dropped if they do not meet the min length requirements.
add_bos (bool): Whether to add a beginning of sentence token to each data example
add_eos (bool): Whether to add an end of sentence token to each data example
add_sep (bool): Whether to add a separation token to each data example (goes between prompt and answer)
tokens_to_generate (int): (inference only) Number of tokens to generate during inference
seed: Random seed for data shuffling.
max_num_samples: Maximum number of samples to load. This can be > dataset length if you want to oversample data. If None, all samples will be loaded.
label_key: Key to use for the label in your JSONL file
answer_only_loss: If True, will compute the loss only on the answer part of the input. If False, will compute the loss on the entire input.
truncation_field: Field to use for truncation. (Options: keys in prompt_template). Field to be used for truncation if the combined length exceeds the max sequence length.
pad_to_max_length: Whether to pad the input to the max sequence length. If False, will pad to the max length of the current batch.
index_mapping_dir: Directory to save the index mapping to. If None, will write to the same folder as the dataset.
prompt_template: Prompt template to inject via an fstring. Formatted like Q: {context_key}\n\nA: {label_key}
hf_dataset: Whether to load the json file with the HuggingFace dataset. otherwise, will load the jsonl file with the JSONLMemMapDataset.
truncation_method: Truncation from which position. Options: ['left', 'right']
"""
self.tokenizer = tokenizer
self.file_path = file_path
self.max_seq_length = max_seq_length
self.min_seq_length = min_seq_length
self.add_bos = add_bos
self.add_eos = add_eos
self.add_sep = add_sep
self.sep_id = sep_id
self.max_num_samples = max_num_samples
self.seed = seed
self.label_key = label_key
self.answer_only_loss = answer_only_loss
self.truncation_fields = truncation_field.split(',')
self.pad_to_max_length = pad_to_max_length
self.index_mapping_dir = index_mapping_dir
self.prompt_template = prompt_template
self.virtual_tokens = virtual_tokens
self.tokens_to_generate = tokens_to_generate
self.truncation_method = truncation_method
if hf_dataset:
self.indexed_dataset = load_dataset(
'json', data_files=file_path, cache_dir=index_mapping_dir, num_proc=memmap_workers, split='train'
)
else:
self.indexed_dataset = JSONLMemMapDataset(
dataset_paths=[file_path],
tokenizer=None,
header_lines=0,
index_mapping_dir=index_mapping_dir,
workers=memmap_workers,
)
# Validate prompt template
self._maybe_validate_prompt_template()
# Will be None after this call if `max_num_samples` is None
self._build_samples_mapping()
def _maybe_validate_prompt_template(self):
assert (
self.prompt_template is not None
), f'we need prompt_template to combine contexts and label {self.label_key}'
# When providing things like newlines in the prompt template via the CLI, they are escaped. This line unescapes them.
self.prompt_template = self.prompt_template.encode('utf-8').decode('unicode_escape')
self.prompt_template_keys = re.findall(r'{(.*?)}', self.prompt_template)
label_placeholder = f'{{{self.label_key}}}'
assert (
self.prompt_template[-len(label_placeholder) :] == label_placeholder
), f'{label_placeholder} must be at the end of prompt_template.'
# Legacy checkpoints have self.truncation_fields = ['context'] and self.prompt_template_keys = ['input', 'output']
if self.prompt_template_keys[0] == 'input' and self.truncation_fields[0] == 'context':
self.truncation_fields[0] = self.prompt_template_keys[0]
assert set(self.truncation_fields).issubset(
self.prompt_template_keys
), f'truncation_fields {self.truncation_fields} must be in {self.prompt_template_keys}'
def _build_samples_mapping(self):
if self.max_num_samples is not None:
self.samples_mapping = get_samples_mapping(
indexed_dataset=self.indexed_dataset,
data_prefix=self.file_path,
num_epochs=None,
max_num_samples=self.max_num_samples,
max_seq_length=self.max_seq_length - 2,
short_seq_prob=0,
seed=self.seed,
name=self.file_path.split('/')[-1],
binary_head=False,
index_mapping_dir=self.index_mapping_dir,
)
else:
self.samples_mapping = None
def __len__(self):
if self.max_num_samples is None:
return len(self.indexed_dataset)
else:
return len(self.samples_mapping)
def __getitem__(self, idx):
if isinstance(idx, np.int64):
idx = idx.item()
if self.samples_mapping is not None:
assert idx < len(self.samples_mapping)
idx, _, _ = self.samples_mapping[idx]
if isinstance(idx, np.uint32):
idx = idx.item()
assert idx < len(self.indexed_dataset)
# idx may be < 0 because of pad_samples_to_global_batch_size, e.g. idx = -1
if idx < 0:
idx = len(self) + idx
try:
example = self.indexed_dataset[idx]
except Exception as e:
logging.error(f"Error while loading example {idx} from dataset {self.file_path}")
raise e
return self._process_example(example)
def _separate_template(self, prompt_template_values: List[str]):
"""
Combine contexts and label based on prompt_template into a list of strings and a list of keys.
Args:
prompt_template_values (List[str]): the list of context and label strings extracted from the jsonl file with prompt_template_keys.
Returns:
template_strings (List[str]): separated prompt_template with context/label placeholders filled with the corresponding strings
template_strings_keys (List[str]): strings pointing to placeholder keys or '<template>'
Examples:
prompt_template = 'Context: {context} Question: {question} Answer: {label}'
prompt_template_values = ['xxx', 'yyy', 'zzz']
# tokenizer.space_sensitive = True
template_strings = ['Context:', ' xxx', ' Question:', ' yyy', ' Answer:', ' zzz']
# tokenizer.space_sensitive = False
template_strings = ['Context:', ' xxx', 'Question:', 'yyy', 'Answer:', 'zzz']
template_strings_keys = ['<template>', 'context', '<template>', 'question', '<template>', 'label']
"""
placeholders = [f'{{{k}}}' for k in self.prompt_template_keys]
# placeholder to string
ph_to_s = {ph: s for ph, s in zip(placeholders, prompt_template_values)}
# placeholder to key
ph_to_k = {ph: k for ph, k in zip(placeholders, self.prompt_template_keys)}
# separate prompt_template based on '<space>{placeholder}'
# examples:
# self.prompt_template = "Context:{context} Passage: {passage}\n\nQuestion:{question} {label}"
# template_with_placeholder_separated = ['Context:', '{context}', ' Passage:', ' {passage}', '\n\nQuestion:', '{question}', ' {label}']
template_with_placeholder_separated = re.split('( *?{.+?})', self.prompt_template)
template_with_placeholder_separated = [s for s in template_with_placeholder_separated if len(s) > 0]
# remove space if we have leading space and tokenizer is not space_sensitive
# space_sensitive = True : tokenizer.text_to_tokens('A{num_spaces}B') = tokenizer.text_to_tokens('A') + tokenizer.text_to_tokens('{num_spaces}B')
# space_sensitive = False: tokenizer.text_to_tokens('A{num_spaces}B') = tokenizer.text_to_tokens('A') + tokenizer.text_to_tokens('{num_spaces-1}B')
space_sensitive = getattr(self.tokenizer, 'space_sensitive', False)
template_with_space_reduced = [
s[1:] if not space_sensitive and s[0] == ' ' else s for s in template_with_placeholder_separated
]
# convert placeholder to the corresponding string (preserve left spaces) and key
template_strings, template_strings_keys = [], []
for t in template_with_space_reduced:
placeholder = t.lstrip(' ')
left_spaces = ' ' * (len(t) - len(placeholder))
template_strings.append(left_spaces + ph_to_s.get(placeholder, placeholder))
template_strings_keys.append(ph_to_k.get(placeholder, '<template>'))
return template_strings, template_strings_keys
def _multiple_truncation(self, template_ids: List[List[int]], template_ids_keys: List[str]):
"""
Calculate total tokens and truncate multiple contexts in truncation_fields.
Args:
template_ids (List[List[int]]): the list of separate prompt_template ids.
template_ids_keys (List[str]): the list of placeholder keys or <template> (used to check key in truncation_fields).
Returns:
context_ids (List[int]): all context ids.
label_ids (List[int]): all label ids.
"""
context_ids = template_ids[:-1]
label_ids = template_ids[-1]
total_ids = (
self.virtual_tokens
+ sum(len(ids) for ids in context_ids)
+ max(len(label_ids), self.tokens_to_generate)
+ self.add_bos
+ self.add_sep
+ self.add_eos # Only training need to consider eos token
)
if total_ids > self.max_seq_length:
truncation_length_total = total_ids - self.max_seq_length
num_fields = len(self.truncation_fields)
# divide the total truncation length across the truncation fields as evenly as possible
# examples:
# truncation_length_total = 11
# num_fields = 3
# truncation_length_list = [3,4,4]
truncation_length_list = [
truncation_length_total // num_fields + (1 if i < truncation_length_total % num_fields else 0)
for i in range(num_fields)[::-1]
]
for i, (ids, key) in enumerate(zip(template_ids, template_ids_keys)):
if key in self.truncation_fields:
truncation_length = truncation_length_list.pop()
assert len(ids) >= truncation_length, f'{key} is not long enough to truncate.'
if self.truncation_method == 'left':
window_offset = truncation_length
elif self.truncation_method == 'right':
window_offset = 0
else:
raise ValueError(f'{self.truncation_method} is not supported')
window_length = len(ids) - truncation_length
template_ids[i] = ids[window_offset : window_offset + window_length]
context_ids = [i for ids in template_ids[:-1] for i in ids]
label_ids = template_ids[-1]
return context_ids, label_ids
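# Truncation window sketch (illustrative): for a field with ids of length 10 and a computed
# truncation_length of 3, window_length = 7 and
#   truncation_method='right' keeps ids[0:7]  (drops the tail)
#   truncation_method='left'  keeps ids[3:10] (drops the head)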
def _process_example(self, example):
"""
Create an example by concatenating text and answer.
Truncation is carried out when needed, but it is performed only on the prompt side.
BOS, EOS, and SEP are added if specified.
"""
prompt_template_values = [example[c].strip(' ') for c in self.prompt_template_keys]
template_strings, template_strings_keys = self._separate_template(prompt_template_values)
template_ids = [self.tokenizer.text_to_ids(s) for s in template_strings]
context_ids, answer_ids = self._multiple_truncation(template_ids, template_strings_keys)
if self.virtual_tokens:
# (@adithyare) we are going to insert "pad/eos" tokens in the beginning of the text and context
# these pad/eos tokens are placeholders for virtual tokens
context_ids = [self.tokenizer.eos_id] * self.virtual_tokens + context_ids
input_ids = context_ids
answer_start_idx = len(input_ids)
# Adds bos token in the start
if self.add_bos:
context_ids = [self.tokenizer.bos_id] + context_ids
input_ids = [self.tokenizer.bos_id] + input_ids
answer_start_idx += 1
# Adds sep token between text/prompt and answer
if self.add_sep:
context_ids = context_ids + [self.sep_id]
input_ids = input_ids + [self.sep_id]
answer_start_idx += 1
input_ids = input_ids + answer_ids
# Only training need to consider eos token
if self.add_eos:
input_ids = input_ids + [self.tokenizer.eos_id]
if len(input_ids) > self.max_seq_length:
logging.warning(f'Input ids length {len(input_ids)} exceed max sequence length {self.max_seq_length}')
input_ids = input_ids[: self.max_seq_length]
# store metadata in dataset, in case user may have keys required in the prediction json files
metadata = {k: v for k, v in example.items() if k not in self.prompt_template_keys}
processed_example = {
'input_ids': input_ids,
'answer_start_idx': answer_start_idx,
'context_ids': context_ids,
'context_length': len(context_ids),
'answer_ids': answer_ids,
'metadata': metadata,
}
return processed_example
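# Resulting layout of input_ids (a sketch; bracketed pieces appear only when enabled):
#   [bos] [virtual placeholder tokens] [context from the template] [sep] [answer] [eos]
# answer_start_idx points at the first answer token, so answer_only_loss can zero out
# everything before it when the loss mask is built in _build_loss_mask().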
def _maybe_cast_to_list(self, x):
if isinstance(x, np.ndarray):
return [item.tolist() for item in x]
return x
def _ceil_to_nearest(self, n, m):
return (n + m - 1) // m * m
def _collate_item(self, item, max_length, pad_id):
item = self._maybe_cast_to_list(item)
# max_length = max([len(x) for x in item]) if item else 0
# here [0] should be tokenizer.pad_id
item = [x + [pad_id] * (max_length - len(x)) for x in item]
return item
def _build_loss_mask(self, processed_example):
""" Pad input_ids in batch to max batch length while building loss mask """
input_ids = processed_example['input_ids']
answer_start_idx = processed_example['answer_start_idx']
if self.answer_only_loss:
loss_mask = [float(idx >= answer_start_idx) for idx in range(len(input_ids))]
else:
loss_mask = [1.0] * len(input_ids)
return loss_mask
@torch.no_grad()
def _create_attention_mask(self, max_length):
"""Create `attention_mask`.
Args:
input_ids: A 1D tensor that holds the indices of tokens.
"""
# seq_length = len(input_ids)
# `attention_mask` has the shape of [1, seq_length, seq_length]
attention_mask = torch.tril(torch.ones((max_length, max_length))).unsqueeze(0)
attention_mask = attention_mask < 0.5
return attention_mask
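# Example (illustrative): for max_length=3 the returned boolean mask is
#   [[[False, True,  True ],
#     [False, False, True ],
#     [False, False, False]]]
# i.e. True marks future positions that each token is not allowed to attend to.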
def collate_fn(self, batch):
input_ids = [item['input_ids'][:-1] for item in batch]
labels = [item['input_ids'][1:] for item in batch]
contexts = [item['context_ids'] for item in batch]
context_lengths = torch.LongTensor([item['context_length'] for item in batch])
answers = [item['answer_ids'] for item in batch]
loss_mask = [self._build_loss_mask(item)[1:] for item in batch]
metadata = [item['metadata'] for item in batch]
max_length = max(max([len(x) for x in input_ids]), max([len(x) for x in contexts]) + self.tokens_to_generate)
# pad to max_seq_length when requested, otherwise round the batch max length up to the nearest multiple of 8 (capped at max_seq_length)
if self.pad_to_max_length:
max_length = self.max_seq_length
else:
max_length = min(self.max_seq_length, self._ceil_to_nearest(max_length, 8))
assert max_length <= self.max_seq_length
attention_mask = [self._create_attention_mask(max_length) for _ in batch]
attention_mask = torch.stack(attention_mask)
position_ids = [list(range(max_length)) for _ in batch]
position_ids = torch.LongTensor(position_ids)
input_ids = torch.LongTensor(
self._collate_item(input_ids, max_length=max_length, pad_id=self.tokenizer.eos_id)
)
labels = torch.LongTensor(self._collate_item(labels, max_length=max_length, pad_id=self.tokenizer.eos_id))
loss_mask = torch.LongTensor(self._collate_item(loss_mask, max_length=max_length, pad_id=0))
contexts = torch.LongTensor(self._collate_item(contexts, max_length=max_length, pad_id=self.tokenizer.eos_id))
answers = torch.LongTensor(self._collate_item(answers, max_length=max_length, pad_id=self.tokenizer.eos_id))
processed_batch = {
'tokens': input_ids,
'labels': labels,
'attention_mask': attention_mask,
'loss_mask': loss_mask,
'position_ids': position_ids,
'contexts': contexts,
'context_lengths': context_lengths,
'answers': answers,
'metadata': metadata,
}
return processed_batch
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import torch
from nemo.collections.common.tokenizers.sentencepiece_tokenizer import SentencePieceTokenizer
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.language_modeling.megatron.gpt_sft_dataset import GPTSFTDataset
from nemo.utils import logging
__all__ = ['GPTSFTChatDataset']
IGNORE_INDEX = -100
END_SIGNAL = "\n"
END_NAME_SIGNAL = "\n"
SYSTEM_TOKEN = "<extra_id_0>System\n"
TURN_TOKEN = "<extra_id_1>"
TYPE_INSTRUCTION = {
'TEXT_TO_VALUE': "",
'VALUE_TO_TEXT': '',
}
def _mask_targets(
target,
tokenized_lens,
speakers,
header_len,
s_ids,
tokenizer,
mask_role,
gtype,
extra_id_2_token_id,
new_line_token_id,
):
""" This function masks the tokens so the loss is computed only on the non-masked role's responses.
For 'TEXT_TO_VALUE' type, the loss is computed on the value attributes.
Args:
target (Tensor): input ids
tokenized_lens (List[int]): array of lengths of each turn
speakers (List[str]): array of speakers of each turn
header_len (int): the system prompt length
s_ids (List[Tensor]): array of tokenized ids of each turn
tokenizer (TokenizerSpec): tokenizer object
mask_role (str): the speaker id to be masked from loss computation
gtype (str): either 'TEXT_TO_VALUE' or 'VALUE_TO_TEXT'
extra_id_2_token_id (int): <extra_id_2> token id
new_line_token_id (int): new line token id
"""
cur_idx = header_len
tgt_len = target.shape[0]
for i, (tokenized_len, speaker, s_id) in enumerate(zip(tokenized_lens, speakers, s_ids)):
# Note: SentencePiece adds an extra empty token in front, so we have to compute the difference
id1 = tokenizer.text_to_ids("<extra_id_1>")
id2 = tokenizer.text_to_ids("<extra_id_1>" + TURN_TOKEN + speaker + END_NAME_SIGNAL)
skip_name_len = len(id2) - len(id1)
if extra_id_2_token_id is None:
raise ValueError("extra_id_2 is not in the vocabulary")
if (s_id == extra_id_2_token_id).any().item():
if gtype == 'VALUE_TO_TEXT':
# if contains the token <extra_id_2>
assert skip_name_len == torch.where((s_id == extra_id_2_token_id))[0].item()
# find new line token id 14
more_skip_len = torch.where((s_id[skip_name_len:] == new_line_token_id))[0][0].item() + 1
skip_name_len += more_skip_len
elif gtype == 'TEXT_TO_VALUE':
skip_name_len = torch.where((s_id == extra_id_2_token_id))[0].item() + 1
if cur_idx >= tgt_len:
break
elif cur_idx + tokenized_len < tgt_len:
# Check whether the mask is applied to the correct position, the first token is turn token: <extra_id_1>
# s_id[2:] skips the artifact empty token and the turn token
# target[cur_idx + 1:cur_idx + tokenized_len] skip the turn token
if not torch.equal(target[cur_idx + 1 : cur_idx + tokenized_len], s_id[1:]):
logging.warning("a sentence mismatches the corresponding piece " "in the conversation")
if i == 0 and (gtype == 'VALUE_TO_TEXT' or gtype is None):
# mask the first turn completely to provide at least one turn as context
target[cur_idx : cur_idx + tokenized_len] = IGNORE_INDEX
elif speaker == mask_role and i == 1 and gtype == 'TEXT_TO_VALUE':
# leave the first human tag unmasked
target[cur_idx + 1 : cur_idx + tokenized_len] = IGNORE_INDEX
elif speaker == mask_role and (i > 1):
# leave the first human tag unmasked
target[cur_idx + 1 : cur_idx + tokenized_len] = IGNORE_INDEX
elif speaker == mask_role and (i <= 1):
# mask out everything in the second turn
target[cur_idx : cur_idx + tokenized_len] = IGNORE_INDEX
else:
# mask up to the name end, need to remove one as skip name has an extra artifact empty token
target[cur_idx : cur_idx + skip_name_len] = IGNORE_INDEX
cur_idx += tokenized_len
def cannonical_form_formater(cannoical_form):
return f'<extra_id_2>{cannoical_form}\n'
def response_value_formater(label):
if isinstance(label, str):
return '<extra_id_2>' + label + '\n'
elif label is None:
return ''
else:
raise ValueError(f'Unknown label type {type(label)}, only str type is supported')
def _add_speaker_and_signal(header, source, mask_role, gtype):
"""Add speaker and start/end signal on each round."""
BEGIN_SIGNAL = ""
conversation = header
for i, sentence in enumerate(source):
sentence_from = sentence["from"]
role_token = TURN_TOKEN
if gtype is None:
sentence["value"] = (
BEGIN_SIGNAL + role_token + sentence_from + END_NAME_SIGNAL + sentence["value"] + END_SIGNAL
)
elif gtype == "VALUE_TO_TEXT":
sentence["value"] = (
BEGIN_SIGNAL
+ role_token
+ sentence_from
+ END_NAME_SIGNAL
+ (response_value_formater(sentence['label']) if 'label' in sentence else '')
+ sentence["value"]
+ END_SIGNAL
)
elif gtype == "TEXT_TO_VALUE":
sentence["value"] = (
BEGIN_SIGNAL
+ role_token
+ sentence_from
+ END_NAME_SIGNAL
+ sentence["value"]
+ END_SIGNAL
+ (response_value_formater(sentence['label']) if 'label' in sentence else '')
)
else:
raise ValueError(
f"source type {gtype} not supported, only 'VALUE_TO_TEXT' and 'TEXT_TO_VALUE' are supported"
)
conversation += sentence["value"]
# if the last turn is not masked, add next token start token to the end, which will be included for loss calculation
if sentence_from != mask_role and i == len(source) - 1:
conversation += TURN_TOKEN
return conversation
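# Formatting sketch (illustrative strings): for header "<extra_id_0>System\nBe helpful.\n",
# mask_role="User", gtype=None and
#   source = [{"from": "User", "value": "Hi"}, {"from": "Assistant", "value": "Hello!"}]
# the returned conversation is
#   "<extra_id_0>System\nBe helpful.\n<extra_id_1>User\nHi\n<extra_id_1>Assistant\nHello!\n<extra_id_1>"
# (the trailing turn token is appended because the last speaker is not the masked role).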
def preprocess(source: dict, tokenizer: TokenizerSpec, extra_id_2_token_id: int, new_line_token_id: int):
"""
Given a conversation list. This transform:
1. Add the turn token and speaker name at the beginning of each sentence, with end signal '\n';
2. Concatenate conversations together;
3. Tokenize the concatenated conversation;
4. Make a deepcopy as the target. Mask the masked role's tokens with IGNORE_INDEX.
"""
data_type = None
if 'type' in source:
data_type = source['type']
assert data_type in TYPE_INSTRUCTION, f"source type {data_type} not supported"
# add end signal and concatenate together
conversation = source['system']
if data_type is not None:
if TYPE_INSTRUCTION[data_type] != '':
conversation = conversation + '\n' + TYPE_INSTRUCTION[data_type]
mask_role = source.get('mask', 'User')
header = f"{SYSTEM_TOKEN}{conversation}"
conversation = _add_speaker_and_signal(header, source['conversations'], mask_role, data_type)
# tokenize conversations
input_ids = tokenizer.text_to_ids(conversation)
target = copy.deepcopy(input_ids)
header_len = len(tokenizer.text_to_ids(header))
ids = []
tokenized_lens = []
for s in source['conversations']:
if isinstance(tokenizer, SentencePieceTokenizer):
tokenized_sentence = tokenizer.text_to_ids(s["value"])
ids.append(torch.tensor(tokenized_sentence)[1:])
# remove one token as it adds an empty token in front
tokenized_lens.append(len(tokenized_sentence) - 1)
else:
tokenized_sentence = tokenizer.text_to_ids(s["value"])
ids.append(torch.tensor(tokenized_sentence))
# no artifact token is added in front here, so keep the full length
tokenized_lens.append(len(tokenized_sentence))
speakers = [sentence["from"] for sentence in source['conversations']]
assert mask_role in speakers, "mask role not in the conversation"
target = torch.LongTensor(target)
# not going to train on the header
target[:header_len] = IGNORE_INDEX
input_ids = torch.LongTensor(input_ids)
_mask_targets(
target,
tokenized_lens,
speakers,
header_len,
ids,
tokenizer,
mask_role,
data_type,
extra_id_2_token_id,
new_line_token_id,
)
mask = (target != IGNORE_INDEX).bool()
assert mask.sum().item() != 0, "mask is empty"
# Use the last turn as the answer; all earlier turns are context
last_ignore_index_pos = torch.nonzero(target == IGNORE_INDEX)[-1].item() + 1
context_ids = input_ids[:last_ignore_index_pos]
answer_ids = input_ids[last_ignore_index_pos:]
return dict(input_ids=input_ids, mask=mask, context_ids=context_ids, answer_ids=answer_ids)
def _check_token_in_vocab(tokenizer, token):
ids = tokenizer.text_to_ids(token)
if isinstance(tokenizer, SentencePieceTokenizer):
return len(ids) == 2
else:
return len(ids) == 1
class GPTSFTChatDataset(GPTSFTDataset):
def _maybe_validate_prompt_template(self):
pass
def _build_samples_mapping(self):
super()._build_samples_mapping()
assert hasattr(self.tokenizer, "vocab"), "tokenizer should have vocab property, not supported"
assert _check_token_in_vocab(
self.tokenizer, '<extra_id_0>'
), "<extra_id_0> not in the tokenizer vocab. not supported"
assert _check_token_in_vocab(
self.tokenizer, '<extra_id_1>'
), "<extra_id_1> not in the tokenizer vocab. not supported"
# calculate the <extra_id_2> token id
if _check_token_in_vocab(self.tokenizer, '<extra_id_2>'):
ids_1 = self.tokenizer.text_to_ids('<extra_id_1><extra_id_2>')
ids_2 = self.tokenizer.text_to_ids('<extra_id_1>')
self.extra_id_2_token_id = ids_1[len(ids_2) :][0]
else:
self.extra_id_2_token_id = None
ids_1 = self.tokenizer.text_to_ids('<extra_id_1>\n')
ids_2 = self.tokenizer.text_to_ids('<extra_id_1>')
self.new_line_token_id = ids_1[len(ids_2) :][0]
def _process_example(self, example):
"""
Preprocess a chat example into input ids, a loss mask, and context/answer ids.
The system header and turn tokens are added, and tokens belonging to the masked
role are excluded from the loss.
"""
result = preprocess(example, self.tokenizer, self.extra_id_2_token_id, self.new_line_token_id)
# store metadata in dataset, in case user may have keys required in the prediction json files
metadata = {k: v for k, v in example.items() if k not in ['conversations']}
result['metadata'] = metadata
return result
def collate_fn(self, batch):
input_ids = [item['input_ids'][:-1].tolist() for item in batch]
labels = [item['input_ids'][1:].tolist() for item in batch]
contexts = [item['context_ids'].tolist() for item in batch]
answers = [item['answer_ids'].tolist() for item in batch]
loss_mask = [item['mask'][1:].tolist() for item in batch]
metadata = [item['metadata'] for item in batch]
max_length = max(max([len(x) for x in input_ids]), max([len(x) for x in contexts]) + self.tokens_to_generate)
if max_length > self.max_seq_length:
# truncate the sequences if it is longer than max_seq_length
input_ids = [x[: self.max_seq_length] for x in input_ids]
labels = [x[: self.max_seq_length] for x in labels]
loss_mask = [x[: self.max_seq_length] for x in loss_mask]
contexts = [x[: self.max_seq_length] for x in contexts]
# pad to max_seq_length when requested, otherwise round the batch max length up to the nearest multiple of 8 (capped at max_seq_length)
if self.pad_to_max_length:
max_length = self.max_seq_length
else:
max_length = min(self.max_seq_length, self._ceil_to_nearest(max_length, 8))
assert max_length <= self.max_seq_length
attention_mask = [self._create_attention_mask(max_length) for _ in batch]
attention_mask = torch.stack(attention_mask)
position_ids = [list(range(max_length)) for _ in batch]
position_ids = torch.LongTensor(position_ids)
input_ids = torch.LongTensor(
self._collate_item(input_ids, max_length=max_length, pad_id=self.tokenizer.eos_id)
)
labels = torch.LongTensor(self._collate_item(labels, max_length=max_length, pad_id=self.tokenizer.eos_id))
loss_mask = torch.LongTensor(self._collate_item(loss_mask, max_length=max_length, pad_id=0))
context_lengths = torch.LongTensor([len(x) for x in contexts])
contexts = torch.LongTensor(self._collate_item(contexts, max_length=max_length, pad_id=self.tokenizer.eos_id))
answers = torch.LongTensor(self._collate_item(answers, max_length=max_length, pad_id=self.tokenizer.eos_id))
processed_batch = {
'tokens': input_ids,
'labels': labels,
'attention_mask': attention_mask,
'loss_mask': loss_mask,
'position_ids': position_ids,
'contexts': contexts,
'context_lengths': context_lengths,
'answers': answers,
'metadata': metadata,
}
return processed_batch
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/gpt_sft_chat_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GPT style dataset."""
import os
import time
import numpy as np
import torch
from omegaconf.dictconfig import DictConfig
from nemo.collections.nlp.data.language_modeling.megatron.base_dataset_utils import (
get_datasets_weights_and_num_samples,
get_train_valid_test_split_,
)
from nemo.collections.nlp.data.language_modeling.megatron.blendable_dataset import BlendableDataset
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import deallocate_indexed_dataset_memory
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import make_dataset as make_indexed_dataset
from nemo.core import Dataset
from nemo.utils import logging
try:
from megatron.core import parallel_state
HAVE_MEGATRON_CORE = True
except (ImportError, ModuleNotFoundError):
HAVE_MEGATRON_CORE = False
def build_dataset(cfg, trainer, data_prefix, data_impl, num_samples, seq_length, seed, skip_warmup, tokenizer, name):
def _build_dataset(current_data_prefix, current_num_samples):
delay_data_mmap = cfg.data.get('delay_data_mmap', False)
indexed_dataset = get_indexed_dataset_(current_data_prefix, data_impl, skip_warmup, delay_data_mmap)
total_num_of_documents = indexed_dataset.sizes.shape[0]
# Print stats about the splits.
logging.info(' > dataset split:')
        logging.info('     Total number of {} documents: {}'.format(name, total_num_of_documents))
drop_last = True
if name == "valid":
drop_last = cfg.data.get("validation_drop_last", True)
dataset = GPTDataset(
cfg,
trainer,
tokenizer,
name,
current_data_prefix,
np.arange(start=0, stop=total_num_of_documents, step=1, dtype=np.int32),
indexed_dataset,
current_num_samples,
seq_length,
seed,
drop_last=drop_last,
)
return dataset
if len(data_prefix) == 1:
return _build_dataset(data_prefix[0], num_samples)
else:
output = get_datasets_weights_and_num_samples(data_prefix, num_samples)
prefixes, weights, datasets_num_samples = output
datasets = []
for i in range(len(prefixes)):
dataset = _build_dataset(prefixes[i], datasets_num_samples[i])
datasets.append(dataset)
return BlendableDataset(datasets, weights, num_samples)
def build_train_valid_test_datasets(
cfg,
trainer,
data_prefix,
data_impl,
splits_string,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
tokenizer,
):
if data_impl in ['mock']:
logging.info('Initializing mock GPT dataset for train, validate, and test')
if len(data_prefix) != 0:
# Mock data will be generated instead of loading files.
logging.warning(f"Requested data_impl={data_impl}, so ignoring data_prefix setting: {data_prefix}")
if tokenizer is None:
# Vocabulary size is inferred from tokenizer.
raise ValueError("Tokenizer is required for a mock GPT dataset")
train_ds = MockGPTDataset(cfg, tokenizer, "train", int(train_valid_test_num_samples[0]), seq_length, seed,)
valid_ds = MockGPTDataset(cfg, tokenizer, "valid", int(train_valid_test_num_samples[1]), seq_length, seed,)
test_ds = MockGPTDataset(cfg, tokenizer, "test", int(train_valid_test_num_samples[2]), seq_length, seed,)
return train_ds, valid_ds, test_ds
if isinstance(data_prefix, DictConfig):
assert (
data_prefix.get('train') is not None
and data_prefix.get('test') is not None
and data_prefix.get('validation') is not None
), f"Data prefix dictionary should have train, test and validation keys. data_prefix currently has only {data_prefix.keys()}"
if cfg.data.splits_string is not None:
logging.warning(cfg.data.splits_string + " ignored since data prefix is of type dictionary.")
train_ds = build_dataset(
cfg,
trainer,
data_prefix["train"],
data_impl,
int(train_valid_test_num_samples[0]),
seq_length,
seed,
skip_warmup,
tokenizer,
"train",
)
validation_ds = build_dataset(
cfg,
trainer,
data_prefix["validation"],
data_impl,
int(train_valid_test_num_samples[1]),
seq_length,
seed,
skip_warmup,
tokenizer,
"valid",
)
test_ds = build_dataset(
cfg,
trainer,
data_prefix["test"],
data_impl,
int(train_valid_test_num_samples[2]),
seq_length,
seed,
skip_warmup,
tokenizer,
"test",
)
return train_ds, validation_ds, test_ds
else:
# Single dataset.
if len(data_prefix) == 1:
return _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix[0],
data_impl,
splits_string,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
tokenizer,
)
# Blending dataset.
# Parse the values.
output = get_datasets_weights_and_num_samples(data_prefix, train_valid_test_num_samples)
prefixes, weights, datasets_train_valid_test_num_samples = output
# Build individual datasets.
train_datasets = []
valid_datasets = []
test_datasets = []
for i in range(len(prefixes)):
train_ds, valid_ds, test_ds = _build_train_valid_test_datasets(
cfg,
trainer,
prefixes[i],
data_impl,
splits_string,
datasets_train_valid_test_num_samples[i],
seq_length,
seed,
skip_warmup,
tokenizer,
)
if train_ds:
train_datasets.append(train_ds)
if valid_ds:
valid_datasets.append(valid_ds)
if test_ds:
test_datasets.append(test_ds)
train_n, valid_n, test_n = map(sum, zip(*datasets_train_valid_test_num_samples))
# Blend.
blending_train_dataset = None
if train_datasets:
blending_train_dataset = BlendableDataset(train_datasets, weights, train_n)
blending_valid_dataset = None
if valid_datasets:
blending_valid_dataset = BlendableDataset(valid_datasets, weights, valid_n)
blending_test_dataset = None
if test_datasets:
blending_test_dataset = BlendableDataset(test_datasets, weights, test_n)
return (blending_train_dataset, blending_valid_dataset, blending_test_dataset)
def _build_train_valid_test_datasets(
cfg,
trainer,
data_prefix,
data_impl,
splits_string,
train_valid_test_num_samples,
seq_length,
seed,
skip_warmup,
tokenizer,
):
"""Build train, valid, and test datasets."""
# Indexed dataset.
delay_data_mmap = cfg.data.get('delay_data_mmap', False)
indexed_dataset = get_indexed_dataset_(data_prefix, data_impl, skip_warmup, delay_data_mmap)
total_num_of_documents = indexed_dataset.sizes.shape[0]
splits = get_train_valid_test_split_(splits_string, total_num_of_documents)
# Print stats about the splits.
logging.info(' > dataset split:')
def print_split_stats(name, index):
logging.info(' {}:'.format(name))
logging.info(
' document indices in [{}, {}) total of {} '
'documents'.format(splits[index], splits[index + 1], splits[index + 1] - splits[index])
)
print_split_stats('train', 0)
print_split_stats('validation', 1)
print_split_stats('test', 2)
def build_dataset(index, name):
dataset = None
if splits[index + 1] > splits[index]:
documents = np.arange(start=splits[index], stop=splits[index + 1], step=1, dtype=np.int32)
drop_last = True
if name == "valid":
drop_last = cfg.data.get("validation_drop_last", True)
dataset = GPTDataset(
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
train_valid_test_num_samples[index],
seq_length,
seed,
drop_last=drop_last,
)
return dataset
train_dataset = build_dataset(0, 'train')
valid_dataset = build_dataset(1, 'valid')
test_dataset = build_dataset(2, 'test')
return (train_dataset, valid_dataset, test_dataset)
def get_indexed_dataset_(data_prefix, data_impl, skip_warmup, delay_data_mmap=False):
"""Build indexed dataset."""
logging.info(' > building dataset index ...')
start_time = time.time()
indexed_dataset = make_indexed_dataset(data_prefix, data_impl, skip_warmup, delay_data_mmap=delay_data_mmap)
logging.info(' > finished creating indexed dataset in {:4f} ' 'seconds'.format(time.time() - start_time))
logging.info(' number of documents: {}'.format(indexed_dataset.sizes.shape[0]))
return indexed_dataset
class GPTDataset(Dataset):
def __init__(
self,
cfg,
trainer,
tokenizer,
name,
data_prefix,
documents,
indexed_dataset,
num_samples,
seq_length,
seed,
drop_last=True,
):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"megatron-core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__()
self.name = name
self.indexed_dataset = indexed_dataset
self.drop_last = drop_last
self.seq_length = seq_length
# Checks
assert np.min(documents) >= 0
assert np.max(documents) < indexed_dataset.sizes.shape[0]
self.reset_position_ids = cfg.data.get('reset_position_ids', False)
self.reset_attention_mask = cfg.data.get('reset_attention_mask', False)
self.eod_mask_loss = cfg.data.get('eod_mask_loss', False)
self.create_inputs = any([self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss])
self.cached_inputs = False
self.eos_id = tokenizer.eos_id
self.no_seqlen_plus_one_input_tokens = cfg.data.get('no_seqlen_plus_one_input_tokens', False)
self.add_extra_token = 1
if self.no_seqlen_plus_one_input_tokens:
self.add_extra_token = 0
self.shuffle_documents = cfg.data.get('shuffle_documents', True)
self.exchange_indices_distributed = cfg.data.get('exchange_indices_distributed', False)
# save index mappings to a configurable dir
self.index_mapping_dir = cfg.data.get('index_mapping_dir', None)
# create index_mapping_dir on rank 0
if torch.distributed.is_available() and torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
if self.index_mapping_dir is not None and not os.path.isdir(self.index_mapping_dir):
os.makedirs(self.index_mapping_dir)
torch.distributed.barrier()
# Build index mappings.
self.doc_idx, self.sample_idx, self.shuffle_idx = _build_index_mappings(
self.name,
data_prefix,
documents,
self.indexed_dataset.sizes,
num_samples,
seq_length,
seed,
index_mapping_dir=self.index_mapping_dir,
drop_last=drop_last,
add_extra_token=self.add_extra_token,
shuffle_documents=self.shuffle_documents,
exchange_indices_distributed=self.exchange_indices_distributed,
)
deallocate_indexed_dataset_memory(self.indexed_dataset)
def create_data_mmap(self):
self.indexed_dataset.create_data_mmap()
def __len__(self):
        # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
return self.sample_idx.shape[0] - 1
def _get_text(self, idx: int) -> np.ndarray:
# Get the shuffled index.
idx = self.shuffle_idx[idx]
# Start and end documents and offsets.
doc_index_f = self.sample_idx[idx][0]
doc_index_l = self.sample_idx[idx + 1][0]
offset_f = self.sample_idx[idx][1]
offset_l = self.sample_idx[idx + 1][1]
# If we are within the same document, just extract the chunk.
if doc_index_f == doc_index_l:
sample = self.indexed_dataset.get(
self.doc_idx[doc_index_f], offset=offset_f, length=offset_l - offset_f + self.add_extra_token
)
else:
# Otherwise, get the rest of the initial document.
sample_list = [self.indexed_dataset.get(self.doc_idx[doc_index_f], offset=offset_f)]
# Loop over all in between documents and add the entire document.
for i in range(doc_index_f + 1, doc_index_l):
sample_list.append(self.indexed_dataset.get(self.doc_idx[i]))
# And finally add the relevant portion of last document.
sample_list.append(
self.indexed_dataset.get(self.doc_idx[doc_index_l], length=offset_l + self.add_extra_token)
)
sample = np.concatenate(sample_list)
if len(sample) != (self.seq_length + self.add_extra_token):
logging.info(
F' > WARNING: Got sample of length: {len(sample)} for sequence length={self.seq_length+self.add_extra_token}, padding the sample to match sequence length'
)
sample = np.array(sample, dtype=np.int64)
sample = np.pad(
sample, (0, self.seq_length + self.add_extra_token - len(sample)), mode='constant', constant_values=-1
)
return sample.astype(np.int64)
def __getitem__(self, idx):
text = torch.from_numpy(self._get_text(idx))
if self.add_extra_token:
tokens = text[:-1].contiguous()
labels = text[1:].contiguous()
else:
tokens = text
labels = torch.roll(text, shifts=-1, dims=0)
labels[-1] = -1
if self.create_inputs or not self.cached_inputs:
attention_mask, loss_mask, position_ids = _create_ltor_masks_and_position_ids(
tokens, self.eos_id, self.reset_position_ids, self.reset_attention_mask, self.eod_mask_loss,
)
if not self.create_inputs:
self.cached_attention_mask = attention_mask
self.cached_loss_mask = loss_mask
self.cached_position_ids = position_ids
self.cached_inputs = True
else:
attention_mask = self.cached_attention_mask
loss_mask = self.cached_loss_mask
position_ids = self.cached_position_ids
loss_mask[labels == -1] = 0.0
tokens[tokens == -1] = 0
labels[labels == -1] = 0
# Negative index comes when we pad the last batch in MegatronPretrainingBatchSampler
# We make the loss_mask zero to mask out loss from these samples
if idx < 0:
logging.debug('Got negative index. Masking loss from this sample')
loss_mask = torch.zeros_like(loss_mask)
return {
'tokens': tokens,
'labels': labels,
'attention_mask': attention_mask,
'loss_mask': loss_mask,
'position_ids': position_ids,
}
class MockGPTDataset(Dataset):
def __init__(
self, cfg, tokenizer, name, num_samples, seq_length, seed,
):
if not HAVE_MEGATRON_CORE:
raise ImportError(
"Megatron core was not found. Please see the NeMo README for installation instructions: https://github.com/NVIDIA/NeMo#megatron-gpt."
)
super().__init__()
self.name = name
self.seq_length = seq_length
self.vocab_size = tokenizer.vocab_size
self.length = num_samples
self.seed = seed
self.attention_mask = torch.tril(torch.ones((self.seq_length, self.seq_length))).unsqueeze(0)
self.attention_mask = self.attention_mask < 0.5
self.loss_mask = torch.ones(self.seq_length, dtype=torch.float)
self.position_ids = torch.arange(self.seq_length, dtype=torch.int64)
def __len__(self):
return self.length
def _get_text(self, idx: int) -> np.ndarray:
np_gen = np.random.default_rng(seed=(self.seed + idx))
return np_gen.integers(self.vocab_size, size=[self.seq_length], dtype=np.int64)
def __getitem__(self, idx):
# Generate data of the expected size and datatype (based on GPTDataset).
np_gen = np.random.default_rng(seed=(self.seed + idx))
tokens = torch.from_numpy(np_gen.integers(self.vocab_size, size=[self.seq_length], dtype=np.int64))
labels = torch.from_numpy(np_gen.integers(self.vocab_size, size=[self.seq_length], dtype=np.int64))
return {
'tokens': tokens,
'labels': labels,
'attention_mask': self.attention_mask,
'loss_mask': self.loss_mask,
'position_ids': self.position_ids,
}
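# Illustrative usage sketch, not part of the original NeMo file: MockGPTDataset only
# reads vocab_size from the tokenizer, so a tiny stand-in object is enough to inspect
# the generated samples. Requires megatron-core to be installed; the stub tokenizer
# below is an assumption made purely for this example.
def _example_mock_gpt_dataset():
    class _StubTokenizer:
        vocab_size = 128
    ds = MockGPTDataset(None, _StubTokenizer(), "train", num_samples=4, seq_length=8, seed=0)
    sample = ds[0]
    return sample['tokens'].shape  # torch.Size([8])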
@torch.no_grad()
def _create_ltor_masks_and_position_ids(
tokens: torch.Tensor, eod_token: int, reset_position_ids: bool, reset_attention_mask: bool, eod_mask_loss: bool,
):
"""Create `attention_mask`, `loss_mask`, and `position_ids`.
This function is modified :func:`get_ltor_masks_and_position_ids` in nemo/collections/nlp/modules/common/megatron/utils.py:
`get_ltor_masks_and_position_ids` assumes a microbatch of ``tokens``, i.e. 2D tensor while
this function assumes ``tokens`` to be 1D tensor.
Args:
tokens: A 1D tensor that holds the indices of tokens.
eod_token:
reset_position_ids:
reset_attention_mask:
eod_mask_loss
"""
assert tokens.ndim == 1
seq_length = tokens.numel()
# `attention_mask` has the shape of [1, seq_length, seq_length]
attention_mask = torch.tril(torch.ones((seq_length, seq_length))).unsqueeze(0)
loss_mask = torch.ones(seq_length, dtype=torch.float)
if eod_mask_loss:
loss_mask[tokens == eod_token] = 0.0
position_ids = torch.arange(seq_length, dtype=torch.int64)
if reset_position_ids:
position_ids = position_ids.clone()
if reset_position_ids or reset_attention_mask:
# Find indices where EOD token is.
        eod_index = position_ids[tokens == eod_token]
# Detach indices from positions if going to modify positions.
if reset_position_ids:
eod_index = eod_index.clone()
prev_index = 0
for j in range(eod_index.numel()):
i = eod_index[j]
if reset_attention_mask:
attention_mask[0, (i + 1) :, : (i + 1)] = 0
if reset_position_ids:
position_ids[(i + 1) :] -= i + 1 - prev_index
prev_index = i + 1
# Convert attention mask to binary.
attention_mask = attention_mask < 0.5
return attention_mask, loss_mask, position_ids
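# Illustrative usage sketch, not part of the original NeMo file: builds the masks for a
# toy 1D token tensor. The EOD token id of 0 used here is an arbitrary assumption.
def _example_create_ltor_masks():
    toy_tokens = torch.tensor([5, 7, 0, 9, 3])
    attention_mask, loss_mask, position_ids = _create_ltor_masks_and_position_ids(
        toy_tokens, eod_token=0, reset_position_ids=True, reset_attention_mask=True, eod_mask_loss=True,
    )
    # attention_mask has shape [1, 5, 5]; loss_mask is zero at the EOD position; with
    # reset_position_ids=True the position ids restart after the EOD token: [0, 1, 2, 0, 1].
    return attention_mask, loss_mask, position_ids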
def _build_index_mappings(
name,
data_prefix,
documents,
sizes,
num_samples,
seq_length,
seed,
index_mapping_dir: str = None,
drop_last: bool = True,
add_extra_token: int = 1,
shuffle_documents: bool = True,
exchange_indices_distributed: bool = False,
):
"""Build doc-idx, sample-idx, and shuffle-idx.
doc-idx: is an array (ordered) of documents to be used in training.
sample-idx: is the start document index and document offset for each
training sample.
shuffle-idx: maps the sample index into a random index into sample-idx.
"""
# Number of tokens in each epoch and number of required epochs.
tokens_per_epoch = _num_tokens(documents, sizes)
num_epochs = _num_epochs(tokens_per_epoch, seq_length, num_samples, add_extra_token)
# rng state
np_rng = np.random.RandomState(seed=seed)
# Filename of the index mappings.
if index_mapping_dir is not None:
_filename = os.path.join(index_mapping_dir, os.path.basename(data_prefix))
else:
_filename = data_prefix
_filename += '_{}_indexmap'.format(name)
_filename += '_{}ns'.format(num_samples)
_filename += '_{}sl'.format(seq_length)
_filename += '_{}s'.format(seed)
doc_idx_filename = _filename + '_doc_idx.npy'
sample_idx_filename = _filename + '_sample_idx.npy'
shuffle_idx_filename = _filename + '_shuffle_idx.npy'
# Build the indexed mapping if not exist.
if torch.distributed.get_rank() == 0:
using_cached_indices = True
if (
(not os.path.isfile(doc_idx_filename))
or (not os.path.isfile(sample_idx_filename))
or (not os.path.isfile(shuffle_idx_filename))
):
using_cached_indices = False
logging.info(' > WARNING: could not find index map files, building ' 'the indices on rank 0 ...')
# For the last epoch, decide whether include the entire epoch
# in the global shuffle or not.
# If we need only one epoch, then separating last epoch does
# not mean anything.
if num_epochs == 1:
separate_last_epoch = False
print(' > only one epoch required, setting ' 'separate_last_epoch to False', flush=True)
else:
# Get the number of samples for the last epoch
num_samples_from_epochs_minus_one = (
(num_epochs - 1) * tokens_per_epoch - add_extra_token
) // seq_length
last_epoch_num_samples = num_samples - num_samples_from_epochs_minus_one
assert last_epoch_num_samples >= 0, 'last epoch number of samples should be non-negative.'
num_samples_per_epoch = (tokens_per_epoch - add_extra_token) // seq_length
assert last_epoch_num_samples <= (
num_samples_per_epoch + 1
), 'last epoch number of samples exceeded max value.'
# If we have less than 80% of the samples for the last epoch,
                # separate out the epoch and treat it differently.
# Note: the 80% number is just based on common sense and can
# be adjusted if needed.
separate_last_epoch = last_epoch_num_samples < int(0.80 * num_samples_per_epoch)
if separate_last_epoch:
string = (
' > last epoch number of samples ({}) is smaller '
'than 80% of number of samples per epoch ({}), '
'setting separate_last_epoch to True'
)
else:
string = (
' > last epoch number of samples ({}) is larger '
'than 80% of number of samples per epoch ({}), '
'setting separate_last_epoch to False'
)
print(string.format(last_epoch_num_samples, num_samples_per_epoch), flush=True)
# doc-idx.
start_time = time.time()
doc_idx = _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch, shuffle_documents)
np.save(doc_idx_filename, doc_idx, allow_pickle=True)
logging.info(
                ' > elapsed time to build and save doc-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time)
)
# sample-idx.
start_time = time.time()
# Use C++ implementation for speed.
# First compile and then import.
assert doc_idx.dtype == np.int32
assert sizes.dtype == np.int32
try:
from nemo.collections.nlp.data.language_modeling.megatron.dataset_utils import compile_helper
compile_helper()
from nemo.collections.nlp.data.language_modeling.megatron import helpers
except ImportError:
raise ImportError(
f'Could not compile megatron dataset C++ helper functions and therefore cannot import helpers python file.'
)
sample_idx = helpers.build_sample_idx(
sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch, drop_last, add_extra_token
)
# sample_idx = _build_sample_idx(sizes, doc_idx, seq_length,
# num_epochs, tokens_per_epoch, drop_last, add_extra_token)
np.save(sample_idx_filename, sample_idx, allow_pickle=True)
logging.info(
                ' > elapsed time to build and save sample-idx mapping '
'(seconds): {:4f}'.format(time.time() - start_time)
)
# shuffle-idx.
start_time = time.time()
            # -1 is due to the data structure used to retrieve the index:
# sample i --> [sample_idx[i], sample_idx[i+1])
if separate_last_epoch:
num_samples_ = num_samples_from_epochs_minus_one
else:
num_samples_ = sample_idx.shape[0] - 1
shuffle_idx = _build_shuffle_idx(num_samples_, sample_idx.shape[0] - 1, np_rng)
np.save(shuffle_idx_filename, shuffle_idx, allow_pickle=True)
logging.info(
                ' > elapsed time to build and save shuffle-idx mapping'
' (seconds): {:4f}'.format(time.time() - start_time)
)
torch.distributed.barrier()
counts = torch.cuda.LongTensor([1])
torch.distributed.all_reduce(counts, group=parallel_state.get_data_parallel_group())
torch.distributed.all_reduce(counts, group=parallel_state.get_pipeline_model_parallel_group())
assert counts[0].item() == (
torch.distributed.get_world_size()
// torch.distributed.get_world_size(group=parallel_state.get_tensor_model_parallel_group())
)
if not exchange_indices_distributed or (torch.distributed.get_rank() == 0 and using_cached_indices):
# Load mappings.
start_time = time.time()
logging.info(' > loading doc-idx mapping from {}'.format(doc_idx_filename))
doc_idx = np.load(doc_idx_filename, allow_pickle=True, mmap_mode='r')
logging.info(' > loading sample-idx mapping from {}'.format(sample_idx_filename))
sample_idx = np.load(sample_idx_filename, allow_pickle=True, mmap_mode='r')
logging.info(' > loading shuffle-idx mapping from {}'.format(shuffle_idx_filename))
shuffle_idx = np.load(shuffle_idx_filename, allow_pickle=True, mmap_mode='r')
logging.info(' loaded indexed file in {:3.3f} seconds'.format(time.time() - start_time))
logging.info(' total number of samples: {}'.format(sample_idx.shape[0]))
logging.info(' total number of epochs: {}'.format(num_epochs))
if exchange_indices_distributed:
if torch.distributed.get_rank() == 0:
indices = [(doc_idx, sample_idx, shuffle_idx)]
else:
indices = [None]
torch.distributed.broadcast_object_list(indices)
doc_idx, sample_idx, shuffle_idx = indices[0]
return doc_idx, sample_idx, shuffle_idx
def _num_tokens(documents, sizes):
"""Total number of tokens in the dataset."""
return np.sum(sizes[documents])
def _num_epochs(tokens_per_epoch, seq_length, num_samples, add_extra_token=1):
"""Based on number of samples and sequence lenght, calculate how many
epochs will be needed."""
num_epochs = 0
total_tokens = 0
while True:
num_epochs += 1
total_tokens += tokens_per_epoch
# -1 is because we need to retrieve seq_length + 1 token each time
# but the last token will overlap with the first token of the next
# sample except for the last sample.
if ((total_tokens - add_extra_token) // seq_length) >= num_samples:
return num_epochs
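# Worked example, not part of the original NeMo file: with 1000 tokens per epoch,
# seq_length 512 and the default extra token, five epochs only cover (5000 - 1) // 512 = 9
# samples, so requesting 10 samples needs a sixth epoch, where (6000 - 1) // 512 = 11 >= 10.
def _example_num_epochs():
    assert _num_epochs(tokens_per_epoch=1000, seq_length=512, num_samples=10) == 6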
def _build_doc_idx(documents, num_epochs, np_rng, separate_last_epoch, shuffle=True):
"""Build an array with length = number-of-epochs * number-of-dcuments.
Each index is mapped to a corresponding document."""
if not separate_last_epoch or num_epochs == 1:
doc_idx = np.mgrid[0:num_epochs, 0 : len(documents)][1]
doc_idx[:] = documents
doc_idx = doc_idx.reshape(-1)
doc_idx = doc_idx.astype(np.int32)
if shuffle:
np_rng.shuffle(doc_idx)
else:
logging.info('Document shuffling disabled')
return doc_idx
doc_idx_first = _build_doc_idx(documents, num_epochs - 1, np_rng, False, shuffle)
doc_idx_last = _build_doc_idx(documents, 1, np_rng, False, shuffle)
return np.concatenate((doc_idx_first, doc_idx_last))
def _build_sample_idx(sizes, doc_idx, seq_length, num_epochs, tokens_per_epoch, drop_last=True, add_extra_token=1):
"""Sample index mapping is a 2D array with sizes
[number-of-samples + 1, 2] where [..., 0] contains
the index into `doc_idx` and [..., 1] is the
starting offset in that document."""
# Total number of samples. For -1 see comments in `_num_epochs`.
if not drop_last:
num_samples = -(-(num_epochs * tokens_per_epoch - add_extra_token) // seq_length)
else:
num_samples = (num_epochs * tokens_per_epoch - add_extra_token) // seq_length
sample_idx = np.zeros([num_samples + 1, 2], dtype=np.int32)
# Index into sample_idx.
sample_index = 0
# Index into doc_idx.
doc_idx_index = 0
    # Beginning offset for each document.
doc_offset = 0
# Start with first document and no offset.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
while sample_index <= num_samples:
# Start with a fresh sequence.
remaining_seq_length = seq_length + add_extra_token
while remaining_seq_length != 0:
# Get the document length.
doc_id = doc_idx[doc_idx_index]
doc_length = sizes[doc_id] - doc_offset
# And add it to the current sequence.
remaining_seq_length -= doc_length
# If we have more than a full sequence, adjust offset and set
# remaining length to zero so we return from the while loop.
# Note that -1 here is for the same reason we have -1 in
# `_num_epochs` calculations.
if remaining_seq_length <= 0:
doc_offset += remaining_seq_length + doc_length - add_extra_token
remaining_seq_length = 0
else:
                # Otherwise, start from the beginning of the next document.
if doc_idx_index == (len(doc_idx) - 1):
assert (
sample_index == num_samples
), F"sample_index={sample_index} and num_samples={num_samples} should be the same"
doc_offset = sizes[doc_idx[doc_idx_index]] - add_extra_token
break
doc_idx_index += 1
doc_offset = 0
# Record the sequence.
sample_idx[sample_index][0] = doc_idx_index
sample_idx[sample_index][1] = doc_offset
sample_index += 1
return sample_idx
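# Illustrative usage sketch, not part of the original NeMo file: runs the pure-Python
# reference implementation above on two documents of 6 and 4 tokens for one epoch with
# seq_length 4. Each row of the result is (index into doc_idx, token offset) marking
# where a training sample starts.
def _example_build_sample_idx():
    sizes = np.array([6, 4], dtype=np.int32)
    doc_idx = np.array([0, 1], dtype=np.int32)
    # Expected result: [[0, 0], [0, 4], [1, 2]]
    return _build_sample_idx(sizes, doc_idx, seq_length=4, num_epochs=1, tokens_per_epoch=10)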
def _build_shuffle_idx(num_samples, total_size, np_rng):
"""Build the range [0, size) and shuffle."""
print(
' > building shuffle index with split [0, {}) and [{}, {}) '
'...'.format(num_samples, num_samples, total_size),
flush=True,
)
dtype_ = np.uint32
if total_size >= (np.iinfo(np.uint32).max - 1):
dtype_ = np.int64
shuffle_idx_first = np.arange(start=0, stop=num_samples, step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_first)
if num_samples == total_size:
return shuffle_idx_first
shuffle_idx_last = np.arange(start=num_samples, stop=total_size, step=1, dtype=dtype_)
np_rng.shuffle(shuffle_idx_last)
return np.concatenate((shuffle_idx_first, shuffle_idx_last))
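# Illustrative usage sketch, not part of the original NeMo file: with fewer requested
# samples than the total size (the separate_last_epoch case), the ranges [0, num_samples)
# and [num_samples, total_size) are shuffled independently and concatenated.
def _example_build_shuffle_idx():
    rng = np.random.RandomState(seed=0)
    # Returns a permutation of 0..5 in which 0..3 and 4..5 are shuffled separately.
    return _build_shuffle_idx(num_samples=4, total_size=6, np_rng=rng)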
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/gpt_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""XLM-Style datasets"""
from typing import Dict, List
import numpy as np
from nemo.collections.common.tokenizers.tokenizer_spec import TokenizerSpec
from nemo.collections.nlp.data.common.sequence_to_sequence_dataset import (
BinarizedMemmapSequenceToSequenceDataset,
TextMemmapSequenceToSequenceDataset,
)
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import (
build_training_sample as build_training_sample_bert,
)
from nemo.collections.nlp.data.language_modeling.megatron.length_distribution_type import LengthDistribution
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
from nemo.collections.nlp.data.language_modeling.megatron.ul2_dataset import UL2Dataset
class CrossLingualBERTDataset(BinarizedMemmapSequenceToSequenceDataset):
"""Cross-lingual BERT dataset similar to the translation-language modeling objective in the XLM paper (https://arxiv.org/abs/1901.07291)"""
def __init__(
self,
src_dataset_prefix: str,
tgt_dataset_prefix: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
seed: int = 1234,
max_num_samples: int = None,
masked_lm_prob: float = 0.15,
):
super().__init__(
src_dataset_prefix=src_dataset_prefix,
tgt_dataset_prefix=tgt_dataset_prefix,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
seed=seed,
max_num_samples=max_num_samples,
)
assert src_tokenizer == tgt_tokenizer
# Vocab stuff.
self.vocab = src_tokenizer.vocab
self.vocab_id_list = list(range(self.src_tokenizer.original_vocab_size))
self.vocab_id_to_token_dict = {idx: token for idx, token in enumerate(self.vocab)}
self.cls_id = src_tokenizer.cls_id
self.sep_id = src_tokenizer.sep_id
self.mask_id = src_tokenizer.mask_id
self.pad_id = src_tokenizer.pad_id
self.bos_id = src_tokenizer.bos_id
self.eos_id = src_tokenizer.eos_id
self.max_seq_length = max_src_seq_length + max_tgt_seq_length
self.masked_lm_prob = masked_lm_prob
def __getitem__(self, idx):
src, tgt = super()._get_sample(idx)
if len(src) > self.max_src_seq_length:
src = src[: self.max_src_seq_length]
if len(tgt) > self.max_tgt_seq_length - 1: # -1 here to account for the <sep> token that gets added.
            tgt = tgt[: self.max_tgt_seq_length - 1]
np_rng = np.random.RandomState(seed=((self.seed + idx) % 2 ** 32))
# Potentially swap src, tgt with a 50% chance to avoid learning associations based on position in the sequence.
swap_src_tgt = np_rng.randint(0, 2)
if swap_src_tgt == 0:
sample = [np.concatenate((src, [self.sep_id], tgt))]
elif swap_src_tgt == 1:
sample = [np.concatenate((tgt, [self.sep_id], src))]
return build_training_sample_bert(
sample=sample,
target_seq_length=sample[0].shape[0],
max_seq_length=self.max_seq_length, # needed for padding
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
cls_id=self.cls_id,
sep_id=self.sep_id,
mask_id=self.mask_id,
pad_id=self.pad_id,
masked_lm_prob=self.masked_lm_prob,
np_rng=np_rng,
binary_head=False,
whole_word_masking=False,
skip_masking_id=self.sep_id,
)
# Skip the parent collate function, since we don't need it for this dataset.
def collate_fn(self, batch):
return batch
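# Illustrative sketch, not part of the original NeMo file: the 50/50 source/target swap
# used in __getitem__ above, shown with plain arrays. The separator id of 99 is a
# hypothetical value chosen only for this example.
def _example_swap_and_join(src, tgt, sep_id=99, seed=0):
    np_rng = np.random.RandomState(seed=seed)
    if np_rng.randint(0, 2) == 0:
        return np.concatenate((src, [sep_id], tgt))
    return np.concatenate((tgt, [sep_id], src))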
class CrossLingualMakedSequenceToSequenceDataset(BinarizedMemmapSequenceToSequenceDataset):
def __init__(
self,
src_dataset_prefix: str,
tgt_dataset_prefix: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
max_src_seq_length: int,
max_tgt_seq_length: int,
max_seq_length_dec: int,
seed: int = 1234,
max_num_samples: int = None,
masked_lm_prob: float = 0.15,
extreme_masked_lm_prob: float = 0.5,
max_ngram_size: int = 10,
mean_ngram_size: int = None,
min_ngram_size: int = 1,
extreme_max_ngram_size: int = 128,
extreme_mean_ngram_size: int = 64,
extreme_min_ngram_size: int = 32,
extreme_ngram_span_length_distribution: LengthDistribution = LengthDistribution.truncated_normal,
geometric_dist: bool = True,
permutation: bool = False,
favor_long_ngrams: bool = False,
masking_type: str = "t5",
):
super().__init__(
src_dataset_prefix=src_dataset_prefix,
tgt_dataset_prefix=tgt_dataset_prefix,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length,
max_tgt_seq_length=max_tgt_seq_length,
seed=seed,
max_num_samples=max_num_samples,
)
self.max_seq_length_dec = max_seq_length_dec
self.max_ngram_size = max_ngram_size
self.mean_ngram_size = mean_ngram_size
self.min_ngram_size = min_ngram_size
self.geometric_dist = geometric_dist
self.permutation = permutation
self.favor_long_ngrams = favor_long_ngrams
self.extreme_masked_lm_prob = extreme_masked_lm_prob
self.extreme_max_ngram_size = extreme_max_ngram_size
self.extreme_mean_ngram_size = extreme_mean_ngram_size
self.extreme_min_ngram_size = extreme_min_ngram_size
self.extreme_ngram_span_length_distribution = extreme_ngram_span_length_distribution
self.masking_type = masking_type
assert src_tokenizer == tgt_tokenizer
# Vocab stuff.
self.vocab_id_list = src_tokenizer.vocab
self.vocab_id_to_token_dict = {idx: token for idx, token in enumerate(self.vocab_id_list)}
self.cls_id = src_tokenizer.cls_id
self.sep_id = src_tokenizer.sep_id
self.mask_id = src_tokenizer.mask_id
self.pad_id = src_tokenizer.pad_id
self.bos_id = src_tokenizer.bos_id
self.eos_id = src_tokenizer.eos_id
self.max_seq_length = max_src_seq_length + max_tgt_seq_length
self.masked_lm_prob = masked_lm_prob
self.tokenizer_type = T5Dataset._determine_tokenizer_type(src_tokenizer, whole_word_masking=False)
self._build()
def _build(self):
"""
Class-specific build method to be overridden by child classes.
"""
self.sentinel_tokens = self.src_tokenizer.additional_special_tokens_ids
assert len(self.sentinel_tokens) > 0
def __getitem__(self, idx):
src, tgt = super()._get_sample(idx)
if len(src) > self.max_src_seq_length:
src = src[: self.max_src_seq_length]
if len(tgt) > self.max_tgt_seq_length - 1: # -1 here to account for the <sep> token that gets added.
            tgt = tgt[: self.max_tgt_seq_length - 1]
np_rng = np.random.RandomState(seed=(self.seed + idx))
return CrossLingualMakedSequenceToSequenceDataset.get_example(
src=src,
tgt=tgt,
max_seq_length=self.max_seq_length,
max_seq_length_dec=self.max_seq_length_dec,
masked_lm_prob=self.masked_lm_prob,
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
cls_id=self.cls_id,
sep_id=self.sep_id,
mask_id=self.mask_id,
bos_id=self.bos_id,
eos_id=self.eos_id,
pad_id=self.pad_id,
sentinel_tokens=self.sentinel_tokens,
max_ngram_size=self.max_ngram_size,
np_rng=np_rng,
mean_ngram_size=self.mean_ngram_size,
min_ngram_size=self.min_ngram_size,
extreme_masked_lm_prob=self.extreme_masked_lm_prob,
extreme_max_ngram_size=self.extreme_max_ngram_size,
extreme_mean_ngram_size=self.extreme_mean_ngram_size,
extreme_min_ngram_size=self.extreme_min_ngram_size,
extreme_ngram_span_length_distribution=self.extreme_ngram_span_length_distribution,
favor_long_ngrams=self.favor_long_ngrams,
permutation=self.permutation,
geometric_dist=self.geometric_dist,
tokenizer_type=self.tokenizer_type,
masking_type=self.masking_type,
)
# Skip the parent collate function, since we don't need it for this dataset.
def collate_fn(self, batch):
return batch
class BinarizedMemmapCrossLingualMLMAndTranslationDataset(BinarizedMemmapSequenceToSequenceDataset):
def __init__(
self,
src_dataset_prefix: str,
tgt_dataset_prefix: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
src_language: str,
tgt_language: str,
max_src_seq_length: int,
max_tgt_seq_length: int,
max_seq_length_dec: int,
seed: int = 1234,
max_num_samples: int = None,
masked_lm_prob: float = 0.15,
extreme_masked_lm_prob: float = 0.5,
max_ngram_size: int = 10,
mean_ngram_size: int = None,
min_ngram_size: int = 1,
extreme_max_ngram_size: int = 128,
extreme_mean_ngram_size: int = 64,
extreme_min_ngram_size: int = 32,
extreme_ngram_span_length_distribution: LengthDistribution = LengthDistribution.truncated_normal,
prefix_lm_pivot_mean: float = 0.25, # This is represented as a percentage of the total length.
geometric_dist: bool = True,
permutation: bool = False,
favor_long_ngrams: bool = False,
sampling_ratios: Dict[str, float] = {"x-masking": 0.25, "r-masking": 0.25, "s-masking": 0.25, "nmt": 0.25},
sentinel_tokens: List[int] = None,
):
super().__init__(
src_dataset_prefix=src_dataset_prefix,
tgt_dataset_prefix=tgt_dataset_prefix,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length
- 1, # -1 here to account for <sep> tokens and special prefix tokens to the encoder like <extra_id_x>, <extra_id_r>, etc.
max_tgt_seq_length=max_tgt_seq_length
- 1, # -1 here to account for <sep> tokens and special prefix tokens to the encoder like <extra_id_x>, <extra_id_r>, etc.
seed=seed,
max_num_samples=max_num_samples,
add_bos_to_enc=False,
add_eos_to_enc=False,
)
self.max_seq_length_dec = max_seq_length_dec
self.max_ngram_size = max_ngram_size
self.mean_ngram_size = mean_ngram_size
self.min_ngram_size = min_ngram_size
self.geometric_dist = geometric_dist
self.permutation = permutation
self.favor_long_ngrams = favor_long_ngrams
self.extreme_masked_lm_prob = extreme_masked_lm_prob
self.extreme_max_ngram_size = extreme_max_ngram_size
self.extreme_mean_ngram_size = extreme_mean_ngram_size
self.extreme_min_ngram_size = extreme_min_ngram_size
self.extreme_ngram_span_length_distribution = extreme_ngram_span_length_distribution
self.prefix_lm_pivot_mean = prefix_lm_pivot_mean
self.sampling_ratios = sampling_ratios
self.src_language = src_language
self.tgt_language = tgt_language
# Vocab stuff.
self.vocab_id_list = src_tokenizer.vocab
self.vocab_id_to_token_dict = {idx: token for idx, token in enumerate(self.vocab_id_list)}
self.cls_id = src_tokenizer.cls_id
self.sep_id = src_tokenizer.sep_id
self.mask_id = src_tokenizer.mask_id
self.pad_id = src_tokenizer.pad_id
self.bos_id = src_tokenizer.bos_id
self.eos_id = src_tokenizer.eos_id
self.max_seq_length = max_src_seq_length + max_tgt_seq_length - 1
self.masked_lm_prob = masked_lm_prob
self.sentinel_tokens = sentinel_tokens
self.tokenizer_type = T5Dataset._determine_tokenizer_type(src_tokenizer, whole_word_masking=False)
self._build()
def _build(self):
"""
Class-specific build method to be overridden by child classes.
"""
if self.sentinel_tokens is None:
self.sentinel_tokens = self.src_tokenizer.additional_special_tokens_ids
assert len(self.sentinel_tokens) > 0
@classmethod
def create_sample(
cls,
np_rng: np.random.RandomState,
src: List[int],
tgt: List[int],
sampling_ratios: Dict[str, float],
max_src_seq_length: int,
max_tgt_seq_length: int,
max_seq_length: int,
max_seq_length_dec: int,
src_language: str,
tgt_language: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
sep_id: int,
masked_lm_prob: float,
extreme_masked_lm_prob: float,
max_ngram_size: int,
min_ngram_size: int,
mean_ngram_size: int,
extreme_max_ngram_size: int,
extreme_min_ngram_size: int,
extreme_mean_ngram_size: int,
extreme_ngram_span_length_distribution: LengthDistribution,
sentinel_tokens: List[int],
prefix_lm_pivot_mean: float,
vocab_id_list: List[int],
vocab_id_to_token_dict: Dict[int, str],
favor_long_ngrams: bool = False,
permutation: bool = False,
geometric_dist: bool = True,
tokenizer_type: str = "wordpiece",
):
# Determine which task to perform - NMT/T5/UL2 based on sampling ratios.
task = np_rng.choice(list(sampling_ratios.keys()), p=list(sampling_ratios.values()))
# Potentially swap src, tgt with a 50% chance to avoid learning associations based on position in the sequence.
swap_src_tgt = np_rng.randint(0, 2)
if len(src) > max_src_seq_length:
src = src[:max_src_seq_length]
# Tasks that are not NMT have a <sep> token so we need to account for this in the length, hence we truncate tgt to max_tgt_seq_length - 1.
max_tgt_seq_length = max_tgt_seq_length - 1 if task != "nmt" else max_tgt_seq_length
if len(tgt) > max_tgt_seq_length:
tgt = tgt[:max_tgt_seq_length]
if task == "nmt":
# If src/tgt are swapped, also swap the prepend language token ID.
if swap_src_tgt == 1:
src, tgt = tgt, src
prepend_id = f"<{src_language}>"
else:
prepend_id = f"<{tgt_language}>"
text_dec = np.concatenate([[tgt_tokenizer.bos_id], tgt])
labels = np.concatenate([tgt, [tgt_tokenizer.eos_id]])
nmt_sample = {'text_enc': src, 'text_dec': text_dec, 'labels': labels}
return UL2Dataset._prepend_mask_type_token(src_tokenizer, nmt_sample, prepend_id)
if swap_src_tgt == 0:
sample = [np.concatenate((src, [sep_id], tgt))]
elif swap_src_tgt == 1:
sample = [np.concatenate((tgt, [sep_id], src))]
if task == "x-masking":
return UL2Dataset.get_x_masking_training_sample(
sample=sample,
tokenizer=src_tokenizer,
np_rng=np_rng,
target_seq_length=sample[0].shape[0],
max_seq_length=max_seq_length, # -1 to account for the <extra_id_x> token that gets added after the sample is created.
max_seq_length_dec=max_seq_length_dec,
masked_lm_prob=masked_lm_prob,
extreme_masked_lm_prob=extreme_masked_lm_prob,
max_ngram_size=max_ngram_size,
min_ngram_size=min_ngram_size,
mean_ngram_size=mean_ngram_size,
extreme_max_ngram_size=extreme_max_ngram_size,
extreme_min_ngram_size=extreme_min_ngram_size,
extreme_mean_ngram_size=extreme_mean_ngram_size,
extreme_ngram_span_length_distribution=extreme_ngram_span_length_distribution,
sentinel_tokens=sentinel_tokens,
skip_masking_id=sep_id,
)
elif task == "s-masking":
return UL2Dataset.get_s_masking_training_sample(
sample=sample,
np_rng=np_rng,
max_seq_length_encoder=max_seq_length, # -1 to account for the <extra_id_s> token that gets added after the sample is created.
max_seq_length_decoder=max_seq_length_dec,
tokenizer=src_tokenizer,
prefix_lm_pivot_mean=prefix_lm_pivot_mean,
pivot_distribution=extreme_ngram_span_length_distribution,
add_eos=True, # Most sentences are < max length in cross-lingual data, so we add an EOS to indicate to the model to stop.
)
elif task == "r-masking":
return UL2Dataset.get_r_masking_training_sample(
sample=sample,
tokenizer=src_tokenizer,
np_rng=np_rng,
target_seq_length=sample[0].shape[0],
max_seq_length=max_seq_length, # -1 to account for the <extra_id_r> token that gets added after the sample is created.
max_seq_length_dec=max_seq_length_dec,
masked_lm_prob=masked_lm_prob,
vocab_id_list=vocab_id_list,
vocab_id_to_token_dict=vocab_id_to_token_dict,
max_ngram_size=max_ngram_size,
mean_ngram_size=mean_ngram_size,
whole_word_masking=False,
favor_long_ngrams=favor_long_ngrams,
permutation=permutation,
geometric_dist=geometric_dist,
tokenizer_type=tokenizer_type,
sentinel_tokens=sentinel_tokens,
skip_masking_id=sep_id,
)
def __getitem__(self, idx):
np_rng = np.random.RandomState(seed=(self.seed + idx))
src, tgt = super()._get_sample(idx)
return BinarizedMemmapCrossLingualMLMAndTranslationDataset.create_sample(
np_rng=np_rng,
src=src,
tgt=tgt,
sampling_ratios=self.sampling_ratios,
max_src_seq_length=self.max_src_seq_length,
max_tgt_seq_length=self.max_tgt_seq_length,
max_seq_length=self.max_seq_length,
max_seq_length_dec=self.max_seq_length_dec,
src_language=self.src_language,
tgt_language=self.tgt_language,
src_tokenizer=self.src_tokenizer,
tgt_tokenizer=self.tgt_tokenizer,
sep_id=self.sep_id,
masked_lm_prob=self.masked_lm_prob,
extreme_masked_lm_prob=self.extreme_masked_lm_prob,
max_ngram_size=self.max_ngram_size,
min_ngram_size=self.min_ngram_size,
mean_ngram_size=self.mean_ngram_size,
extreme_max_ngram_size=self.extreme_max_ngram_size,
extreme_min_ngram_size=self.extreme_min_ngram_size,
extreme_mean_ngram_size=self.extreme_mean_ngram_size,
extreme_ngram_span_length_distribution=self.extreme_ngram_span_length_distribution,
sentinel_tokens=self.sentinel_tokens,
prefix_lm_pivot_mean=self.prefix_lm_pivot_mean,
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
favor_long_ngrams=self.favor_long_ngrams,
permutation=self.permutation,
geometric_dist=self.geometric_dist,
tokenizer_type=self.tokenizer_type,
)
    # NOTE: We want the parent's collate_fn to be used here since NMT examples are not padded even though the other tasks are.
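# Illustrative sketch, not part of the original NeMo file: how the per-example task is
# drawn from the sampling_ratios dictionary in create_sample above; the ratios here are
# the documented defaults.
def _example_draw_task(seed=0):
    np_rng = np.random.RandomState(seed=seed)
    ratios = {"x-masking": 0.25, "r-masking": 0.25, "s-masking": 0.25, "nmt": 0.25}
    return np_rng.choice(list(ratios.keys()), p=list(ratios.values()))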
class TextMemmapCrossLingualMLMAndTranslationDataset(TextMemmapSequenceToSequenceDataset):
def __init__(
self,
src_file_name: str,
tgt_file_name: str,
src_tokenizer: TokenizerSpec,
tgt_tokenizer: TokenizerSpec,
src_language: str,
tgt_language: str,
max_src_seq_length: int,
max_tgt_seq_length: int,
max_seq_length_dec: int,
seed: int = 1234,
max_num_samples: int = None,
masked_lm_prob: float = 0.15,
extreme_masked_lm_prob: float = 0.5,
max_ngram_size: int = 10,
mean_ngram_size: int = None,
min_ngram_size: int = 1,
extreme_max_ngram_size: int = 128,
extreme_mean_ngram_size: int = 64,
extreme_min_ngram_size: int = 32,
extreme_ngram_span_length_distribution: LengthDistribution = LengthDistribution.truncated_normal,
prefix_lm_pivot_mean: float = 0.25, # This is represented as a percentage of the total length.
geometric_dist: bool = True,
permutation: bool = False,
favor_long_ngrams: bool = False,
sampling_ratios: Dict[str, float] = {"x-masking": 0.25, "r-masking": 0.25, "s-masking": 0.25, "nmt": 0.25},
sentinel_tokens: List[int] = None,
):
super().__init__(
src_file_name=src_file_name,
tgt_file_name=tgt_file_name,
src_tokenizer=src_tokenizer,
tgt_tokenizer=tgt_tokenizer,
max_src_seq_length=max_src_seq_length
- 1, # -1 here to account for <sep> tokens and special prefix tokens to the encoder like <extra_id_x>, <extra_id_r>, etc.
max_tgt_seq_length=max_tgt_seq_length
- 1, # -1 here to account for <sep> tokens and special prefix tokens to the encoder like <extra_id_x>, <extra_id_r>, etc.
seed=seed,
max_num_samples=max_num_samples,
add_bos_to_enc=False,
add_eos_to_enc=False,
)
self.max_seq_length_dec = max_seq_length_dec
self.max_ngram_size = max_ngram_size
self.mean_ngram_size = mean_ngram_size
self.min_ngram_size = min_ngram_size
self.geometric_dist = geometric_dist
self.permutation = permutation
self.favor_long_ngrams = favor_long_ngrams
self.extreme_masked_lm_prob = extreme_masked_lm_prob
self.extreme_max_ngram_size = extreme_max_ngram_size
self.extreme_mean_ngram_size = extreme_mean_ngram_size
self.extreme_min_ngram_size = extreme_min_ngram_size
self.extreme_ngram_span_length_distribution = extreme_ngram_span_length_distribution
self.prefix_lm_pivot_mean = prefix_lm_pivot_mean
self.sampling_ratios = sampling_ratios
self.src_language = src_language
self.tgt_language = tgt_language
# Vocab stuff.
self.vocab_id_list = src_tokenizer.vocab
self.vocab_id_to_token_dict = {idx: token for idx, token in enumerate(self.vocab_id_list)}
self.cls_id = src_tokenizer.cls_id
self.sep_id = src_tokenizer.sep_id
self.mask_id = src_tokenizer.mask_id
self.pad_id = src_tokenizer.pad_id
self.bos_id = src_tokenizer.bos_id
self.eos_id = src_tokenizer.eos_id
self.max_seq_length = max_src_seq_length + max_tgt_seq_length - 1
self.masked_lm_prob = masked_lm_prob
self.sentinel_tokens = sentinel_tokens
self.tokenizer_type = T5Dataset._determine_tokenizer_type(src_tokenizer, whole_word_masking=False)
self._build()
def _build(self):
"""
Class-specific build method to be overridden by child classes.
"""
if self.sentinel_tokens is None:
self.sentinel_tokens = self.src_tokenizer.additional_special_tokens_ids
assert len(self.sentinel_tokens) > 0
def __getitem__(self, idx):
np_rng = np.random.RandomState(seed=(self.seed + idx))
src, tgt = super()._get_sample(idx)
return BinarizedMemmapCrossLingualMLMAndTranslationDataset.create_sample(
np_rng=np_rng,
src=src,
tgt=tgt,
sampling_ratios=self.sampling_ratios,
max_src_seq_length=self.max_src_seq_length,
max_tgt_seq_length=self.max_tgt_seq_length,
max_seq_length=self.max_seq_length,
max_seq_length_dec=self.max_seq_length_dec,
src_language=self.src_language,
tgt_language=self.tgt_language,
src_tokenizer=self.src_tokenizer,
tgt_tokenizer=self.tgt_tokenizer,
sep_id=self.sep_id,
masked_lm_prob=self.masked_lm_prob,
extreme_masked_lm_prob=self.extreme_masked_lm_prob,
max_ngram_size=self.max_ngram_size,
min_ngram_size=self.min_ngram_size,
mean_ngram_size=self.mean_ngram_size,
extreme_max_ngram_size=self.extreme_max_ngram_size,
extreme_min_ngram_size=self.extreme_min_ngram_size,
extreme_mean_ngram_size=self.extreme_mean_ngram_size,
extreme_ngram_span_length_distribution=self.extreme_ngram_span_length_distribution,
sentinel_tokens=self.sentinel_tokens,
prefix_lm_pivot_mean=self.prefix_lm_pivot_mean,
vocab_id_list=self.vocab_id_list,
vocab_id_to_token_dict=self.vocab_id_to_token_dict,
favor_long_ngrams=self.favor_long_ngrams,
permutation=self.permutation,
geometric_dist=self.geometric_dist,
tokenizer_type=self.tokenizer_type,
)
    # NOTE: We want the parent's collate_fn to be used here since NMT examples are not padded even though the other tasks are.
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/xlm_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, List
import torch
from torch.utils.data.dataset import Dataset
class GPTRequestDataset(Dataset):
"""
Args:
requests: List of prompts
tokenizer: model tokenizer
tokens_to_generate: int value denoting amount of tokens model should generate
compute_logprobs: bool value denoting if model should generate tokens or compute logprobs
Returns:
data: class object
{'data': tokens, 'tokens_to_generate': tokens_to_generate, 'compute_logprobs': compute_logprobs}
* data: List of token's ids in respect to prompts
* tokens_to_generate: int value denoting amount of tokens model should generate
* compute_logprobs: bool value denoting if model should generate tokens or compute logprobs
"""
def __init__(self, requests: List, tokenizer, tokens_to_generate: int, compute_logprobs: bool) -> None:
super().__init__()
self.requests = requests
self.tokenizer = tokenizer
self.tokens_to_generate = tokens_to_generate
self.compute_logprobs = compute_logprobs
self.tokens = []
self.prompt_tags = []
# tokenize prompt
for request in self.requests:
if type(request) == dict:
prompt_tag = request['prompt_tag']
self.prompt_tags.append(prompt_tag)
text = request['text']
else:
text = request
self.tokens.append(torch.tensor(self.tokenizer.text_to_ids(text)))
if self.prompt_tags:
self.data = {
'prompt_tags': self.prompt_tags,
'data': self.tokens,
'tokens_to_generate': self.tokens_to_generate,
'compute_logprobs': self.compute_logprobs,
}
else:
self.data = {
'data': self.tokens,
'tokens_to_generate': self.tokens_to_generate,
'compute_logprobs': self.compute_logprobs,
}
def __len__(self):
return 1
def __getitem__(self, index):
return self.data
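# Illustrative usage sketch, not part of the original NeMo file: GPTRequestDataset
# accepts either bare prompt strings or dicts carrying a 'prompt_tag' and 'text'.
# The prompts below are made up purely for demonstration.
def _example_gpt_requests():
    return [
        "Translate to French: hello",
        {"prompt_tag": "qa", "text": "What does NeMo stand for?"},
    ]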
class T5RequestDataset(Dataset):
def __init__(self, request: Dict, tokenizer) -> None:
super().__init__()
self.request = request
self.tokenizer = tokenizer
self.add_eos_to_encoder_input = self.request['add_eos_to_encoder_input']
# tokenize prompt
self.request['tokenized_prompt'] = ' '.join(self.tokenizer.text_to_tokens(request['prompt']))
tokens = self.tokenizer.text_to_ids(request['prompt'])
self.request['tokens'] = torch.tensor(tokens)
self.mask_prompt(self.request['prompt'])
def mask_prompt(self, sample):
sample = sample.split()
sentinel_idx = 0
for i, word in enumerate(sample):
if word == '<mask>':
sample[i] = f'<extra_id_{sentinel_idx}>'
sentinel_idx += 1
sample = ' '.join(sample)
sample = self.tokenizer.text_to_ids(sample)
if self.add_eos_to_encoder_input:
sample = sample + [self.tokenizer.eos_id]
sample = torch.LongTensor(sample)
self.request['masked_sample'] = sample
def __len__(self):
return 1
def __getitem__(self, index):
return self.request
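# Illustrative sketch, not part of the original NeMo file: the <mask> -> <extra_id_N>
# rewriting performed by T5RequestDataset.mask_prompt above, shown at the string level only.
def _example_rewrite_masks(prompt: str) -> str:
    words = prompt.split()
    sentinel_idx = 0
    for i, word in enumerate(words):
        if word == '<mask>':
            words[i] = f'<extra_id_{sentinel_idx}>'
            sentinel_idx += 1
    return ' '.join(words)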
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/request_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataloaders."""
import abc
from itertools import chain
from typing import Optional
import torch
from nemo.utils import logging
class BaseMegatronSampler:
def __init__(
self,
total_samples: int,
consumed_samples: int,
micro_batch_size: int,
data_parallel_rank: int,
data_parallel_size: int,
drop_last: bool = True,
global_batch_size: Optional[int] = None,
rampup_batch_size: Optional[list] = None,
pad_samples_to_global_batch_size: Optional[bool] = False,
) -> None:
# Sanity checks.
if total_samples <= 0:
raise RuntimeError("no sample to consume: {}".format(total_samples))
if consumed_samples >= total_samples:
raise RuntimeError("no samples left to consume: {}, {}".format(consumed_samples, total_samples))
        if micro_batch_size <= 0:
            raise RuntimeError(f"micro_batch_size must be greater than 0, but got {micro_batch_size}")
        if data_parallel_size <= 0:
            raise RuntimeError(f"data_parallel_size must be greater than 0, but got {data_parallel_size}")
if data_parallel_rank >= data_parallel_size:
raise RuntimeError(
"data_parallel_rank should be smaller than data size, but {} >= {}".format(
data_parallel_rank, data_parallel_size
)
)
if global_batch_size is not None and rampup_batch_size is None:
if global_batch_size % (micro_batch_size * data_parallel_size) != 0:
raise RuntimeError(
f"`global_batch_size` ({global_batch_size}) is not divisible by "
f"`micro_batch_size ({micro_batch_size}) x data_parallel_size "
f"({data_parallel_size})`"
)
if pad_samples_to_global_batch_size and global_batch_size is None:
raise RuntimeError(
f"`pad_samples_to_global_batch_size` can be `True` only when "
f"`global_batch_size` is set to an integer value"
)
# Keep a copy of input params for later use.
self.total_samples = total_samples
self.consumed_samples = consumed_samples
self.micro_batch_size = micro_batch_size
self.data_parallel_rank = data_parallel_rank
self.micro_batch_times_data_parallel_size = self.micro_batch_size * data_parallel_size
self.drop_last = drop_last
self.global_batch_size = global_batch_size
self.pad_samples_to_global_batch_size = pad_samples_to_global_batch_size
logging.info(
f'Instantiating MegatronPretrainingSampler with total_samples: {total_samples} and consumed_samples: {consumed_samples}'
)
def __len__(self):
num_available_samples: int = self.total_samples - self.consumed_samples
if self.global_batch_size is not None:
if self.drop_last:
return num_available_samples // self.global_batch_size
else:
return (num_available_samples + self.global_batch_size - 1) // self.global_batch_size
else:
return (num_available_samples - 1) // self.micro_batch_times_data_parallel_size + 1
@abc.abstractmethod
def __iter__(self):
...
class MegatronPretrainingSampler(BaseMegatronSampler):
def get_start_end_idx(self):
start_idx = self.data_parallel_rank * self.micro_batch_size
end_idx = start_idx + self.micro_batch_size
return start_idx, end_idx
def __iter__(self):
batch = []
        # The last incomplete batch will be dropped unless drop_last is set to False
indices = range(self.consumed_samples, self.total_samples)
if (not self.drop_last) and self.pad_samples_to_global_batch_size:
pad_samples_num = -len(indices) % self.global_batch_size
pad_indices = range(-1, -pad_samples_num - 1, -1)
indices = chain(indices, pad_indices)
for idx in indices:
batch.append(idx)
if len(batch) == self.micro_batch_times_data_parallel_size:
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
batch = []
        # Yield the last partial batch if drop_last is not set
if len(batch) > 0 and not self.drop_last:
assert (
not self.pad_samples_to_global_batch_size
), 'with pad_samples_to_global_batch_size all batches should be complete'
start_idx, end_idx = self.get_start_end_idx()
yield batch[start_idx:end_idx]
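# Illustrative usage sketch, not part of the original NeMo file: a tiny sampler for two
# data-parallel ranks. Each yielded list holds the dataset indices of one micro batch on
# rank 0; rank 1 would receive the complementary slice of every window.
def _example_pretraining_sampler():
    sampler = MegatronPretrainingSampler(
        total_samples=16, consumed_samples=0, micro_batch_size=2, data_parallel_rank=0, data_parallel_size=2,
    )
    return list(iter(sampler))  # [[0, 1], [4, 5], [8, 9], [12, 13]]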
class MegatronPretrainingRandomSampler(BaseMegatronSampler):
def __init__(
self,
total_samples: int,
consumed_samples: int,
micro_batch_size: int,
data_parallel_rank: int,
data_parallel_size: int,
drop_last: bool = True,
global_batch_size: Optional[int] = None,
pad_samples_to_global_batch_size: Optional[bool] = False,
) -> None:
super().__init__(
total_samples=total_samples,
consumed_samples=consumed_samples,
micro_batch_size=micro_batch_size,
data_parallel_rank=data_parallel_rank,
data_parallel_size=data_parallel_size,
drop_last=drop_last,
global_batch_size=global_batch_size,
pad_samples_to_global_batch_size=pad_samples_to_global_batch_size,
)
assert (
pad_samples_to_global_batch_size == False
), "`MegatronPretrainingRandomSampler` does not support sample padding"
self.last_batch_size = self.total_samples % self.micro_batch_times_data_parallel_size
def __iter__(self):
active_total_samples = self.total_samples - self.last_batch_size
self.epoch = self.consumed_samples // active_total_samples
current_epoch_samples = self.consumed_samples % active_total_samples
assert current_epoch_samples % self.micro_batch_times_data_parallel_size == 0
# data sharding and random sampling
bucket_size = (self.total_samples // self.micro_batch_times_data_parallel_size) * self.micro_batch_size
bucket_offset = current_epoch_samples // self.data_parallel_size
start_idx = self.data_parallel_rank * bucket_size
g = torch.Generator()
g.manual_seed(self.epoch)
random_idx = torch.randperm(bucket_size, generator=g).tolist()
idx_range = [start_idx + x for x in random_idx[bucket_offset:]]
batch = []
# Last batch if not complete will be dropped.
for idx in idx_range:
batch.append(idx)
if len(batch) == self.micro_batch_size:
self.consumed_samples += self.micro_batch_times_data_parallel_size
yield batch
batch = []
        # Yield the last partial batch if drop_last is not set
if len(batch) > 0 and not self.drop_last:
yield batch
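# Illustrative usage sketch, not part of the original NeMo file: the random sampler shards
# the dataset into per-rank buckets and reshuffles each bucket deterministically per epoch,
# so the exact indices below depend only on the epoch-seeded generator.
def _example_random_sampler():
    sampler = MegatronPretrainingRandomSampler(
        total_samples=16, consumed_samples=0, micro_batch_size=2, data_parallel_rank=0, data_parallel_size=2,
    )
    return list(iter(sampler))  # four micro batches of two shuffled indices from rank 0's bucket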
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/data_samplers.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo.collections.nlp.data.language_modeling.megatron.bert_dataset import BertDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_dataset import GPTDataset
from nemo.collections.nlp.data.language_modeling.megatron.gpt_prompt_learning_dataset import GPTPromptLearningDataset
from nemo.collections.nlp.data.language_modeling.megatron.indexed_dataset import IndexedDataset, MMapIndexedDataset
from nemo.collections.nlp.data.language_modeling.megatron.t5_dataset import T5Dataset
| NeMo-main | nemo/collections/nlp/data/language_modeling/megatron/__init__.py |