python_code | repo_name | file_path
---|---|---
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Processing data for megatron pretraining.
Example to create dataset used for training attribute prediction model:
python preprocessing.py --input_file dataset/2023-04-12_oasst_all.trees.jsonl output_file_prefix=oasst_output mask_role=User type=TEXT_TO_VALUE split_ratio=0.95, seed=10
Example to create dataset used for attribute conditioned SFT model:
python preprocessing.py --input_file dataset/2023-04-12_oasst_all.trees.jsonl output_file_prefix=oasst_output mask_role=User type=VALUE_TO_TEXT split_ratio=0.95, seed=10
"""
import json
import random
import fire
# All the keys ['spam', 'lang_mismatch', 'pii', 'not_appropriate', 'hate_speech', 'sexual_content', 'quality', 'toxicity', 'humor', 'creativity', 'violence', 'fails_task', 'helpfulness', 'political_content', 'moral_judgement']
selected_keys = [
'quality',
'toxicity',
'humor',
'creativity',
'violence',
'helpfulness',
'not_appropriate',
'hate_speech',
'sexual_content',
'fails_task',
'political_content',
'moral_judgement',
]
label_values = {}
likert_scale = 5
system_prompt = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\n\n"
def encode_labels(labels):
items = []
for key in selected_keys:
if key in labels:
value = labels[key]['value']
items.append(f'{key}:{round(value*(likert_scale-1))}')
return ','.join(items)
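# Worked example (illustrative sketch, not part of the original script): with likert_scale=5
# and OASST label values normalized to [0, 1], each value maps to a 0-4 bucket via
# round(value * 4), emitted in the order of selected_keys:
#   >>> encode_labels({'quality': {'value': 0.75}, 'humor': {'value': 0.0}})
#   'quality:3,humor:0'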
def parse_conversations(tree_obj):
""" recusive function that returns all the sub converstaions in a list starting from node tree_obj
Args:
tree_obj (obj): current conversation node
Returns:
a list of sub conversation threads including the current conversation node
"""
if 'prompt' in tree_obj:
prompt_obj = tree_obj['prompt']
elif 'text' in tree_obj and 'role' in tree_obj:
prompt_obj = tree_obj
else:
return [[]]
if prompt_obj['role'] == 'prompter':
role = 'User'
elif prompt_obj['role'] == 'assistant':
role = 'Assistant'
else:
raise ValueError(f'unknown role {prompt_obj["role"]}')
turn = {'value': prompt_obj['text'], 'from': role}
if 'labels' in prompt_obj:
turn['human_labels'] = prompt_obj['labels']
for key in turn['human_labels']:
value_set = label_values.get(key, set())
value_set.add(turn['human_labels'][key]['value'])
label_values[key] = value_set
turn['label'] = encode_labels(prompt_obj['labels'])
if 'lang' in prompt_obj:
turn['lang'] = prompt_obj['lang'].split('-')[0]
if turn['label'] == '':
turn['label'] = f'lang:{turn["lang"]}'
else:
turn['label'] = turn['label'] + f',lang:{turn["lang"]}'
value_set = label_values.get('lang', set())
value_set.add(turn['lang'])
label_values['lang'] = value_set
all_conversations = []
multiple_sub_threads = []
for next_obj in prompt_obj['replies']:
multiple_threads = parse_conversations(next_obj)
multiple_sub_threads.extend(multiple_threads)
if len(multiple_sub_threads) != 0:
for sub_thread in multiple_sub_threads:
all_conversations.append([turn] + sub_thread)
else:
all_conversations.append([turn])
return all_conversations
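# Shape of the output (hypothetical tree, for illustration only): a prompter node with two
# assistant replies A and B expands into two threads, [prompt, A] and [prompt, B];
# a node with no replies yields the single-turn thread [prompt].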
def get_data_records(objs, mask_role, type):
output = []
for obj in objs:
multi_conversations = parse_conversations(obj)
for conversations in multi_conversations:
if len(conversations) <= 1:
# skip single-turn conversations
continue
conversation_obj = {}
conversation_obj['conversations'] = []
conversation_obj['tree_id'] = obj['message_tree_id']
conversation_obj['conversations'] = conversations
conversation_obj['system'] = system_prompt
conversation_obj['mask'] = mask_role
conversation_obj['type'] = type
output.append(conversation_obj)
return output
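# A record emitted by this function (and written as one JSONL line by main) is shaped roughly
# as follows; the conversation text here is invented purely for illustration:
# {
#   "tree_id": "...",
#   "system": "A chat between a curious user and an artificial intelligence assistant. ...",
#   "mask": "User",
#   "type": "TEXT_TO_VALUE",
#   "conversations": [
#     {"from": "User", "value": "...", "label": "quality:3,lang:en"},
#     {"from": "Assistant", "value": "...", "label": "quality:4,lang:en"}
#   ]
# }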
def main(
input_file='2023-04-12_oasst_all.trees.jsonl',
output_file_prefix='oasst_output',
mask_role='User',
type='TEXT_TO_VALUE',
split_ratio=0.95,
seed=10,
):
all_objs = []
with open(input_file, 'r', encoding='utf-8') as f:
for line in f:
obj = json.loads(line)
all_objs.append(obj)
random.seed(seed)
random.shuffle(all_objs)
train_num = int(len(all_objs) * split_ratio)
train_objs = all_objs[:train_num]
val_objs = all_objs[train_num:]
train_records = get_data_records(train_objs, mask_role, type)
val_records = get_data_records(val_objs, mask_role, type)
with open(f'{output_file_prefix}_train.jsonl', 'w', encoding='utf-8') as f:
for record in train_records:
f.write(json.dumps(record, ensure_ascii=False) + '\n')
with open(f'{output_file_prefix}_val.jsonl', 'w', encoding='utf-8') as f:
for record in val_records:
f.write(json.dumps(record, ensure_ascii=False) + '\n')
for label in label_values:
values = sorted(list(label_values[label]))
print(f'{label} values: {values}')
if __name__ == "__main__":
fire.Fire(main)
| NeMo-main | scripts/nlp_language_modeling/sft/preprocessing.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""script to annotate the the datasets with using trained attribute prediciton model.
First, we need to launch the NeMo Megatron inference server
Example:
```bash
python examples/nlp/language_modeling/megatron_gpt_eval.py \
gpt_model_file=/models/TRAINED_ATTR_PREDICTION_MODEL.nemo \
pipeline_model_parallel_split_rank=0 \
server=True \
tensor_model_parallel_size=TP_SIZE \
pipeline_model_parallel_size=PP_SIZE \
trainer.precision=bf16 \
trainer.devices=TP_SIZE*PP_SIZE \
trainer.num_nodes=1 \
web_server=False \
port=1424
```
Then, we can run this script to annotate the dataset.
Example usage:
python scripts/nlp_language_modeling/sft/attribute_annotate.py --batch_size=1 --host=localhost --input_file_name=input.jsonl --output_file_name=output.jsonl --port_num=1424
"""
import json
import os
import fire
import tqdm
from langchain.prompts.few_shot import PromptTemplate
from nemo.collections.nlp.modules.common.megatron.retrieval_services.util import text_generation
langs = [
'ar',
'bg',
'bn',
'ca',
'cs',
'da',
'de',
'el',
'en',
'eo',
'es',
'eu',
'fa',
'fi',
'fr',
'gl',
'he',
'hu',
'id',
'it',
'ja',
'ko',
'nb',
'nl',
'pl',
'pt',
'ro',
'ru',
'sk',
'sv',
'th',
'tr',
'uk',
'vi',
'zh',
]
SFT_PREFIX = """<extra_id_0>System
{system_message}"""
ONE_TURN_WITH_VAL = """<extra_id_1>{user_name}
{user_message}
<extra_id_2>{label}
"""
ONE_TURN_WITHOUT_VAL = """<extra_id_1>{user_name}
{user_message}
"""
SYSTEM = PromptTemplate(input_variables=["system_message"], template=SFT_PREFIX)
EXAMPLE_PROMPT_WITH_VAL = PromptTemplate(
input_variables=["user_name", "user_message", "label"], template=ONE_TRUN_WITH_VAL
)
EXAMPLE_PROMPT_WITHOUT_VAL = PromptTemplate(
input_variables=["user_name", "user_message"], template=ONE_TRUN_WITHOUT_VAL
)
selected_keys = [
'quality',
'toxicity',
'humor',
'creativity',
'violence',
'helpfulness',
'not_appropriate',
'hate_speech',
'sexual_content',
'fails_task',
'political_content',
'moral_judgement',
'lang',
]
def calculate_key(obj):
return ":".join([item['value'] for item in obj['conversations']])
def load_data(path):
with open(path, 'r', encoding='utf-8') as fin:
for line in fin:
yield json.loads(line)
def get_prompt(data_obj, turn, current_label="", label_id=0):
if len(data_obj['conversations']) < turn + 1:
return None
examples = []
for i in range(0, turn):
d = data_obj['conversations'][i]
if 'label' in d:
examples.append(
EXAMPLE_PROMPT_WITH_VAL.format(
**{'user_name': d['from'], 'user_message': d['value'], 'label': d['label']}
)
)
else:
examples.append(EXAMPLE_PROMPT_WITHOUT_VAL.format(**{'user_name': d['from'], 'user_message': d['value']}))
example_text = "".join(examples)
d = data_obj['conversations'][turn]
predict_message = EXAMPLE_PROMPT_WITHOUT_VAL.format(**{'user_name': d['from'], 'user_message': d['value']})
if label_id != 0:
current_label = current_label + ',' + selected_keys[label_id] + ':'
else:
current_label = '<extra_id_2>' + selected_keys[label_id] + ':'
return SYSTEM.format(**{'system_message': data_obj['system']}) + example_text + predict_message + current_label
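# For orientation, a hedged sketch of the string returned for the first turn (turn=0,
# label_id=0), with the user message invented for illustration; it is assembled from the
# SFT_PREFIX and ONE_TURN templates above, and the trailing 'quality:' is what the model
# is asked to complete:
#   <extra_id_0>System
#   {system message from the record}
#   <extra_id_1>User
#   How do magnets work?
#   <extra_id_2>quality: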
def create_gen_function(host='localhost', port=5555):
def request(prompts, greedy, add_BOS, token_to_gen, min_tokens, temp, top_p, top_k, repetition, end_strings):
data = {
"sentences": prompts,
"tokens_to_generate": int(token_to_gen),
"temperature": temp,
"add_BOS": add_BOS,
"top_k": top_k,
"top_p": top_p,
"greedy": greedy,
"all_probs": False,
"repetition_penalty": repetition,
"min_tokens_to_generate": int(min_tokens),
"end_strings": end_strings,
}
response = text_generation(data, ip=host, port=port)
sentences = response['sentences']
return sentences
return request
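# Minimal usage sketch (assumes a megatron_gpt_eval.py server is listening on the given
# host/port, as described in the module docstring; the argument values mirror the call in
# Worker.process_result below):
#   req = create_gen_function(host='localhost', port=1424)
#   sentences = req(prompts, greedy=True, add_BOS=False, token_to_gen=1, min_tokens=1,
#                   temp=0.1, top_p=1.0, top_k=1, repetition=1.0,
#                   end_strings=["<extra_id_1>", "<|endoftext|>"])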
class Worker(object):
def __init__(self, host='localhost', port=5555, progress_bar=None, output_file=None, process_lang=False):
self.req = create_gen_function(host=host, port=port)
self.fout = open(output_file, "a", encoding='utf-8')
self.progress_bar = progress_bar
self.process_lang = process_lang
def process_result(self, batch):
while True:
try:
items = [i['item'] for i in batch]
turns = [i['turn'] for i in batch]
prompts = [i['prompt'] for i in batch]
for label_id in range(1, len(selected_keys)):
results = self.req(
prompts,
greedy=True,
add_BOS=False,
token_to_gen=1,
min_tokens=1,
temp=0.1,
top_p=1.0,
top_k=1,
repetition=1.0,
end_strings=["<extra_id_1>", "<|endoftext|>"],
)
# get current value from result
current_values = []
nums = []
for result in results:
# note: result[-1] may be '\n'
current_val = result.split('quality')[-1]
current_val = 'quality' + current_val
# drop everything after the first newline
current_val = current_val.split('\n')[0].strip()
# remove everything that is >= selected_keys[label_id]
splits = current_val.split(',')
filtered = []
for item in splits:
filtered.append(item)
if item.split(':')[0] == selected_keys[label_id - 1]:
nums.append(item.split(':')[1])
break
current_val = '<extra_id_2>' + ','.join(filtered)
current_values.append(current_val)
filtered_items = []
filtered_turns = []
filtered_prompts = []
filtered_current_values = []
for result, item, turn, num, current_value in zip(results, items, turns, nums, current_values):
try:
value = int(num)
except Exception as e:
print(f'error {e} when convert {num} to int')
continue
filtered_current_values.append(current_value)
filtered_items.append(item)
filtered_turns.append(turn)
if label_id < len(selected_keys):
prompt = get_prompt(item, turn, current_label=current_value, label_id=label_id)
filtered_prompts.append(prompt)
items = filtered_items
turns = filtered_turns
prompts = filtered_prompts
current_values = filtered_current_values
if self.process_lang:
results = self.req(
prompts,
greedy=True,
add_BOS=False,
token_to_gen=1,
min_tokens=1,
temp=0.1,
top_p=1.0,
top_k=1,
repetition=1.0,
end_strings=["<extra_id_1>", "<|endoftext|>"],
)
# get current value from result
current_values = []
for result in results:
# note: result[-1] may be '\n'
if result.endswith('\n'):
result = result[:-1] + '@'
current_values.append(result.split('\n')[-1])
nums = []
for result in results:
# note: result[-1] may be '\n'
current_val = result.split('quality')[-1]
current_val = 'quality' + current_val
# drop everything after the first newline
current_val = current_val.split('\n')[0].strip()
# remove everything that is >= selected_keys[label_id]
splits = current_val.split(',')
filtered = []
for item in splits:
filtered.append(item)
if item.split(':')[0] == selected_keys[label_id]:
nums.append(item.split(':')[1])
break
current_val = '<extra_id_2>' + ','.join(filtered)
current_values.append(current_val)
filtered_items = []
filtered_turns = []
filtered_prompts = []
filtered_current_values = []
for result, item, turn, num, current_value in zip(results, items, turns, nums, current_values):
if num not in langs:
print(f'error {num} not in langs')
continue
filtered_current_values.append(current_value)
filtered_items.append(item)
filtered_turns.append(turn)
items = filtered_items
turns = filtered_turns
current_values = filtered_current_values
batch = []
for item, turn, current_value in zip(items, turns, current_values):
response_text = current_value[12:]  # strip the leading '<extra_id_2>' marker (12 characters)
if 'label' in item['conversations'][turn]:
item['conversations'][turn]['gt_label'] = item['conversations'][turn]['label']
item['conversations'][turn]['label'] = response_text
prompt = get_prompt(item, turn + 1, current_label='', label_id=0)
if prompt is not None:
batch.append({'prompt': prompt, 'item': item, 'turn': turn + 1})
else:
self.progress_bar.update(1)
self.fout.write(json.dumps(item, ensure_ascii=False) + "\n")
self.fout.flush()
if self.progress_bar.n >= self.progress_bar.total:
break
if len(batch) == 0:
break
except Exception as e:
print(f'error {e} when processing {batch}')
# ignore the error and continue
self.progress_bar.update(1)
if self.progress_bar.n >= self.progress_bar.total:
break
def main(
batch_size=1,
host='localhost',
input_file_name='input.jsonl',
output_file_name='output.jsonl',
port_num=1424,
process_lang=True,
):
input_data = load_data(f'{input_file_name}')
output_path = f'{output_file_name}'
existing_requests = set()
if os.path.exists(output_path):
with open(output_path, 'r', encoding='utf-8') as fin:
for line in fin:
line = json.loads(line)
existing_requests.add(calculate_key(line))
print(f"Loaded {len(existing_requests)} existing requests")
filter_data = [d for d in input_data if calculate_key(d) not in existing_requests]
progress_bar = tqdm.tqdm(total=len(filter_data))
worker = Worker(
host=host, port=port_num, progress_bar=progress_bar, output_file=output_path, process_lang=process_lang
)
for batch_idx in range(0, len(filter_data), batch_size):
batch = [line for line in filter_data[batch_idx : batch_idx + batch_size]]
turns = [
0 if 'mask' not in d['conversations'][0]['from'] or d['conversations'][0]['from'] == d['mask'] else 1
for d in batch
]
task = [{'prompt': get_prompt(d, turn, "", 0), 'item': d, 'turn': turn} for d, turn in zip(batch, turns)]
worker.process_result(task)
worker.fout.close()
if __name__ == '__main__':
fire.Fire(main)
| NeMo-main | scripts/nlp_language_modeling/sft/attribute_annotate.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import re
from argparse import ArgumentParser
from multiprocessing import Pool
import tensorflow as tf
from sacremoses import MosesDetokenizer
from tasks_splits_and_features import _TASK_SPLITS_AND_FEATURES_DICT
"""
This script converts the P3 dataset used to train T0 from TFRecord format to individual JSONL files.
Usage instructions:
NOTE: This script requires tensorflow to be installed.
1. Download the P3 dataset by cloning it from Huggingface:
git clone https://huggingface.co/datasets/bigscience/P3. The raw data should be at P3/data.
2. Run this script:
python t0_dataset_preproc.py \
--p3_dataset_path P3/data \
--jsonl_output_path P3/data_processed_jsonl
3. The output will be in the jsonl_output_path directory. In the following structure:
- P3/data_processed_jsonl/train
- super_glue_cb_does_this_imply.jsonl
- super_glue_cb_justified_in_saying_score_eval.jsonl
- .....
- P3/data_processed_jsonl/val
- super_glue_cb_does_this_imply.jsonl
- super_glue_cb_justified_in_saying_score_eval.jsonl
- .....
4. Each JSONL file is compatible with NeMo's T0JSONLMemMapDataset (https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/nlp/data/language_modeling/t0_dataset.py)
"""
def _feature_config(shape, dtype):
if dtype in ("int32", "bool"):
# int32 and bool are stored as int64 in the tf.train.Example protobuf.
dtype = "int64"
if shape and shape[0] is None:
return tf.io.FixedLenSequenceFeature(shape[1:], dtype, allow_missing=True)
return tf.io.FixedLenFeature(shape, dtype)
def remove_newline_and_detokenize(x, detokenizer, remove_newlines):
if remove_newlines:
x = re.sub(r'\\n+', ' ', x)
x = re.sub(r'\n+', ' ', x)
x = re.sub(r'\\r+', ' ', x)
x = re.sub(r'\r+', ' ', x)
x = x.strip()
# NOTE: Moving the detokenizer inside this condition since sacremoses detokenize seems to remove \n as well.
if remove_newlines:
x = detokenizer.detokenize([x])
return x
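# Behavior sketch (illustrative example): with remove_newlines=True, literal and escaped
# newlines/carriage returns collapse to single spaces and the text is then detokenized,
# e.g. "Is this true ?\nYes ." -> "Is this true? Yes."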
def write_dataset_to_file(dataset, filename, detokenizer, remove_newlines):
with open(filename, 'w') as f:
for item in dataset:
# NOTE: Although we call `.tolist()` here, the result is not actually a list; it just converts the numpy scalar to a Python object so we can compare it against True/False.
if 'is_correct' in item and item['is_correct'].numpy().tolist() is False:
print('Skipping example because is_correct is False')
continue
item_object = {}
i = remove_newline_and_detokenize(
item['inputs_pretokenized'].numpy().decode('utf-8'), detokenizer, remove_newlines
)
item_object['input'] = i
t = remove_newline_and_detokenize(
item['targets_pretokenized'].numpy().decode('utf-8'), detokenizer, remove_newlines
)
item_object['output'] = t
if 'answer_choices' in item:
choices = [
remove_newline_and_detokenize(x.decode('utf-8'), detokenizer, remove_newlines)
for x in item['answer_choices'].numpy().tolist()
]
item_object['choices'] = choices
f.write(json.dumps(item_object) + '\n')
def write_train_val_test_dataset_to_file(file_name, folder_name, output_folder, detokenizer, split, remove_newlines):
ds = tf.data.TFRecordDataset(tf.io.gfile.glob([file_name]))
fdict = _TASK_SPLITS_AND_FEATURES_DICT[folder_name]['features_dict']
feature_description = {feat: _feature_config(**desc) for feat, desc in fdict.items()}
ds = ds.map(
lambda pb: tf.io.parse_single_example(pb, feature_description),
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
ds = ds.map(
lambda x: {k: tf.cast(v, fdict[k]["dtype"]) for k, v in x.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE,
)
write_dataset_to_file(ds, os.path.join(output_folder, split, folder_name + '.jsonl'), detokenizer, remove_newlines)
def process_folder(data_folder, folder_name, output_folder, detokenizer, remove_newlines):
if not os.path.isdir(os.path.join(data_folder, folder_name)):
return
print(f'Processing {folder_name}')
train_fname = os.path.join(data_folder, folder_name, 'train.tfrecord-00000-of-00001')
valid_fname = os.path.join(data_folder, folder_name, 'validation.tfrecord-00000-of-00001')
test_fname = os.path.join(data_folder, folder_name, 'test.tfrecord-00000-of-00001')
if not os.path.exists(train_fname):
print(f'Could not find {train_fname}')
return
write_train_val_test_dataset_to_file(
train_fname, folder_name, output_folder, detokenizer, 'train', remove_newlines
)
if os.path.exists(valid_fname):
write_train_val_test_dataset_to_file(
valid_fname, folder_name, output_folder, detokenizer, 'val', remove_newlines
)
if os.path.exists(test_fname):
write_train_val_test_dataset_to_file(
test_fname, folder_name, output_folder, detokenizer, 'test', remove_newlines
)
def process_all_folders(data_folder, output_folder, remove_newlines):
detokenizer = MosesDetokenizer('en')
assert os.path.isdir(data_folder)
if not os.path.exists(output_folder):
os.system(f'mkdir -p {output_folder}')
if not os.path.exists(os.path.join(output_folder, 'train')):
os.system(f'mkdir -p {os.path.join(output_folder, "train")}')
if not os.path.exists(os.path.join(output_folder, 'val')):
os.system(f'mkdir -p {os.path.join(output_folder, "val")}')
if not os.path.exists(os.path.join(output_folder, 'test')):
os.system(f'mkdir -p {os.path.join(output_folder, "test")}')
print(f'Found {len(os.listdir(data_folder))} folders to process ...')
pool_args = []
for folder_name in os.listdir(data_folder):
pool_args.append((data_folder, folder_name, output_folder, detokenizer, remove_newlines))
pool = Pool()
pool.starmap(process_folder, pool_args)
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--p3_dataset_path",
type=str,
required=True,
help="Path to raw P3 data. Should be a folder containing folders for each task. After cloning the repo this should correspond to P3/data",
)
parser.add_argument(
"--jsonl_output_path",
type=str,
required=True,
help="Path to output folder where JSONL files will be written.",
)
parser.add_argument(
"--remove_newlines", action="store_true", help="Whether to remove newlines from the input and output.",
)
args = parser.parse_args()
process_all_folders(args.p3_dataset_path, args.jsonl_output_path, args.remove_newlines)
| NeMo-main | scripts/nlp_language_modeling/t0/t0_dataset_preproc.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) HuggingFace Inc. team.
# Most of the code here has been copied from:
# https://huggingface.co/datasets/bigscience/P3/blob/main/tasks_splits_and_features.py
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
DATA_SPLITS_SIZES = {
"adversarial_qa_dbert_answer_the_following_q": {"validation": 1000, "train": 10000},
"adversarial_qa_dbert_based_on": {"validation": 1000, "train": 10000},
"adversarial_qa_dbert_generate_question": {"validation": 1000, "test": 1000, "train": 10000},
"adversarial_qa_dbert_question_context_answer": {"validation": 1000, "train": 10000},
"adversarial_qa_dbert_tell_what_it_is": {"validation": 1000, "train": 10000},
"adversarial_qa_dbidaf_answer_the_following_q": {"validation": 1000, "train": 10000},
"adversarial_qa_dbidaf_based_on": {"validation": 1000, "train": 10000},
"adversarial_qa_dbidaf_generate_question": {"validation": 1000, "test": 1000, "train": 10000},
"adversarial_qa_dbidaf_question_context_answer": {"validation": 1000, "train": 10000},
"adversarial_qa_dbidaf_tell_what_it_is": {"validation": 1000, "train": 10000},
"adversarial_qa_droberta_answer_the_following_q": {"validation": 1000, "train": 10000},
"adversarial_qa_droberta_based_on": {"validation": 1000, "train": 10000},
"adversarial_qa_droberta_generate_question": {"validation": 1000, "test": 1000, "train": 10000},
"adversarial_qa_droberta_question_context_answer": {"validation": 1000, "train": 10000},
"adversarial_qa_droberta_tell_what_it_is": {"validation": 1000, "train": 10000},
"ag_news_classify": {"test": 7600, "train": 120000},
"ag_news_classify_question_first": {"test": 7600, "train": 120000},
"ag_news_classify_with_choices": {"test": 7600, "train": 120000},
"ag_news_classify_with_choices_question_first": {"test": 7600, "train": 120000},
"ag_news_recommend": {"test": 7600, "train": 120000},
"ag_news_which_section": {"test": 7600, "train": 120000},
"ag_news_which_section_choices": {"test": 7600, "train": 120000},
"ai2_arc_ARC_Challenge_heres_a_problem": {"validation": 299, "test": 1172, "train": 1119},
"ai2_arc_ARC_Challenge_i_am_hesitating": {"validation": 299, "test": 1172, "train": 1119},
"ai2_arc_ARC_Challenge_multiple_choice": {"validation": 299, "test": 1172, "train": 1119},
"ai2_arc_ARC_Challenge_pick_false_options": {"validation": 299, "test": 1172, "train": 1119},
"ai2_arc_ARC_Challenge_pick_the_most_correct_option": {"validation": 299, "test": 1172, "train": 1119},
"ai2_arc_ARC_Challenge_qa_options": {"validation": 299, "test": 1172, "train": 1119},
"ai2_arc_ARC_Easy_heres_a_problem": {"validation": 570, "test": 2376, "train": 2251},
"ai2_arc_ARC_Easy_i_am_hesitating": {"validation": 570, "test": 2376, "train": 2251},
"ai2_arc_ARC_Easy_multiple_choice": {"validation": 570, "test": 2376, "train": 2251},
"ai2_arc_ARC_Easy_pick_false_options": {"validation": 570, "test": 2376, "train": 2251},
"ai2_arc_ARC_Easy_pick_the_most_correct_option": {"validation": 570, "test": 2376, "train": 2251},
"ai2_arc_ARC_Easy_qa_options": {"validation": 570, "test": 2376, "train": 2251},
"amazon_polarity_Is_this_product_review_positive": {"test": 400000, "train": 3600000},
"amazon_polarity_Is_this_review": {"test": 400000, "train": 3600000},
"amazon_polarity_Is_this_review_negative": {"test": 400000, "train": 3600000},
"amazon_polarity_User_recommend_this_product": {"test": 400000, "train": 3600000},
"amazon_polarity_convey_negative_or_positive_sentiment": {"test": 400000, "train": 3600000},
"amazon_polarity_flattering_or_not": {"test": 400000, "train": 3600000},
"amazon_polarity_negative_or_positive_tone": {"test": 400000, "train": 3600000},
"amazon_polarity_user_satisfied": {"test": 400000, "train": 3600000},
"amazon_polarity_would_you_buy": {"test": 400000, "train": 3600000},
"anli_GPT_3_style_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_GPT_3_style_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_GPT_3_style_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_GPT_3_style_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_GPT_3_style_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_GPT_3_style_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_MNLI_crowdsource_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_MNLI_crowdsource_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_MNLI_crowdsource_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_MNLI_crowdsource_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_MNLI_crowdsource_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_MNLI_crowdsource_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_always_sometimes_never_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_always_sometimes_never_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_always_sometimes_never_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_always_sometimes_never_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_always_sometimes_never_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_always_sometimes_never_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_based_on_the_previous_passage_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_based_on_the_previous_passage_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_based_on_the_previous_passage_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_based_on_the_previous_passage_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_based_on_the_previous_passage_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_based_on_the_previous_passage_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_can_we_infer_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_can_we_infer_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_can_we_infer_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_can_we_infer_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_can_we_infer_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_can_we_infer_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_claim_true_false_inconclusive_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_claim_true_false_inconclusive_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_claim_true_false_inconclusive_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_claim_true_false_inconclusive_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_claim_true_false_inconclusive_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_claim_true_false_inconclusive_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_consider_always_sometimes_never_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_consider_always_sometimes_never_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_consider_always_sometimes_never_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_consider_always_sometimes_never_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_consider_always_sometimes_never_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_consider_always_sometimes_never_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_does_it_follow_that_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_does_it_follow_that_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_does_it_follow_that_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_does_it_follow_that_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_does_it_follow_that_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_does_it_follow_that_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_does_this_imply_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_does_this_imply_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_does_this_imply_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_does_this_imply_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_does_this_imply_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_does_this_imply_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_guaranteed_possible_impossible_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_guaranteed_possible_impossible_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_guaranteed_possible_impossible_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_guaranteed_possible_impossible_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_guaranteed_possible_impossible_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_guaranteed_possible_impossible_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_guaranteed_true_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_guaranteed_true_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_guaranteed_true_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_guaranteed_true_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_guaranteed_true_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_guaranteed_true_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_justified_in_saying_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_justified_in_saying_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_justified_in_saying_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_justified_in_saying_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_justified_in_saying_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_justified_in_saying_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_must_be_true_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_must_be_true_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_must_be_true_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_must_be_true_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_must_be_true_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_must_be_true_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_should_assume_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_should_assume_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_should_assume_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_should_assume_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_should_assume_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_should_assume_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"anli_take_the_following_as_truth_r1": {"validation": 1000, "test": 1000, "train": 16946},
"anli_take_the_following_as_truth_r1_score_eval": {"validation": 3000, "test": 3000, "train": 50838},
"anli_take_the_following_as_truth_r2": {"validation": 1000, "test": 1000, "train": 45460},
"anli_take_the_following_as_truth_r2_score_eval": {"validation": 3000, "test": 3000, "train": 136380},
"anli_take_the_following_as_truth_r3": {"validation": 1200, "test": 1200, "train": 100459},
"anli_take_the_following_as_truth_r3_score_eval": {"validation": 3600, "test": 3600, "train": 301377},
"app_reviews_categorize_rating_using_review": {"train": 288065},
"app_reviews_convert_to_rating": {"train": 288065},
"app_reviews_convert_to_star_rating": {"train": 288065},
"app_reviews_generate_review": {"train": 288065},
"cnn_dailymail_3.0.0_2_or_3_sentences": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_generate_story": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_news_card_view": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_news_stock": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_news_summary": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_spice_up_story": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_sum_in_brief": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_tldr_summary": {"validation": 13368, "test": 11490, "train": 287113},
"cnn_dailymail_3.0.0_write_an_outline": {"validation": 13368, "test": 11490, "train": 287113},
"common_gen_Example_prompt": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_Given_concepts_type_1": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_Given_concepts_type_2": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_Put_together": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_choice_in_concept_centric_sentence_generation": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_random_task_template_prompt": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_sentence_to_concepts": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_topic_to_sentence": {"validation": 4018, "test": 1497, "train": 67389},
"common_gen_topics_from_the_sentence": {"validation": 4018, "test": 1497, "train": 67389},
"cos_e_v1.11_aligned_with_common_sense": {"validation": 1221, "train": 9741},
"cos_e_v1.11_description_question_option_id": {"validation": 1221, "train": 9741},
"cos_e_v1.11_description_question_option_text": {"validation": 1221, "train": 9741},
"cos_e_v1.11_explain_why_human": {"validation": 1221, "train": 9741},
"cos_e_v1.11_generate_explanation_given_text": {"validation": 1221, "train": 9741},
"cos_e_v1.11_i_think": {"validation": 1221, "train": 9741},
"cos_e_v1.11_question_description_option_id": {"validation": 1221, "train": 9741},
"cos_e_v1.11_question_description_option_text": {"validation": 1221, "train": 9741},
"cos_e_v1.11_question_option_description_id": {"validation": 1221, "train": 9741},
"cos_e_v1.11_question_option_description_text": {"validation": 1221, "train": 9741},
"cos_e_v1.11_rationale": {"validation": 1221, "train": 9741},
"cosmos_qa_context_answer_to_question": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_context_description_question_answer_id": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_context_description_question_answer_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_context_description_question_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_context_question_description_answer_id": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_context_question_description_answer_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_context_question_description_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_description_context_question_answer_id": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_description_context_question_answer_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_description_context_question_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_no_prompt_id": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_no_prompt_text": {"validation": 2985, "test": 6963, "train": 25262},
"cosmos_qa_only_question_answer": {"validation": 2985, "test": 6963, "train": 25262},
"dbpedia_14_given_a_choice_of_categories_": {"test": 70000, "train": 560000},
"dbpedia_14_given_a_list_of_category_what_does_the_title_belong_to": {"test": 70000, "train": 560000},
"dbpedia_14_given_list_what_category_does_the_paragraph_belong_to": {"test": 70000, "train": 560000},
"dbpedia_14_pick_one_category_for_the_following_text": {"test": 70000, "train": 560000},
"dream_answer_to_dialogue": {"validation": 2040, "test": 2041, "train": 6116},
"dream_baseline": {"validation": 2040, "test": 2041, "train": 6116},
"dream_generate_first_utterance": {"validation": 2040, "test": 2041, "train": 6116},
"dream_generate_last_utterance": {"validation": 2040, "test": 2041, "train": 6116},
"dream_read_the_following_conversation_and_answer_the_question": {"validation": 2040, "test": 2041, "train": 6116},
"duorc_ParaphraseRC_answer_question": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_ParaphraseRC_build_story_around_qa": {"validation": 13111, "test": 13449, "train": 58752},
"duorc_ParaphraseRC_decide_worth_it": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_ParaphraseRC_extract_answer": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_ParaphraseRC_generate_question": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_ParaphraseRC_generate_question_by_answer": {"validation": 13111, "test": 13449, "train": 58752},
"duorc_ParaphraseRC_movie_director": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_ParaphraseRC_question_answering": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_ParaphraseRC_title_generation": {"validation": 15591, "test": 15857, "train": 69524},
"duorc_SelfRC_answer_question": {"validation": 12961, "test": 12559, "train": 60721},
"duorc_SelfRC_build_story_around_qa": {"validation": 12845, "test": 12415, "train": 60094},
"duorc_SelfRC_decide_worth_it": {"validation": 12961, "test": 12559, "train": 60721},
"duorc_SelfRC_extract_answer": {"validation": 12961, "test": 12559, "train": 60721},
"duorc_SelfRC_generate_question": {"validation": 12961, "test": 12559, "train": 60721},
"duorc_SelfRC_generate_question_by_answer": {"validation": 12845, "test": 12415, "train": 60094},
"duorc_SelfRC_movie_director": {"validation": 12961, "test": 12559, "train": 60721},
"duorc_SelfRC_question_answering": {"validation": 12961, "test": 12559, "train": 60721},
"duorc_SelfRC_title_generation": {"validation": 12961, "test": 12559, "train": 60721},
"gigaword_TLDR": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_first_sentence_title": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_generate_summary_for_this": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_in_a_nutshell": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_make_a_title": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_reverse_writing": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_write_a_title_for_this_sentence": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_write_an_article": {"validation": 189651, "test": 1951, "train": 3803957},
"gigaword_write_its_sentence": {"validation": 189651, "test": 1951, "train": 3803957},
"glue_mrpc_equivalent": {"validation": 408, "test": 1725, "train": 3668},
"glue_mrpc_generate_paraphrase": {"validation": 279, "test": 1147, "train": 2474},
"glue_mrpc_generate_sentence": {"validation": 279, "test": 1147, "train": 2474},
"glue_mrpc_paraphrase": {"validation": 408, "test": 1725, "train": 3668},
"glue_mrpc_replace": {"validation": 408, "test": 1725, "train": 3668},
"glue_mrpc_same_thing": {"validation": 408, "test": 1725, "train": 3668},
"glue_mrpc_want_to_know": {"validation": 408, "test": 1725, "train": 3668},
"glue_qqp_answer": {"validation": 40430, "test": 390965, "train": 363846},
"glue_qqp_duplicate": {"validation": 40430, "test": 390965, "train": 363846},
"glue_qqp_duplicate_or_not": {"validation": 40430, "test": 390965, "train": 363846},
"glue_qqp_meaning": {"validation": 40430, "test": 390965, "train": 363846},
"glue_qqp_quora": {"validation": 40430, "test": 390965, "train": 363846},
"glue_qqp_same_thing": {"validation": 40430, "test": 390965, "train": 363846},
"hellaswag_Appropriate_continuation_Yes_or_No": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Open_ended_completion": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Open_ended_start": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Predict_ending_with_hint": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Predict_ending_with_hint_score_eval": {"validation": 40168, "test": 40012, "train": 159620},
"hellaswag_Randomized_prompts_template": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Randomized_prompts_template_score_eval": {"validation": 40168, "test": 40012, "train": 159620},
"hellaswag_Reversed_appropriate_continuation_Yes_or_No": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Topic_of_the_context": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_Topic_without_the_ending_answer": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_complete_first_then": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_complete_first_then_score_eval": {"validation": 40168, "test": 40012, "train": 159620},
"hellaswag_how_ends": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_if_begins_how_continues": {"validation": 10042, "test": 10003, "train": 39905},
"hellaswag_if_begins_how_continues_score_eval": {"validation": 40168, "test": 40012, "train": 159620},
"imdb_Movie_Expressed_Sentiment": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Movie_Expressed_Sentiment_2": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Negation_template_for_positive_and_negative": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Reviewer_Enjoyment": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Reviewer_Enjoyment_Yes_No": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Reviewer_Expressed_Sentiment": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Reviewer_Opinion_bad_good_choices": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Reviewer_Sentiment_Feeling": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Sentiment_with_choices_": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Text_Expressed_Sentiment": {"test": 25000, "unsupervised": 50000, "train": 25000},
"imdb_Writer_Expressed_Sentiment": {"test": 25000, "unsupervised": 50000, "train": 25000},
"kilt_tasks_hotpotqa_combining_facts": {"validation": 5600, "train": 88869},
"kilt_tasks_hotpotqa_complex_question": {"validation": 5600, "train": 88869},
"kilt_tasks_hotpotqa_final_exam": {"validation": 5600, "train": 88869},
"kilt_tasks_hotpotqa_formulate": {"validation": 5600, "train": 88869},
"kilt_tasks_hotpotqa_straighforward_qa": {"validation": 5600, "train": 88869},
"multi_news_distill": {"validation": 5622, "test": 5622, "train": 44972},
"multi_news_expand_reverse_task_": {"validation": 5622, "test": 5622, "train": 44972},
"multi_news_summarize": {"validation": 5622, "test": 5622, "train": 44972},
"multi_news_summary_scenario": {"validation": 5622, "test": 5622, "train": 44972},
"multi_news_synthesize": {"validation": 5622, "test": 5622, "train": 44972},
"multi_news_what_are_the_key_points": {"validation": 5622, "test": 5622, "train": 44972},
"openbookqa_main_choices": {"validation": 500, "test": 500, "train": 4957},
"openbookqa_main_choose_an_answer_with_options": {"validation": 500, "test": 500, "train": 4957},
"openbookqa_main_only_options": {"validation": 500, "test": 500, "train": 4957},
"openbookqa_main_pick_answer_with_options": {"validation": 500, "test": 500, "train": 4957},
"openbookqa_main_pick_using_id": {"validation": 500, "test": 500, "train": 4957},
"openbookqa_main_which_correct": {"validation": 500, "test": 500, "train": 4957},
"openbookqa_main_which_correct_inverse": {"validation": 500, "test": 500, "train": 4957},
"paws_labeled_final_Concatenation": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_Concatenation_no_label": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_Meaning": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_Meaning_no_label": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_PAWS_ANLI_GPT3": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_PAWS_ANLI_GPT3_no_label": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_Rewrite": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_Rewrite_no_label": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_context_question": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_context_question_no_label": {"validation": 8000, "test": 8000, "train": 49401},
"paws_labeled_final_paraphrase_task": {"validation": 3539, "test": 3536, "train": 21829},
"paws_labeled_final_task_description_no_label": {"validation": 8000, "test": 8000, "train": 49401},
"piqa_Correct_the_solution": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_Correct_the_solution_if_false_from_sol_1": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_Correct_the_solution_if_false_from_sol_2": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_Does_this_solution_make_sense_sol1": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_Does_this_solution_make_sense_sol2": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_choose_the_most_appropriate_solution": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_finish_sentence_with_correct_choice": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_no_prompt_needed": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_pick_correct_choice_index": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_pick_correct_choice_with_choice_given_before_goal": {"validation": 1838, "test": 3084, "train": 16113},
"piqa_what_is_the_correct_ending": {"validation": 1838, "test": 3084, "train": 16113},
"qasc_is_correct_1": {"validation": 926, "test": 920, "train": 8134},
"qasc_is_correct_2": {"validation": 926, "test": 920, "train": 8134},
"qasc_qa_with_combined_facts_1": {"validation": 926, "test": 920, "train": 8134},
"qasc_qa_with_separated_facts_1": {"validation": 926, "test": 920, "train": 8134},
"qasc_qa_with_separated_facts_2": {"validation": 926, "test": 920, "train": 8134},
"qasc_qa_with_separated_facts_3": {"validation": 926, "test": 920, "train": 8134},
"qasc_qa_with_separated_facts_4": {"validation": 926, "test": 920, "train": 8134},
"qasc_qa_with_separated_facts_5": {"validation": 926, "test": 920, "train": 8134},
"quail_context_description_question_answer_id": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_description_question_answer_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_description_question_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_question_answer_description_id": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_question_answer_description_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_question_description_answer_id": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_question_description_answer_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_context_question_description_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_description_context_question_answer_id": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_description_context_question_answer_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_description_context_question_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_no_prompt_id": {"challenge": 556, "validation": 2164, "train": 10246},
"quail_no_prompt_text": {"challenge": 556, "validation": 2164, "train": 10246},
"quarel_choose_between": {"validation": 278, "test": 552, "train": 1941},
"quarel_do_not_use": {"validation": 278, "test": 552, "train": 1941},
"quarel_heres_a_story": {"validation": 278, "test": 552, "train": 1941},
"quarel_logic_test": {"validation": 278, "test": 552, "train": 1941},
"quarel_testing_students": {"validation": 278, "test": 552, "train": 1941},
"quartz_answer_question_based_on": {"validation": 384, "test": 784, "train": 2696},
"quartz_answer_question_below": {"validation": 384, "test": 784, "train": 2696},
"quartz_given_the_fact_answer_the_q": {"validation": 384, "test": 784, "train": 2696},
"quartz_having_read_above_passage": {"validation": 384, "test": 784, "train": 2696},
"quartz_paragraph_question_plain_concat": {"validation": 384, "test": 784, "train": 2696},
"quartz_read_passage_below_choose": {"validation": 384, "test": 784, "train": 2696},
"quartz_use_info_from_paragraph_question": {"validation": 384, "test": 784, "train": 2696},
"quartz_use_info_from_question_paragraph": {"validation": 384, "test": 784, "train": 2696},
"quoref_Answer_Friend_Question": {"validation": 2418, "train": 19399},
"quoref_Answer_Question_Given_Context": {"validation": 2418, "train": 19399},
"quoref_Answer_Test": {"validation": 2418, "train": 19399},
"quoref_Context_Contains_Answer": {"validation": 2418, "train": 19399},
"quoref_Find_Answer": {"validation": 2418, "train": 19399},
"quoref_Found_Context_Online": {"validation": 2418, "train": 19399},
"quoref_Given_Context_Answer_Question": {"validation": 2418, "train": 19399},
"quoref_Guess_Answer": {"validation": 2418, "train": 19399},
"quoref_Guess_Title_For_Context": {"validation": 2418, "train": 19399},
"quoref_Read_And_Extract_": {"validation": 2418, "train": 19399},
"quoref_What_Is_The_Answer": {"validation": 2418, "train": 19399},
"race_high_Is_this_the_right_answer": {"validation": 3451, "test": 3498, "train": 62445},
"race_high_Read_the_article_and_answer_the_question_no_option_": {
"validation": 3451,
"test": 3498,
"train": 62445,
},
"race_high_Select_the_best_answer": {"validation": 3451, "test": 3498, "train": 62445},
"race_high_Select_the_best_answer_generate_span_": {"validation": 3451, "test": 3498, "train": 62445},
"race_high_Select_the_best_answer_no_instructions_": {"validation": 3451, "test": 3498, "train": 62445},
"race_high_Taking_a_test": {"validation": 3451, "test": 3498, "train": 62445},
"race_high_Write_a_multi_choice_question_for_the_following_article": {
"validation": 3451,
"test": 3498,
"train": 62445,
},
"race_high_Write_a_multi_choice_question_options_given_": {"validation": 3451, "test": 3498, "train": 62445},
"race_middle_Is_this_the_right_answer": {"validation": 1436, "test": 1436, "train": 25421},
"race_middle_Read_the_article_and_answer_the_question_no_option_": {
"validation": 1436,
"test": 1436,
"train": 25421,
},
"race_middle_Select_the_best_answer": {"validation": 1436, "test": 1436, "train": 25421},
"race_middle_Select_the_best_answer_generate_span_": {"validation": 1436, "test": 1436, "train": 25421},
"race_middle_Select_the_best_answer_no_instructions_": {"validation": 1436, "test": 1436, "train": 25421},
"race_middle_Taking_a_test": {"validation": 1436, "test": 1436, "train": 25421},
"race_middle_Write_a_multi_choice_question_for_the_following_article": {
"validation": 1436,
"test": 1436,
"train": 25421,
},
"race_middle_Write_a_multi_choice_question_options_given_": {"validation": 1436, "test": 1436, "train": 25421},
"ropes_background_new_situation_answer": {"validation": 1688, "train": 10924},
"ropes_background_situation_middle": {"validation": 1688, "train": 10924},
"ropes_given_background_situation": {"validation": 1688, "train": 10924},
"ropes_new_situation_background_answer": {"validation": 1688, "train": 10924},
"ropes_plain_background_situation": {"validation": 1688, "train": 10924},
"ropes_plain_bottom_hint": {"validation": 1688, "train": 10924},
"ropes_plain_no_background": {"validation": 1688, "train": 10924},
"ropes_prompt_beginning": {"validation": 1688, "train": 10924},
"ropes_prompt_bottom_hint_beginning": {"validation": 1688, "train": 10924},
"ropes_prompt_bottom_no_hint": {"validation": 1688, "train": 10924},
"ropes_prompt_mix": {"validation": 1688, "train": 10924},
"ropes_read_background_situation": {"validation": 1688, "train": 10924},
"rotten_tomatoes_Movie_Expressed_Sentiment": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Movie_Expressed_Sentiment_2": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Reviewer_Enjoyment": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Reviewer_Enjoyment_Yes_No": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Reviewer_Expressed_Sentiment": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Reviewer_Opinion_bad_good_choices": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Reviewer_Sentiment_Feeling": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Sentiment_with_choices_": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Text_Expressed_Sentiment": {"validation": 1066, "test": 1066, "train": 8530},
"rotten_tomatoes_Writer_Expressed_Sentiment": {"validation": 1066, "test": 1066, "train": 8530},
"samsum_Generate_a_summary_for_this_dialogue": {"validation": 818, "test": 819, "train": 14732},
"samsum_Given_the_above_dialogue_write_a_summary": {"validation": 818, "test": 819, "train": 14732},
"samsum_Sum_up_the_following_dialogue": {"validation": 818, "test": 819, "train": 14732},
"samsum_Summarize_": {"validation": 818, "test": 819, "train": 14732},
"samsum_Summarize_this_dialogue_": {"validation": 818, "test": 819, "train": 14732},
"samsum_To_sum_up_this_dialog": {"validation": 818, "test": 819, "train": 14732},
"samsum_Write_a_dialogue_that_match_this_summary": {"validation": 818, "test": 819, "train": 14732},
"sciq_Direct_Question": {"validation": 1000, "test": 1000, "train": 11679},
"sciq_Direct_Question_Closed_Book_": {"validation": 1000, "test": 1000, "train": 11679},
"sciq_Multiple_Choice": {"validation": 1000, "test": 1000, "train": 11679},
"sciq_Multiple_Choice_Closed_Book_": {"validation": 1000, "test": 1000, "train": 11679},
"sciq_Multiple_Choice_Question_First": {"validation": 1000, "test": 1000, "train": 11679},
"social_i_qa_Check_if_a_random_answer_is_valid_or_not": {"validation": 1954, "train": 33410},
"social_i_qa_Generate_answer": {"validation": 1954, "train": 33410},
"social_i_qa_Generate_the_question_from_the_answer": {"validation": 1954, "train": 33410},
"social_i_qa_I_was_wondering": {"validation": 1954, "train": 33410},
"social_i_qa_Show_choices_and_generate_answer": {"validation": 1954, "train": 33410},
"social_i_qa_Show_choices_and_generate_index": {"validation": 1954, "train": 33410},
"squad_v2_Jeopardy_with_Context": {"validation": 5928, "train": 86821},
"squad_v2_Jeopardy_without_Context": {"validation": 5928, "train": 86821},
"squad_v2_Questions_with_Context": {"validation": 11873, "train": 130319},
"squad_v2_Questions_with_Context_Without_Prompt_Keywords": {"validation": 11873, "train": 130319},
"squad_v2_Questions_with_Context_Without_Prompt_Keywords_unanswerable": {"validation": 11873, "train": 130319},
"squad_v2_Questions_with_Context_unanswerable": {"validation": 11873, "train": 130319},
"squad_v2_Topic_Prediction_Context": {"validation": 11873, "train": 130319},
"squad_v2_Topic_Prediction_Context_with_randomized_prompt_options": {"validation": 11873, "train": 130319},
"squad_v2_Topic_Prediction_Context_with_randomized_prompt_options_placed_in_the_end": {
"validation": 11873,
"train": 130319,
},
"squad_v2_Topic_Prediction_Question_and_Answer_Pair": {"validation": 5928, "train": 86821},
"squad_v2_Trivia": {"validation": 5928, "train": 86821},
"squad_v2_Unanwerable_question": {"validation": 11873, "train": 130319},
"super_glue_boolq_GPT_3_Style": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_I_wonder_": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_after_reading": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_based_on_the_following_passage": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_based_on_the_previous_passage": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_could_you_tell_me_": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_exam": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_exercise": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_valid_binary": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_boolq_yes_no_question": {"validation": 3270, "test": 3245, "train": 9427},
"super_glue_cb_GPT_3_style": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_GPT_3_style_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_MNLI_crowdsource": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_MNLI_crowdsource_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_always_sometimes_never": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_always_sometimes_never_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_based_on_the_previous_passage": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_based_on_the_previous_passage_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_can_we_infer": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_can_we_infer_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_claim_true_false_inconclusive": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_claim_true_false_inconclusive_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_consider_always_sometimes_never": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_consider_always_sometimes_never_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_does_it_follow_that": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_does_it_follow_that_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_does_this_imply": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_does_this_imply_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_guaranteed_possible_impossible": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_guaranteed_possible_impossible_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_guaranteed_true": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_guaranteed_true_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_justified_in_saying": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_justified_in_saying_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_must_be_true": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_must_be_true_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_should_assume": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_should_assume_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_cb_take_the_following_as_truth": {"validation": 56, "test": 250, "train": 250},
"super_glue_cb_take_the_following_as_truth_score_eval": {"validation": 168, "test": 750, "train": 750},
"super_glue_copa_C1_or_C2_premise_so_because_": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_C1_or_C2_premise_so_because__score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa__As_a_result_C1_or_C2_": {"validation": 48, "test": 250, "train": 202},
"super_glue_copa__As_a_result_C1_or_C2__score_eval": {"validation": 96, "test": 500, "train": 404},
"super_glue_copa__What_could_happen_next_C1_or_C2_": {"validation": 48, "test": 250, "train": 202},
"super_glue_copa__What_could_happen_next_C1_or_C2__score_eval": {"validation": 96, "test": 500, "train": 404},
"super_glue_copa__which_may_be_caused_by": {"validation": 52, "test": 250, "train": 198},
"super_glue_copa__which_may_be_caused_by_score_eval": {"validation": 104, "test": 500, "train": 396},
"super_glue_copa__why_C1_or_C2": {"validation": 52, "test": 250, "train": 198},
"super_glue_copa__why_C1_or_C2_score_eval": {"validation": 104, "test": 500, "train": 396},
"super_glue_copa_best_option": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_best_option_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa_cause_effect": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_cause_effect_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa_choose": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_choose_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa_exercise": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_exercise_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa_i_am_hesitating": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_i_am_hesitating_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa_more_likely": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_more_likely_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_copa_plausible_alternatives": {"validation": 100, "test": 500, "train": 400},
"super_glue_copa_plausible_alternatives_score_eval": {"validation": 200, "test": 1000, "train": 800},
"super_glue_multirc_I_was_going_to_say_": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_Would_it_be_good_to_answer_": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_confirm": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_correct": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_decide_valid": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_found_this_answer": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_grading": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_is_a_correct_answer_": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_is_the_correct_answer_": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_multirc_paragraph_question_is_it_": {"validation": 4848, "test": 9693, "train": 27243},
"super_glue_record_Add_sentence_after_after_continuation_choices_": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_Add_sentence_after_continuation_choices_": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_Can_you_figure_out_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_GPT_3_style_continuation_choices_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_GPT_3_style_summary_only_continuation_choices_": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_GPT_3_style_with_labels_continuation_choices_": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_GPT_3_style_with_labels_without_hyphens_continuation_choices_": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_GPT_3_style_without_hyphens_continuation_choices_": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_In_the_question_above_the_placeholder_stands_for": {
"validation": 10000,
"test": 10000,
"train": 100730,
},
"super_glue_record_New_highlight_continuation_choices_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_News_article_continuation_choices_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_Summary_first_continuation_choices_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_What_could_the_placeholder_be_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_Which_one_is_the_placeholder_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_choose_between": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_corrupted": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_exercise": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_pick_one_option": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_the_placeholder_refers_to_": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_record_trying_to_decide": {"validation": 10000, "test": 10000, "train": 100730},
"super_glue_rte_GPT_3_style": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_GPT_3_style_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_MNLI_crowdsource": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_MNLI_crowdsource_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_based_on_the_previous_passage": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_based_on_the_previous_passage_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_can_we_infer": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_can_we_infer_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_does_it_follow_that": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_does_it_follow_that_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_does_this_imply": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_does_this_imply_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_guaranteed_true": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_guaranteed_true_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_justified_in_saying": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_justified_in_saying_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_must_be_true": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_must_be_true_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_rte_should_assume": {"validation": 277, "test": 3000, "train": 2490},
"super_glue_rte_should_assume_score_eval": {"validation": 554, "test": 6000, "train": 4980},
"super_glue_wic_GPT_3_prompt": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_GPT_3_prompt_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_GPT_3_prompt_with_label": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_GPT_3_prompt_with_label_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_affirmation_true_or_false": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_affirmation_true_or_false_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_grammar_homework": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_grammar_homework_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_polysemous": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_polysemous_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_question_context": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_question_context_meaning": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_question_context_meaning_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_question_context_meaning_with_label": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_question_context_meaning_with_label_score_eval": {
"validation": 1276,
"test": 2800,
"train": 10856,
},
"super_glue_wic_question_context_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_same_sense": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_same_sense_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wic_similar_sense": {"validation": 638, "test": 1400, "train": 5428},
"super_glue_wic_similar_sense_score_eval": {"validation": 1276, "test": 2800, "train": 10856},
"super_glue_wsc.fixed_GPT_3_Style": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_GPT_3_Style_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_I_think_they_mean": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_I_think_they_mean_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_Who_or_what_is_are": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_Who_or_what_is_are_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_by_p_they_mean": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_by_p_they_mean_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_does_p_stand_for": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_does_p_stand_for_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_does_the_pronoun_refer_to": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_does_the_pronoun_refer_to_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_in_other_words": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_in_other_words_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_p_is_are_r": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_p_is_are_r_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_replaced_with": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_replaced_with_score_eval": {"validation": 208, "test": 292, "train": 1108},
"super_glue_wsc.fixed_the_pronoun_refers_to": {"validation": 104, "test": 146, "train": 554},
"super_glue_wsc.fixed_the_pronoun_refers_to_score_eval": {"validation": 208, "test": 292, "train": 1108},
"trec_fine_grained_ABBR": {"test": 9, "train": 86},
"trec_fine_grained_ABBR_context_first": {"test": 9, "train": 86},
"trec_fine_grained_DESC": {"test": 138, "train": 1162},
"trec_fine_grained_DESC_context_first": {"test": 138, "train": 1162},
"trec_fine_grained_ENTY": {"test": 94, "train": 1250},
"trec_fine_grained_HUM": {"test": 65, "train": 1223},
"trec_fine_grained_HUM_context_first": {"test": 65, "train": 1223},
"trec_fine_grained_LOC": {"test": 81, "train": 835},
"trec_fine_grained_LOC_context_first": {"test": 81, "train": 835},
"trec_fine_grained_NUM": {"test": 113, "train": 896},
"trec_fine_grained_NUM_context_first": {"test": 113, "train": 896},
"trec_fine_grained_open": {"test": 500, "train": 5452},
"trec_fine_grained_open_context_first": {"test": 500, "train": 5452},
"trec_pick_the_best_descriptor": {"test": 500, "train": 5452},
"trec_trec1": {"test": 500, "train": 5452},
"trec_trec2": {"test": 500, "train": 5452},
"trec_what_category_best_describe": {"test": 500, "train": 5452},
"trec_which_category_best_describes": {"test": 500, "train": 5452},
"trivia_qa_unfiltered_first_person_context": {"validation": 11313, "test": 10832, "train": 87622},
"trivia_qa_unfiltered_formal_description": {"validation": 11313, "test": 10832, "train": 87622},
"trivia_qa_unfiltered_guess_question": {"validation": 11313, "train": 87622},
"trivia_qa_unfiltered_question_answer": {"validation": 11313, "test": 10832, "train": 87622},
"trivia_qa_unfiltered_question_with_instruction": {"validation": 11313, "test": 10832, "train": 87622},
"web_questions_get_the_answer": {"test": 2032, "train": 3778},
"web_questions_potential_correct_answer": {"test": 2032, "train": 3778},
"web_questions_question_answer": {"test": 2032, "train": 3778},
"web_questions_short_general_knowledge_q": {"test": 2032, "train": 3778},
"web_questions_whats_the_answer": {"test": 2032, "train": 3778},
"wiki_bio_comprehension": {"val": 72831, "test": 72829, "train": 582639},
"wiki_bio_guess_person": {"val": 72831, "test": 72829, "train": 582639},
"wiki_bio_key_content": {"val": 72831, "test": 72829, "train": 582639},
"wiki_bio_what_content": {"val": 72831, "test": 72829, "train": 582639},
"wiki_bio_who": {"val": 72831, "test": 72829, "train": 582639},
"wiki_hop_original_choose_best_object_affirmative_1": {"validation": 5129, "train": 43738},
"wiki_hop_original_choose_best_object_affirmative_2": {"validation": 5129, "train": 43738},
"wiki_hop_original_choose_best_object_affirmative_3": {"validation": 5129, "train": 43738},
"wiki_hop_original_choose_best_object_interrogative_1": {"validation": 5129, "train": 43738},
"wiki_hop_original_choose_best_object_interrogative_2": {"validation": 5129, "train": 43738},
"wiki_hop_original_explain_relation": {"validation": 5129, "train": 43738},
"wiki_hop_original_generate_object": {"validation": 5129, "train": 43738},
"wiki_hop_original_generate_subject": {"validation": 5129, "train": 43738},
"wiki_hop_original_generate_subject_and_object": {"validation": 5129, "train": 43738},
"wiki_qa_Decide_good_answer": {"validation": 2733, "test": 6165, "train": 20360},
"wiki_qa_Direct_Answer_to_Question": {"validation": 140, "test": 293, "train": 1040},
"wiki_qa_Generate_Question_from_Topic": {"validation": 140, "test": 293, "train": 1040},
"wiki_qa_Is_This_True_": {"validation": 2733, "test": 6165, "train": 20360},
"wiki_qa_Jeopardy_style": {"validation": 140, "test": 293, "train": 1040},
"wiki_qa_Topic_Prediction_Answer_Only": {"validation": 140, "test": 293, "train": 1040},
"wiki_qa_Topic_Prediction_Question_Only": {"validation": 140, "test": 293, "train": 1040},
"wiki_qa_Topic_Prediction_Question_and_Answer_Pair": {"validation": 140, "test": 293, "train": 1040},
"wiki_qa_automatic_system": {"validation": 2733, "test": 6165, "train": 20360},
"wiki_qa_exercise": {"validation": 2733, "test": 6165, "train": 20360},
"wiki_qa_found_on_google": {"validation": 2733, "test": 6165, "train": 20360},
"winogrande_winogrande_debiased_Replace": {"validation": 1267, "test": 1767, "train": 9248},
"winogrande_winogrande_debiased_Replace_score_eval": {"validation": 2534, "test": 3534, "train": 18496},
"winogrande_winogrande_debiased_does_underscore_refer_to": {"validation": 1267, "test": 1767, "train": 9248},
"winogrande_winogrande_debiased_does_underscore_refer_to_score_eval": {
"validation": 2534,
"test": 3534,
"train": 18496,
},
"winogrande_winogrande_debiased_fill_in_the_blank": {"validation": 1267, "test": 1767, "train": 9248},
"winogrande_winogrande_debiased_fill_in_the_blank_score_eval": {"validation": 2534, "test": 3534, "train": 18496},
"winogrande_winogrande_debiased_stand_for": {"validation": 1267, "test": 1767, "train": 9248},
"winogrande_winogrande_debiased_stand_for_score_eval": {"validation": 2534, "test": 3534, "train": 18496},
"winogrande_winogrande_debiased_underscore_refer_to": {"validation": 1267, "test": 1767, "train": 9248},
"winogrande_winogrande_debiased_underscore_refer_to_score_eval": {
"validation": 2534,
"test": 3534,
"train": 18496,
},
"winogrande_winogrande_xl_Replace": {"validation": 1267, "test": 1767, "train": 40398},
"winogrande_winogrande_xl_Replace_score_eval": {"validation": 2534, "test": 3534, "train": 80796},
"winogrande_winogrande_xl_does_underscore_refer_to": {"validation": 1267, "test": 1767, "train": 40398},
"winogrande_winogrande_xl_does_underscore_refer_to_score_eval": {"validation": 2534, "test": 3534, "train": 80796},
"winogrande_winogrande_xl_fill_in_the_blank": {"validation": 1267, "test": 1767, "train": 40398},
"winogrande_winogrande_xl_fill_in_the_blank_score_eval": {"validation": 2534, "test": 3534, "train": 80796},
"winogrande_winogrande_xl_stand_for": {"validation": 1267, "test": 1767, "train": 40398},
"winogrande_winogrande_xl_stand_for_score_eval": {"validation": 2534, "test": 3534, "train": 80796},
"winogrande_winogrande_xl_underscore_refer_to": {"validation": 1267, "test": 1767, "train": 40398},
"winogrande_winogrande_xl_underscore_refer_to_score_eval": {"validation": 2534, "test": 3534, "train": 80796},
"wiqa_does_the_supposed_perturbation_have_an_effect": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_effect_with_label_answer": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_effect_with_string_answer": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_what_is_the_final_step_of_the_following_process": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_what_is_the_missing_first_step": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_what_might_be_the_first_step_of_the_process": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_what_might_be_the_last_step_of_the_process": {"validation": 6894, "test": 3003, "train": 29808},
"wiqa_which_of_the_following_is_the_supposed_perturbation": {"validation": 6894, "test": 3003, "train": 29808},
"xsum_DOC_boils_down_to_simple_idea_that": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_DOC_given_above_write_one_sentence": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_DOC_how_would_you_rephrase_few_words": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_DOC_tldr": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_DOC_write_summary_of_above": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_article_DOC_summary": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_college_roommate_asked_DOC_so_I_recap": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_read_below_DOC_write_abstract": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_summarize_DOC": {"validation": 11332, "test": 11334, "train": 204045},
"xsum_summarize_this_DOC_summary": {"validation": 11332, "test": 11334, "train": 204045},
"yelp_review_full_based_on_that": {"test": 50000, "train": 650000},
"yelp_review_full_format_rating": {"test": 50000, "train": 650000},
"yelp_review_full_format_score": {"test": 50000, "train": 650000},
"yelp_review_full_format_star": {"test": 50000, "train": 650000},
"yelp_review_full_on_a_scale": {"test": 50000, "train": 650000},
"yelp_review_full_so_i_would": {"test": 50000, "train": 650000},
"yelp_review_full_this_place": {"test": 50000, "train": 650000},
}
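# Per-task seqio feature schema: dtypes/shapes of the tokenized inputs/targets and their
# pretokenized string forms (plus idx/is_correct/weight for the *_score_eval variants),
# along with the shard count and the seqio version used when these caches were built.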
split_infos = {
"adversarial_qa_dbert_answer_the_following_q": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbert_based_on": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbert_generate_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbert_question_context_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbert_tell_what_it_is": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbidaf_answer_the_following_q": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbidaf_based_on": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbidaf_generate_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbidaf_question_context_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_dbidaf_tell_what_it_is": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_droberta_answer_the_following_q": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_droberta_based_on": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_droberta_generate_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_droberta_question_context_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"adversarial_qa_droberta_tell_what_it_is": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_classify": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_classify_question_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_classify_with_choices": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_classify_with_choices_question_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_recommend": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_which_section": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ag_news_which_section_choices": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Challenge_heres_a_problem": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Challenge_i_am_hesitating": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Challenge_multiple_choice": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Challenge_pick_false_options": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Challenge_pick_the_most_correct_option": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Challenge_qa_options": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Easy_heres_a_problem": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Easy_i_am_hesitating": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Easy_multiple_choice": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Easy_pick_false_options": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Easy_pick_the_most_correct_option": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ai2_arc_ARC_Easy_qa_options": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_Is_this_product_review_positive": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_Is_this_review": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_Is_this_review_negative": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_User_recommend_this_product": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_convey_negative_or_positive_sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_flattering_or_not": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_negative_or_positive_tone": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_user_satisfied": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"amazon_polarity_would_you_buy": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_GPT_3_style_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_GPT_3_style_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_GPT_3_style_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_GPT_3_style_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_GPT_3_style_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_GPT_3_style_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_MNLI_crowdsource_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_MNLI_crowdsource_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_MNLI_crowdsource_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_MNLI_crowdsource_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_MNLI_crowdsource_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_MNLI_crowdsource_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_always_sometimes_never_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_always_sometimes_never_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_always_sometimes_never_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_always_sometimes_never_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_always_sometimes_never_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_always_sometimes_never_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_based_on_the_previous_passage_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_based_on_the_previous_passage_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_based_on_the_previous_passage_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_based_on_the_previous_passage_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_based_on_the_previous_passage_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_based_on_the_previous_passage_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_can_we_infer_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_can_we_infer_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_can_we_infer_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_can_we_infer_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_can_we_infer_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_can_we_infer_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_claim_true_false_inconclusive_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_claim_true_false_inconclusive_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_claim_true_false_inconclusive_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_claim_true_false_inconclusive_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_claim_true_false_inconclusive_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_claim_true_false_inconclusive_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_consider_always_sometimes_never_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_consider_always_sometimes_never_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_consider_always_sometimes_never_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_consider_always_sometimes_never_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_consider_always_sometimes_never_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_consider_always_sometimes_never_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_it_follow_that_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_it_follow_that_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_it_follow_that_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_it_follow_that_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_it_follow_that_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_it_follow_that_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_this_imply_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_this_imply_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_this_imply_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_this_imply_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_this_imply_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_does_this_imply_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_possible_impossible_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_possible_impossible_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_possible_impossible_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_possible_impossible_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_possible_impossible_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_possible_impossible_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_true_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_true_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_true_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_true_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_true_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_guaranteed_true_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_justified_in_saying_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_justified_in_saying_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_justified_in_saying_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_justified_in_saying_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_justified_in_saying_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_justified_in_saying_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_must_be_true_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_must_be_true_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_must_be_true_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_must_be_true_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_must_be_true_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_must_be_true_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_should_assume_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_should_assume_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_should_assume_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_should_assume_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_should_assume_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_should_assume_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_take_the_following_as_truth_r1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_take_the_following_as_truth_r1_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_take_the_following_as_truth_r2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_take_the_following_as_truth_r2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_take_the_following_as_truth_r3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"anli_take_the_following_as_truth_r3_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"app_reviews_categorize_rating_using_review": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"app_reviews_convert_to_rating": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"app_reviews_convert_to_star_rating": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"app_reviews_generate_review": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
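    # The summarization / free-form generation tasks that follow (cnn_dailymail,
    # gigaword, common_gen, duorc, ...) have no fixed label set, so they omit
    # "answer_choices" and keep only the tokenized and pretokenized inputs/targets.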
"cnn_dailymail_3.0.0_2_or_3_sentences": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_generate_story": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_news_card_view": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_news_stock": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_news_summary": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_spice_up_story": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_sum_in_brief": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_tldr_summary": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cnn_dailymail_3.0.0_write_an_outline": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_Example_prompt": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_Given_concepts_type_1": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_Given_concepts_type_2": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_Put_together": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_choice_in_concept_centric_sentence_generation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_random_task_template_prompt": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_sentence_to_concepts": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_topic_to_sentence": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"common_gen_topics_from_the_sentence": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_aligned_with_common_sense": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_description_question_option_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_description_question_option_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_explain_why_human": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_generate_explanation_given_text": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_i_think": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_question_description_option_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_question_description_option_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_question_option_description_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_question_option_description_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cos_e_v1.11_rationale": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_answer_to_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_description_question_answer_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_description_question_answer_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_description_question_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_question_description_answer_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_question_description_answer_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_context_question_description_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_description_context_question_answer_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_description_context_question_answer_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_description_context_question_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_no_prompt_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_no_prompt_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"cosmos_qa_only_question_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dbpedia_14_given_a_choice_of_categories_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dbpedia_14_given_a_list_of_category_what_does_the_title_belong_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dbpedia_14_given_list_what_category_does_the_paragraph_belong_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dbpedia_14_pick_one_category_for_the_following_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dream_answer_to_dialogue": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dream_baseline": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dream_generate_first_utterance": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dream_generate_last_utterance": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"dream_read_the_following_conversation_and_answer_the_question": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_answer_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_build_story_around_qa": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_decide_worth_it": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_extract_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_generate_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_generate_question_by_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_movie_director": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_question_answering": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_ParaphraseRC_title_generation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_answer_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_build_story_around_qa": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_decide_worth_it": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_extract_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_generate_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_generate_question_by_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_movie_director": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_question_answering": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"duorc_SelfRC_title_generation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_TLDR": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_first_sentence_title": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_generate_summary_for_this": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_in_a_nutshell": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_make_a_title": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_reverse_writing": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_write_a_title_for_this_sentence": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_write_an_article": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"gigaword_write_its_sentence": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_equivalent": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_generate_paraphrase": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_generate_sentence": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_paraphrase": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_replace": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_same_thing": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_mrpc_want_to_know": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_qqp_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_qqp_duplicate": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_qqp_duplicate_or_not": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_qqp_meaning": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_qqp_quora": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"glue_qqp_same_thing": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
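    # Of the hellaswag prompts below, only the four ending-prediction templates
    # (Predict_ending_with_hint, Randomized_prompts_template, complete_first_then,
    # if_begins_how_continues) have a "*_score_eval" companion with the extra
    # idx/is_correct/weight scoring features; the remaining prompts are used in
    # the standard generation form only.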
"hellaswag_Appropriate_continuation_Yes_or_No": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Open_ended_completion": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Open_ended_start": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Predict_ending_with_hint": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Predict_ending_with_hint_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Randomized_prompts_template": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Randomized_prompts_template_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Reversed_appropriate_continuation_Yes_or_No": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Topic_of_the_context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_Topic_without_the_ending_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_complete_first_then": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_complete_first_then_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_how_ends": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_if_begins_how_continues": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"hellaswag_if_begins_how_continues_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Movie_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Movie_Expressed_Sentiment_2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Negation_template_for_positive_and_negative": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Reviewer_Enjoyment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Reviewer_Enjoyment_Yes_No": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Reviewer_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Reviewer_Opinion_bad_good_choices": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Reviewer_Sentiment_Feeling": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Sentiment_with_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Text_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"imdb_Writer_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"kilt_tasks_hotpotqa_combining_facts": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"kilt_tasks_hotpotqa_complex_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"kilt_tasks_hotpotqa_final_exam": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"kilt_tasks_hotpotqa_formulate": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"kilt_tasks_hotpotqa_straighforward_qa": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"multi_news_distill": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"multi_news_expand_reverse_task_": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"multi_news_summarize": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"multi_news_summary_scenario": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"multi_news_synthesize": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"multi_news_what_are_the_key_points": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_choices": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_choose_an_answer_with_options": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_only_options": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_pick_answer_with_options": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_pick_using_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_which_correct": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"openbookqa_main_which_correct_inverse": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_Concatenation": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_Concatenation_no_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_Meaning": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_Meaning_no_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_PAWS_ANLI_GPT3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_PAWS_ANLI_GPT3_no_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_Rewrite": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_Rewrite_no_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_context_question": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_context_question_no_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_paraphrase_task": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"paws_labeled_final_task_description_no_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_Correct_the_solution": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_Correct_the_solution_if_false_from_sol_1": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_Correct_the_solution_if_false_from_sol_2": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_Does_this_solution_make_sense_sol1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_Does_this_solution_make_sense_sol2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_choose_the_most_appropriate_solution": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_finish_sentence_with_correct_choice": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_no_prompt_needed": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_pick_correct_choice_index": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_pick_correct_choice_with_choice_given_before_goal": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"piqa_what_is_the_correct_ending": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_is_correct_1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_is_correct_2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_qa_with_combined_facts_1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_qa_with_separated_facts_1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_qa_with_separated_facts_2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_qa_with_separated_facts_3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_qa_with_separated_facts_4": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"qasc_qa_with_separated_facts_5": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_description_question_answer_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_description_question_answer_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_description_question_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_question_answer_description_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_question_answer_description_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_question_description_answer_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_question_description_answer_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_context_question_description_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_description_context_question_answer_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_description_context_question_answer_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_description_context_question_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_no_prompt_id": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quail_no_prompt_text": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quarel_choose_between": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quarel_do_not_use": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quarel_heres_a_story": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quarel_logic_test": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quarel_testing_students": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_answer_question_based_on": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_answer_question_below": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_given_the_fact_answer_the_q": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_having_read_above_passage": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_paragraph_question_plain_concat": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_read_passage_below_choose": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_use_info_from_paragraph_question": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quartz_use_info_from_question_paragraph": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Answer_Friend_Question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Answer_Question_Given_Context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Answer_Test": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Context_Contains_Answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Find_Answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Found_Context_Online": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Given_Context_Answer_Question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Guess_Answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Guess_Title_For_Context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_Read_And_Extract_": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"quoref_What_Is_The_Answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Is_this_the_right_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Read_the_article_and_answer_the_question_no_option_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Select_the_best_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Select_the_best_answer_generate_span_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Select_the_best_answer_no_instructions_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Taking_a_test": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Write_a_multi_choice_question_for_the_following_article": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_high_Write_a_multi_choice_question_options_given_": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Is_this_the_right_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Read_the_article_and_answer_the_question_no_option_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Select_the_best_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Select_the_best_answer_generate_span_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Select_the_best_answer_no_instructions_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Taking_a_test": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Write_a_multi_choice_question_for_the_following_article": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"race_middle_Write_a_multi_choice_question_options_given_": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_background_new_situation_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_background_situation_middle": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_given_background_situation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_new_situation_background_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_plain_background_situation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_plain_bottom_hint": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_plain_no_background": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_prompt_beginning": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_prompt_bottom_hint_beginning": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_prompt_bottom_no_hint": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_prompt_mix": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"ropes_read_background_situation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Movie_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Movie_Expressed_Sentiment_2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Reviewer_Enjoyment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Reviewer_Enjoyment_Yes_No": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Reviewer_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Reviewer_Opinion_bad_good_choices": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Reviewer_Sentiment_Feeling": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Sentiment_with_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Text_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"rotten_tomatoes_Writer_Expressed_Sentiment": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_Generate_a_summary_for_this_dialogue": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_Given_the_above_dialogue_write_a_summary": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_Sum_up_the_following_dialogue": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_Summarize_": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_Summarize_this_dialogue_": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_To_sum_up_this_dialog": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"samsum_Write_a_dialogue_that_match_this_summary": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"sciq_Direct_Question": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"sciq_Direct_Question_Closed_Book_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"sciq_Multiple_Choice": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"sciq_Multiple_Choice_Closed_Book_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"sciq_Multiple_Choice_Question_First": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"social_i_qa_Check_if_a_random_answer_is_valid_or_not": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"social_i_qa_Generate_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"social_i_qa_Generate_the_question_from_the_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"social_i_qa_I_was_wondering": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"social_i_qa_Show_choices_and_generate_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"social_i_qa_Show_choices_and_generate_index": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Jeopardy_with_Context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Jeopardy_without_Context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Questions_with_Context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Questions_with_Context_Without_Prompt_Keywords": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Questions_with_Context_Without_Prompt_Keywords_unanswerable": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"squad_v2_Questions_with_Context_unanswerable": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"squad_v2_Topic_Prediction_Context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Topic_Prediction_Context_with_randomized_prompt_options": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Topic_Prediction_Context_with_randomized_prompt_options_placed_in_the_end": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Topic_Prediction_Question_and_Answer_Pair": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Trivia": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"squad_v2_Unanwerable_question": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_GPT_3_Style": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_I_wonder_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_after_reading": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_based_on_the_following_passage": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_based_on_the_previous_passage": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_could_you_tell_me_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_exam": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_exercise": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_valid_binary": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_boolq_yes_no_question": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_GPT_3_style": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_GPT_3_style_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_MNLI_crowdsource": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_MNLI_crowdsource_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_always_sometimes_never": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_always_sometimes_never_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_based_on_the_previous_passage": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_based_on_the_previous_passage_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_can_we_infer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_can_we_infer_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_claim_true_false_inconclusive": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_claim_true_false_inconclusive_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_consider_always_sometimes_never": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_consider_always_sometimes_never_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_does_it_follow_that": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_does_it_follow_that_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_does_this_imply": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_does_this_imply_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_guaranteed_possible_impossible": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_guaranteed_possible_impossible_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_guaranteed_true": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_guaranteed_true_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_justified_in_saying": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_justified_in_saying_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_must_be_true": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_must_be_true_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_should_assume": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_should_assume_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_take_the_following_as_truth": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_cb_take_the_following_as_truth_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_C1_or_C2_premise_so_because_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_C1_or_C2_premise_so_because__score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__As_a_result_C1_or_C2_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__As_a_result_C1_or_C2__score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__What_could_happen_next_C1_or_C2_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__What_could_happen_next_C1_or_C2__score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__which_may_be_caused_by": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__which_may_be_caused_by_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__why_C1_or_C2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa__why_C1_or_C2_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_best_option": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_best_option_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_cause_effect": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_cause_effect_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_choose": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_choose_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_exercise": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_exercise_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_i_am_hesitating": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_i_am_hesitating_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_more_likely": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_more_likely_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_plausible_alternatives": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_copa_plausible_alternatives_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_I_was_going_to_say_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_Would_it_be_good_to_answer_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_confirm": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_correct": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_decide_valid": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_found_this_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_grading": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_is_a_correct_answer_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_is_the_correct_answer_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_multirc_paragraph_question_is_it_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_Add_sentence_after_after_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_Add_sentence_after_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_Can_you_figure_out_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_GPT_3_style_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_GPT_3_style_summary_only_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_GPT_3_style_with_labels_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_GPT_3_style_with_labels_without_hyphens_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_GPT_3_style_without_hyphens_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_In_the_question_above_the_placeholder_stands_for": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_New_highlight_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_News_article_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_Summary_first_continuation_choices_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.5",
},
"super_glue_record_What_could_the_placeholder_be_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_Which_one_is_the_placeholder_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_choose_between": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_corrupted": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_exercise": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_pick_one_option": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_the_placeholder_refers_to_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_record_trying_to_decide": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_GPT_3_style": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_GPT_3_style_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_MNLI_crowdsource": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_MNLI_crowdsource_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_based_on_the_previous_passage": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_based_on_the_previous_passage_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_can_we_infer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_can_we_infer_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_does_it_follow_that": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_does_it_follow_that_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_does_this_imply": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_does_this_imply_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_guaranteed_true": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_guaranteed_true_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_justified_in_saying": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_justified_in_saying_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_must_be_true": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_must_be_true_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_should_assume": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_rte_should_assume_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_GPT_3_prompt": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_GPT_3_prompt_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_GPT_3_prompt_with_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_GPT_3_prompt_with_label_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_affirmation_true_or_false": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_affirmation_true_or_false_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_grammar_homework": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_grammar_homework_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_polysemous": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_polysemous_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_question_context": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_question_context_meaning": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_question_context_meaning_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_question_context_meaning_with_label": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_question_context_meaning_with_label_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_question_context_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_same_sense": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_same_sense_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_similar_sense": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wic_similar_sense_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_GPT_3_Style": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_GPT_3_Style_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_I_think_they_mean": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_I_think_they_mean_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_Who_or_what_is_are": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_Who_or_what_is_are_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_by_p_they_mean": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_by_p_they_mean_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_does_p_stand_for": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_does_p_stand_for_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_does_the_pronoun_refer_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_does_the_pronoun_refer_to_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_in_other_words": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_in_other_words_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_p_is_are_r": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_p_is_are_r_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_replaced_with": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_replaced_with_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_the_pronoun_refers_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"super_glue_wsc.fixed_the_pronoun_refers_to_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_ABBR": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_ABBR_context_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_DESC": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_DESC_context_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_ENTY": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_HUM": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_HUM_context_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_LOC": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_LOC_context_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_NUM": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_NUM_context_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_open": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_fine_grained_open_context_first": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_pick_the_best_descriptor": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_trec1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_trec2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_what_category_best_describe": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trec_which_category_best_describes": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trivia_qa_unfiltered_first_person_context": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trivia_qa_unfiltered_formal_description": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trivia_qa_unfiltered_guess_question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trivia_qa_unfiltered_question_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"trivia_qa_unfiltered_question_with_instruction": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"web_questions_get_the_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"web_questions_potential_correct_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"web_questions_question_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"web_questions_short_general_knowledge_q": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"web_questions_whats_the_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_bio_comprehension": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_bio_guess_person": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_bio_key_content": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_bio_what_content": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_bio_who": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_choose_best_object_affirmative_1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_choose_best_object_affirmative_2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_choose_best_object_affirmative_3": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_choose_best_object_interrogative_1": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_choose_best_object_interrogative_2": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_explain_relation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_generate_object": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_generate_subject": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_hop_original_generate_subject_and_object": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Decide_good_answer": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Direct_Answer_to_Question": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Generate_Question_from_Topic": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Is_This_True_": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Jeopardy_style": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Topic_Prediction_Answer_Only": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Topic_Prediction_Question_Only": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_Topic_Prediction_Question_and_Answer_Pair": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_automatic_system": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_exercise": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiki_qa_found_on_google": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_Replace": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_Replace_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_does_underscore_refer_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_does_underscore_refer_to_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_fill_in_the_blank": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_fill_in_the_blank_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_stand_for": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_stand_for_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_underscore_refer_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_debiased_underscore_refer_to_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_Replace": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_Replace_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_does_underscore_refer_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_does_underscore_refer_to_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_fill_in_the_blank": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_fill_in_the_blank_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_stand_for": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_stand_for_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_underscore_refer_to": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"winogrande_winogrande_xl_underscore_refer_to_score_eval": {
"features": {
"idx": {"dtype": "int32", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"is_correct": {"dtype": "bool", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
"weight": {"dtype": "float32", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_does_the_supposed_perturbation_have_an_effect": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_effect_with_label_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_effect_with_string_answer": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_what_is_the_final_step_of_the_following_process": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_what_is_the_missing_first_step": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_what_might_be_the_first_step_of_the_process": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_what_might_be_the_last_step_of_the_process": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"wiqa_which_of_the_following_is_the_supposed_perturbation": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_DOC_boils_down_to_simple_idea_that": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_DOC_given_above_write_one_sentence": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_DOC_how_would_you_rephrase_few_words": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_DOC_tldr": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_DOC_write_summary_of_above": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_article_DOC_summary": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_college_roommate_asked_DOC_so_I_recap": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_read_below_DOC_write_abstract": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_summarize_DOC": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"xsum_summarize_this_DOC_summary": {
"features": {
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_based_on_that": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_format_rating": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_format_score": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_format_star": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_on_a_scale": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_so_i_would": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
"yelp_review_full_this_place": {
"features": {
"answer_choices": {"dtype": "string", "shape": [None]},
"inputs": {"dtype": "int32", "shape": [None]},
"inputs_pretokenized": {"dtype": "string", "shape": []},
"targets": {"dtype": "int32", "shape": [None]},
"targets_pretokenized": {"dtype": "string", "shape": []},
},
"num_shards": 1,
"seqio_version": "0.0.6",
},
}
def find_task_splits_and_features_dict():
"""Get the task available (list was pre-computed by `print_data_split_sizes.py`), and get the features for each task."""
task_splits_and_features = defaultdict(dict)
data_split_sizes = DATA_SPLITS_SIZES
for task_name, split_sizes in data_split_sizes.items():
for split_name in split_sizes.keys():
split_info = split_infos[task_name]
features_dict = split_info["features"]
assert split_info["num_shards"] == 1 # TODO -> handle multiple shards
if not task_splits_and_features[task_name]:
task_splits_and_features[task_name] = {
"splits": [],
"features_dict": features_dict,
}
task_splits_and_features[task_name]["splits"].append(split_name)
assert features_dict == task_splits_and_features[task_name]["features_dict"]
return task_splits_and_features
_TASK_SPLITS_AND_FEATURES_DICT = find_task_splits_and_features_dict()
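# Illustrative sketch (editorial addition, not part of the original module): each entry of
# _TASK_SPLITS_AND_FEATURES_DICT is expected to look roughly like
#
#   "xsum_DOC_tldr": {
#       "splits": [...],  # split names come from DATA_SPLITS_SIZES, e.g. ["train", "validation"]
#       "features_dict": {
#           "inputs": {"dtype": "int32", "shape": [None]},
#           "inputs_pretokenized": {"dtype": "string", "shape": []},
#           "targets": {"dtype": "int32", "shape": [None]},
#           "targets_pretokenized": {"dtype": "string", "shape": []},
#       },
#   }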
| NeMo-main | scripts/nlp_language_modeling/t0/tasks_splits_and_features.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script will merge prompt-specific train files into a single file per task.
"""
import json
import os
from argparse import ArgumentParser
tasks = [
'adversarial_qa',
'ag_news',
'ai2_arc_ARC_Challenge',
'ai2_arc_ARC_Easy',
'amazon_polarity',
'anli',
'app_reviews',
'cnn_dailymail_3.0.0',
'common_gen',
'cos_e_v1.11',
'cosmos_qa',
'dbpedia_14',
'dream',
'duorc_ParaphraseRC',
'duorc_SelfRC',
'gigaword',
'glue_mrpc',
'glue_qqp',
'hellaswag',
'imdb',
'kilt_tasks_hotpotqa',
'multi_news',
'openbookqa_main',
'paws_labeled_final',
'piqa',
'qasc',
'quail',
'quarel',
'quartz',
'quoref',
'race_high',
'race_middle',
'ropes',
'rotten_tomatoes',
'samsum',
'sciq',
'social_i_qa',
'squad_v2',
'super_glue_boolq',
'super_glue_cb',
'super_glue_copa',
'super_glue_multirc',
'super_glue_record',
'super_glue_rte',
'super_glue_wic',
'super_glue_wsc',
'trec',
'trivia_qa',
'web_questions',
'wiki_bio',
'wiki_hop',
'wiki_qa',
'winogrande_winogrande',
'wiqa',
'xsum',
'yelp_review_full',
]
def merge_train_folder(train_data_folder, merged_train_data_folder):
if not os.path.exists(merged_train_data_folder):
os.makedirs(merged_train_data_folder)
task_counter = {task: 0 for task in tasks}
fptrs = {task: open(os.path.join(merged_train_data_folder, task + '.jsonl'), 'w') for task in tasks}
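# One output handle per task; every prompt-template file whose name starts with the
# task name is appended to that task's merged .jsonl in the loop below.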
for idx, fname in enumerate(os.listdir(train_data_folder)):
if idx % 10 == 0:
print(f'Processed {idx + 1}/{len(os.listdir(train_data_folder))} files ...')
if fname.endswith('.jsonl') and '_score_eval' not in fname:
found = False
for task in tasks:
if fname.startswith(task):
task_counter[task] += 1
found = True
with open(os.path.join(train_data_folder, fname), 'r') as f:
for line in f:
line = json.loads(line)
line['task_name_with_prompt'] = fname
if line['input'].strip() == '':
print(f'WARNING: Empty input for {fname}')
continue
if line['output'].strip() == '':
print(f'WARNING: Empty output for {fname}')
continue
fptrs[task].write(json.dumps(line) + '\n')
if not found:
print(f'WARNING: Could not find task for {fname}')
for _, v in fptrs.items():
v.close()
for task in tasks:
if task_counter[task] == 0:
print('WARNING: No files found for task: ', task)
for k, v in task_counter.items():
print(f'Task {k} had {v} prompt templates.')
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument(
"--p3_processed_train_dataset_path",
type=str,
required=True,
help="Path to the processed P3 train dataset. This is the output of the t0_dataset_preproc.py script.",
)
parser.add_argument(
"--p3_processed_merged_train_dataset_path",
type=str,
required=True,
help="Path to output folder where merged JSONL files will be written.",
)
args = parser.parse_args()
merge_train_folder(args.p3_processed_train_dataset_path, args.p3_processed_merged_train_dataset_path)
| NeMo-main | scripts/nlp_language_modeling/t0/merge_train_tasks.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Builds a .nemo file with average weights over multiple .ckpt files (assumes .ckpt files in same folder as .nemo file).
Usage example for building *-averaged.nemo for a given .nemo file:
NeMo/scripts/checkpoint_averaging/checkpoint_averaging.py my_model.nemo
Usage example for building *-averaged.nemo files for all results in sub-directories under current path:
find . -name '*.nemo' | grep -v -- "-averaged.nemo" | xargs NeMo/scripts/checkpoint_averaging/checkpoint_averaging.py
NOTE: if you get the following error `AttributeError: Can't get attribute '???' on <module '__main__' from '???'>`
use --import_fname_list <FILE> with all files that contain missing classes.
"""
import argparse
import glob
import importlib
import os
import sys
import torch
from tqdm.auto import tqdm
from nemo.core import ModelPT
from nemo.utils import logging, model_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'model_fname_list',
metavar='NEMO_FILE_OR_FOLDER',
type=str,
nargs='+',
help='Input .nemo files (or folders who contains them) to parse',
)
parser.add_argument(
'--import_fname_list',
metavar='FILE',
type=str,
nargs='+',
default=[],
help='A list of Python file names to "from FILE import *" (Needed when some classes were defined in __main__ of a script)',
)
parser.add_argument(
'--class_path', type=str, default='', help='A path to class "module.submodule.class" (if given)',
)
args = parser.parse_args()
logging.info(
f"\n\nIMPORTANT:\nIf you get the following error:\n\t(AttributeError: Can't get attribute '???' on <module '__main__' from '???'>)\nuse:\n\t--import_fname_list\nfor all files that contain missing classes.\n\n"
)
for fn in args.import_fname_list:
logging.info(f"Importing * from {fn}")
sys.path.insert(0, os.path.dirname(fn))
globals().update(importlib.import_module(os.path.splitext(os.path.basename(fn))[0]).__dict__)
device = torch.device("cpu")
# loop over all folders with .nemo files (or .nemo files)
for model_fname_i, model_fname in enumerate(args.model_fname_list):
if not model_fname.endswith(".nemo"):
# assume model_fname is a folder that contains a .nemo file (skip .nemo files matching "*-averaged.nemo")
nemo_files = list(
filter(lambda fn: not fn.endswith("-averaged.nemo"), glob.glob(os.path.join(model_fname, "*.nemo")))
)
if len(nemo_files) != 1:
raise RuntimeError(f"Expected exactly one .nemo file but discovered {len(nemo_files)} .nemo files")
model_fname = nemo_files[0]
model_folder_path = os.path.dirname(model_fname)
fn, fe = os.path.splitext(model_fname)
avg_model_fname = f"{fn}-averaged{fe}"
logging.info(f"\n===> [{model_fname_i+1} / {len(args.model_fname_list)}] Parsing folder {model_folder_path}\n")
# restore model from .nemo file path
model_cfg = ModelPT.restore_from(restore_path=model_fname, return_config=True)
if args.class_path:
classpath = args.class_path
else:
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath)
logging.info(f"Loading model {model_fname}")
nemo_model = imported_class.restore_from(restore_path=model_fname, map_location=device)
# search for all checkpoints (ignore -last.ckpt)
checkpoint_paths = [
os.path.join(model_folder_path, x)
for x in os.listdir(model_folder_path)
if x.endswith('.ckpt') and not x.endswith('-last.ckpt')
]
""" < Checkpoint Averaging Logic > """
# load state dicts
n = len(checkpoint_paths)
avg_state = None
logging.info(f"Averaging {n} checkpoints ...")
for ix, path in enumerate(tqdm(checkpoint_paths, total=n, desc='Averaging checkpoints')):
checkpoint = torch.load(path, map_location=device)
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
else:
raise RuntimeError(f"Checkpoint from {path} does not include a state_dict.")
if ix == 0:
# Initial state
avg_state = checkpoint
logging.info(f"Initialized average state dict with checkpoint:\n\t{path}")
else:
# Accumulated state
for k in avg_state:
avg_state[k] = avg_state[k] + checkpoint[k]
logging.info(f"Updated average state dict with state from checkpoint:\n\t{path}")
for k in avg_state:
if str(avg_state[k].dtype).startswith("torch.int"):
# For int type, not averaged, but only accumulated.
# e.g. BatchNorm.num_batches_tracked
pass
else:
avg_state[k] = avg_state[k] / n
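# At this point avg_state holds the element-wise mean of every floating-point tensor,
# i.e. avg_state[k] == (ckpt_1[k] + ... + ckpt_n[k]) / n, while integer buffers
# (e.g. BatchNorm.num_batches_tracked) keep their accumulated sums.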
# restore merged weights into model
nemo_model.load_state_dict(avg_state, strict=True)
# Save model
logging.info(f"Saving average model to:\n\t{avg_model_fname}")
nemo_model.save_to(avg_model_fname)
if __name__ == '__main__':
main()
| NeMo-main | scripts/checkpoint_averaging/checkpoint_averaging.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Builds a .nemo file with average weights over multiple .ckpt files (assumes .ckpt files in same folder as .nemo file).
Usage example for building *-averaged.nemo for a given .nemo file:
NeMo/scripts/checkpoint_averaging/megatron_checkpoint_averaging.py my_model.nemo
Usage example for building *-averaged.nemo files for all results in sub-directories under current path:
find . -name '*.nemo' | grep -v -- "-averaged.nemo" | xargs NeMo/scripts/checkpoint_averaging/megatron_checkpoint_averaging.py
NOTE: if you get the following error `AttributeError: Can't get attribute '???' on <module '__main__' from '???'>`
use --import_fname_list <FILE> with all files that contain missing classes.
"""
import argparse
import glob
import importlib
import os
import sys
import torch
from omegaconf.omegaconf import OmegaConf, open_dict
from pytorch_lightning.trainer.trainer import Trainer
from nemo.collections.nlp.parts.nlp_overrides import NLPDDPStrategy, NLPSaveRestoreConnector
from nemo.core import ModelPT
from nemo.utils import logging, model_utils
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'model_fname_list',
metavar='NEMO_FILE_OR_FOLDER',
type=str,
nargs='+',
help='Input .nemo files (or folders who contains them) to parse',
)
parser.add_argument(
'--import_fname_list',
type=str,
nargs='+',
default=[],
help='A list of Python file names to "from FILE import *" (Needed when some classes were defined in __main__ of a script)',
)
parser.add_argument(
'--class_path', type=str, default='', help='A path to class "module.submodule.class" (if given)',
)
args = parser.parse_args()
logging.info(
f"\n\nIMPORTANT: Use --import_fname_list for all files that contain missing classes (AttributeError: Can't get attribute '???' on <module '__main__' from '???'>)\n\n"
)
for fn in args.import_fname_list:
logging.info(f"Importing * from {fn}")
sys.path.insert(0, os.path.dirname(fn))
globals().update(importlib.import_module(os.path.splitext(os.path.basename(fn))[0]).__dict__)
device = torch.device("cpu")
trainer = Trainer(strategy=NLPDDPStrategy(), devices=1, num_nodes=1, precision=16, accelerator='gpu')
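# The single-device trainer is used only for restoring the Megatron model via
# NLPDDPStrategy / NLPSaveRestoreConnector; no training is run in this script.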
# loop over all folders with .nemo files (or .nemo files)
for model_fname_i, model_fname in enumerate(args.model_fname_list):
if not model_fname.endswith(".nemo"):
# assume model_fname is a folder that contains a .nemo file (skip .nemo files matching "*-averaged.nemo")
nemo_files = list(
filter(lambda fn: not fn.endswith("-averaged.nemo"), glob.glob(os.path.join(model_fname, "*.nemo")))
)
if len(nemo_files) != 1:
raise RuntimeError(f"Expected only a single .nemo files but discovered {len(nemo_files)} .nemo files")
model_fname = nemo_files[0]
model_folder_path = os.path.dirname(model_fname)
fn, fe = os.path.splitext(model_fname)
avg_model_fname = f"{fn}-averaged{fe}"
logging.info(f"\n===> [{model_fname_i+1} / {len(args.model_fname_list)}] Parsing folder {model_folder_path}\n")
# restore model from .nemo file path
model_cfg = ModelPT.restore_from(
restore_path=model_fname,
return_config=True,
save_restore_connector=NLPSaveRestoreConnector(),
trainer=trainer,
)
if args.class_path:
classpath = args.class_path
else:
classpath = model_cfg.target # original class path
OmegaConf.set_struct(model_cfg, True)
with open_dict(model_cfg):
if model_cfg.get('megatron_amp_O2', False):
model_cfg.megatron_amp_O2 = False
imported_class = model_utils.import_class_by_path(classpath)
logging.info(f"Loading model {model_fname}")
nemo_model = imported_class.restore_from(
restore_path=model_fname,
map_location=device,
save_restore_connector=NLPSaveRestoreConnector(),
trainer=trainer,
override_config_path=model_cfg,
)
# search for all checkpoints (ignore -last.ckpt)
checkpoint_paths = [
os.path.join(model_folder_path, x)
for x in os.listdir(model_folder_path)
if x.endswith('.ckpt') and not x.endswith('-last.ckpt')
]
""" < Checkpoint Averaging Logic > """
# load state dicts
n = len(checkpoint_paths)
avg_state = None
logging.info(f"Averaging {n} checkpoints ...")
for ix, path in enumerate(checkpoint_paths):
checkpoint = torch.load(path, map_location=device)
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
if ix == 0:
# Initial state
avg_state = checkpoint
logging.info(f"Initialized average state dict with checkpoint : {path}")
else:
# Accumulated state
for k in avg_state:
avg_state[k] = avg_state[k] + checkpoint[k]
logging.info(f"Updated average state dict with state from checkpoint : {path}")
for k in avg_state:
if str(avg_state[k].dtype).startswith("torch.int"):
# For int type, not averaged, but only accumulated.
# e.g. BatchNorm.num_batches_tracked
pass
else:
avg_state[k] = avg_state[k] / n
# restore merged weights into model
nemo_model.load_state_dict(avg_state, strict=True)
# Save model
logging.info(f"Saving average mdel to: {avg_model_fname}")
nemo_model.save_to(avg_model_fname)
if __name__ == '__main__':
main()
| NeMo-main | scripts/checkpoint_averaging/megatron_checkpoint_averaging.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Changes to script
Change the script to import the NeMo model class you would like to load a checkpoint for,
then update the model constructor to use this model class. The location to change is marked by the line:
<<< Change model class here ! >>>
By default, this script imports and creates the `EncDecCTCModelBPE` class but it can be
changed to any NeMo Model.
# Run the script
## Saving a .nemo model file (loaded with ModelPT.restore_from(...))
HYDRA_FULL_ERROR=1 python average_model_checkpoints.py \
--config-path="<path to config directory>" \
--config-name="<config name>" \
name=<name of the averaged checkpoint> \
+checkpoint_dir=<OPTIONAL: directory of checkpoint> \
+checkpoint_paths=\"[/path/to/ptl_1.ckpt,/path/to/ptl_2.ckpt,/path/to/ptl_3.ckpt,...]\"
## Saving an averaged pytorch checkpoint (loaded with torch.load(...))
HYDRA_FULL_ERROR=1 python average_model_checkpoints.py \
--config-path="<path to config directory>" \
--config-name="<config name>" \
name=<name of the averaged checkpoint> \
+checkpoint_dir=<OPTIONAL: directory of checkpoint> \
+checkpoint_paths=\"[/path/to/ptl_1.ckpt,/path/to/ptl_2.ckpt,/path/to/ptl_3.ckpt,...]\" \
+save_ckpt_only=true
"""
import os
import pytorch_lightning as pl
import torch
from omegaconf import OmegaConf, open_dict
# Change this import to the model you would like to average
from nemo.collections.asr.models import EncDecCTCModelBPE
from nemo.core.config import hydra_runner
from nemo.utils import logging
def process_config(cfg: OmegaConf):
if 'name' not in cfg or cfg.name is None:
raise ValueError("`cfg.name` must be provided to save a model checkpoint")
if 'checkpoint_paths' not in cfg or cfg.checkpoint_paths is None:
raise ValueError(
"`cfg.checkpoint_paths` must be provided as a list of one or more str paths to "
"pytorch lightning checkpoints"
)
save_ckpt_only = False
with open_dict(cfg):
name_prefix = cfg.name
checkpoint_paths = cfg.pop('checkpoint_paths')
if 'checkpoint_dir' in cfg:
checkpoint_dir = cfg.pop('checkpoint_dir')
else:
checkpoint_dir = None
if 'save_ckpt_only' in cfg:
save_ckpt_only = cfg.pop('save_ckpt_only')
if type(checkpoint_paths) not in (list, tuple):
checkpoint_paths = str(checkpoint_paths).replace("[", "").replace("]", "")
checkpoint_paths = checkpoint_paths.split(",")
checkpoint_paths = [ckpt_path.strip() for ckpt_path in checkpoint_paths]
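# e.g. the Hydra override string "[/path/a.ckpt,/path/b.ckpt]" becomes ["/path/a.ckpt", "/path/b.ckpt"]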
if checkpoint_dir is not None:
checkpoint_paths = [os.path.join(checkpoint_dir, path) for path in checkpoint_paths]
return name_prefix, checkpoint_paths, save_ckpt_only
@hydra_runner(config_path=None, config_name=None)
def main(cfg):
name_prefix, checkpoint_paths, save_ckpt_only = process_config(cfg)
if not save_ckpt_only:
trainer = pl.Trainer(**cfg.trainer)
# <<< Change model class here ! >>>
# Model architecture which will contain the averaged checkpoints
# Change the model constructor to the one you would like (if needed)
model = EncDecCTCModelBPE(cfg=cfg.model, trainer=trainer)
""" < Checkpoint Averaging Logic > """
# load state dicts
n = len(checkpoint_paths)
avg_state = None
logging.info(f"Averaging {n} checkpoints ...")
for ix, path in enumerate(checkpoint_paths):
checkpoint = torch.load(path, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
if ix == 0:
# Initial state
avg_state = checkpoint
logging.info(f"Initialized average state dict with checkpoint : {path}")
else:
# Accumulated state
for k in avg_state:
avg_state[k] = avg_state[k] + checkpoint[k]
logging.info(f"Updated average state dict with state from checkpoint : {path}")
for k in avg_state:
if str(avg_state[k].dtype).startswith("torch.int"):
# For int type, not averaged, but only accumulated.
# e.g. BatchNorm.num_batches_tracked
pass
else:
avg_state[k] = avg_state[k] / n
# Save model
if save_ckpt_only:
ckpt_name = name_prefix + '-averaged.ckpt'
torch.save(avg_state, ckpt_name)
logging.info(f"Averaged pytorch checkpoint saved as : {ckpt_name}")
else:
# Set model state
logging.info("Loading averaged state dict in provided model")
model.load_state_dict(avg_state, strict=True)
ckpt_name = name_prefix + '-averaged.nemo'
model.save_to(ckpt_name)
logging.info(f"Averaged model saved as : {ckpt_name}")
if __name__ == '__main__':
main()
| NeMo-main | scripts/checkpoint_averaging/average_model_checkpoints.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Example: python scripts/checkpoint_averaging/average_model_checkpoints.py \
--name_prefix=<checkpoint name> \
--checkpoint_dir=<folder with mp_rank_X subfolders containing checkpoints>
will generate a new file in each of the mp_rank_X subfolders named <checkpoint name>-averaged.ckpt
Typically you should follow up this script with a call to examples/nlp/language_modeling/megatron_ckpt_to_nemo.py
to convert .ckpt checkpoint to .nemo format.
"""
import argparse
import os
import torch
from nemo.utils import logging
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--name_prefix', help='Name of the final checkpoint. Will append -averaged.ckpt automatically.',
)
parser.add_argument(
'--checkpoint_dir', help='Folder containing all mp_rank_X subfolders.',
)
args = parser.parse_args()
# repeating for all ranks
for rank_dir in os.listdir(args.checkpoint_dir):
if not rank_dir.startswith('mp_rank_'):
continue
logging.info("Processing %s", rank_dir)
full_checkpoint_dir = os.path.join(args.checkpoint_dir, rank_dir)
checkpoint_paths = [
os.path.join(full_checkpoint_dir, x)
for x in os.listdir(full_checkpoint_dir)
if x.endswith('.ckpt') and not x.endswith('-last.ckpt')
]
# everything below is copied over from average_model_checkpoints.py
""" < Checkpoint Averaging Logic > """
# load state dicts
n = len(checkpoint_paths)
avg_state = None
logging.info(f"Averaging {n} checkpoints ...")
for ix, path in enumerate(checkpoint_paths):
checkpoint = torch.load(path, map_location='cpu')
if 'state_dict' in checkpoint:
checkpoint = checkpoint['state_dict']
if ix == 0:
# Initial state
avg_state = checkpoint
logging.info(f"Initialized average state dict with checkpoint : {path}")
else:
# Accumulated state
for k in avg_state:
avg_state[k] = avg_state[k] + checkpoint[k]
logging.info(f"Updated average state dict with state from checkpoint : {path}")
for k in avg_state:
if str(avg_state[k].dtype).startswith("torch.int"):
# For int type, not averaged, but only accumulated.
# e.g. BatchNorm.num_batches_tracked
pass
else:
avg_state[k] = avg_state[k] / n
# Save model
ckpt_name = os.path.join(full_checkpoint_dir, args.name_prefix + '-averaged.ckpt')
torch.save({'state_dict': avg_state}, ckpt_name)
logging.info(f"Averaged pytorch checkpoint saved as : {ckpt_name}")
if __name__ == '__main__':
main()
| NeMo-main | scripts/checkpoint_averaging/checkpoint_averaging_model_parallel.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from argparse import ArgumentParser
from nemo.collections.nlp.models.machine_translation.mt_enc_dec_model import MTEncDecModel
"""
python preprocess_tokenization_normalization.py --input-src train.en \
--input-tgt train.zh \
--output-src train.tok.norm.en \
--output-tgt train.tok.norm.zh \
--source-lang en \
--target-lang zh
"""
logging.basicConfig(level=logging.INFO)
def tokenize_normalize(file, wfile, processor):
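# Reads `file` line by line, applies the MT normalizer and tokenizer returned by
# MTEncDecModel.setup_pre_and_post_processing_utils, and writes one processed line
# per input line to `wfile`.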
rptr = open(file)
wptr = open(wfile, 'w')
logging.info(f"Processing {file}")
for line in rptr:
txt = line.strip()
if processor is not None:
txt = processor.normalize(txt)
txt = processor.tokenize(txt)
wptr.write(txt + "\n")
logging.info(f"Output written to {file}")
rptr.close()
wptr.close()
def main():
parser = ArgumentParser()
parser.add_argument("--input-src", type=str, required=True, help="Path to input file in src language")
parser.add_argument("--input-tgt", type=str, required=True, help="Path to input file in tgt language")
parser.add_argument("--output-src", type=str, required=True, help="Path to write the src language output file")
parser.add_argument("--output-tgt", type=str, required=True, help="Path to write the tgt language output file")
parser.add_argument("--source-lang", type=str, required=True, help="Language for the source file")
parser.add_argument("--target-lang", type=str, required=True, help="Language for the target file")
args = parser.parse_args()
src_processor, tgt_processor = MTEncDecModel.setup_pre_and_post_processing_utils(
args.source_lang, args.target_lang, "bpe-placeholder", "bpe-placeholder"
)
tokenize_normalize(args.input_src, args.output_src, src_processor)
tokenize_normalize(args.input_tgt, args.output_tgt, tgt_processor)
if __name__ == '__main__':
main()
| NeMo-main | scripts/neural_machine_translation/preprocess_tokenization_normalization.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import multiprocessing as mp
import re
import shutil
import warnings
from pathlib import Path
from time import sleep
from tqdm import tqdm
"""
Usage:
python length_ratio_filter.py --input-src train.en \
--input-tgt train.de \
--output-src train_lang_filtered.en \
--output-tgt train_lang_filtered.de \
--removed-src train_removed.en \
--removed-tgt train_removed.de \
--min_length 1 \
--max_length 512 \
--ratio 1.3
"""
logging.basicConfig(level=logging.INFO)
def get_args():
parser = argparse.ArgumentParser(
description="""A multi-processed script for filtering a parallel corpus to remove sentences that are less than a minimum length
or longer than a maximum length. Also filters based on the length ratio between source and target sentences"""
)
parser.add_argument(
"--input-src",
"-s",
help="Path to the input file which has to contain text in language `source_lang`.",
required=True,
type=Path,
)
parser.add_argument(
"--input-tgt",
"-t",
help="Path to the input file which has to contain text in language `target_lang`. If not provided, data is "
"processed as monolingual.",
type=Path,
)
parser.add_argument(
"--output-src",
"-S",
help="Path to the file where filtered `input_src` will be saved.",
required=True,
type=Path,
)
parser.add_argument(
"--output-tgt", "-T", help="Path to the output target file", type=Path,
)
parser.add_argument(
"--removed-src", "-r", required=True, help="Path to file where removed source lines will be saved", type=Path,
)
parser.add_argument(
"--removed-tgt", "-R", help="Path to file where removed target lines will be saved", type=Path,
)
parser.add_argument(
"--num-jobs",
"-j",
type=int,
help="Number of jobs. By default, the number of jobs is equal to the number of CPU cores.",
)
parser.add_argument(
"--min-length", "-m", type=int, default=1, help="Minimum sequence length (after .split())",
)
parser.add_argument(
"--max-length", "-M", type=int, default=512, help="Maximum sequence length (after .split())",
)
parser.add_argument(
"--ratio",
"-z",
type=float,
default=1.3,
help="Ratio of the length of the source sentence to the length of the target sentence.",
)
args = parser.parse_args()
args.input_src = args.input_src.expanduser()
if args.input_tgt is not None:
args.input_tgt = args.input_tgt.expanduser()
args.output_src = args.output_src.expanduser()
if args.output_tgt is not None:
args.output_tgt = args.output_tgt.expanduser()
args.removed_src = args.removed_src.expanduser()
if args.removed_tgt is not None:
args.removed_tgt = args.removed_tgt.expanduser()
return args
def get_edges_in_1_file(fn, num_parts):
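# Returns byte offsets that split `fn` into `num_parts` chunks of (roughly) equal
# numbers of lines, plus the total line count. Workers seek to these offsets so that
# each one processes a disjoint slice of the file.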
num_lines = 0
edges = [0]
with open(fn) as f:
i = 0
for l in f:
i += len(l.encode('utf-8'))
edges.append(i)
num_lines += 1
return [edges[int(i * num_lines / num_parts)] for i in range(num_parts)] + [edges[-1]], num_lines
def get_edges_and_num_lines(src_fn, tgt_fn, num_parts):
src_edges, src_num_lines = get_edges_in_1_file(src_fn, num_parts)
assert num_parts + 1 == len(src_edges)
src_edges = [(src_edges[i], src_edges[i + 1]) for i in range(len(src_edges) - 1)]
if tgt_fn is not None:
tgt_edges, tgt_num_lines = get_edges_in_1_file(tgt_fn, num_parts)
tgt_edges = [(tgt_edges[i], tgt_edges[i + 1]) for i in range(len(tgt_edges) - 1)]
if tgt_num_lines != src_num_lines:
raise ValueError(
f"Source {repr(src_fn)} and target {repr(tgt_fn)} files have different number of lines "
f"{src_num_lines} and {tgt_num_lines} correspondingly."
)
else:
tgt_edges = [None] * num_parts
assert len(src_edges) == num_parts
return src_edges, tgt_edges, src_num_lines
def filter_pairs(
src_edges,
tgt_edges,
input_src,
input_tgt,
filtered_dir_src,
filtered_dir_tgt,
removed_dir_src,
removed_dir_tgt,
min_length,
max_length,
length_ratio,
rank,
):
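# Worker routine: processes the byte range [src_edges[0], src_edges[1]) of the source file
# and the matching range of the target file, writing kept pairs to per-rank files in the
# filtered directories and rejected pairs to per-rank files in the removed directories.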
global counter
output_src = filtered_dir_src / Path(f"rank{rank}")
output_src_removed = removed_dir_src / Path(f"rank{rank}")
output_tgt = filtered_dir_tgt / Path(f"rank{rank}")
output_tgt_removed = removed_dir_tgt / Path(f"rank{rank}")
with open(input_src) as in_src, open(input_tgt) as in_tgt, open(output_src, 'w') as out_src, open(
output_tgt, 'w'
) as out_tgt, open(output_src_removed, 'w') as out_r_src, open(output_tgt_removed, 'w') as out_r_tgt:
in_src.seek(src_edges[0])
in_tgt.seek(tgt_edges[0])
src_l, tgt_l, i = in_src.readline(), in_tgt.readline(), 0
if in_src.tell() > src_edges[1] or in_tgt.tell() > tgt_edges[1]:
return
while src_l and tgt_l:
with counter.get_lock():
counter.value += 1
src_l = src_l.strip()
tgt_l = tgt_l.strip()
src_l_len = len(src_l.split())
tgt_l_len = len(tgt_l.split())
# Length filtering
if (src_l_len < min_length or src_l_len > max_length) or (
tgt_l_len < min_length or tgt_l_len > max_length
):
out_r_src.write(src_l + "\n")
out_r_tgt.write(tgt_l + "\n")
# Ratio filtering
elif src_l_len / tgt_l_len > length_ratio or tgt_l_len / src_l_len > length_ratio:
out_r_src.write(src_l + "\n")
out_r_tgt.write(tgt_l + "\n")
else:
out_src.write(src_l + '\n')
out_tgt.write(tgt_l + '\n')
if in_src.tell() >= src_edges[1]:
if in_tgt.tell() < tgt_edges[1]:
raise ValueError(
f"Edges of target and source has to be reached simultaneously, whereas "
f"in_src.tell()={in_src.tell()}, in_tgt.tell()={in_tgt.tell()}, "
f"src_edges[1]={src_edges[1]}, tgt_edges[1]={tgt_edges[1]}."
)
break
if in_tgt.tell() >= tgt_edges[1]:
raise ValueError(
f"Edges of target and source has to be reached simultaneously, whereas "
f"in_src.tell()={in_src.tell()}, in_tgt.tell()={in_tgt.tell()}, "
f"src_edges[1]={src_edges[1]}, tgt_edges[1]={tgt_edges[1]}."
)
src_l, tgt_l, i = in_src.readline(), in_tgt.readline(), i + 1
with counter.get_lock():
counter.value += 1
def filter_by_length_and_ratio(args):
(
src_edges,
tgt_edges,
input_src,
input_tgt,
filtered_dir_src,
filtered_dir_tgt,
removed_dir_src,
removed_dir_tgt,
min_length,
max_length,
length_ratio,
rank,
) = args
logging.debug(f"filter by args: {min_length}, {max_length}, {length_ratio}")
filter_pairs(
src_edges,
tgt_edges,
input_src,
input_tgt,
filtered_dir_src,
filtered_dir_tgt,
removed_dir_src,
removed_dir_tgt,
min_length,
max_length,
length_ratio,
rank,
)
def _cat_results(out_file, tmp_dir):
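# Concatenates the per-rank temporary files (rank0, rank1, ...) written by the workers
# into a single output file, warning about anything unexpected found in tmp_dir.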
file_name_pattern = re.compile(r"/rank([1-9][0-9]*)|0$")
with out_file.open('w') as out_f:
for f in sorted(tmp_dir.iterdir()):
if not f.is_file():
warnings.warn(f"Unexpected not file {f}")
elif not file_name_pattern.search(str(f)):
warnings.warn(f"Unexpected file {f}")
else:
with f.open('r') as in_f:
for l in in_f:
out_f.write(l)
def cat_results(out_files, tmp_dirs):
for o_f, t_d in zip(out_files, tmp_dirs):
if o_f is None or t_d is None:
if o_f is not None or t_d is not None:
warnings.warn(
f"Output file and tmp directory are expected to be `None` simultaneously whereas tmp directory "
f"is {t_d} and output file is {o_f}."
)
else:
_cat_results(o_f, t_d)
counter = None
def init(args):
global counter
counter = args
def main():
args = get_args()
tmp_dir = Path("tmp")
i = 0
while tmp_dir.exists():
tmp_dir = Path("tmp" + str(i))
i += 1
tmp_filtered = tmp_dir / Path("filtered")
tmp_filtered_src = tmp_filtered / Path("src")
tmp_filtered_src.mkdir(parents=True, exist_ok=True)
if args.input_tgt is None:
tmp_filtered_tgt = None
else:
tmp_filtered_tgt = tmp_filtered / Path("tgt")
tmp_filtered_tgt.mkdir(parents=True, exist_ok=True)
tmp_removed = tmp_dir / Path("removed")
tmp_removed_src = tmp_removed / Path("src")
tmp_removed_src.mkdir(parents=True, exist_ok=True)
if args.input_tgt is None:
tmp_removed_tgt = None
else:
tmp_removed_tgt = tmp_removed / Path("tgt")
tmp_removed_tgt.mkdir(parents=True, exist_ok=True)
num_jobs = mp.cpu_count() if args.num_jobs is None else args.num_jobs
src_edges, tgt_edges, num_lines = get_edges_and_num_lines(args.input_src, args.input_tgt, num_jobs)
global counter
counter = mp.Value('i', 0)
t = tqdm(total=num_lines, desc="Length Ratio Filtering")
with mp.Pool(num_jobs, initializer=init, initargs=(counter,)) as pool:
async_result = pool.map_async(
filter_by_length_and_ratio,
[
(
se,
te,
args.input_src,
args.input_tgt,
tmp_filtered_src,
tmp_filtered_tgt,
tmp_removed_src,
tmp_removed_tgt,
args.min_length,
args.max_length,
args.ratio,
rank,
)
for rank, (se, te) in enumerate(zip(src_edges, tgt_edges))
],
)
while not async_result.ready():
t.update(counter.value)
with counter.get_lock():
counter.value = 0
sleep(0.1)
t.update(counter.value)
cat_results(
[args.output_src, args.output_tgt, args.removed_src, args.removed_tgt],
[tmp_filtered_src, tmp_filtered_tgt, tmp_removed_src, tmp_removed_tgt],
)
shutil.rmtree(tmp_dir)
if __name__ == "__main__":
main()
| NeMo-main | scripts/neural_machine_translation/length_ratio_filter.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import multiprocessing as mp
import os
import numpy as np
from matplotlib import pyplot as plt
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
# =============================================================================#
# Auxiliary methods
# =============================================================================#
worker_data = {
"tokenizer": None,
}
def init_tokenizer(library, tokenizer_model):
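# Pool initializer: builds the tokenizer once per worker process and caches it in
# worker_data so that line_len can reuse it without reloading the model for every line.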
tokenizer = get_nmt_tokenizer(library=library, tokenizer_model=tokenizer_model)
worker_data["tokenizer"] = tokenizer
def read_batch(fh, batch_size):
"""
Reads a batch-sized (or smaller) chunk of lines from the file handle.
"""
lines = []
for i in range(batch_size):
l = fh.readline()
if not l:
break
else:
lines.append(l.strip())
return lines
def tokenize_line(line, tokenizer):
"""
Returns a tokenized line
"""
line = line.rstrip("\n")
tokens = tokenizer.text_to_ids(line)
return tokens
def line_len(line, tokenizer=None):
"""
Returns a tokenized length of a text line
"""
if tokenizer is None:
tokenizer = worker_data["tokenizer"]
tokens = tokenize_line(line, tokenizer)
return len(tokens)
# =============================================================================#
# Main script
# =============================================================================#
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Collects statistics over tokenized dataset')
parser.add_argument('input_files', metavar='N', type=str, nargs='+', help='Input files to parse')
parser.add_argument(
'--tokenizer_library', type=str, required=True, help='Tokenizer library supported by NeMo (e.g. sentencepiece)'
)
parser.add_argument(
'--tokenizer_model', type=str, required=True, help='Path to pre-trained nemo-supported tokenizer model'
)
parser.add_argument(
'--num_workers', type=int, default=mp.cpu_count(), help='Number of workers (default to number of CPUs)'
)
parser.add_argument('--max_lines', type=int, default=-1, help='Max number of lines to parse')
parser.add_argument('--batch_size', type=int, default=10000000, help='Batch size to parse in parallel')
parser.add_argument('--out_dir', type=str, default="", help='Path to store data and plots')
args = parser.parse_args()
tokenizer = get_nmt_tokenizer(library=args.tokenizer_library, tokenizer_model=args.tokenizer_model,)
all_len = []
for fn in args.input_files:
print(f"Parsing fn = {fn}")
# read file
fh = open(fn)
# read all batches
while True:
lines = read_batch(fh, args.batch_size)
# move to next file when no lines are read
if not lines:
break
# tokenize lines
with mp.Pool(
args.num_workers, initializer=init_tokenizer, initargs=(args.tokenizer_library, args.tokenizer_model)
) as p:
all_len.extend(p.map(line_len, lines))
print(f"{fn}: Parsed {len(all_len)} lines")
# early stop, if required
if (args.max_lines > 0) and (len(all_len) >= args.max_lines):
all_len = all_len[: args.max_lines]
break
# early stop, if required
if (args.max_lines > 0) and (len(all_len) >= args.max_lines):
all_len = all_len[: args.max_lines]
break
# compute stats
# create the output directory, if needed
if args.out_dir:
os.makedirs(args.out_dir, exist_ok=True)
stats = {
"samples": int(len(all_len)),
"mean": float(np.mean(all_len)),
"stdev": float(np.std(all_len)),
"min": float(np.min(all_len)),
"max": float(np.max(all_len)),
"median": float(np.median(all_len)),
}
print(f"stats = \n{stats}")
# save all results
if args.out_dir:
if not os.path.exists(args.out_dir):
os.makedirs(args.out_dir, exist_ok=True)
fh = open(os.path.join(args.out_dir, "lengths.txt"), "w")
fh.writelines(["{l}\n".format(l=l) for l in all_len])
json.dump(stats, open(os.path.join(args.out_dir, "stats.json"), "w"))
fig = plt.hist(all_len)
plt.savefig(os.path.join(args.out_dir, "lengths_hist.pdf"))
| NeMo-main | scripts/neural_machine_translation/collect_tokenizer_dataset_stats.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import multiprocessing as mp
import re
import shutil
import warnings
from pathlib import Path
from time import sleep
import fasttext
from tqdm import tqdm
"""
Usage:
python filter_langs_nmt.py --input-src train.en \
--input-tgt train.de \
--output-src train_lang_filtered.en \
--output-tgt train_lang_filtered.de \
--source-lang en \
--target-lang de \
--removed-src train_garbage.en \
--removed-tgt train_garbage.de \
--fasttext-model lid.176.bin
"""
logging.basicConfig(level=logging.INFO)
# temp fix for the warning: "Warning : 'load_model' does not return WordVectorModel or SupervisedModel any more, but a 'FastText' object which is very similar."
fasttext.FastText.eprint = lambda x: None
def get_args():
parser = argparse.ArgumentParser(
description="It is a script for verifying language in machine translation data sets. If the script is used on "
"a parallel corpus, it verifies both a source and a target language. If number of jobs `--num-jobs` is bigger "
"than 1 than lines in an input file (or files if parallel corpus is checked) split equally between workers. "
"If `num_jobs > 1` is used, the best performance is achieved if dataset is shuffled and lines with different "
"lengths are distributed evenly in the input file. Filtered data is stored into `output_src`[, `--output-tgt`]"
" and removed lines are put into `removed_src`[, `--removed-tgt`] files. If language cannot be detected "
"(e.g. date), the line is removed. Working time on en-de wikimatrix (6.23M pairs: 700 MB German and 625 MB "
"English) from wmt20 on machine with 20 CPU cores: less than 1 minute."
)
parser.add_argument(
"--input-src",
"-s",
help="Path to the input file which has to contain text in language `source_lang`.",
required=True,
type=Path,
)
parser.add_argument(
"--input-tgt",
"-t",
help="Path to the input file which has to contain text in language `target_lang`. If not provided, data is "
"processed as monolingual.",
type=Path,
)
parser.add_argument(
"--output-src",
"-S",
help="Path to the file where filtered `input_src` will be saved.",
required=True,
type=Path,
)
parser.add_argument(
"--output-tgt", "-T", help="Path to the output target file", type=Path,
)
parser.add_argument(
"--source-lang",
"-l",
required=True,
help="Input language. For options see https://fasttext.cc/docs/en/language-identification.html.",
)
parser.add_argument(
"--target-lang",
"-L",
help="Output language. For options see https://fasttext.cc/docs/en/language-identification.html.",
)
parser.add_argument(
"--removed-src", "-r", required=True, help="Path to file where removed source lines will be saved", type=Path,
)
parser.add_argument(
"--removed-tgt", "-R", help="Path to file where removed target lines will be saved", type=Path,
)
parser.add_argument(
"--num-jobs",
"-j",
type=int,
help="Number of jobs. By default, the number of jobs is equal to the number of CPU cores.",
)
parser.add_argument(
"--fasttext-model",
"-m",
help="Path to fasttext model. The description and download links are here "
"https://fasttext.cc/docs/en/language-identification.html",
type=Path,
)
args = parser.parse_args()
if not (
args.output_tgt is None
and args.input_tgt is None
and args.target_lang is None
and args.removed_tgt is None
or args.output_tgt is not None
and args.input_tgt is not None
and args.target_lang is not None
and args.removed_tgt is not None
):
raise ValueError(
f"Arguments `input_tgt`, `output_tgt`, `target_lang`, `removed_tgt` have to be either `None` "
f"simultaneously or not `None` simultaneously. Given "
f"input_tgt={args.input_tgt}, output_tgt={args.output_tgt}, target_lang={args.target_lang}, "
f"removed_tgt={args.removed_tgt}"
)
args.input_src = args.input_src.expanduser()
if args.input_tgt is not None:
args.input_tgt = args.input_tgt.expanduser()
args.output_src = args.output_src.expanduser()
if args.output_tgt is not None:
args.output_tgt = args.output_tgt.expanduser()
args.removed_src = args.removed_src.expanduser()
if args.removed_tgt is not None:
args.removed_tgt = args.removed_tgt.expanduser()
args.fasttext_model = args.fasttext_model.expanduser()
return args
def get_lang(line, fasttext_model):
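# fasttext returns labels of the form '__label__en'; strip the prefix so that only the
# language code (e.g. 'en') is compared against --source-lang / --target-lang.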
labels, _ = fasttext_model.predict(line, k=1)
lang = labels[0].split('__')[-1]
return lang
def get_edges_in_1_file(fn, num_parts):
num_lines = 0
edges = [0]
with open(fn) as f:
i = 0
for l in f:
i += len(l.encode('utf-8'))
edges.append(i)
num_lines += 1
return [edges[int(i * num_lines / num_parts)] for i in range(num_parts)] + [edges[-1]], num_lines
def get_edges_and_num_lines(src_fn, tgt_fn, num_parts):
src_edges, src_num_lines = get_edges_in_1_file(src_fn, num_parts)
assert num_parts + 1 == len(src_edges)
src_edges = [(src_edges[i], src_edges[i + 1]) for i in range(len(src_edges) - 1)]
if tgt_fn is not None:
tgt_edges, tgt_num_lines = get_edges_in_1_file(tgt_fn, num_parts)
tgt_edges = [(tgt_edges[i], tgt_edges[i + 1]) for i in range(len(tgt_edges) - 1)]
if tgt_num_lines != src_num_lines:
raise ValueError(
f"Source {repr(src_fn)} and target {repr(tgt_fn)} files have different number of lines "
f"{src_num_lines} and {tgt_num_lines} correspondingly."
)
else:
tgt_edges = [None] * num_parts
assert len(src_edges) == num_parts
return src_edges, tgt_edges, src_num_lines
def filter_pairs(
src_edges,
tgt_edges,
input_src,
input_tgt,
filtered_dir_src,
filtered_dir_tgt,
removed_dir_src,
removed_dir_tgt,
source_lang,
target_lang,
fasttext_model,
rank,
):
global counter
fasttext_model = fasttext.load_model(str(fasttext_model))
output_src = filtered_dir_src / Path(f"rank{rank}")
output_src_removed = removed_dir_src / Path(f"rank{rank}")
output_tgt = filtered_dir_tgt / Path(f"rank{rank}")
output_tgt_removed = removed_dir_tgt / Path(f"rank{rank}")
with open(input_src) as in_src, open(input_tgt) as in_tgt, open(output_src, 'w') as out_src, open(
output_tgt, 'w'
) as out_tgt, open(output_src_removed, 'w') as out_r_src, open(output_tgt_removed, 'w') as out_r_tgt:
in_src.seek(src_edges[0])
in_tgt.seek(tgt_edges[0])
src_l, tgt_l, i = in_src.readline(), in_tgt.readline(), 0
if in_src.tell() > src_edges[1] or in_tgt.tell() > tgt_edges[1]:
return
while src_l and tgt_l:
with counter.get_lock():
counter.value += 1
src_l = src_l.strip()
tgt_l = tgt_l.strip()
src_lang = get_lang(src_l, fasttext_model)
if src_lang is not None:
tgt_lang = get_lang(tgt_l, fasttext_model)
if src_lang is None or tgt_lang is None or src_lang != source_lang or tgt_lang != target_lang:
out_r_src.write(src_l + '\n')
out_r_tgt.write(tgt_l + '\n')
else:
out_src.write(src_l + '\n')
out_tgt.write(tgt_l + '\n')
if in_src.tell() >= src_edges[1]:
if in_tgt.tell() < tgt_edges[1]:
raise ValueError(
f"Edges of target and source has to be reached simultaneously, whereas "
f"in_src.tell()={in_src.tell()}, in_tgt.tell()={in_tgt.tell()}, "
f"src_edges[1]={src_edges[1]}, tgt_edges[1]={tgt_edges[1]}."
)
break
if in_tgt.tell() >= tgt_edges[1]:
raise ValueError(
f"Edges of target and source has to be reached simultaneously, whereas "
f"in_src.tell()={in_src.tell()}, in_tgt.tell()={in_tgt.tell()}, "
f"src_edges[1]={src_edges[1]}, tgt_edges[1]={tgt_edges[1]}."
)
src_l, tgt_l, i = in_src.readline(), in_tgt.readline(), i + 1
with counter.get_lock():
counter.value += 1
def filter_singles(
src_edges, input_src, filtered_dir_src, removed_dir_src, source_lang, fasttext_model, rank,
):
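# Monolingual worker routine: checks only the source file and keeps a line when the
# detected language matches `source_lang`, writing results to per-rank files.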
logging.debug("filter singles")
global counter
fasttext_model = fasttext.load_model(str(fasttext_model))
output_src = filtered_dir_src / Path(f"rank{rank}")
output_src_removed = removed_dir_src / Path(f"rank{rank}")
with open(input_src) as in_f, open(output_src, 'w') as out_f, open(output_src_removed, 'w') as out_r_f:
in_f.seek(src_edges[0])
i, line = 0, in_f.readline()
if in_f.tell() > src_edges[1]:
return
while line:
with counter.get_lock():
counter.value += 1
line = line.strip()
in_lang = get_lang(line, fasttext_model)
if in_lang is None or in_lang != source_lang:
out_r_f.write(line + '\n')
else:
out_f.write(line + '\n')
if in_f.tell() >= src_edges[1]:
break
i, line = i + 1, in_f.readline()
with counter.get_lock():
counter.value += 1
def filter_by_lang(args):
(
src_edges,
tgt_edges,
input_src,
input_tgt,
filtered_dir_src,
filtered_dir_tgt,
removed_dir_src,
removed_dir_tgt,
source_lang,
target_lang,
fasttext_model,
rank,
) = args
logging.debug(f"filter by lang input_tgt: {input_tgt}")
if input_tgt is None:
if tgt_edges is not None:
warnings.warn("If input target is not provided `tgt_edges` argument is expected to be `None`")
filter_singles(
src_edges, input_src, filtered_dir_src, removed_dir_src, source_lang, fasttext_model, rank,
)
else:
filter_pairs(
src_edges,
tgt_edges,
input_src,
input_tgt,
filtered_dir_src,
filtered_dir_tgt,
removed_dir_src,
removed_dir_tgt,
source_lang,
target_lang,
fasttext_model,
rank,
)
def _cat_results(out_file, tmp_dir):
file_name_pattern = re.compile(r"/rank([1-9][0-9]*)|0$")
with out_file.open('w') as out_f:
for f in sorted(tmp_dir.iterdir()):
if not f.is_file():
warnings.warn(f"Unexpected not file {f}")
elif not file_name_pattern.search(str(f)):
warnings.warn(f"Unexpected file {f}")
else:
with f.open('r') as in_f:
for l in in_f:
out_f.write(l)
def cat_results(out_files, tmp_dirs):
for o_f, t_d in zip(out_files, tmp_dirs):
if o_f is None or t_d is None:
if o_f is not None or t_d is not None:
warnings.warn(
f"Output file and tmp directory are expected to be `None` simultaneously whereas tmp directory "
f"is {t_d} and output file is {o_f}."
)
else:
_cat_results(o_f, t_d)
counter = None
def init(args):
global counter
counter = args
def main():
args = get_args()
tmp_dir = Path("tmp")
i = 0
while tmp_dir.exists():
tmp_dir = Path("tmp" + str(i))
i += 1
tmp_filtered = tmp_dir / Path("filtered")
tmp_filtered_src = tmp_filtered / Path("src")
tmp_filtered_src.mkdir(parents=True, exist_ok=True)
if args.input_tgt is None:
tmp_filtered_tgt = None
else:
tmp_filtered_tgt = tmp_filtered / Path("tgt")
tmp_filtered_tgt.mkdir(parents=True, exist_ok=True)
tmp_removed = tmp_dir / Path("removed")
tmp_removed_src = tmp_removed / Path("src")
tmp_removed_src.mkdir(parents=True, exist_ok=True)
if args.input_tgt is None:
tmp_removed_tgt = None
else:
tmp_removed_tgt = tmp_removed / Path("tgt")
tmp_removed_tgt.mkdir(parents=True, exist_ok=True)
num_jobs = mp.cpu_count() if args.num_jobs is None else args.num_jobs
src_edges, tgt_edges, num_lines = get_edges_and_num_lines(args.input_src, args.input_tgt, num_jobs)
global counter
counter = mp.Value('i', 0)
t = tqdm(total=num_lines, desc="processed lines / total number of lines")
with mp.Pool(num_jobs, initializer=init, initargs=(counter,)) as pool:
async_result = pool.map_async(
filter_by_lang,
[
(
se,
te,
args.input_src,
args.input_tgt,
tmp_filtered_src,
tmp_filtered_tgt,
tmp_removed_src,
tmp_removed_tgt,
args.source_lang,
args.target_lang,
args.fasttext_model,
rank,
)
for rank, (se, te) in enumerate(zip(src_edges, tgt_edges))
],
)
while not async_result.ready():
t.update(counter.value)
with counter.get_lock():
counter.value = 0
sleep(0.1)
t.update(counter.value)
cat_results(
[args.output_src, args.output_tgt, args.removed_src, args.removed_tgt],
[tmp_filtered_src, tmp_filtered_tgt, tmp_removed_src, tmp_removed_tgt],
)
shutil.rmtree(tmp_dir)
if __name__ == "__main__":
main()
| NeMo-main | scripts/neural_machine_translation/filter_langs_nmt.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script takes as input XXXX.json files
(i.e., the output of nmt_transformer_infer.py --write_timing)
and creates plots XXXX.PLOT_NAME.<PLOTS_EXT> alongside them.
"""
import json
import os
import sys
from matplotlib import pyplot as plt
# =============================================================================#
# Control Variables
# =============================================================================#
PLOTS_EXT = "pdf"
PLOT_TITLE = False
PLOT_XLABEL = True
PLOT_YLABEL = True
PLOT_LABEL_FONT_SIZE = 16
PLOT_GRID = True
# =============================================================================#
# Helper functions
# =============================================================================#
def plot_timing(lengths, timings, lengths_name, timings_name, fig=None):
if fig is None:
fig = plt.figure()
plt.scatter(lengths, timings, label=timings_name)
if PLOT_XLABEL:
plt.xlabel(f"{lengths_name} [tokens]", fontsize=PLOT_LABEL_FONT_SIZE)
if PLOT_YLABEL:
plt.ylabel(f"{timings_name} [sec]", fontsize=PLOT_LABEL_FONT_SIZE)
if PLOT_GRID:
plt.grid(True)
if PLOT_TITLE:
plt.title(f"{timings_name} vs. {lengths_name}")
plt.xticks(fontsize=PLOT_LABEL_FONT_SIZE)
plt.yticks(fontsize=PLOT_LABEL_FONT_SIZE)
plt.tight_layout()
return fig
# =============================================================================#
# Main script
# =============================================================================#
if __name__ == "__main__":
print("Usage: plot_detailed_timing.py <JSON FILE> <SJON FILE> ...")
for timing_fn in sys.argv[1:]:
# load data
print(f"Parsing file = {timing_fn}")
data = json.load(open(timing_fn))
# plot data
gifs_dict = {}
gifs_dict["encoder-src_len"] = plot_timing(
lengths=data["mean_src_length"],
timings=data["encoder"],
lengths_name="src length",
timings_name="encoder",
)
gifs_dict["sampler-src_len"] = plot_timing(
lengths=data["mean_src_length"],
timings=data["sampler"],
lengths_name="src length",
timings_name="sampler",
)
gifs_dict["sampler-tgt_len"] = plot_timing(
lengths=data["mean_tgt_length"],
timings=data["sampler"],
lengths_name="tgt length",
timings_name="sampler",
)
# save data
base_fn = os.path.splitext(timing_fn)[0]
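# e.g. "run1.json" -> "run1.encoder-src_len.pdf", "run1.sampler-src_len.pdf", "run1.sampler-tgt_len.pdf"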
for name, fig in gifs_dict.items():
plot_fn = f"{base_fn}.{name}.{PLOTS_EXT}"
print(f"Saving pot = {plot_fn}")
fig.savefig(plot_fn)
| NeMo-main | scripts/neural_machine_translation/plot_detailed_timing.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script converts old Jasper/QuartzNet models from NeMo 0.11.* to NeMo v1.0.0*
"""
import argparse
import torch
from omegaconf import DictConfig
from ruamel.yaml import YAML
import nemo.collections.asr as nemo_asr
from nemo.utils import logging
def get_parser():
parser = argparse.ArgumentParser(description="Converts old Jasper/QuartzNet models to NeMo v1.0beta")
parser.add_argument("--config_path", default=None, required=True, help="Path to model config (NeMo v1.0beta)")
parser.add_argument("--encoder_ckpt", default=None, required=True, help="Encoder checkpoint path")
parser.add_argument("--decoder_ckpt", default=None, required=True, help="Decoder checkpoint path")
parser.add_argument("--output_path", default=None, required=True, help="Output checkpoint path (should be .nemo)")
parser.add_argument(
"--model_type",
default='asr',
type=str,
choices=['asr', 'speech_label', 'speaker'],
help="Type of decoder used by the model.",
)
return parser
def main(config_path, encoder_ckpt, decoder_ckpt, output_path, model_type):
yaml = YAML(typ='safe')
with open(config_path) as f:
params = yaml.load(f)
model = None
if model_type == 'asr':
logging.info("Creating ASR NeMo 1.0 model")
model = nemo_asr.models.EncDecCTCModel(cfg=DictConfig(params['model']))
elif model_type == 'speech_label':
logging.info("Creating speech label NeMo 1.0 model")
model = nemo_asr.models.EncDecClassificationModel(cfg=DictConfig(params['model']))
else:
logging.info("Creating Speaker Recognition NeMo 1.0 model")
model = nemo_asr.models.EncDecSpeakerLabelModel(cfg=DictConfig(params['model']))
model.encoder.load_state_dict(torch.load(encoder_ckpt))
model.decoder.load_state_dict(torch.load(decoder_ckpt))
logging.info("Succesfully ported old checkpoint")
model.save_to(output_path)
logging.info("new model saved at {}".format(output_path))
if __name__ == "__main__":
args = get_parser().parse_args()
main(args.config_path, args.encoder_ckpt, args.decoder_ckpt, args.output_path, args.model_type)
| NeMo-main | scripts/nemo_legacy_import/asr_checkpoint_port.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
import pytorch_lightning as pl
from omegaconf import OmegaConf, open_dict
from nemo.collections.nlp.parts.nlp_overrides import NLPSaveRestoreConnector
from nemo.core import ModelPT
from nemo.core.config import TrainerConfig
def get_args(argv):
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=f"Update NLP models trained on previous versions to current version ",
)
parser.add_argument("source", help="Source .nemo file")
parser.add_argument("out", help="Location to write result to")
parser.add_argument("--megatron-legacy", help="If the source model is megatron-bert trained on NeMo < 1.5")
parser.add_argument(
"--megatron-checkpoint",
type=str,
help="Path of the MegatronBert nemo checkpoint converted from MegatronLM using megatron_lm_ckpt_to_nemo.py file (Not NLP model checkpoint)",
)
parser.add_argument("--verbose", default=None, help="Verbose level for logging, numeric")
args = parser.parse_args(argv)
return args
def nemo_convert(argv):
args = get_args(argv)
loglevel = logging.INFO
# assuming loglevel is bound to the string value obtained from the
# command line argument. Convert to upper case to allow the user to
# specify --verbose=DEBUG or --verbose=debug
if args.verbose is not None:
numeric_level = getattr(logging, args.verbose.upper(), None)
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % args.verbose)
loglevel = numeric_level
logger = logging.getLogger(__name__)
if logger.handlers:
for handler in logger.handlers:
logger.removeHandler(handler)
logging.basicConfig(level=loglevel, format='%(asctime)s [%(levelname)s] %(message)s')
logging.info("Logging level set to {}".format(loglevel))
"""Convert a .nemo saved model trained on previous versions of nemo into a nemo fie with current version."""
nemo_in = args.source
out = args.out
# Create a PL trainer object which is required for restoring Megatron models
cfg_trainer = TrainerConfig(
gpus=1,
accelerator="ddp",
num_nodes=1,
# Need to set the following two to False as ExpManager will take care of them differently.
logger=False,
enable_checkpointing=False,
)
trainer = pl.Trainer(cfg_trainer)
logging.info("Restoring NeMo model from '{}'".format(nemo_in))
try:
# If the megatron based NLP model was trained on NeMo < 1.5, then we need to update the lm_checkpoint on the model config
if args.megatron_legacy:
if args.megatron_checkpoint:
connector = NLPSaveRestoreConnector()
model_cfg = ModelPT.restore_from(
restore_path=nemo_in, save_restore_connector=connector, trainer=trainer, return_config=True
)
OmegaConf.set_struct(model_cfg, True)
with open_dict(model_cfg):
model_cfg.language_model.lm_checkpoint = args.megatron_checkpoint
model_cfg['megatron_legacy'] = True
model_cfg['masked_softmax_fusion'] = False
model_cfg['bias_gelu_fusion'] = False
model = ModelPT.restore_from(
restore_path=nemo_in,
save_restore_connector=connector,
trainer=trainer,
override_config_path=model_cfg,
)
else:
logging.error("Megatron Checkpoint must be provided if Megatron legacy is chosen")
else:
model = ModelPT.restore_from(restore_path=nemo_in, trainer=trainer)
logging.info("Model {} restored from '{}'".format(model.cfg.target, nemo_in))
# Save the model
model.save_to(out)
logging.info("Successfully converted to {}".format(out))
del model
except Exception as e:
logging.error(
"Failed to restore model from NeMo file : {}. Please make sure you have the latest NeMo package installed with [all] dependencies.".format(
nemo_in
)
)
raise e
if __name__ == '__main__':
nemo_convert(sys.argv[1:])
| NeMo-main | scripts/nemo_legacy_import/nlp_checkpoint_port.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# This script converts an existing audio dataset with a manifest to
# a tarred and sharded audio dataset that can be read by the
# TarredAudioToTextDataLayer.
# Please make sure your audio_filepath DOES NOT CONTAIN '-sub'!
# This suffix is used internally to handle files that share a filename but have different offsets
# (see the create_shard function for details).
# Bucketing can help to improve the training speed. You may use --buckets_num to specify the number of buckets.
# It creates multiple tarred datasets, one per bucket, based on the audio durations.
# The range of [min_duration, max_duration) is split into equal sized buckets.
# It is recommended to use --sort_in_shards to speed up training by reducing the amount of padding in the batches.
# More info on how to use the bucketing feature: https://docs.nvidia.com/deeplearning/nemo/user-guide/docs/en/main/asr/datasets.html
# If a valid NVIDIA DALI version is installed, the script will also generate the corresponding DALI index files that need to be
# supplied to the config in order to utilize webdataset for efficient large dataset handling.
# NOTE: DALI + Webdataset is NOT compatible with bucketing support!
# Usage:
1) Creating a new tarfile dataset
python convert_to_tarred_audio_dataset.py \
--manifest_path=<path to the manifest file> \
--target_dir=<path to output directory> \
--num_shards=<number of tarfiles that will contain the audio> \
--max_duration=<float representing maximum duration of audio samples> \
--min_duration=<float representing minimum duration of audio samples> \
--shuffle --shuffle_seed=1 \
--sort_in_shards \
--workers=-1
2) Concatenating more tarfiles to a pre-existing tarred dataset
python convert_to_tarred_audio_dataset.py \
--manifest_path=<path to the tarred manifest file> \
--metadata_path=<path to the metadata.yaml (or metadata_version_{X}.yaml) file> \
--target_dir=<path to output directory where the original tarfiles are contained> \
--max_duration=<float representing maximum duration of audio samples> \
--min_duration=<float representing minimum duration of audio samples> \
--shuffle --shuffle_seed=1 \
--sort_in_shards \
--workers=-1 \
--concat_manifest_paths \
<space separated paths to 1 or more manifest files to concatenate into the original tarred dataset>
3) Writing an empty metadata file
python convert_to_tarred_audio_dataset.py \
--target_dir=<path to output directory> \
# any other optional argument
--num_shards=8 \
--max_duration=16.7 \
--min_duration=0.01 \
--shuffle \
--workers=-1 \
--sort_in_shards \
--shuffle_seed=1 \
--write_metadata
"""
import argparse
import copy
import json
import os
import random
import tarfile
from collections import defaultdict
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any, List, Optional
from joblib import Parallel, delayed
from omegaconf import DictConfig, OmegaConf, open_dict
try:
import create_dali_tarred_dataset_index as dali_index
DALI_INDEX_SCRIPT_AVAILABLE = True
except (ImportError, ModuleNotFoundError, FileNotFoundError):
DALI_INDEX_SCRIPT_AVAILABLE = False
parser = argparse.ArgumentParser(
description="Convert an existing ASR dataset to tarballs compatible with TarredAudioToTextDataLayer."
)
parser.add_argument(
"--manifest_path", default=None, type=str, required=False, help="Path to the existing dataset's manifest."
)
parser.add_argument(
'--concat_manifest_paths',
nargs='+',
default=None,
type=str,
required=False,
help="Path to the additional dataset's manifests that will be concatenated with base dataset.",
)
# Optional arguments
parser.add_argument(
"--target_dir",
default='./tarred',
type=str,
help="Target directory for resulting tarballs and manifest. Defaults to `./tarred`. Creates the path if necessary.",
)
parser.add_argument(
"--metadata_path", required=False, default=None, type=str, help="Path to metadata file for the dataset.",
)
parser.add_argument(
"--num_shards",
default=-1,
type=int,
help="Number of shards (tarballs) to create. Used for partitioning data among workers.",
)
parser.add_argument(
'--max_duration',
default=None,
required=True,
type=float,
help='Maximum duration of audio clip in the dataset. By default, it is None and is required to be set.',
)
parser.add_argument(
'--min_duration',
default=None,
type=float,
help='Minimum duration of audio clip in the dataset. By default, it is None and will not filter files.',
)
parser.add_argument(
"--shuffle",
action='store_true',
help="Whether or not to randomly shuffle the samples in the manifest before tarring/sharding.",
)
parser.add_argument(
"--keep_files_together",
action='store_true',
help="Whether or not to keep entries from the same file (but different offsets) together when sorting before tarring/sharding.",
)
parser.add_argument(
"--sort_in_shards",
action='store_true',
help="Whether or not to sort samples inside the shards based on their duration.",
)
parser.add_argument(
"--buckets_num", type=int, default=1, help="Number of buckets to create based on duration.",
)
parser.add_argument("--shuffle_seed", type=int, default=None, help="Random seed for use if shuffling is enabled.")
parser.add_argument(
'--write_metadata',
action='store_true',
help=(
"Flag to write a blank metadata with the current call config. "
"Note that the metadata will not contain the number of shards, "
"and it must be filled out by the user."
),
)
parser.add_argument(
"--no_shard_manifests",
action='store_true',
help="Do not write sharded manifests along with the aggregated manifest.",
)
parser.add_argument('--workers', type=int, default=1, help='Number of worker processes')
args = parser.parse_args()
@dataclass
class ASRTarredDatasetConfig:
num_shards: int = -1
shuffle: bool = False
max_duration: Optional[float] = None
min_duration: Optional[float] = None
shuffle_seed: Optional[int] = None
sort_in_shards: bool = True
shard_manifests: bool = True
keep_files_together: bool = False
@dataclass
class ASRTarredDatasetMetadata:
created_datetime: Optional[str] = None
version: int = 0
num_samples_per_shard: Optional[int] = None
is_concatenated_manifest: bool = False
dataset_config: Optional[ASRTarredDatasetConfig] = ASRTarredDatasetConfig()
history: Optional[List[Any]] = field(default_factory=lambda: [])
def __post_init__(self):
self.created_datetime = self.get_current_datetime()
def get_current_datetime(self):
return datetime.now().strftime("%m-%d-%Y %H-%M-%S")
@classmethod
def from_config(cls, config: DictConfig):
obj = cls()
obj.__dict__.update(**config)
return obj
@classmethod
def from_file(cls, filepath: str):
config = OmegaConf.load(filepath)
return ASRTarredDatasetMetadata.from_config(config=config)
class ASRTarredDatasetBuilder:
"""
Helper class that constructs a tarred dataset from scratch, or concatenates tarred datasets
together and constructs manifests for them.
"""
def __init__(self):
self.config = None
def configure(self, config: ASRTarredDatasetConfig):
"""
Sets the config generated from command line overrides.
Args:
config: ASRTarredDatasetConfig dataclass object.
"""
self.config = config # type: ASRTarredDatasetConfig
if self.config.num_shards < 0:
raise ValueError("`num_shards` must be > 0. Please fill in the metadata information correctly.")
def create_new_dataset(self, manifest_path: str, target_dir: str = "./tarred/", num_workers: int = 0):
"""
Creates a new tarred dataset from a given manifest file.
Args:
manifest_path: Path to the original ASR manifest.
target_dir: Output directory.
num_workers: Integer denoting number of parallel worker processes which will write tarfiles.
Defaults to 1 - which denotes sequential worker process.
Output:
Writes tarfiles, along with the tarred dataset compatible manifest file.
Also preserves a record of the metadata used to construct this tarred dataset.
"""
if self.config is None:
raise ValueError("Config has not been set. Please call `configure(config: ASRTarredDatasetConfig)`")
if manifest_path is None:
raise FileNotFoundError("Manifest filepath cannot be None !")
config = self.config # type: ASRTarredDatasetConfig
if not os.path.exists(target_dir):
os.makedirs(target_dir)
# Read the existing manifest
entries, total_duration, filtered_entries, filtered_duration = self._read_manifest(manifest_path, config)
if len(filtered_entries) > 0:
print(f"Filtered {len(filtered_entries)} files which amounts to {filtered_duration} seconds of audio.")
print(
f"After filtering, manifest has {len(entries)} files which amounts to {total_duration} seconds of audio."
)
if len(entries) == 0:
print("No tarred dataset was created as there were 0 valid samples after filtering!")
return
if config.shuffle:
random.seed(config.shuffle_seed)
print("Shuffling...")
if config.keep_files_together:
filename_entries = defaultdict(list)
for ent in entries:
filename_entries[ent["audio_filepath"]].append(ent)
filenames = list(filename_entries.keys())
random.shuffle(filenames)
shuffled_entries = []
for filename in filenames:
shuffled_entries += filename_entries[filename]
entries = shuffled_entries
else:
random.shuffle(entries)
# Create shards and updated manifest entries
print(f"Number of samples added : {len(entries)}")
print(f"Remainder: {len(entries) % config.num_shards}")
start_indices = []
end_indices = []
# Build indices
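# Each shard receives exactly len(entries) // config.num_shards consecutive entries; any
# remainder beyond the last full shard is reported and discarded so that all shards
# contain the same number of samples.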
for i in range(config.num_shards):
start_idx = (len(entries) // config.num_shards) * i
end_idx = start_idx + (len(entries) // config.num_shards)
print(f"Shard {i} has entries {start_idx} ~ {end_idx}")
files = set()
for ent_id in range(start_idx, end_idx):
files.add(entries[ent_id]["audio_filepath"])
print(f"Shard {i} contains {len(files)} files")
if i == config.num_shards - 1:
# We discard in order to have the same number of entries per shard.
print(f"Have {len(entries) - end_idx} entries left over that will be discarded.")
start_indices.append(start_idx)
end_indices.append(end_idx)
manifest_folder, _ = os.path.split(manifest_path)
with Parallel(n_jobs=num_workers, verbose=config.num_shards) as parallel:
# Call parallel tarfile construction
new_entries_list = parallel(
delayed(self._create_shard)(entries[start_idx:end_idx], target_dir, i, manifest_folder)
for i, (start_idx, end_idx) in enumerate(zip(start_indices, end_indices))
)
if config.shard_manifests:
sharded_manifests_dir = target_dir + '/sharded_manifests'
if not os.path.exists(sharded_manifests_dir):
os.makedirs(sharded_manifests_dir)
for manifest in new_entries_list:
shard_id = manifest[0]['shard_id']
new_manifest_shard_path = os.path.join(sharded_manifests_dir, f'manifest_{shard_id}.json')
with open(new_manifest_shard_path, 'w', encoding='utf-8') as m2:
for entry in manifest:
json.dump(entry, m2)
m2.write('\n')
# Flatten the list of list of entries to a list of entries
new_entries = [sample for manifest in new_entries_list for sample in manifest]
del new_entries_list
print("Total number of entries in manifest :", len(new_entries))
# Write manifest
new_manifest_path = os.path.join(target_dir, 'tarred_audio_manifest.json')
with open(new_manifest_path, 'w', encoding='utf-8') as m2:
for entry in new_entries:
json.dump(entry, m2)
m2.write('\n')
# Write metadata (default metadata for new datasets)
new_metadata_path = os.path.join(target_dir, 'metadata.yaml')
metadata = ASRTarredDatasetMetadata()
# Update metadata
metadata.dataset_config = config
metadata.num_samples_per_shard = len(new_entries) // config.num_shards
# Write metadata
metadata_yaml = OmegaConf.structured(metadata)
OmegaConf.save(metadata_yaml, new_metadata_path, resolve=True)
def create_concatenated_dataset(
self,
base_manifest_path: str,
manifest_paths: List[str],
metadata: ASRTarredDatasetMetadata,
target_dir: str = "./tarred_concatenated/",
num_workers: int = 1,
):
"""
Creates new tarfiles in order to create a concatenated dataset, whose manifest contains the data for
both the original dataset as well as the new data submitted in manifest paths.
Args:
base_manifest_path: Path to the manifest file which contains the information for the original
tarred dataset (with flattened paths).
manifest_paths: List of one or more paths to manifest files that will be concatenated with above
base tarred dataset.
metadata: ASRTarredDatasetMetadata dataclass instance with overrides from command line.
            target_dir: Output directory.
            num_workers: Integer denoting number of parallel worker processes which will write tarfiles.
        Output:
            Writes tarfiles with indices mapping to a "concatenated" tarred dataset,
along with the tarred dataset compatible manifest file which includes information
about all the datasets that comprise the concatenated dataset.
Also preserves a record of the metadata used to construct this tarred dataset.
"""
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if base_manifest_path is None:
raise FileNotFoundError("Base manifest filepath cannot be None !")
if manifest_paths is None or len(manifest_paths) == 0:
raise FileNotFoundError("List of additional manifest filepaths cannot be None !")
config = ASRTarredDatasetConfig(**(metadata.dataset_config))
# Read the existing manifest (no filtering here)
base_entries, _, _, _ = self._read_manifest(base_manifest_path, config)
print(f"Read base manifest containing {len(base_entries)} samples.")
# Precompute number of samples per shard
if metadata.num_samples_per_shard is None:
num_samples_per_shard = len(base_entries) // config.num_shards
else:
num_samples_per_shard = metadata.num_samples_per_shard
print("Number of samples per shard :", num_samples_per_shard)
# Compute min and max duration and update config (if no metadata passed)
print(f"Selected max duration : {config.max_duration}")
print(f"Selected min duration : {config.min_duration}")
entries = []
for new_manifest_idx in range(len(manifest_paths)):
new_entries, total_duration, filtered_new_entries, filtered_duration = self._read_manifest(
manifest_paths[new_manifest_idx], config
)
if len(filtered_new_entries) > 0:
print(
f"Filtered {len(filtered_new_entries)} files which amounts to {filtered_duration:0.2f}"
f" seconds of audio from manifest {manifest_paths[new_manifest_idx]}."
)
            print(
                f"After filtering, manifest has {len(new_entries)} files which amounts to {total_duration} seconds of audio."
            )
entries.extend(new_entries)
if len(entries) == 0:
print("No tarred dataset was created as there were 0 valid samples after filtering!")
return
if config.shuffle:
random.seed(config.shuffle_seed)
print("Shuffling...")
random.shuffle(entries)
# Drop last section of samples that cannot be added onto a chunk
        drop_count = len(entries) % num_samples_per_shard
        total_new_entries = len(entries)
        if drop_count > 0:
            entries = entries[:-drop_count]
print(
f"Dropping {drop_count} samples from total new samples {total_new_entries} since they cannot "
f"be added into a uniformly sized chunk."
)
# Create shards and updated manifest entries
num_added_shards = len(entries) // num_samples_per_shard
print(f"Number of samples in base dataset : {len(base_entries)}")
print(f"Number of samples in additional datasets : {len(entries)}")
print(f"Number of added shards : {num_added_shards}")
print(f"Remainder: {len(entries) % num_samples_per_shard}")
start_indices = []
end_indices = []
shard_indices = []
for i in range(num_added_shards):
start_idx = (len(entries) // num_added_shards) * i
end_idx = start_idx + (len(entries) // num_added_shards)
shard_idx = i + config.num_shards
print(f"Shard {shard_idx} has entries {start_idx + len(base_entries)} ~ {end_idx + len(base_entries)}")
start_indices.append(start_idx)
end_indices.append(end_idx)
shard_indices.append(shard_idx)
manifest_folder, _ = os.path.split(base_manifest_path)
with Parallel(n_jobs=num_workers, verbose=num_added_shards) as parallel:
# Call parallel tarfile construction
new_entries_list = parallel(
delayed(self._create_shard)(entries[start_idx:end_idx], target_dir, shard_idx, manifest_folder)
for i, (start_idx, end_idx, shard_idx) in enumerate(zip(start_indices, end_indices, shard_indices))
)
if config.shard_manifests:
sharded_manifests_dir = target_dir + '/sharded_manifests'
if not os.path.exists(sharded_manifests_dir):
os.makedirs(sharded_manifests_dir)
for manifest in new_entries_list:
shard_id = manifest[0]['shard_id']
new_manifest_shard_path = os.path.join(sharded_manifests_dir, f'manifest_{shard_id}.json')
with open(new_manifest_shard_path, 'w', encoding='utf-8') as m2:
for entry in manifest:
json.dump(entry, m2)
m2.write('\n')
# Flatten the list of list of entries to a list of entries
new_entries = [sample for manifest in new_entries_list for sample in manifest]
del new_entries_list
# Write manifest
if metadata is None:
new_version = 1 # start with `1`, where `0` indicates the base manifest + dataset
else:
new_version = metadata.version + 1
print("Total number of entries in manifest :", len(base_entries) + len(new_entries))
new_manifest_path = os.path.join(target_dir, f'tarred_audio_manifest_version_{new_version}.json')
with open(new_manifest_path, 'w', encoding='utf-8') as m2:
# First write all the entries of base manifest
for entry in base_entries:
json.dump(entry, m2)
m2.write('\n')
# Finally write the new entries
for entry in new_entries:
json.dump(entry, m2)
m2.write('\n')
# Preserve historical metadata
base_metadata = metadata
# Write metadata (updated metadata for concatenated datasets)
new_metadata_path = os.path.join(target_dir, f'metadata_version_{new_version}.yaml')
metadata = ASRTarredDatasetMetadata()
# Update config
config.num_shards = config.num_shards + num_added_shards
# Update metadata
metadata.version = new_version
metadata.dataset_config = config
metadata.num_samples_per_shard = num_samples_per_shard
metadata.is_concatenated_manifest = True
metadata.created_datetime = metadata.get_current_datetime()
# Attach history
current_metadata = OmegaConf.structured(base_metadata.history)
metadata.history = current_metadata
# Write metadata
metadata_yaml = OmegaConf.structured(metadata)
OmegaConf.save(metadata_yaml, new_metadata_path, resolve=True)
def _read_manifest(self, manifest_path: str, config: ASRTarredDatasetConfig):
"""Read and filters data from the manifest"""
# Read the existing manifest
entries = []
total_duration = 0.0
filtered_entries = []
filtered_duration = 0.0
with open(manifest_path, 'r', encoding='utf-8') as m:
for line in m:
entry = json.loads(line)
if (config.max_duration is None or entry['duration'] < config.max_duration) and (
config.min_duration is None or entry['duration'] >= config.min_duration
):
entries.append(entry)
total_duration += entry["duration"]
else:
filtered_entries.append(entry)
filtered_duration += entry['duration']
return entries, total_duration, filtered_entries, filtered_duration
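    # Illustrative note (added; the path and values below are hypothetical): each line of the input manifest
    # is a standalone JSON object. An entry that `_read_manifest` keeps for min_duration=1.0 / max_duration=20.0
    # looks like:
    #   {"audio_filepath": "/data/audio/sample_0001.wav", "duration": 4.32, "text": "hello world"}
    # Entries whose `duration` falls outside the [min_duration, max_duration) window are returned in
    # `filtered_entries` together with their accumulated `filtered_duration`.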
def _create_shard(self, entries, target_dir, shard_id, manifest_folder):
"""Creates a tarball containing the audio files from `entries`.
"""
if self.config.sort_in_shards:
entries.sort(key=lambda x: x["duration"], reverse=False)
new_entries = []
tar = tarfile.open(os.path.join(target_dir, f'audio_{shard_id}.tar'), mode='w', dereference=True)
count = dict()
for entry in entries:
# We squash the filename since we do not preserve directory structure of audio files in the tarball.
if os.path.exists(entry["audio_filepath"]):
audio_filepath = entry["audio_filepath"]
else:
audio_filepath = os.path.join(manifest_folder, entry["audio_filepath"])
if not os.path.exists(audio_filepath):
raise FileNotFoundError(f"Could not find {entry['audio_filepath']}!")
base, ext = os.path.splitext(audio_filepath)
base = base.replace('/', '_')
# Need the following replacement as long as WebDataset splits on first period
base = base.replace('.', '_')
squashed_filename = f'{base}{ext}'
if squashed_filename not in count:
tar.add(audio_filepath, arcname=squashed_filename)
to_write = squashed_filename
count[squashed_filename] = 1
else:
to_write = base + "-sub" + str(count[squashed_filename]) + ext
count[squashed_filename] += 1
new_entry = {
'audio_filepath': to_write,
'duration': entry['duration'],
'shard_id': shard_id, # Keep shard ID for recordkeeping
}
if 'label' in entry:
new_entry['label'] = entry['label']
if 'text' in entry:
new_entry['text'] = entry['text']
if 'offset' in entry:
new_entry['offset'] = entry['offset']
if 'lang' in entry:
new_entry['lang'] = entry['lang']
new_entries.append(new_entry)
tar.close()
return new_entries
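    # Worked example (added for illustration; the path is hypothetical): an entry pointing at
    #   /data/audio/clip.part1.wav
    # is added to the tarball as '_data_audio_clip_part1.wav' (slashes and inner dots squashed to underscores
    # so that WebDataset's split-on-first-period logic works), while a second entry referencing the same file
    # is written to the manifest as '_data_audio_clip_part1-sub1.wav' without adding the audio to the tar again.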
@classmethod
def setup_history(cls, base_metadata: ASRTarredDatasetMetadata, history: List[Any]):
if 'history' in base_metadata.keys():
for history_val in base_metadata.history:
cls.setup_history(history_val, history)
if base_metadata is not None:
metadata_copy = copy.deepcopy(base_metadata)
with open_dict(metadata_copy):
metadata_copy.pop('history', None)
history.append(metadata_copy)
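# A minimal usage sketch of ASRTarredDatasetBuilder, added for illustration only. The manifest path, output
# directory and config values below are placeholders, and this helper is never called by the script itself.
def _example_build_tarred_dataset_sketch():
    """Sketch: configure a builder and write a new tarred dataset from a plain NeMo manifest."""
    example_config = ASRTarredDatasetConfig(
        num_shards=8,
        shuffle=True,
        max_duration=20.0,
        min_duration=1.0,
        shuffle_seed=0,
        sort_in_shards=True,
        shard_manifests=True,
        keep_files_together=False,
    )
    builder = ASRTarredDatasetBuilder()
    builder.configure(example_config)
    # Writes audio_{0..7}.tar, tarred_audio_manifest.json, metadata.yaml and (optionally) sharded manifests.
    builder.create_new_dataset(manifest_path="/path/to/train_manifest.json", target_dir="./tarred/")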
def main():
if args.buckets_num > 1:
bucket_length = (args.max_duration - args.min_duration) / float(args.buckets_num)
for i in range(args.buckets_num):
min_duration = args.min_duration + i * bucket_length
max_duration = min_duration + bucket_length
if i == args.buckets_num - 1:
# add a small number to cover the samples with exactly duration of max_duration in the last bucket.
max_duration += 1e-5
target_dir = os.path.join(args.target_dir, f"bucket{i+1}")
print(f"Creating bucket {i+1} with min_duration={min_duration} and max_duration={max_duration} ...")
print(f"Results are being saved at: {target_dir}.")
create_tar_datasets(min_duration=min_duration, max_duration=max_duration, target_dir=target_dir)
print(f"Bucket {i+1} is created.")
else:
create_tar_datasets(min_duration=args.min_duration, max_duration=args.max_duration, target_dir=args.target_dir)
def create_tar_datasets(min_duration: float, max_duration: float, target_dir: str):
builder = ASRTarredDatasetBuilder()
shard_manifests = False if args.no_shard_manifests else True
if args.write_metadata:
metadata = ASRTarredDatasetMetadata()
dataset_cfg = ASRTarredDatasetConfig(
num_shards=args.num_shards,
shuffle=args.shuffle,
max_duration=max_duration,
min_duration=min_duration,
shuffle_seed=args.shuffle_seed,
sort_in_shards=args.sort_in_shards,
shard_manifests=shard_manifests,
keep_files_together=args.keep_files_together,
)
metadata.dataset_config = dataset_cfg
output_path = os.path.join(target_dir, 'default_metadata.yaml')
OmegaConf.save(metadata, output_path, resolve=True)
print(f"Default metadata written to {output_path}")
exit(0)
if args.concat_manifest_paths is None or len(args.concat_manifest_paths) == 0:
print("Creating new tarred dataset ...")
# Create a tarred dataset from scratch
config = ASRTarredDatasetConfig(
num_shards=args.num_shards,
shuffle=args.shuffle,
max_duration=max_duration,
min_duration=min_duration,
shuffle_seed=args.shuffle_seed,
sort_in_shards=args.sort_in_shards,
shard_manifests=shard_manifests,
keep_files_together=args.keep_files_together,
)
builder.configure(config)
builder.create_new_dataset(manifest_path=args.manifest_path, target_dir=target_dir, num_workers=args.workers)
else:
if args.buckets_num > 1:
raise ValueError("Concatenation feature does not support buckets_num > 1.")
print("Concatenating multiple tarred datasets ...")
# Implicitly update config from base details
if args.metadata_path is not None:
metadata = ASRTarredDatasetMetadata.from_file(args.metadata_path)
else:
raise ValueError("`metadata` yaml file path must be provided!")
# Preserve history
history = []
builder.setup_history(OmegaConf.structured(metadata), history)
metadata.history = history
# Add command line overrides (everything other than num_shards)
metadata.dataset_config.max_duration = max_duration
metadata.dataset_config.min_duration = min_duration
metadata.dataset_config.shuffle = args.shuffle
metadata.dataset_config.shuffle_seed = args.shuffle_seed
metadata.dataset_config.sort_in_shards = args.sort_in_shards
metadata.dataset_config.shard_manifests = shard_manifests
builder.configure(metadata.dataset_config)
# Concatenate a tarred dataset onto a previous one
builder.create_concatenated_dataset(
base_manifest_path=args.manifest_path,
manifest_paths=args.concat_manifest_paths,
metadata=metadata,
target_dir=target_dir,
num_workers=args.workers,
)
if DALI_INDEX_SCRIPT_AVAILABLE and dali_index.INDEX_CREATOR_AVAILABLE:
print("Constructing DALI Tarfile Index - ", target_dir)
index_config = dali_index.DALITarredIndexConfig(tar_dir=target_dir, workers=args.workers)
dali_index.main(index_config)
if __name__ == "__main__":
main()
| NeMo-main | scripts/speech_recognition/convert_to_tarred_audio_dataset.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import logging
import os
from dataclasses import dataclass
import hydra
from hydra.core.config_store import ConfigStore
from joblib import Parallel, delayed
from omegaconf import MISSING
try:
from wds2idx import IndexCreator
INDEX_CREATOR_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
INDEX_CREATOR_AVAILABLE = False
"""
python create_dali_tarred_dataset_index.py \
tar_dir=<path to the directory which contains tarred dataset> \
workers=-1
"""
logging.basicConfig(level=logging.INFO)
@dataclass
class DALITarredIndexConfig:
    tar_dir: str = MISSING  # Path to the directory that contains the tarred dataset (*.tar files)
workers: int = -1 # number of worker processes
def process_index_path(tar_paths, index_dir):
"""
    Builds the index filepath for each tarfile: the `.tar` extension is replaced with `.index` and the file is placed under `index_dir`.
Example:
/X/Y/Z/audio_0.tar -> /X/Y/Z/{index_dir}/audio_0.index
"""
index_paths = []
for path in tar_paths:
basepath, filename = os.path.split(path)
path = filename.replace('.tar', '.index')
path = os.path.join(basepath, path)
base, name = os.path.split(path)
index_path = os.path.join(index_dir, name)
index_paths.append(index_path)
return index_paths
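# Example (added for illustration; paths are hypothetical): with tar_dir=/data/tarred containing
# audio_0.tar ... audio_7.tar and index_dir=/data/tarred/dali_index, this returns
# ['/data/tarred/dali_index/audio_0.index', ..., '/data/tarred/dali_index/audio_7.index'].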
def build_index(tarpath, indexfile):
with IndexCreator(tarpath, indexfile) as index:
index.create_index()
@hydra.main(config_path=None, config_name='index_config')
def main(cfg: DALITarredIndexConfig):
if not INDEX_CREATOR_AVAILABLE:
logging.error("`wds2idx` is not installed. Please install NVIDIA DALI >= 1.11")
exit(1)
tar_files = list(glob.glob(os.path.join(cfg.tar_dir, "*.tar")))
index_dir = os.path.join(cfg.tar_dir, "dali_index")
if not os.path.exists(index_dir):
os.makedirs(index_dir, exist_ok=True)
index_paths = process_index_path(tar_files, index_dir)
with Parallel(n_jobs=cfg.workers, verbose=len(tar_files)) as parallel:
_ = parallel(delayed(build_index)(tarpath, indexfile) for tarpath, indexfile in zip(tar_files, index_paths))
logging.info("Finished constructing index files !")
ConfigStore.instance().store(name='index_config', node=DALITarredIndexConfig)
if __name__ == '__main__':
main()
| NeMo-main | scripts/speech_recognition/create_dali_tarred_dataset_index.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Python wrapper over HuggingFace Datasets to create preprocessed NeMo ASR Datasets.
List of HuggingFace datasets : https://huggingface.co/datasets
(Please filter by task: automatic-speech-recognition)
# Setup
After installation of huggingface datasets (pip install datasets), some datasets might require authentication
- for example Mozilla Common Voice. You should go to the above link, register as a user and generate an API key.
## Authenticated Setup Steps
Website steps:
- Visit https://huggingface.co/settings/profile
- Visit "Access Tokens" on list of items.
- Create new token - provide a name for the token and "read" access is sufficient.
- PRESERVE THAT TOKEN API KEY. You can copy that key for next step.
- Visit the HuggingFace Dataset page for Mozilla Common Voice
- There should be a section that asks you for your approval.
- Make sure you are logged in and then read that agreement.
- If and only if you agree to the text, then accept the terms.
Code steps:
- Now on your machine, run `huggingface-cli login`
- Paste your preserved HF TOKEN API KEY (from above).
Now you should be logged in. When running the script, don't forget to set `use_auth_token=True`!
# Usage
The script supports two modes, but the offline mode is the preferred mechanism. The drawback of the offline mode
is that it requires 3 copies of the dataset to exist simultaneously -
1) The .arrow files for HF cache
2) The extracted dataset in HF cache
3) The preprocessed audio files preserved in the output_dir provided in the script.
Due to this, make sure your HDD is large enough to store the processed dataset !
## Usage - Offline Mode
python convert_hf_dataset_to_nemo.py \
output_dir=<Path to some storage drive that will hold preprocessed audio files> \
path=<`path` argument in HF datasets, cannot be null> \
name=<`name` argument in HF datasets, can be null> \
split=<`split` argument in HF datasets, can be null> \
use_auth_token=<Can be `True` or `False` depending on whether the dataset requires authentication>
This will create an output directory of multiple sub-folders containing the preprocessed .wav files,
along with a nemo compatible JSON manifest file.
NOTE:
    The JSON manifest itself is not preprocessed! You should perform text normalization and clean up
    inconsistent text by using the NeMo Text Normalization tool and the Speech Data Explorer toolkit!
## Usage - Streaming Mode
NOTE:
    This mode is not well supported. It trades off speed for storage by keeping only one copy of the dataset in
    output_dir; however, processing is around 10x slower than offline mode. Some datasets (such as MCV)
fail to run entirely.
DO NOT USE if you have sufficient disk space.
python convert_hf_dataset_to_nemo.py \
... all the arguments from above \
streaming=True
"""
import json
import os
import traceback
from dataclasses import dataclass, is_dataclass
from typing import Optional
import hydra
import librosa
import soundfile
import tqdm
from datasets import Audio, Dataset, IterableDataset, load_dataset
from hydra.conf import HydraConf, RunDir
from hydra.core.config_store import ConfigStore
from omegaconf import OmegaConf
@dataclass
class HFDatasetConversionConfig:
# Nemo Dataset info
output_dir: str # path to output directory where the files will be saved
# HF Dataset info
path: str # HF dataset path
name: Optional[str] = None # name of the dataset subset
split: Optional[str] = None # split of the dataset subset
use_auth_token: bool = False # whether authentication token should be passed or not (Required for MCV)
# NeMo dataset conversion
sampling_rate: int = 16000
streaming: bool = False # Whether to use Streaming dataset API. [NOT RECOMMENDED]
num_proc: int = -1
ensure_ascii: bool = True # When saving the JSON entry, whether to ensure ascii.
# Placeholders. Generated internally.
resolved_output_dir: str = ''
split_output_dir: Optional[str] = None
hydra: HydraConf = HydraConf(run=RunDir(dir="."))
def prepare_output_dirs(cfg: HFDatasetConversionConfig):
"""
Prepare output directories and subfolders as needed.
Also prepare the arguments of the config with these directories.
"""
output_dir = os.path.abspath(cfg.output_dir)
output_dir = os.path.join(output_dir, cfg.path)
if cfg.name is not None:
output_dir = os.path.join(output_dir, cfg.name)
if not os.path.exists(output_dir):
os.makedirs(output_dir, exist_ok=True)
cfg.resolved_output_dir = output_dir
cfg.split_output_dir = None
def infer_dataset_segments(batch):
"""
Helper method to run in batch mode over a mapped Dataset.
Infers the path of the subdirectories for the dataset, removing {extracted/HASH}.
Returns:
A cleaned list of path segments
"""
segments = []
segment, path = os.path.split(batch['audio']['path'])
segments.insert(0, path)
while segment not in ('', os.path.sep):
segment, path = os.path.split(segment)
segments.insert(0, path)
if 'extracted' in segments:
index_of_basedir = segments.index("extracted")
segments = segments[(index_of_basedir + 1 + 1) :] # skip .../extracted/{hash}/
return segments
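# Worked example (added for illustration; the path is hypothetical): for
#   batch['audio']['path'] == '/cache/extracted/ab12cd/clips/sample.wav'
# the loop collects ['cache', 'extracted', 'ab12cd', 'clips', 'sample.wav'], and slicing past
# 'extracted' and the hash leaves ['clips', 'sample.wav'], i.e. only the dataset-internal subpath is kept.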
def prepare_audio_filepath(audio_filepath):
"""
Helper method to run in batch mode over a mapped Dataset.
Prepares the audio filepath and its subdirectories. Remaps the extension to .wav file.
Args:
audio_filepath: String path to the audio file.
Returns:
Cleaned filepath renamed to be a wav file.
"""
audio_basefilepath = os.path.split(audio_filepath)[0]
if not os.path.exists(audio_basefilepath):
os.makedirs(audio_basefilepath, exist_ok=True)
# Remove temporary fmt file
if os.path.exists(audio_filepath):
os.remove(audio_filepath)
# replace any ext with .wav
audio_filepath, ext = os.path.splitext(audio_filepath)
audio_filepath = audio_filepath + '.wav'
# Remove previous run file
if os.path.exists(audio_filepath):
os.remove(audio_filepath)
return audio_filepath
def build_map_dataset_to_nemo_func(cfg: HFDatasetConversionConfig, basedir):
"""
Helper method to run in batch mode over a mapped Dataset.
Creates a function that can be passed to Dataset.map() containing the config and basedir.
Useful to map a HF dataset to NeMo compatible format in an efficient way for offline processing.
Returns:
A function pointer which can be used for Dataset.map()
"""
def map_dataset_to_nemo(batch):
# Write audio file to correct path
if cfg.streaming:
batch['audio_filepath'] = batch['audio']['path'].split("::")[0].replace("zip://", "")
else:
segments = infer_dataset_segments(batch)
audio_filepath = os.path.join(*segments)
batch['audio_filepath'] = audio_filepath
batch['audio_filepath'] = os.path.abspath(os.path.join(basedir, batch['audio_filepath']))
audio_filepath = batch['audio_filepath']
audio_filepath = prepare_audio_filepath(audio_filepath)
batch['audio_filepath'] = audio_filepath # update filepath with prepared path
soundfile.write(audio_filepath, batch['audio']['array'], samplerate=cfg.sampling_rate, format='wav')
batch['duration'] = librosa.get_duration(y=batch['audio']['array'], sr=batch['audio']['sampling_rate'])
return batch
return map_dataset_to_nemo
def convert_offline_dataset_to_nemo(
dataset: Dataset, cfg: HFDatasetConversionConfig, basedir: str, manifest_filepath: str,
):
"""
    Converts an HF dataset to an audio-preprocessed NeMo dataset in offline mode.
    Also writes out a NeMo-compatible manifest file.
Args:
dataset: Iterable HF Dataset.
        cfg: HFDatasetConversionConfig.
basedir: Base output directory.
manifest_filepath: Filepath of manifest.
"""
num_proc = cfg.num_proc
if num_proc < 0:
num_proc = max(1, os.cpu_count() // 2)
dataset = dataset.map(build_map_dataset_to_nemo_func(cfg, basedir), num_proc=num_proc)
ds_iter = iter(dataset)
with open(manifest_filepath, 'w') as manifest_f:
for idx, sample in enumerate(
tqdm.tqdm(
ds_iter, desc=f'Processing {cfg.path} (split : {cfg.split}):', total=len(dataset), unit=' samples'
)
):
# remove large components from sample
del sample['audio']
if 'file' in sample:
del sample['file']
manifest_f.write(f"{json.dumps(sample, ensure_ascii=cfg.ensure_ascii)}\n")
def convert_streaming_dataset_to_nemo(
dataset: IterableDataset, cfg: HFDatasetConversionConfig, basedir: str, manifest_filepath: str
):
"""
    Converts an HF dataset to an audio-preprocessed NeMo dataset in streaming mode.
    Also writes out a NeMo-compatible manifest file.
Args:
dataset: Iterable HF Dataset.
        cfg: HFDatasetConversionConfig.
basedir: Base output directory.
manifest_filepath: Filepath of manifest.
"""
# Disable until fix https://github.com/huggingface/datasets/pull/3556 is merged
# dataset = dataset.map(build_map_dataset_to_nemo_func(cfg, basedir))
ds_iter = iter(dataset)
with open(manifest_filepath, 'w') as manifest_f:
for idx, sample in enumerate(
tqdm.tqdm(ds_iter, desc=f'Processing {cfg.path} (split: {cfg.split}):', unit=' samples')
):
audio_filepath = sample['audio']['path'].split("::")[0].replace("zip://", "")
audio_filepath = os.path.abspath(os.path.join(basedir, audio_filepath))
audio_filepath = prepare_audio_filepath(audio_filepath)
soundfile.write(audio_filepath, sample['audio']['array'], samplerate=cfg.sampling_rate, format='wav')
manifest_line = {
'audio_filepath': audio_filepath,
'text': sample['text'],
                'duration': librosa.get_duration(y=sample['audio']['array'], sr=cfg.sampling_rate),
}
# remove large components from sample
del sample['audio']
del sample['text']
if 'file' in sample:
del sample['file']
manifest_line.update(sample)
manifest_f.write(f"{json.dumps(sample, ensure_ascii=cfg.ensure_ascii)}\n")
def process_dataset(dataset: IterableDataset, cfg: HFDatasetConversionConfig):
"""
    Top-level method that processes a given IterableDataset into a NeMo-compatible dataset.
    It also writes out a NeMo-compatible manifest file.
    Args:
        dataset: HF Dataset.
        cfg: HFDatasetConversionConfig
"""
dataset = dataset.cast_column("audio", Audio(cfg.sampling_rate, mono=True))
# for Common Voice, "sentence" is used instead of "text" to store the transcript.
if 'sentence' in dataset.features:
dataset = dataset.rename_column("sentence", "text")
if cfg.split_output_dir is None:
basedir = cfg.resolved_output_dir
manifest_filename = f"{cfg.path.replace('/', '_')}_manifest.json"
else:
basedir = cfg.split_output_dir
split = os.path.split(cfg.split_output_dir)[-1]
manifest_filename = f"{split}_{cfg.path.replace('/', '_')}_manifest.json"
if not os.path.exists(cfg.split_output_dir):
os.makedirs(cfg.split_output_dir, exist_ok=True)
cfg.split = split
manifest_filepath = os.path.abspath(os.path.join(basedir, manifest_filename))
if cfg.streaming:
convert_streaming_dataset_to_nemo(dataset, cfg, basedir=basedir, manifest_filepath=manifest_filepath)
else:
convert_offline_dataset_to_nemo(dataset, cfg, basedir=basedir, manifest_filepath=manifest_filepath)
print()
print("Dataset conversion finished !")
@hydra.main(config_name='hfds_config', config_path=None)
def main(cfg: HFDatasetConversionConfig):
# Convert dataclass to omegaconf
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
# Prepare output subdirs
prepare_output_dirs(cfg)
# Load dataset in offline/streaming mode
dataset = None
try:
dataset = load_dataset(
path=cfg.path,
name=cfg.name,
split=cfg.split,
cache_dir=None,
streaming=cfg.streaming,
use_auth_token=cfg.use_auth_token,
)
except Exception as e:
print(
"HuggingFace datasets failed due to some reason (stack trace below). \nFor certain datasets (eg: MCV), "
"it may be necessary to login to the huggingface-cli (via `huggingface-cli login`).\n"
"Once logged in, you need to set `use_auth_token=True` when calling this script.\n\n"
"Traceback error for reference :\n"
)
print(traceback.format_exc())
exit(1)
# Multiple datasets were provided at once, process them one by one into subdirs.
if isinstance(dataset, dict):
print()
print("Multiple splits found for dataset", cfg.path, ":", list(dataset.keys()))
keys = list(dataset.keys())
for key in keys:
ds_split = dataset[key]
print(f"Processing split {key} for dataset {cfg.path}")
cfg.split_output_dir = os.path.join(cfg.resolved_output_dir, key)
process_dataset(ds_split, cfg)
del dataset[key], ds_split
# reset the split output directory
cfg.split_output_dir = None
else:
# Single dataset was found, process into resolved directory.
print("Single split found for dataset", cfg.path, "| Split chosen =", cfg.split)
if cfg.split is not None:
cfg.split_output_dir = os.path.join(cfg.resolved_output_dir, cfg.split)
process_dataset(dataset, cfg)
# Register the dataclass as a valid config
ConfigStore.instance().store(name='hfds_config', node=HFDatasetConversionConfig)
if __name__ == '__main__':
main()
| NeMo-main | scripts/speech_recognition/convert_hf_dataset_to_nemo.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from dataclasses import dataclass, is_dataclass
from pathlib import Path
from typing import Optional
import pytorch_lightning as pl
import torch
from omegaconf import MISSING, OmegaConf
from sklearn.model_selection import ParameterGrid
from nemo.collections.asr.metrics.rnnt_wer import RNNTDecodingConfig
from nemo.collections.asr.metrics.wer import CTCDecodingConfig
from nemo.collections.asr.models import ASRModel, EncDecRNNTModel
from nemo.collections.asr.parts.utils.asr_confidence_benchmarking_utils import (
apply_confidence_parameters,
run_confidence_benchmark,
)
from nemo.collections.asr.parts.utils.asr_confidence_utils import ConfidenceConfig
from nemo.core.config import hydra_runner
from nemo.utils import logging, model_utils
"""
Get confidence metrics and curve plots for a given model, dataset, and confidence parameters.
# Arguments
model_path: Path to .nemo ASR checkpoint
pretrained_name: Name of pretrained ASR model (from NGC registry)
dataset_manifest: Path to dataset JSON manifest file (in NeMo format)
output_dir: Output directory to store a report and curve plot directories
batch_size: batch size during inference
num_workers: number of workers during inference
cuda: Optional int to enable or disable execution of model on certain CUDA device
amp: Bool to decide if Automatic Mixed Precision should be used during inference
audio_type: Str filetype of the audio. Supported = wav, flac, mp3
target_level: Word- or token-level confidence. Supported = word, token, auto (for computing both word and token)
confidence_cfg: Config with confidence parameters
grid_params: Dictionary with lists of parameters to iteratively benchmark on
# Usage
ASR model can be specified by either "model_path" or "pretrained_name".
Data for transcription are defined with "dataset_manifest".
Results are returned as a benchmark report and curve plots.
python benchmark_asr_confidence.py \
model_path=null \
pretrained_name=null \
dataset_manifest="" \
output_dir="" \
batch_size=64 \
num_workers=8 \
cuda=0 \
amp=True \
target_level="word" \
confidence_cfg.exclude_blank=False \
'grid_params="{\"aggregation\": [\"min\", \"prod\"], \"alpha\": [0.33, 0.5]}"'
"""
def get_experiment_params(cfg):
"""Get experiment parameters from a confidence config and generate the experiment name.
Returns:
List of experiment parameters.
String with the experiment name.
"""
blank = "no_blank" if cfg.exclude_blank else "blank"
aggregation = cfg.aggregation
method_name = cfg.measure_cfg.name
alpha = cfg.measure_cfg.alpha
if method_name == "entropy":
entropy_type = cfg.measure_cfg.entropy_type
entropy_norm = cfg.measure_cfg.entropy_norm
experiment_param_list = [
aggregation,
str(cfg.exclude_blank),
method_name,
entropy_type,
entropy_norm,
str(alpha),
]
experiment_str = "-".join([aggregation, blank, method_name, entropy_type, entropy_norm, str(alpha)])
else:
experiment_param_list = [aggregation, str(cfg.exclude_blank), method_name, "-", "-", str(alpha)]
experiment_str = "-".join([aggregation, blank, method_name, str(alpha)])
return experiment_param_list, experiment_str
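# Example (added; values are illustrative): for a config with aggregation='min', exclude_blank=True and an
# entropy measure with entropy_type='tsallis', entropy_norm='exp', alpha=0.33, the returned experiment string
# is 'min-no_blank-entropy-tsallis-exp-0.33', which is also used below as the plot directory name.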
@dataclass
class ConfidenceBenchmarkingConfig:
# Required configs
model_path: Optional[str] = None # Path to a .nemo file
pretrained_name: Optional[str] = None # Name of a pretrained model
dataset_manifest: str = MISSING
output_dir: str = MISSING
# General configs
batch_size: int = 32
num_workers: int = 4
# Set `cuda` to int to define CUDA device. If 'None', will look for CUDA
# device anyway, and do inference on CPU only if CUDA device is not found.
# If `cuda` is a negative number, inference will be on CPU only.
cuda: Optional[int] = None
amp: bool = False
audio_type: str = "wav"
# Confidence configs
target_level: str = "auto" # Choices: "word", "token", "auto" (for both word- and token-level confidence)
confidence_cfg: ConfidenceConfig = ConfidenceConfig(preserve_word_confidence=True, preserve_token_confidence=True)
grid_params: Optional[str] = None # a dictionary with lists of parameters to iteratively benchmark on
@hydra_runner(config_name="ConfidenceBenchmarkingConfig", schema=ConfidenceBenchmarkingConfig)
def main(cfg: ConfidenceBenchmarkingConfig):
torch.set_grad_enabled(False)
logging.info(f'Hydra config: {OmegaConf.to_yaml(cfg)}')
if is_dataclass(cfg):
cfg = OmegaConf.structured(cfg)
if cfg.model_path is None and cfg.pretrained_name is None:
raise ValueError("Both cfg.model_path and cfg.pretrained_name cannot be None!")
# setup GPU
if cfg.cuda is None:
if torch.cuda.is_available():
device = [0] # use 0th CUDA device
accelerator = 'gpu'
else:
device = 1
accelerator = 'cpu'
else:
device = [cfg.cuda]
accelerator = 'gpu'
map_location = torch.device('cuda:{}'.format(device[0]) if accelerator == 'gpu' else 'cpu')
# setup model
if cfg.model_path is not None:
# restore model from .nemo file path
model_cfg = ASRModel.restore_from(restore_path=cfg.model_path, return_config=True)
classpath = model_cfg.target # original class path
imported_class = model_utils.import_class_by_path(classpath) # type: ASRModel
logging.info(f"Restoring model : {imported_class.__name__}")
asr_model = imported_class.restore_from(
restore_path=cfg.model_path, map_location=map_location
) # type: ASRModel
else:
# restore model by name
asr_model = ASRModel.from_pretrained(
model_name=cfg.pretrained_name, map_location=map_location
) # type: ASRModel
trainer = pl.Trainer(devices=device, accelerator=accelerator)
asr_model.set_trainer(trainer)
asr_model = asr_model.eval()
# Check if ctc or rnnt model
is_rnnt = isinstance(asr_model, EncDecRNNTModel)
# Check that the model has the `change_decoding_strategy` method
if not hasattr(asr_model, 'change_decoding_strategy'):
raise RuntimeError("The asr_model you are using must have the `change_decoding_strategy` method.")
# get filenames and reference texts from manifest
filepaths = []
reference_texts = []
if os.stat(cfg.dataset_manifest).st_size == 0:
logging.error(f"The input dataset_manifest {cfg.dataset_manifest} is empty. Exiting!")
return None
manifest_dir = Path(cfg.dataset_manifest).parent
with open(cfg.dataset_manifest, 'r') as f:
for line in f:
item = json.loads(line)
audio_file = Path(item['audio_filepath'])
if not audio_file.is_file() and not audio_file.is_absolute():
audio_file = manifest_dir / audio_file
filepaths.append(str(audio_file.absolute()))
reference_texts.append(item['text'])
# setup AMP (optional)
autocast = None
if cfg.amp and torch.cuda.is_available() and hasattr(torch.cuda, 'amp') and hasattr(torch.cuda.amp, 'autocast'):
logging.info("AMP enabled!\n")
autocast = torch.cuda.amp.autocast
# do grid-based benchmarking if grid_params is provided, otherwise a regular one
work_dir = Path(cfg.output_dir)
os.makedirs(work_dir, exist_ok=True)
report_legend = (
",".join(
[
"model_type",
"aggregation",
"blank",
"method_name",
"entropy_type",
"entropy_norm",
"alpha",
"target_level",
"auc_roc",
"auc_pr",
"auc_nt",
"nce",
"ece",
"auc_yc",
"std_yc",
"max_yc",
]
)
+ "\n"
)
model_typename = "RNNT" if is_rnnt else "CTC"
report_file = work_dir / Path("report.csv")
if cfg.grid_params:
asr_model.change_decoding_strategy(
RNNTDecodingConfig(fused_batch_size=-1, strategy="greedy_batch", confidence_cfg=cfg.confidence_cfg)
if is_rnnt
else CTCDecodingConfig(confidence_cfg=cfg.confidence_cfg)
)
params = json.loads(cfg.grid_params)
hp_grid = ParameterGrid(params)
hp_grid = list(hp_grid)
logging.info(f"==============================Running a benchmarking with grid search=========================")
logging.info(f"Grid search size: {len(hp_grid)}")
logging.info(f"Results will be written to:\nreport file `{report_file}`\nand plot directories near the file")
logging.info(f"==============================================================================================")
with open(report_file, "tw", encoding="utf-8") as f:
f.write(report_legend)
f.flush()
for i, hp in enumerate(hp_grid):
logging.info(f"Run # {i + 1}, grid: `{hp}`")
asr_model.change_decoding_strategy(apply_confidence_parameters(asr_model.cfg.decoding, hp))
param_list, experiment_name = get_experiment_params(asr_model.cfg.decoding.confidence_cfg)
plot_dir = work_dir / Path(experiment_name)
results = run_confidence_benchmark(
asr_model,
cfg.target_level,
filepaths,
reference_texts,
cfg.batch_size,
cfg.num_workers,
plot_dir,
autocast,
)
for level, result in results.items():
f.write(f"{model_typename},{','.join(param_list)},{level},{','.join([str(r) for r in result])}\n")
f.flush()
else:
asr_model.change_decoding_strategy(
RNNTDecodingConfig(fused_batch_size=-1, strategy="greedy_batch", confidence_cfg=cfg.confidence_cfg)
if is_rnnt
else CTCDecodingConfig(confidence_cfg=cfg.confidence_cfg)
)
param_list, experiment_name = get_experiment_params(asr_model.cfg.decoding.confidence_cfg)
plot_dir = work_dir / Path(experiment_name)
logging.info(f"==============================Running a single benchmarking===================================")
logging.info(f"Results will be written to:\nreport file `{report_file}`\nand plot directory `{plot_dir}`")
with open(report_file, "tw", encoding="utf-8") as f:
f.write(report_legend)
f.flush()
            results = run_confidence_benchmark(
                asr_model,
                cfg.target_level,
                filepaths,
                reference_texts,
                cfg.batch_size,
                cfg.num_workers,
                plot_dir,
                autocast,
            )
for level, result in results.items():
f.write(f"{model_typename},{','.join(param_list)},{level},{','.join([str(r) for r in result])}\n")
logging.info(f"===========================================Done===============================================")
if __name__ == '__main__':
main()
| NeMo-main | scripts/speech_recognition/confidence/benchmark_asr_confidence.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import random
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
# Checks -
# (Recommendation) Please normalize the text for each language (avoid numbers, special characters, punctuation)
# Please ensure that the audio_filepaths are absolute locations
parser = argparse.ArgumentParser(description='Create synthetic code-switching data manifest from monolingual data')
parser.add_argument("--manifest_language1", default=None, type=str, help='Manifest file for language 1', required=True)
parser.add_argument("--manifest_language2", default=None, type=str, help='Manifest file for language 2', required=True)
parser.add_argument(
"--manifest_save_path", default=None, type=str, help='Path to save created CS indermediate manifest', required=True
)
parser.add_argument(
"--id_language1", default=None, type=str, help='Identifier for language 1, eg: en, es, hi', required=True
)
parser.add_argument(
"--id_language2", default=None, type=str, help='Identifier for language 2, eg: en, es, hi', required=True
)
parser.add_argument("--max_sample_duration_sec", default=19, type=int, help='Maximum duration of sample (sec)')
parser.add_argument("--min_sample_duration_sec", default=16, type=int, help='Minimum duration of sample (sec)')
parser.add_argument("--dataset_size_required_hrs", default=1, type=int, help='Duration of dataset required (hrs)')
args = parser.parse_args()
def create_cs_manifest(
data_lang_0: list,
data_lang_1: list,
lid_lang_0: str,
lid_lang_1: str,
max_sample_duration_sec: int,
min_sample_duration_sec: int,
data_requirement_hrs: int,
):
"""
Args:
        data_lang_0: Manifest entries from the first language
        data_lang_1: Manifest entries from the second language
        lid_lang_0: Language ID marker for the first language
        lid_lang_1: Language ID marker for the second language
max_sample_duration_sec: Maximum permissible duration of generated CS sample in sec
min_sample_duration_sec: Minimum permissible duration of generated CS sample in sec
data_requirement_hrs: Required size of generated corpus
Returns:
Created synthetic CS manifest as list
"""
total_duration = 0
constructed_data = []
sample_id = 0
num_samples_lang0 = len(data_lang_0)
num_samples_lang1 = len(data_lang_1)
while total_duration < (data_requirement_hrs * 3600):
created_sample_duration_sec = 0
created_sample_dict = {}
created_sample_dict['lang_ids'] = []
created_sample_dict['texts'] = []
created_sample_dict['paths'] = []
created_sample_dict['durations'] = []
while created_sample_duration_sec < min_sample_duration_sec:
lang_selection = random.randint(0, 1)
if lang_selection == 0:
index = random.randint(0, num_samples_lang0 - 1)
sample = data_lang_0[index]
lang_id = lid_lang_0
else:
index = random.randint(0, num_samples_lang1 - 1)
sample = data_lang_1[index]
lang_id = lid_lang_1
if (created_sample_duration_sec + sample['duration']) > max_sample_duration_sec:
continue
else:
created_sample_duration_sec += sample['duration']
created_sample_dict['lang_ids'].append(lang_id)
created_sample_dict['texts'].append(sample['text'])
created_sample_dict['paths'].append(sample['audio_filepath'])
created_sample_dict['durations'].append(sample['duration'])
created_sample_dict['total_duration'] = created_sample_duration_sec
# adding a uid which will be used to save the generated audio file later
created_sample_dict['uid'] = sample_id
sample_id += 1
constructed_data.append(created_sample_dict)
total_duration += created_sample_duration_sec
return constructed_data
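# Illustrative note (added; all values are placeholders): one generated intermediate entry looks like
#   {
#       'lang_ids': ['en', 'es', 'en'],
#       'texts': ['hello there', 'buenos dias', 'see you soon'],
#       'paths': ['/data/en/a.wav', '/data/es/b.wav', '/data/en/c.wav'],
#       'durations': [5.2, 6.1, 5.4],
#       'total_duration': 16.7,
#       'uid': 0,
#   }
# i.e. phrases are drawn at random from the two monolingual manifests until the sample duration reaches
# min_sample_duration_sec without exceeding max_sample_duration_sec.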
def main():
manifest0 = args.manifest_language1
manifest1 = args.manifest_language2
lid0 = args.id_language1
lid1 = args.id_language2
min_sample_duration = args.min_sample_duration_sec
max_sample_duration = args.max_sample_duration_sec
dataset_requirement = args.dataset_size_required_hrs
manifest_save_path = args.manifest_save_path
# Sanity Checks
    if (manifest0 is None) or (not os.path.exists(manifest0)):
        logging.error('Manifest for language 1 is incorrect')
        exit(1)
    if (manifest1 is None) or (not os.path.exists(manifest1)):
        logging.error('Manifest for language 2 is incorrect')
        exit(1)
    if lid0 is None:
        logging.error('Please provide correct language code for language 1')
        exit(1)
    if lid1 is None:
        logging.error('Please provide correct language code for language 2')
        exit(1)
    if manifest_save_path is None:
        logging.error('Please provide correct manifest save path')
        exit(1)
    if min_sample_duration >= max_sample_duration:
        logging.error('Please ensure max_sample_duration > min_sample_duration')
        exit(1)
# Reading data
logging.info('Reading manifests')
data_language0 = read_manifest(manifest0)
data_language1 = read_manifest(manifest1)
# Creating the CS data Manifest
logging.info('Creating CS manifest')
constructed_data = create_cs_manifest(
data_language0, data_language1, lid0, lid1, max_sample_duration, min_sample_duration, dataset_requirement
)
# Saving Manifest
logging.info('saving manifest')
write_manifest(manifest_save_path, constructed_data)
print("Synthetic CS manifest saved at :", manifest_save_path)
logging.info('Done!')
if __name__ == "__main__":
main()
| NeMo-main | scripts/speech_recognition/code_switching/code_switching_manifest_creation.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import librosa
import numpy as np
from joblib import Parallel, delayed
from scipy.io import wavfile
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
parser = argparse.ArgumentParser(description='Create synthetic code-switching data audio data from monolingual data')
parser.add_argument("--manifest_path", default=None, type=str, help='Path to CS indermediate manifest', required=True)
parser.add_argument(
"--audio_save_folder_path",
default=None,
type=str,
help='Path to directory where created synthetic set would be saved',
required=True,
)
parser.add_argument(
"--manifest_save_path", default=None, type=str, help='Path to save the created manifest', required=True
)
parser.add_argument(
"--audio_normalized_amplitude", default=15000, type=int, help='Normalized amplitdue of audio samples'
)
parser.add_argument(
"--cs_data_sampling_rate",
default=16000,
type=int,
help='Desired sampling rate for the audios in the generated dataset',
)
parser.add_argument(
"--sample_beginning_pause_msec",
default=20,
type=int,
help='Pause to be added at the beginning of the sample (msec)',
)
parser.add_argument(
"--sample_joining_pause_msec",
default=100,
type=int,
help='Pause to be added between different phrases of the sample (msec)',
)
parser.add_argument(
"--sample_end_pause_msec", default=20, type=int, help='Pause to be added at the end of the sample (msec)'
)
parser.add_argument(
"--is_lid_manifest",
default=True,
type=bool,
help='If true, generate manifest in the multi-sample lid format, else the standard manifest format',
)
parser.add_argument("--workers", default=1, type=int, help='Number of worker processes')
args = parser.parse_args()
def split_list(input_list: list, num_splits: int):
"""
Args:
input_list: the input list to split
num_splits: number of splits required
Returns:
iterator of split lists
"""
k, m = divmod(len(input_list), num_splits)
return (input_list[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(num_splits))
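# Example (added for illustration): split_list(list(range(10)), 3) yields chunks of sizes 4, 3 and 3;
# the first `m` chunks absorb the remainder so that every element is assigned exactly once.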
def combine_manifests(manifest_save_path: str, num_split: int):
"""
Args:
manifest_save_path: absolute path to save the combined manifest
        num_split: number of splits of the manifest
Returns:
num_samples_combined: the total number of samples in the generated dataset
"""
num_samples_combined = 0
base_directory = os.path.dirname(manifest_save_path)
with open(manifest_save_path, 'w') as outfile:
for i in range(num_split):
split_manifest_path = base_directory + '/temp_' + str(i) + '.json'
data_split = read_manifest(split_manifest_path)
for elem in data_split:
s = json.dumps(elem)
outfile.write(s + '\n')
num_samples_combined += 1
# removing the intermediate file
os.remove(split_manifest_path)
return num_samples_combined
def create_cs_data(
intermediate_cs_manifest_list: list,
audio_save_folder: str,
    manifest_save_path: str,
audio_amplitude_normalization: int,
pause_beg_msec: int,
pause_join_msec: int,
pause_end_msec: int,
cs_data_sampling_rate: int,
is_lid_manifest: bool,
):
"""
Args:
intermediate_cs_manifest_list: the intermediate cs manifest obtained from code_switching_manifest_creation.py as a list
audio_save_folder: Absolute path to save the generated audio samples
        manifest_save_path: Absolute path to save the corresponding manifest
audio_amplitude_normalization: The amplitude to scale to after normalization
pause_beg_msec: Pause to be added at the beginning of the sample (msec)
pause_join_msec: Pause to be added between different phrases of the sample (msec)
pause_end_msec: Pause to be added at the end of the sample (msec)
cs_data_sampling_rate: Desired sampling rate of the generated samples
is_lid_manifest: If true, generate manifest in the multi-sample lid format, else the standard manifest format
Returns:
"""
fs = cs_data_sampling_rate
incorrect_sample_flag = 0
    with open(manifest_save_path, 'w') as outfile:
for data in tqdm(intermediate_cs_manifest_list):
combined_audio = []
staring_pause = np.zeros(int(pause_beg_msec * fs / 1000))
combined_audio += list(staring_pause)
text_entry_list = []
for index in range(len(data['lang_ids'])):
phrase_entry = {}
# dictionary to store the phrase information which will be added to the complete sentence
data_sample, fs_sample = librosa.load(data['paths'][index], sr=fs)
# Alternative- fs_sample, data_sample = wavfile.read(data['paths'][index])
if fs_sample != fs:
logging.error('Sampling rate error inside create_cs_data function')
                    exit(1)
# Remove leading and trailing zeros
data_sample = np.trim_zeros(data_sample)
# take care of empty arrays: rare
if data_sample.size == 0:
incorrect_sample_flag = 1
continue
# normalizing data
data_sample_norm = (
data_sample
/ np.maximum(np.abs(data_sample.max()), np.abs(data_sample.min()))
* audio_amplitude_normalization
)
combined_audio += list(data_sample_norm)
phrase_entry['str'] = data['texts'][index]
phrase_entry['lang'] = data['lang_ids'][index]
text_entry_list.append(phrase_entry)
                # adding a small pause between segments
if index != (len(data['lang_ids']) - 1):
pause = np.zeros(int(pause_join_msec * fs / 1000))
combined_audio += list(pause)
if incorrect_sample_flag == 1:
incorrect_sample_flag = 0
continue
ending_pause = np.zeros(int(pause_end_msec * fs / 1000))
combined_audio += list(ending_pause)
sample_id = data['uid']
audio_file_path = audio_save_folder + '/' + str(sample_id) + ".wav"
# saving audio
wavfile.write(audio_file_path, fs, np.array(combined_audio).astype(np.int16))
# Alternative- librosa.output.write_wav(audio_file_path, combined_audio, fs)
metadata_json = {}
metadata_json['audio_filepath'] = audio_file_path
metadata_json['duration'] = float(len(combined_audio) / fs)
if is_lid_manifest:
metadata_json['text'] = text_entry_list
else:
metadata_json['text'] = ' '.join(data['texts'])
metadata_json['language_ids'] = data['lang_ids']
metadata_json['original_texts'] = data['texts']
metadata_json['original_paths'] = data['paths']
metadata_json['original_durations'] = data['durations']
s = json.dumps(metadata_json)
outfile.write(s + '\n')
def main():
cs_intermediate_manifest_path = args.manifest_path
audio_save_folder = args.audio_save_folder_path
manifest_save_path = args.manifest_save_path
audio_amplitude_normalization = args.audio_normalized_amplitude
pause_beg_msec = args.sample_beginning_pause_msec
pause_join_msec = args.sample_joining_pause_msec
pause_end_msec = args.sample_end_pause_msec
cs_data_sampling_rate = args.cs_data_sampling_rate
is_lid_manifest = args.is_lid_manifest
num_process = args.workers
# Sanity Checks
    if (cs_intermediate_manifest_path is None) or (not os.path.exists(cs_intermediate_manifest_path)):
        logging.error('Please provide correct CS manifest (obtained from code_switching_manifest_creation.py)')
        exit(1)
    if (audio_save_folder is None) or (not os.path.exists(audio_save_folder)):
        logging.error('audio_save_folder_path is incorrect or does not exist')
        exit(1)
    if manifest_save_path is None:
        logging.error('Please provide valid manifest_save_path')
        exit(1)
# Reading data
logging.info('Reading manifests')
intermediate_cs_manifest = read_manifest(cs_intermediate_manifest_path)
    # Splitting the data
data_split = split_list(intermediate_cs_manifest, num_process)
# Creating Audio data
logging.info('Creating synthetic audio data')
base_directory = os.path.dirname(manifest_save_path)
Parallel(n_jobs=num_process)(
delayed(create_cs_data)(
split_manifest,
audio_save_folder,
base_directory + '/temp_' + str(idx) + '.json',
audio_amplitude_normalization,
pause_beg_msec,
pause_join_msec,
pause_end_msec,
cs_data_sampling_rate,
is_lid_manifest,
)
for idx, split_manifest in enumerate(data_split)
)
# Combining manifests
num_samples_combined = combine_manifests(manifest_save_path, num_process)
print("Synthetic CS audio data saved at :", audio_save_folder)
print("Synthetic CS manifest saved at :", manifest_save_path)
print("Total number of samples in the generated dataset :", str(num_samples_combined))
logging.info('Done!')
if __name__ == "__main__":
main()
| NeMo-main | scripts/speech_recognition/code_switching/code_switching_audio_data_creation.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
import string
from collections import Counter
from rouge_score import rouge_scorer
"""
This script can be used to calculate exact match and F1 scores for many different tasks, not just squad.
Example command for T5 Preds
```
python squad_metric_calc.py \
--ground-truth squad_test_gt.jsonl \
--preds squad_preds_t5.txt
```
Example command for GPT Preds
```
python squad_metric_calc.py \
--ground-truth squad_test_gt.jsonl \
--preds squad_preds_gpt.txt \
--split-string "answer:"
```
In this case, the prediction file will be split on "answer: " when looking for the LM's predicted answer.
"""
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
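# Example (added for illustration):
#   normalize_answer("The Quick, Brown Fox!") -> "quick brown fox"
# (lower-cased, punctuation stripped, the article "the" removed, whitespace collapsed)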
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
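# Worked example (added for illustration): prediction "the cat sat" vs. ground truth "cat sat down"
# normalizes to ['cat', 'sat'] and ['cat', 'sat', 'down'], so num_same = 2,
# precision = 2/2 = 1.0, recall = 2/3, and f1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.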
def exact_match_score(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def main():
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument(
'--ground-truth',
type=str,
help="ground truth .jsonl file made from /NeMo/scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py",
)
parser.add_argument(
'--preds',
type=str,
help="Text file with test set prompts + model predictions. Prediction file can be made by running NeMo/examples/nlp/language_modeling/megatron_gpt_prompt_learning_eval.py",
)
parser.add_argument(
'--split-string',
type=str,
help="The text at the end of the prompt, write before the predicted answer. This will be used to find the model's predictions in pred files when the pred file containers both the prompt and prediction.",
default=None,
    )  # If the pred file only has predictions, just pass None
parser.add_argument(
'--answer-field',
type=str,
help="The field in the json file that contains the ground truth tokens",
default="answer",
)
args = parser.parse_args()
ground_truth_file = args.ground_truth
pred_file = args.preds
scorer = rouge_scorer.RougeScorer(['rougeL'], use_stemmer=True)
    preds = open(pred_file, encoding="utf-8").readlines()
    ground_truth = open(ground_truth_file, encoding="utf-8").readlines()
f1 = exact_match = total = r_score = 0
for i in range(len(preds)):
truth = json.loads(ground_truth[i])
        pred_answer = json.loads(preds[i])
        if isinstance(pred_answer, dict):
            # Prediction lines may be json objects; the generated text is under the "sentence" field
            pred_answer = pred_answer["sentence"]
        # Separate the prediction from the prompt by splitting on the provided "split string"
        if args.split_string is not None:
            pred_answer = pred_answer.split(args.split_string)[-1].strip()
true_answers = truth[args.answer_field]
if not isinstance(true_answers, list):
true_answers = [true_answers]
r_scores = []
for ta in true_answers:
r_scores.append(scorer.score(ta, pred_answer)['rougeL'].fmeasure)
r_score += max(r_scores)
exact_match += metric_max_over_ground_truths(exact_match_score, pred_answer, true_answers)
f1 += metric_max_over_ground_truths(f1_score, pred_answer, true_answers)
total += 1
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
r_score = 100 * (r_score / total)
res = {'exact_match': exact_match, 'f1': f1, "rougeL": r_score, 'total': total}
print('\t'.join([f"{k} {v:.3f}" for k, v in res.items()]))
if __name__ == "__main__":
main()
| NeMo-main | scripts/metric_calculation/peft_metric_calc.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import numpy as np
from rouge_score import rouge_scorer, scoring
"""
Example command for T5 Preds
```
python compute_rouge.py \
--ground-truth dialogsum_test_gt.jsonl \
--preds dialogsum_preds_t5.txt \
--answer-field "answer"
```
Example command for GPT Preds
```
python compute_rouge.py \
--ground-truth dialogsum_test_gt.jsonl \
--preds dialogsum_preds_gpt.txt \
--answer-field "answer" \
--split-string "summary:"
```
"""
ROUGE_KEYS = ["rouge1", "rouge2", "rougeL", "rougeLsum"]
def calculate_rouge(output_lns, reference_lns, use_stemmer=True):
scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer)
aggregator = scoring.BootstrapAggregator()
for reference_ln, output_ln in zip(reference_lns, output_lns):
ln_scores = []
for possible_ln in reference_ln:
scores = scorer.score(possible_ln, output_ln)
ln_scores.append(scores)
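        # Keep the reference whose rouge1 F-measure (the last field of the Score tuple) is highest.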
best_index = np.argmax([score_dict["rouge1"][-1] for score_dict in ln_scores])
aggregator.add_scores(ln_scores[best_index])
result = aggregator.aggregate()
return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()}
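# Illustrative usage (hypothetical strings): each reference entry is a list of acceptable answers.
#   calculate_rouge(output_lns=["the dog barked loudly"], reference_lns=[["the dog barked"]])
# returns a dict keyed by rouge1/rouge2/rougeL/rougeLsum with F-measures scaled to 0-100.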
def load_ref(filename, answer_field):
lines = open(filename).readlines()
all_answers = []
for line in lines:
line = line.strip()
line = json.loads(line)
answers = line[answer_field]
if isinstance(answers, str):
answers = [answers]
all_answers.append(answers)
return all_answers
def load_preds(filename, split_string):
with open(filename) as f:
lines = [line.split(split_string)[-1].strip() for line in f.readlines()]
return lines
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--ground-truth', type=str, help="ground truth .jsonl")
parser.add_argument('--preds', type=str, help="Text file with test set prompts + model predictions.")
parser.add_argument(
'--answer-field',
type=str,
help="The key in the ground truth json object containing specifying the correct answer.",
default="answer",
)
parser.add_argument(
'--split-string',
type=str,
help="The text at the end of the prompt, write before the predicted answer. This will be used to find the model's predictions in pred files when the pred file containers both the prompt and prediction.",
default=None,
    )  # If the pred file only has predictions, just pass None
args = parser.parse_args()
pred_file = args.preds
ref_filename = args.ground_truth
answer_field = args.answer_field # The field in the ground truth json that contains the answer
split_string = args.split_string # The final few tokens of the prompt right before the generated answer
output_lns = load_preds(pred_file, split_string)
reference_lns = load_ref(ref_filename, answer_field)
assert len(output_lns) == len(reference_lns)
print("Calculating Rouge")
scores = calculate_rouge(output_lns=output_lns, reference_lns=reference_lns)
print(scores)
| NeMo-main | scripts/metric_calculation/compute_rouge.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from argparse import ArgumentParser
import numpy as np
from nemo.collections.asr.parts.utils.vad_utils import prepare_manifest
from nemo.utils import logging
"""
This script is designed for inference of frame level Voice Activity Detection (VAD)
This script serves three goals:
(1) Write audio files to manifest
    (2) Split audio files to avoid CUDA memory issues
    (3) Take care of joining the separate json lines for an audio file
Usage:
python write_long_audio_manifest.py --inp_dir=<FULL PATH OF FOLDER OF AUDIO FILES> --split_duration=300 --window_length_in_sec=0.63 --num_workers=10
"""
def main():
parser = ArgumentParser()
parser.add_argument("--inp_dir", type=str, required=True, help="(full path) folder of files to be processed")
parser.add_argument(
"--inp_list", type=str, help="(full path) a file contains NAME of files inside inp_dir to be processed"
)
parser.add_argument("--out_dir", type=str, default=".", help="(full path) location to store generated json file")
parser.add_argument("--manifest_name", type=str, default="generated_manifest", help="name of generated json file")
parser.add_argument("--split_duration", type=int, required=True, help="max duration of each audio clip/line")
parser.add_argument(
"--window_length_in_sec",
type=float,
default=0.63,
help="window length in sec for VAD context input , default is 0.63s",
)
parser.add_argument("--num_workers", type=int, default=4, help="number of workers for multiprocessing")
args = parser.parse_args()
if not args.inp_list:
input_audios = []
for root, dirs, files in os.walk(args.inp_dir):
for basename in files:
if basename.endswith('.wav'):
filename = os.path.join(root, basename)
input_audios.append(filename)
else:
name_list = np.loadtxt(args.inp_list, dtype='str')
input_audios = [os.path.join(args.inp_dir, name + ".wav") for name in name_list]
input_list = []
for i in input_audios:
input_list.append({'audio_filepath': i, "offset": 0, "duration": None})
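    # Each entry looks like {"audio_filepath": "/data/long/recording1.wav", "offset": 0, "duration": None}
    # (hypothetical path); prepare_manifest below splits these entries into chunks of at most split_duration seconds.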
logging.info(f"Number of wav files to be processed: {len(input_audios)}")
output_path = os.path.join(args.out_dir, args.manifest_name + '.json')
logging.info("Split long audio file to avoid CUDA memory issue")
logging.debug("Try smaller split_duration if you still have CUDA memory issue")
config = {
'input': input_list,
'window_length_in_sec': args.window_length_in_sec,
'split_duration': args.split_duration,
'num_workers': args.num_workers,
'prepared_manfiest_vad_input': output_path,
}
manifest_vad_input = prepare_manifest(config)
logging.info(f"Done! Save to {manifest_vad_input}")
if __name__ == '__main__':
main()
| NeMo-main | scripts/voice_activity_detection/write_long_audio_manifest.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from argparse import ArgumentParser
from nemo.collections.asr.parts.utils.vad_utils import generate_overlap_vad_seq, generate_vad_segment_table
from nemo.utils import logging
"""
Note you can use NeMo/examples/asr/speech_classification/vad_infer.py which includes the functionalities that appear in this script directly.
You are encouraged to use this script if you want to try the overlapped mean/median smoothing filter and postprocessing technique without performing costly NN inference several times.
You can also use this script to write RTTM-like files if you have frame level prediction already.
This script serves two purposes:
1) gen_overlap_seq:
Generate predictions with overlapping input segments by using the frame level prediction from NeMo/examples/asr/speech_classification/vad_infer.py.
Then a smoothing filter is applied to decide the label for a frame spanned by multiple segments.
2) gen_seg_table:
    Convert frame level predictions to speech/no-speech segments in start and end time format with a postprocessing technique.
Usage:
python vad_overlap_posterior.py --gen_overlap_seq --gen_seg_table --frame_folder=<FULL PATH OF YOU STORED FRAME LEVEL PREDICTION> --method='median' --overlap=0.875 --num_workers=20
You can play with different postprocessing parameters. Here we just show the simplest condition onset=offset=threshold=0.5
See more details about postprocessing in the functions binarization and filtering in NeMo/nemo/collections/asr/parts/utils/vad_utils
"""
postprocessing_params = {"onset": 0.5, "offset": 0.5}
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--gen_overlap_seq", default=False, action='store_true')
parser.add_argument("--gen_seg_table", default=False, action='store_true')
parser.add_argument("--frame_folder", type=str, required=True)
parser.add_argument(
"--method",
type=str,
required=True,
help="Use mean/median for overlapped prediction. Use frame for gen_seg_table of frame prediction",
)
parser.add_argument("--overlap_out_dir", type=str)
parser.add_argument("--table_out_dir", type=str)
parser.add_argument("--overlap", type=float, default=0.875, help="Overlap percentatge. Default is 0.875")
parser.add_argument("--window_length_in_sec", type=float, default=0.63)
parser.add_argument("--shift_length_in_sec", type=float, default=0.01)
parser.add_argument("--num_workers", type=int, default=4)
args = parser.parse_args()
if args.gen_overlap_seq:
start = time.time()
logging.info("Generating predictions with overlapping input segments")
overlap_out_dir = generate_overlap_vad_seq(
frame_pred_dir=args.frame_folder,
smoothing_method=args.method,
overlap=args.overlap,
window_length_in_sec=args.window_length_in_sec,
shift_length_in_sec=args.shift_length_in_sec,
num_workers=args.num_workers,
out_dir=args.overlap_out_dir,
)
logging.info(
f"Finish generating predictions with overlapping input segments with smoothing_method={args.method} and overlap={args.overlap}"
)
end = time.time()
logging.info(f"Generate overlapped prediction takes {end-start:.2f} seconds!\n Save to {overlap_out_dir}")
if args.gen_seg_table:
start = time.time()
logging.info("Converting frame level prediction to speech/no-speech segment in start and end times format.")
frame_length_in_sec = args.shift_length_in_sec
if args.gen_overlap_seq:
logging.info("Use overlap prediction. Change if you want to use basic frame level prediction")
vad_pred_dir = overlap_out_dir
frame_length_in_sec = 0.01
else:
logging.info("Use basic frame level prediction")
vad_pred_dir = args.frame_folder
table_out_dir = generate_vad_segment_table(
vad_pred_dir=vad_pred_dir,
postprocessing_params=postprocessing_params,
frame_length_in_sec=frame_length_in_sec,
num_workers=args.num_workers,
out_dir=args.table_out_dir,
)
logging.info(f"Finish generating speech semgents table with postprocessing_params: {postprocessing_params}")
end = time.time()
logging.info(
f"Generating rttm-like tables for {vad_pred_dir} takes {end-start:.2f} seconds!\n Save to {table_out_dir}"
)
| NeMo-main | scripts/voice_activity_detection/vad_overlap_posterior.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import numpy as np
from nemo.collections.asr.parts.utils.vad_utils import vad_tune_threshold_on_dev
from nemo.utils import logging
"""
This script is designed for thresholds tuning for postprocessing of VAD
See details in the binarization and filtering functions in nemo/collections/asr/parts/utils/vad_utils.py
Usage:
python vad_tune_threshold.py \
--onset_range="0,1,0.2" --offset_range="0,1,0.2" --min_duration_on_range="0.1,0.8,0.05" --min_duration_off_range="0.1,0.8,0.05" --not_filter_speech_first \
--vad_pred=<FULL PATH OF FOLDER OF FRAME LEVEL PREDICTION FILES> \
    --groundtruth_RTTM=<DIRECTORY OF GROUNDTRUTH RTTM FILES OR A FILE CONTAINING THE PATHS OF THEM> \
--vad_pred_method="median"
"""
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--onset_range", help="range of onset in list 'START,END,STEP' to be tuned on", type=str)
parser.add_argument("--offset_range", help="range of offset in list 'START,END,STEP' to be tuned on", type=str)
parser.add_argument(
"--pad_onset_range",
help="range of pad_onset in list 'START,END,STEP' to be tuned on. pad_onset could be negative float",
type=str,
)
parser.add_argument(
"--pad_offset_range",
help="range of pad_offset in list 'START,END,STEP' to be tuned on. pad_offset could be negative float",
type=str,
)
parser.add_argument(
"--min_duration_on_range", help="range of min_duration_on in list 'START,END,STEP' to be tuned on", type=str
)
parser.add_argument(
"--min_duration_off_range", help="range of min_duration_off in list 'START,END,STEP' to be tuned on", type=str
)
parser.add_argument(
"--not_filter_speech_first",
help="Whether to filter short speech first during filtering, should be either True or False!",
action='store_true',
)
parser.add_argument(
"--vad_pred", help="Directory of vad predictions or a file contains the paths of them.", required=True
)
parser.add_argument(
"--groundtruth_RTTM",
help="Directory of groundtruch rttm files or a file contains the paths of them",
type=str,
required=True,
)
parser.add_argument(
"--result_file", help="Filename of txt to store results", default="res",
)
parser.add_argument(
"--vad_pred_method",
help="suffix of prediction file. Should be either in 'frame', 'mean' or 'median'",
required=True,
)
parser.add_argument(
"--focus_metric",
help="metrics we care most when tuning threshold. Should be either in 'DetER', 'FA', 'MISS' ",
type=str,
default='DetER',
)
parser.add_argument(
"--frame_length_in_sec", help="frame_length_in_sec ", type=float, default=0.01,
)
args = parser.parse_args()
params = {}
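    # Each provided range expands to a numpy grid, e.g. --onset_range="0,1,0.2" gives
    # params['onset'] = array([0. , 0.2, 0.4, 0.6, 0.8]); vad_tune_threshold_on_dev then sweeps these values.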
try:
        # If no range is provided for a parameter, the default value defined in the binarization and filtering functions in nemo/collections/asr/parts/utils/vad_utils.py is used
if args.onset_range:
start, stop, step = [float(i) for i in args.onset_range.split(",")]
onset = np.arange(start, stop, step)
params['onset'] = onset
if args.offset_range:
start, stop, step = [float(i) for i in args.offset_range.split(",")]
offset = np.arange(start, stop, step)
params['offset'] = offset
if args.pad_onset_range:
start, stop, step = [float(i) for i in args.pad_onset_range.split(",")]
pad_onset = np.arange(start, stop, step)
params['pad_onset'] = pad_onset
if args.pad_offset_range:
start, stop, step = [float(i) for i in args.pad_offset_range.split(",")]
pad_offset = np.arange(start, stop, step)
params['pad_offset'] = pad_offset
if args.min_duration_on_range:
start, stop, step = [float(i) for i in args.min_duration_on_range.split(",")]
min_duration_on = np.arange(start, stop, step)
params['min_duration_on'] = min_duration_on
if args.min_duration_off_range:
start, stop, step = [float(i) for i in args.min_duration_off_range.split(",")]
min_duration_off = np.arange(start, stop, step)
params['min_duration_off'] = min_duration_off
if args.not_filter_speech_first:
params['filter_speech_first'] = False
    except ValueError:
        raise ValueError(
            "Threshold input is invalid! Please enter it as 'START,STOP,STEP' for onset, offset, min_duration_on and min_duration_off, and enter True/False for filter_speech_first"
        )
    best_threshold, optimal_scores = vad_tune_threshold_on_dev(
params,
args.vad_pred,
args.groundtruth_RTTM,
args.result_file,
args.vad_pred_method,
args.focus_metric,
args.frame_length_in_sec,
)
logging.info(
f"Best combination of thresholds for binarization selected from input ranges is {best_threhsold}, and the optimal score is {optimal_scores}"
)
| NeMo-main | scripts/voice_activity_detection/vad_tune_threshold.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example Run Command: python make_supdata.py --ssl_model_ckpt_path <PATH TO CKPT> --manifest_paths <PATH TO MANIFEST>
import argparse
import json
import os
import time
from multiprocessing import Pool
from pathlib import Path
import hydra.utils
import librosa
import numpy as np
import torch
from omegaconf import open_dict
from tqdm import tqdm
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
from nemo.collections.tts.models import ssl_tts
from nemo.collections.tts.parts.utils.tts_dataset_utils import get_base_dir
from nemo.core.classes import Dataset
from nemo.utils import logging
class AudioDataset(Dataset):
def __init__(
self,
manifest_paths,
min_duration=0.5,
max_duration=16.0,
pad_multiple=1024,
sample_rate=22050,
sup_data_dir=None,
):
self.data = []
for manifest_path in manifest_paths:
with open(manifest_path, "r") as f:
for line in f:
record = json.loads(line)
if record['duration'] < min_duration or record['duration'] > max_duration:
continue
                    self.data.append(record)
self.base_data_dir = get_base_dir([item["audio_filepath"] for item in self.data])
if sup_data_dir is not None:
self.sup_data_dir = sup_data_dir
else:
self.sup_data_dir = os.path.join(self.base_data_dir, "sup_data")
if not os.path.exists(self.sup_data_dir):
os.makedirs(self.sup_data_dir)
self.pad_multiple = pad_multiple
self.sample_rate = sample_rate
def __len__(self):
return len(self.data)
def _get_wav_from_filepath(self, audio_filepath):
features = AudioSegment.segment_from_file(
audio_filepath, target_sr=self.sample_rate, n_segments=-1, trim=False,
)
audio_samples = features.samples
audio, audio_length = torch.tensor(audio_samples), torch.tensor(audio_samples.shape[0]).long()
# pad audio to a multiple of self.pad_multiple
if audio.shape[0] % self.pad_multiple != 0:
audio = torch.cat(
[audio, torch.zeros(self.pad_multiple - audio.shape[0] % self.pad_multiple, dtype=torch.float)]
)
audio_length = torch.tensor(audio.shape[0]).long()
return audio, audio_length
def pad_collate_fn(self, batch):
final_batch = {}
for row in batch:
for key in row:
if key not in final_batch:
final_batch[key] = []
final_batch[key].append(row[key])
max_audio_len = max([_audio_len.item() for _audio_len in final_batch["audio_len"]])
audios_padded = []
for audio in final_batch["audio"]:
audio_padded = torch.nn.functional.pad(audio, (0, max_audio_len - audio.size(0)), value=0)
audios_padded.append(audio_padded)
final_batch["audio"] = audios_padded
for key in final_batch:
if key not in ["rel_audio_path_as_text_id", "wav_path"]:
final_batch[key] = torch.stack(final_batch[key])
return final_batch
def __getitem__(self, index):
sample = self.data[index]
rel_audio_path = Path(sample["audio_filepath"]).relative_to(self.base_data_dir).with_suffix("")
rel_audio_path_as_text_id = str(rel_audio_path).replace("/", "_")
speaker = torch.tensor(sample["speaker"]).long()
audio, audio_length = self._get_wav_from_filepath(sample["audio_filepath"])
return {
"audio": audio,
"audio_len": audio_length,
"rel_audio_path_as_text_id": rel_audio_path_as_text_id,
"wav_path": sample["audio_filepath"],
"speaker": speaker,
}
def segment_wav(wav, segment_length, segment_hop_size, min_segment_length):
if len(wav) < segment_length:
pad = torch.zeros(segment_length - len(wav))
segment = torch.cat([wav, pad])
return [segment]
else:
si = 0
segments = []
while si < len(wav) - min_segment_length:
segment = wav[si : si + segment_length]
if len(segment) < segment_length:
pad = torch.zeros(segment_length - len(segment))
segment = torch.cat([segment, pad])
segments.append(segment)
si += segment_hop_size
return segments
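# Illustrative example: a 3 s clip at 22050 Hz (66150 samples) with segment_length=44100,
# segment_hop_size=22050 and min_segment_length=22050 yields two segments starting at samples
# 0 and 22050, the second of which is zero-padded to segment_length.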
def segment_batch(batch, segment_length=44100, segment_hop_size=22050, min_segment_length=22050):
all_segments = []
segment_indices = []
si = 0
for bidx in range(len(batch['audio'])):
audio = batch['audio'][bidx]
audio_length = batch['audio_len'][bidx]
audio_actual = audio[:audio_length]
audio_segments = segment_wav(audio_actual, segment_length, segment_hop_size, min_segment_length)
all_segments += audio_segments
segment_indices.append((si, si + len(audio_segments) - 1))
si += len(audio_segments)
return torch.stack(all_segments), segment_indices
def get_mel_spectrogram(fb, wav, stft_params):
EPSILON = 1e-9
window_fn = torch.hann_window
spec = torch.stft(
input=wav,
n_fft=stft_params['n_fft'], # 1024
hop_length=stft_params['hop_length'], # 256
win_length=stft_params['win_length'], # 1024
window=window_fn(stft_params['win_length'], periodic=False).to(torch.float).to('cuda') if window_fn else None,
return_complex=True,
center=True,
)
if spec.dtype in [torch.cfloat, torch.cdouble]:
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + EPSILON)
mel = torch.matmul(fb.to(spec.dtype), spec)
log_mel = torch.log(torch.clamp(mel, min=torch.finfo(mel.dtype).tiny))
return log_mel
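# Note: for a batched input wav of shape (B, T), the returned log-mel spectrogram has shape
# (B, n_mel, T // hop_length + 1), since torch.stft is called with center=True.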
def load_wav(wav_path, sample_rate=22050, pad_multiple=1024):
wav = AudioSegment.segment_from_file(wav_path, target_sr=sample_rate, n_segments=-1, trim=False,).samples
if wav.shape[0] % pad_multiple != 0:
wav = np.concatenate([wav, np.zeros(pad_multiple - wav.shape[0] % pad_multiple)])
wav = wav[:-1]
return wav
def save_pitch_contour(record):
wav_path = record['wav_path']
wav_text_id = record['wav_id']
sup_data_dir = record['sup_data_dir']
stft_params = record['stft_params']
wav = load_wav(wav_path, stft_params['sample_rate'], stft_params['pad_multiple'])
pitch_contour_fn = f"pitch_contour_{wav_text_id}.pt"
pitch_contour_fp = os.path.join(sup_data_dir, pitch_contour_fn)
f0, _, _ = librosa.pyin(
wav,
fmin=librosa.note_to_hz('C2'),
fmax=stft_params['yin_fmax'],
frame_length=stft_params['win_length'],
hop_length=stft_params['hop_length'],
sr=stft_params['sample_rate'],
center=True,
fill_na=0.0,
)
pitch_contour = torch.tensor(f0, dtype=torch.float32)
torch.save(pitch_contour, pitch_contour_fp)
logging.info("saved {}".format(pitch_contour_fp))
return pitch_contour
def compute_pitch_stats(records):
def _is_valid_pitch(pitch_mean, pitch_std):
c1 = pitch_mean > 0 and pitch_mean < 1000
c2 = pitch_std > 0 and pitch_std < 1000
return c1 and c2
speaker_wise_pitch_contours = {}
for item in records:
wav_id = item['wav_id']
speaker = item['speaker']
sup_data_dir = item['sup_data_dir']
pitch_contour_fn = f"pitch_contour_{wav_id}.pt"
pitch_contour_fp = os.path.join(sup_data_dir, pitch_contour_fn)
if speaker not in speaker_wise_pitch_contours:
speaker_wise_pitch_contours[speaker] = []
speaker_wise_pitch_contours[speaker].append(pitch_contour_fp)
speaker_pitch_stats = {}
for speaker in speaker_wise_pitch_contours:
non_zero_pc = []
for pitch_contour_fp in speaker_wise_pitch_contours[speaker][:50]:
pitch_contour = torch.load(pitch_contour_fp)
pitch_contour_nonzero = pitch_contour[pitch_contour != 0]
if len(pitch_contour_nonzero) > 0:
non_zero_pc.append(pitch_contour_nonzero)
if len(non_zero_pc) > 0:
non_zero_pc = torch.cat(non_zero_pc)
pitch_mean = non_zero_pc.mean().item()
pitch_std = non_zero_pc.std().item()
            valid = True
            if not _is_valid_pitch(pitch_mean, pitch_std):
                logging.warning("invalid pitch: {}".format(speaker))
                pitch_mean = 212.0
                pitch_std = 70.0
                valid = False
        else:
            logging.warning("could not find pitch contour for speaker {}".format(speaker))
            valid = False
            pitch_mean = 212.0
            pitch_std = 70.0
        speaker_pitch_stats[speaker] = {"pitch_mean": pitch_mean, "pitch_std": pitch_std, "valid": valid}
with open(os.path.join(sup_data_dir, "speaker_pitch_stats.json"), "w") as f:
json.dump(speaker_pitch_stats, f)
def main():
parser = argparse.ArgumentParser(description='Evaluate the model')
parser.add_argument(
'--ssl_model_ckpt_path', type=str, required=True,
)
parser.add_argument('--manifest_paths', type=str, required=True)
parser.add_argument('--sup_data_dir', type=str, default=None)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--ssl_content_emb_type', type=str, default="embedding_and_probs")
parser.add_argument('--use_unique_tokens', type=int, default=1)
parser.add_argument('--num_workers', type=int, default=8)
parser.add_argument('--pool_workers', type=int, default=30)
parser.add_argument('--compute_pitch_contours', type=int, default=1)
parser.add_argument('--num_pitch_per_speaker', type=int, default=None) # saves time.
parser.add_argument('--sample_rate', type=int, default=22050)
parser.add_argument('--pad_multiple', type=int, default=1024)
parser.add_argument('--ssl_downsampling_factor', type=int, default=4)
parser.add_argument('--stft_n_fft', type=int, default=1024)
parser.add_argument('--stft_hop_length', type=int, default=256)
parser.add_argument('--stft_win_length', type=int, default=1024)
parser.add_argument('--stft_n_mel', type=int, default=80)
parser.add_argument('--stft_fmin', type=int, default=0)
parser.add_argument('--stft_fmax', type=int, default=8000)
parser.add_argument('--yin_fmax', type=int, default=500)
parser.add_argument('--segment_length', type=int, default=44100)
parser.add_argument('--segment_hop_size', type=int, default=22050)
parser.add_argument('--min_segment_length', type=int, default=22050)
args = parser.parse_args()
device = "cuda:0" if torch.cuda.is_available() else "cpu"
manifest_paths = args.manifest_paths.split(",")
ssl_model_ckpt_path = args.ssl_model_ckpt_path
dataset = AudioDataset(
manifest_paths, pad_multiple=args.pad_multiple, sample_rate=args.sample_rate, sup_data_dir=args.sup_data_dir
)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=False,
collate_fn=dataset.pad_collate_fn,
num_workers=args.num_workers,
)
ssl_model = ssl_tts.SSLDisentangler.load_from_checkpoint(ssl_model_ckpt_path, strict=False)
with open_dict(ssl_model.cfg):
ssl_model.cfg.preprocessor.exact_pad = True
ssl_model.preprocessor = hydra.utils.instantiate(ssl_model.cfg.preprocessor)
ssl_model.preprocessor_disentangler = ssl_model.preprocessor
ssl_model.eval()
ssl_model.to(device)
sample_rate = args.sample_rate
stft_params = {
"n_fft": args.stft_n_fft,
"hop_length": args.stft_hop_length,
"win_length": args.stft_win_length,
"n_mel": args.stft_n_mel,
"sample_rate": sample_rate,
"pad_multiple": args.pad_multiple,
"fmin": args.stft_fmin,
"fmax": args.stft_fmax,
"yin_fmax": args.yin_fmax,
}
fb = (
torch.tensor(
librosa.filters.mel(
sr=sample_rate,
n_fft=stft_params['n_fft'],
n_mels=stft_params['n_mel'],
fmin=stft_params['fmin'],
fmax=stft_params['fmax'],
),
dtype=torch.float,
)
.unsqueeze(0)
.to(device)
)
st = time.time()
bidx = 0
wav_and_id_list = []
for batch in tqdm(dataloader):
bidx += 1
with torch.no_grad():
(
_,
_,
batch_content_embedding,
batch_content_log_probs,
batch_encoded_len,
) = ssl_model.forward_for_export(
input_signal=batch['audio'].to(device),
input_signal_length=batch['audio_len'].to(device),
normalize_content=True,
)
batch_mel_specs = get_mel_spectrogram(fb, batch['audio'][:, :-1].to(device), stft_params)
audio_segmented, segment_indices = segment_batch(
batch, args.segment_length, args.segment_hop_size, args.min_segment_length
)
audio_seg_len = torch.tensor([len(segment) for segment in audio_segmented]).to(device).long()
_, batch_speaker_embeddings, _, _, _ = ssl_model.forward_for_export(
input_signal=audio_segmented.to(device), input_signal_length=audio_seg_len, normalize_content=True,
)
for idx in range(batch['audio'].shape[0]):
_speaker = batch['speaker'][idx].item()
wav_path = batch['wav_path'][idx]
wav_id = batch['rel_audio_path_as_text_id'][idx]
wav_and_id_list.append((wav_path, wav_id, _speaker))
content_embedding = batch_content_embedding[idx].detach()
                content_log_probs = batch_content_log_probs[:, idx, :].detach()  # (content log prob is (t, b, c))
encoded_len = batch_encoded_len[idx].detach()
content_embedding = content_embedding[: encoded_len.item()]
content_embedding = content_embedding.t()
content_log_probs = content_log_probs[: encoded_len.item()]
content_log_probs = content_log_probs.t()
content_probs = torch.exp(content_log_probs)
duration = torch.ones(content_embedding.shape[1]) * args.ssl_downsampling_factor
bsi_start = segment_indices[idx][0]
bsi_end = segment_indices[idx][1]
speaker_embedding = torch.mean(batch_speaker_embeddings[bsi_start : bsi_end + 1], dim=0)
l2_norm = torch.norm(speaker_embedding, p=2)
speaker_embedding = speaker_embedding / l2_norm
if args.ssl_content_emb_type == "probs":
# content embedding is only character probabilities
final_content_embedding = content_probs
elif args.ssl_content_emb_type == "embedding":
# content embedding is only output of content head of SSL backbone
final_content_embedding = content_embedding
elif args.ssl_content_emb_type == "log_probs":
# content embedding is only log of character probabilities
final_content_embedding = content_log_probs
elif args.ssl_content_emb_type == "embedding_and_probs":
# content embedding is the concatenation of character probabilities and output of content head of SSL backbone
final_content_embedding = torch.cat([content_embedding, content_probs], dim=0)
if args.use_unique_tokens == 1:
# group content embeddings with same predicted token (by averaging) and add the durations of the grouped embeddings
# Eg. By default each content embedding corresponds to 4 frames of spectrogram (ssl_downsampling_factor)
# If we group 3 content embeddings, the duration of the grouped embedding will be 12 frames.
# This is useful for adapting the duration during inference based on the speaker.
token_predictions = torch.argmax(content_probs, dim=0)
content_buffer = [final_content_embedding[:, 0]]
unique_content_embeddings = []
unique_tokens = []
durations = []
for _t in range(1, final_content_embedding.shape[1]):
if token_predictions[_t] == token_predictions[_t - 1]:
content_buffer.append(final_content_embedding[:, _t])
else:
durations.append(len(content_buffer) * args.ssl_downsampling_factor)
unique_content_embeddings.append(torch.mean(torch.stack(content_buffer), dim=0))
content_buffer = [final_content_embedding[:, _t]]
unique_tokens.append(token_predictions[_t].item())
if len(content_buffer) > 0:
durations.append(len(content_buffer) * args.ssl_downsampling_factor)
unique_content_embeddings.append(torch.mean(torch.stack(content_buffer), dim=0))
unique_tokens.append(token_predictions[_t].item())
unique_content_embedding = torch.stack(unique_content_embeddings)
final_content_embedding = unique_content_embedding.t()
duration = torch.tensor(durations).float()
mel_len = int(batch['audio_len'][idx].item() / stft_params['hop_length'])
item_mel = batch_mel_specs[idx][:, :mel_len]
wav_text_id = batch["rel_audio_path_as_text_id"][idx]
content_emb_fn = f"{args.ssl_content_emb_type}_content_embedding_{wav_text_id}.pt"
speaker_emb_fn = f"speaker_embedding_{wav_text_id}.pt"
duration_fn = f"duration_embedding_{wav_text_id}.pt" # embedding just for namesake
content_emb_fp = os.path.join(dataset.sup_data_dir, content_emb_fn)
speaker_emb_fp = os.path.join(dataset.sup_data_dir, speaker_emb_fn)
duration_fp = os.path.join(dataset.sup_data_dir, duration_fn)
mel_spec_fn = f"mel_spec_{wav_text_id}.pt"
mel_spec_fp = os.path.join(dataset.sup_data_dir, mel_spec_fn)
torch.save(item_mel.cpu(), mel_spec_fp)
torch.save(final_content_embedding.cpu(), content_emb_fp)
torch.save(speaker_embedding.cpu(), speaker_emb_fp)
torch.save(duration.cpu(), duration_fp)
et = time.time()
logging.info(
"Processed Batch {} of {} | Time per batch: {:.4f} s".format(
                bidx, len(dataloader), (et - st) / bidx
)
)
if args.compute_pitch_contours == 1:
speaker_wise_records = {}
for row in wav_and_id_list:
wav_path, wav_id, speaker = row
if speaker not in speaker_wise_records:
speaker_wise_records[speaker] = []
speaker_wise_records[speaker].append(
{
"wav_path": wav_path,
"wav_id": wav_id,
"sup_data_dir": dataset.sup_data_dir,
"stft_params": stft_params,
"speaker": speaker,
}
)
filtered_records = []
for speaker in speaker_wise_records:
if args.num_pitch_per_speaker is not None:
filtered_records += speaker_wise_records[speaker][: args.num_pitch_per_speaker]
else:
filtered_records += speaker_wise_records[speaker]
with Pool(args.pool_workers) as p:
p.map(save_pitch_contour, filtered_records)
compute_pitch_stats(filtered_records)
if __name__ == '__main__':
main()
| NeMo-main | scripts/ssl_tts/make_supdata.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Example Run Command: python ssl_tts_vc.py --ssl_model_ckpt_path <PATH TO CKPT> --hifi_ckpt_path <PATH TO CKPT> \
# --fastpitch_ckpt_path <PATH TO CKPT> --source_audio_path <SOURCE CONTENT WAV PATH> --target_audio_path \
# <TARGET SPEAKER WAV PATH> --out_path <PATH TO OUTPUT WAV>
import argparse
import os
import librosa
import soundfile
import torch
from nemo.collections.asr.parts.preprocessing.features import WaveformFeaturizer
from nemo.collections.tts.models import fastpitch_ssl, hifigan, ssl_tts
from nemo.collections.tts.parts.utils.tts_dataset_utils import get_base_dir
def load_wav(wav_path, wav_featurizer, pad_multiple=1024):
wav = wav_featurizer.process(wav_path)
if (wav.shape[0] % pad_multiple) != 0:
wav = torch.cat([wav, torch.zeros(pad_multiple - wav.shape[0] % pad_multiple, dtype=torch.float)])
wav = wav[:-1]
return wav
def get_pitch_contour(wav, pitch_mean=None, pitch_std=None, compute_mean_std=False, sample_rate=22050):
f0, _, _ = librosa.pyin(
wav.numpy(),
fmin=librosa.note_to_hz('C2'),
fmax=librosa.note_to_hz('C7'),
frame_length=1024,
hop_length=256,
sr=sample_rate,
center=True,
fill_na=0.0,
)
pitch_contour = torch.tensor(f0, dtype=torch.float32)
_pitch_mean = pitch_contour.mean().item()
_pitch_std = pitch_contour.std().item()
if compute_mean_std:
pitch_mean = _pitch_mean
pitch_std = _pitch_std
if (pitch_mean is not None) and (pitch_std is not None):
pitch_contour = pitch_contour - pitch_mean
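        # pyin fills unvoiced frames with 0 (fill_na=0.0); after mean subtraction they equal
        # -pitch_mean, so reset them to 0 before dividing by the std.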
pitch_contour[pitch_contour == -pitch_mean] = 0.0
pitch_contour = pitch_contour / pitch_std
return pitch_contour
def segment_wav(wav, segment_length=44100, hop_size=44100, min_segment_size=22050):
if len(wav) < segment_length:
pad = torch.zeros(segment_length - len(wav))
segment = torch.cat([wav, pad])
return [segment]
else:
si = 0
segments = []
while si < len(wav) - min_segment_size:
segment = wav[si : si + segment_length]
if len(segment) < segment_length:
pad = torch.zeros(segment_length - len(segment))
segment = torch.cat([segment, pad])
segments.append(segment)
si += hop_size
return segments
def get_speaker_embedding(ssl_model, wav_featurizer, audio_paths, duration=None, device="cpu"):
all_segments = []
all_wavs = []
for audio_path in audio_paths:
wav = load_wav(audio_path, wav_featurizer)
segments = segment_wav(wav)
all_segments += segments
all_wavs.append(wav)
if duration is not None and len(all_segments) >= duration:
            # Each segment is 2 seconds; with a 1 second hop, 10 segments would cover
            # 0-2 s, 1-3 s, ..., 9-11 s (about 11 seconds of audio).
all_segments = all_segments[: int(duration)]
break
signal_batch = torch.stack(all_segments)
signal_length_batch = torch.stack([torch.tensor(signal_batch.shape[1]) for _ in range(len(all_segments))])
signal_batch = signal_batch.to(device)
signal_length_batch = signal_length_batch.to(device)
_, speaker_embeddings, _, _, _ = ssl_model.forward_for_export(
input_signal=signal_batch, input_signal_length=signal_length_batch, normalize_content=True
)
speaker_embedding = torch.mean(speaker_embeddings, dim=0)
l2_norm = torch.norm(speaker_embedding, p=2)
speaker_embedding = speaker_embedding / l2_norm
return speaker_embedding[None]
def get_ssl_features_disentangled(
ssl_model, wav_featurizer, audio_path, emb_type="embedding_and_probs", use_unique_tokens=False, device="cpu"
):
"""
Extracts content embedding, speaker embedding and duration tokens to be used as inputs for FastPitchModel_SSL
synthesizer. Content embedding and speaker embedding extracted using SSLDisentangler model.
Args:
ssl_model: SSLDisentangler model
wav_featurizer: WaveformFeaturizer object
audio_path: path to audio file
emb_type: Can be one of embedding_and_probs, embedding, probs, log_probs
        use_unique_tokens: If True, consecutive content embeddings with the same predicted token are averaged into one embedding and their durations are summed.
device: device to run the model on
Returns:
content_embedding, speaker_embedding, duration
"""
wav = load_wav(audio_path, wav_featurizer)
audio_signal = wav[None]
audio_signal_length = torch.tensor([wav.shape[0]])
audio_signal = audio_signal.to(device)
audio_signal_length = audio_signal_length.to(device)
_, speaker_embedding, content_embedding, content_log_probs, encoded_len = ssl_model.forward_for_export(
input_signal=audio_signal, input_signal_length=audio_signal_length, normalize_content=True
)
content_embedding = content_embedding[0, : encoded_len[0].item()]
content_log_probs = content_log_probs[: encoded_len[0].item(), 0, :]
content_embedding = content_embedding.t()
content_log_probs = content_log_probs.t()
content_probs = torch.exp(content_log_probs)
ssl_downsampling_factor = ssl_model._cfg.encoder.subsampling_factor
if emb_type == "probs":
# content embedding is only character probabilities
final_content_embedding = content_probs
elif emb_type == "embedding":
# content embedding is only output of content head of SSL backbone
final_content_embedding = content_embedding
elif emb_type == "log_probs":
# content embedding is only log of character probabilities
final_content_embedding = content_log_probs
elif emb_type == "embedding_and_probs":
# content embedding is the concatenation of character probabilities and output of content head of SSL backbone
final_content_embedding = torch.cat([content_embedding, content_probs], dim=0)
else:
raise ValueError(
f"{emb_type} is not valid. Valid emb_type includes probs, embedding, log_probs or embedding_and_probs."
)
duration = torch.ones(final_content_embedding.shape[1]) * ssl_downsampling_factor
if use_unique_tokens:
# group content embeddings with same predicted token (by averaging) and add the durations of the grouped embeddings
# Eg. By default each content embedding corresponds to 4 frames of spectrogram (ssl_downsampling_factor)
# If we group 3 content embeddings, the duration of the grouped embedding will be 12 frames.
# This is useful for adapting the duration during inference based on the speaker.
token_predictions = torch.argmax(content_probs, dim=0)
content_buffer = [final_content_embedding[:, 0]]
unique_content_embeddings = []
unique_tokens = []
durations = []
for _t in range(1, final_content_embedding.shape[1]):
if token_predictions[_t] == token_predictions[_t - 1]:
content_buffer.append(final_content_embedding[:, _t])
else:
durations.append(len(content_buffer) * ssl_downsampling_factor)
unique_content_embeddings.append(torch.mean(torch.stack(content_buffer), dim=0))
content_buffer = [final_content_embedding[:, _t]]
unique_tokens.append(token_predictions[_t].item())
if len(content_buffer) > 0:
durations.append(len(content_buffer) * ssl_downsampling_factor)
unique_content_embeddings.append(torch.mean(torch.stack(content_buffer), dim=0))
unique_tokens.append(token_predictions[_t].item())
unique_content_embedding = torch.stack(unique_content_embeddings)
final_content_embedding = unique_content_embedding.t()
duration = torch.tensor(durations).float()
duration = duration.to(device)
return final_content_embedding[None], speaker_embedding, duration[None]
def main():
parser = argparse.ArgumentParser(description='Evaluate the model')
parser.add_argument('--ssl_model_ckpt_path', type=str)
parser.add_argument('--hifi_ckpt_path', type=str)
parser.add_argument('--fastpitch_ckpt_path', type=str)
parser.add_argument('--source_audio_path', type=str)
    parser.add_argument('--target_audio_path', type=str)  # can be a list separated by commas
parser.add_argument('--out_path', type=str)
parser.add_argument('--source_target_out_pairs', type=str)
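    # Each line of the (optional) pairs file is "<source.wav>;<target1.wav>,<target2.wav>;<output.wav>",
    # i.e. fields separated by ';' with multiple target-speaker wavs separated by ','.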
parser.add_argument('--use_unique_tokens', type=int, default=0)
parser.add_argument('--compute_pitch', type=int, default=0)
parser.add_argument('--compute_duration', type=int, default=0)
args = parser.parse_args()
device = "cuda:0" if torch.cuda.is_available() else "cpu"
if args.source_target_out_pairs is not None:
assert args.source_audio_path is None, "source_audio_path and source_target_out_pairs are mutually exclusive"
assert args.target_audio_path is None, "target_audio_path and source_target_out_pairs are mutually exclusive"
assert args.out_path is None, "out_path and source_target_out_pairs are mutually exclusive"
with open(args.source_target_out_pairs, "r") as f:
lines = f.readlines()
source_target_out_pairs = [line.strip().split(";") for line in lines]
else:
assert args.source_audio_path is not None, "source_audio_path is required"
assert args.target_audio_path is not None, "target_audio_path is required"
if args.out_path is None:
source_name = os.path.basename(args.source_audio_path).split(".")[0]
target_name = os.path.basename(args.target_audio_path).split(".")[0]
args.out_path = "swapped_{}_{}.wav".format(source_name, target_name)
source_target_out_pairs = [(args.source_audio_path, args.target_audio_path, args.out_path)]
out_paths = [r[2] for r in source_target_out_pairs]
out_dir = get_base_dir(out_paths)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
ssl_model = ssl_tts.SSLDisentangler.load_from_checkpoint(args.ssl_model_ckpt_path, strict=False)
ssl_model = ssl_model.to(device)
ssl_model.eval()
vocoder = hifigan.HifiGanModel.load_from_checkpoint(args.hifi_ckpt_path).to(device)
vocoder.eval()
fastpitch_model = fastpitch_ssl.FastPitchModel_SSL.load_from_checkpoint(args.fastpitch_ckpt_path, strict=False)
fastpitch_model = fastpitch_model.to(device)
fastpitch_model.eval()
fastpitch_model.non_trainable_models = {'vocoder': vocoder}
fpssl_sample_rate = fastpitch_model._cfg.sample_rate
wav_featurizer = WaveformFeaturizer(sample_rate=fpssl_sample_rate, int_values=False, augmentor=None)
use_unique_tokens = args.use_unique_tokens == 1
compute_pitch = args.compute_pitch == 1
compute_duration = args.compute_duration == 1
for source_target_out in source_target_out_pairs:
source_audio_path = source_target_out[0]
target_audio_paths = source_target_out[1].split(",")
out_path = source_target_out[2]
with torch.no_grad():
content_embedding1, _, duration1 = get_ssl_features_disentangled(
ssl_model,
wav_featurizer,
source_audio_path,
emb_type="embedding_and_probs",
use_unique_tokens=use_unique_tokens,
device=device,
)
speaker_embedding2 = get_speaker_embedding(
ssl_model, wav_featurizer, target_audio_paths, duration=None, device=device
)
pitch_contour1 = None
if not compute_pitch:
pitch_contour1 = get_pitch_contour(
load_wav(source_audio_path, wav_featurizer), compute_mean_std=True, sample_rate=fpssl_sample_rate
)[None]
pitch_contour1 = pitch_contour1.to(device)
wav_generated = fastpitch_model.generate_wav(
content_embedding1,
speaker_embedding2,
pitch_contour=pitch_contour1,
compute_pitch=compute_pitch,
compute_duration=compute_duration,
durs_gt=duration1,
dataset_id=0,
)
wav_generated = wav_generated[0][0]
soundfile.write(out_path, wav_generated, fpssl_sample_rate)
if __name__ == "__main__":
main()
| NeMo-main | scripts/ssl_tts/ssl_tts_vc.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python get_openslr_rir_data.py --data_root=<where to put data>
# Data is downloaded from OpenSLR's "Room Impulse Response and Noise Database"
# RIRs in multichannel files are separated into single channel files and
# a json file that can be used as in input to NeMo is created
import argparse
import glob
import json
import logging
import os
import subprocess
import urllib.request
from shutil import copy, move
from zipfile import ZipFile
from tqdm import tqdm
parser = argparse.ArgumentParser(description="OpenSLR RIR Data download and process")
parser.add_argument("--data_root", required=True, default=None, type=str)
args = parser.parse_args()
URLS = {
"SLR28": ("http://www.openslr.org/resources/28/rirs_noises.zip"),
}
def __retrieve_with_progress(source: str, filename: str):
"""
Downloads source to destination
Displays progress bar
Args:
source: url of resource
        filename: local filepath
Returns:
"""
with open(filename, "wb") as f:
response = urllib.request.urlopen(source)
total = response.length
if total is None:
            f.write(response.read())
else:
with tqdm(total=total, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
for data in response:
f.write(data)
pbar.update(len(data))
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
source = URLS[source]
if not os.path.exists(destination):
logging.info("{0} does not exist. Downloading ...".format(destination))
__retrieve_with_progress(source, filename=destination + ".tmp")
os.rename(destination + ".tmp", destination)
logging.info("Downloaded {0}.".format(destination))
else:
logging.info("Destination {0} exists. Skipping.".format(destination))
return destination
def __extract_file(filepath: str, data_dir: str):
try:
with ZipFile(filepath, "r") as zipObj:
zipObj.extractall(data_dir)
except Exception:
logging.info("Not extracting. Maybe already there?")
def __process_data(data_folder: str, dst_folder: str, manifest_file: str):
"""
    Splits multi-channel RIR files into single-channel wav files and builds the manifest's json
    Args:
        data_folder: source folder with the extracted RIR data
        dst_folder: where processed wav files will be stored
manifest_file: where to store manifest
Returns:
"""
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
real_rir_list = os.path.join(data_folder, "RIRS_NOISES", "real_rirs_isotropic_noises", "rir_list")
rirfiles = []
with open(real_rir_list, "r") as rir_f:
for line in rir_f:
rirfiles.append(os.path.join(data_folder, line.rstrip().split(" ")[4]))
real_rir_folder = os.path.join(dst_folder, "real_rirs")
if not os.path.exists(real_rir_folder):
os.makedirs(real_rir_folder)
# split multi-channel rir files to single channel
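    # (sox's "remix <n>" writes channel <n> of a multi-channel file out as a mono wav)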
for rir_f in rirfiles:
n_chans = int(subprocess.check_output("soxi -c {0}".format(rir_f), shell=True))
if n_chans == 1:
copy(rir_f, real_rir_folder)
else:
for chan in range(1, n_chans + 1):
chan_file_name = os.path.join(
real_rir_folder, os.path.splitext(os.path.basename(rir_f))[0] + "-" + str(chan) + ".wav",
)
_ = subprocess.check_output(f"sox {rir_f} {chan_file_name} remix {chan}", shell=True)
# move simulated rirs to processed
if not os.path.exists(os.path.join(dst_folder, "simulated_rirs")):
move(os.path.join(data_folder, "RIRS_NOISES", "simulated_rirs"), dst_folder)
os.chdir(dst_folder)
all_rirs = glob.glob("**/*.wav", recursive=True)
with open(manifest_file, "w") as man_f:
entry = {}
for rir in all_rirs:
rir_file = os.path.join(dst_folder, rir)
duration = subprocess.check_output("soxi -D {0}".format(rir_file), shell=True)
entry["audio_filepath"] = rir_file
entry["duration"] = float(duration)
entry["offset"] = 0
entry["text"] = "_"
man_f.write(json.dumps(entry) + "\n")
print("Done!")
def main():
data_root = os.path.abspath(args.data_root)
data_set = "slr28"
logging.getLogger().setLevel(logging.INFO)
logging.info("\n\nWorking on: {0}".format(data_set))
filepath = os.path.join(data_root, data_set + ".zip")
logging.info("Getting {0}".format(data_set))
__maybe_download_file(filepath, data_set.upper())
logging.info("Extracting {0}".format(data_set))
__extract_file(filepath, data_root)
logging.info("Processing {0}".format(data_set))
__process_data(
data_root,
os.path.join(os.path.join(data_root, "processed")),
os.path.join(os.path.join(data_root, "processed", "rir.json")),
)
logging.info("Done!")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/get_openslr_rir_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python get_aishell_data.py --data_root=<where to put data>
import argparse
import json
import logging
import os
import subprocess
import tarfile
import urllib.request
from tqdm import tqdm
parser = argparse.ArgumentParser(description="Aishell Data download")
parser.add_argument("--data_root", required=True, default=None, type=str)
args = parser.parse_args()
URL = {"data_aishell": "http://www.openslr.org/resources/33/data_aishell.tgz"}
def __retrieve_with_progress(source: str, filename: str):
"""
Downloads source to destination
Displays progress bar
Args:
source: url of resource
        filename: local filepath
Returns:
"""
with open(filename, "wb") as f:
response = urllib.request.urlopen(source)
total = response.length
if total is None:
            f.write(response.read())
else:
with tqdm(total=total, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
for data in response:
f.write(data)
pbar.update(len(data))
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
source = URL[source]
if not os.path.exists(destination):
logging.info("{0} does not exist. Downloading ...".format(destination))
__retrieve_with_progress(source, filename=destination + ".tmp")
os.rename(destination + ".tmp", destination)
logging.info("Downloaded {0}.".format(destination))
else:
logging.info("Destination {0} exists. Skipping.".format(destination))
return destination
def __extract_all_files(filepath: str, data_root: str, data_dir: str):
if not os.path.exists(data_dir):
extract_file(filepath, data_root)
audio_dir = os.path.join(data_dir, "wav")
for subfolder, _, filelist in os.walk(audio_dir):
for ftar in filelist:
extract_file(os.path.join(subfolder, ftar), subfolder)
else:
logging.info("Skipping extracting. Data already there %s" % data_dir)
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info("Not extracting. Maybe already there?")
def __process_data(data_folder: str, dst_folder: str):
"""
To generate manifest
Args:
data_folder: source with wav files
dst_folder: where manifest files will be stored
Returns:
"""
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
transcript_file = os.path.join(data_folder, "transcript", "aishell_transcript_v0.8.txt")
transcript_dict = {}
with open(transcript_file, "r", encoding="utf-8") as f:
for line in f:
line = line.strip()
audio_id, text = line.split(" ", 1)
# remove white space
text = text.replace(" ", "")
transcript_dict[audio_id] = text
data_types = ["train", "dev", "test"]
vocab_count = {}
for dt in data_types:
json_lines = []
audio_dir = os.path.join(data_folder, "wav", dt)
for sub_folder, _, file_list in os.walk(audio_dir):
for fname in file_list:
audio_path = os.path.join(sub_folder, fname)
                audio_id = os.path.splitext(fname)[0]
if audio_id not in transcript_dict:
continue
text = transcript_dict[audio_id]
for li in text:
vocab_count[li] = vocab_count.get(li, 0) + 1
duration = subprocess.check_output("soxi -D {0}".format(audio_path), shell=True)
duration = float(duration)
json_lines.append(
json.dumps(
{"audio_filepath": os.path.abspath(audio_path), "duration": duration, "text": text,},
ensure_ascii=False,
)
)
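        # Each manifest line is a JSON object such as (hypothetical values):
        # {"audio_filepath": "/data/data_aishell/wav/train/S0002/BAC009S0002W0122.wav", "duration": 4.23, "text": "..."}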
manifest_path = os.path.join(dst_folder, dt + ".json")
with open(manifest_path, "w", encoding="utf-8") as fout:
for line in json_lines:
fout.write(line + "\n")
vocab = sorted(vocab_count.items(), key=lambda k: k[1], reverse=True)
vocab_file = os.path.join(dst_folder, "vocab.txt")
with open(vocab_file, "w", encoding="utf-8") as f:
for v, c in vocab:
f.write(v + "\n")
def main():
data_root = args.data_root
data_set = "data_aishell"
logging.info("\n\nWorking on: {0}".format(data_set))
file_path = os.path.join(data_root, data_set + ".tgz")
logging.info("Getting {0}".format(data_set))
__maybe_download_file(file_path, data_set)
logging.info("Extracting {0}".format(data_set))
data_folder = os.path.join(data_root, data_set)
__extract_all_files(file_path, data_root, data_folder)
logging.info("Processing {0}".format(data_set))
__process_data(data_folder, data_folder)
logging.info("Done!")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/get_aishell_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE:
# python fisher_audio_to_wav.py \
# --data_root=<FisherEnglishTrainingSpeech root> \
# --dest_root=<destination dir root>
#
# Converts all .sph audio files in the Fisher dataset to .wav.
# Requires sph2pipe to be installed.
import argparse
import concurrent.futures
import glob
import logging
import os
import subprocess
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Convert Fisher .sph to .wav')
parser.add_argument(
"--data_root", default=None, type=str, required=True, help="The path to the root Fisher dataset folder.",
)
parser.add_argument(
"--dest_root", default=None, type=str, required=True, help="Path to the destination root directory.",
)
args = parser.parse_args()
def __convert_audio(in_path, out_path):
"""
    Helper function run in a worker process; converts one sph file to wav.
Args:
in_path: source sph file to convert
out_path: destination for wav file
"""
cmd = ["sph2pipe", "-f", "wav", "-p", in_path, out_path]
subprocess.run(cmd)
def __process_set(data_root, dst_root):
"""
Finds and converts all sph audio files in the given directory to wav.
Args:
        data_root: glob pattern matching the sph files to convert
dst_root: where wav files will be stored
"""
sph_list = glob.glob(data_root)
if not os.path.exists(dst_root):
os.makedirs(dst_root)
# Set up and execute concurrent audio conversion
tp = concurrent.futures.ProcessPoolExecutor(max_workers=64)
futures = []
for sph_path in tqdm(sph_list, desc="Submitting sph futures", unit="file"):
audio_id, _ = os.path.splitext(os.path.basename(sph_path))
out_path = os.path.join(dst_root, "{}.wav".format(audio_id))
futures.append(tp.submit(__convert_audio, sph_path, out_path))
pbar = tqdm(total=len(sph_list), desc="Converting sph files", unit="file")
count = 0
for f in concurrent.futures.as_completed(futures):
count += 1
pbar.update()
tp.shutdown()
pbar.close()
def main():
data_root = args.data_root
dest_root = args.dest_root
logging.info("\n\nConverting audio for Part 1")
__process_set(
os.path.join(data_root, "LDC2004S13-Part1", "fisher_eng_tr_sp_d*", "audio", "*", "*.sph",),
os.path.join(dest_root, "LDC2004S13-Part1", "audio_wav"),
)
logging.info("\n\nConverting audio for Part 2")
__process_set(
os.path.join(data_root, "LDC2005S13-Part2", "fe_03_p2_sph*", "audio", "*", "*.sph",),
os.path.join(dest_root, "LDC2005S13-Part2", "audio_wav"),
)
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/fisher_audio_to_wav.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python process_aishell2_data.py
# --audio_folder=<source data>
# --dest_folder=<where to store the results>
import argparse
import json
import os
import subprocess
parser = argparse.ArgumentParser(description="Processing Aishell2 Data")
parser.add_argument("--audio_folder", default=None, type=str, required=True, help="Audio (wav) data directory.")
parser.add_argument("--dest_folder", default=None, type=str, required=True, help="Destination directory.")
args = parser.parse_args()
def __process_data(data_folder: str, dst_folder: str):
"""
    Generate manifest files for the dev/test/train splits.
Args:
data_folder: source with wav files
dst_folder: where manifest files will be stored
Returns:
"""
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
data_type = ['dev', 'test', 'train']
for data in data_type:
dst_file = os.path.join(dst_folder, data + ".json")
        utterances = []
wav_dir = os.path.join(data_folder, "wav", data)
transcript_file = os.path.join(data_folder, "transcript", data, "trans.txt")
trans_text = {}
with open(transcript_file, "r", encoding='utf-8') as f:
for line in f:
line = line.strip().split()
utterance_id, text = line[0], " ".join(line[1:])
trans_text[utterance_id] = text.upper()
session_list = os.listdir(wav_dir)
for sessions in session_list:
cur_dir = os.path.join(wav_dir, sessions)
for wavs in os.listdir(cur_dir):
                audio_id = os.path.splitext(wavs)[0]
audio_filepath = os.path.abspath(os.path.join(cur_dir, wavs))
duration = subprocess.check_output('soxi -D {0}'.format(audio_filepath), shell=True)
duration = float(duration)
text = trans_text[audio_id]
                utterances.append(
json.dumps(
{"audio_filepath": audio_filepath, "duration": duration, "text": text}, ensure_ascii=False
)
)
with open(dst_file, "w") as f:
            for line in utterances:
f.write(line + "\n")
def __get_vocab(data_folder: str, des_dir: str):
"""
To generate the vocabulary file
Args:
data_folder: source with the transcript file
dst_folder: where the file will be stored
Returns:
"""
if not os.path.exists(des_dir):
os.makedirs(des_dir)
trans_file = os.path.join(data_folder, "transcript", "train", "trans.txt")
vocab_dict = {}
with open(trans_file, "r", encoding='utf-8') as f:
for line in f:
line = line.strip().split()
text = " ".join(line[1:])
for i in text.upper():
if i in vocab_dict:
vocab_dict[i] += 1
else:
vocab_dict[i] = 1
vocab_dict = sorted(vocab_dict.items(), key=lambda k: k[1], reverse=True)
    vocab_file = os.path.join(des_dir, "vocab.txt")
    with open(vocab_file, "w", encoding='utf-8') as vocab:
        for k in vocab_dict:
            vocab.write(k[0] + "\n")
def main():
source_data = args.audio_folder
des_dir = args.dest_folder
print("begin to process data...")
__process_data(source_data, des_dir)
__get_vocab(source_data, des_dir)
print("finish all!")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/process_aishell2_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE:
# python process_fisher_data.py \
# --audio_root=<audio (.wav) directory>
# --transcript_root=<LDC Fisher dataset directory> \
# --dest_root=<destination directory> \
# --data_sets=LDC2004S13-Part1,LDC2005S13-Part2 \
# --remove_noises
#
# Matches Fisher dataset transcripts to the corresponding audio file (.wav),
# and slices them into min_slice_duration segments with one speaker.
# Also performs some other processing on transcripts.
#
# Heavily derived from Patter's Fisher processing script.
import argparse
import glob
import json
import os
import re
from math import ceil, floor
import numpy as np
import scipy.io.wavfile as wavfile
from tqdm import tqdm
parser = argparse.ArgumentParser(description="Fisher Data Processing")
parser.add_argument(
"--audio_root", default=None, type=str, required=True, help="The path to the root of the audio (wav) data folder.",
)
parser.add_argument(
"--transcript_root",
default=None,
type=str,
required=True,
help="The path to the root of the transcript data folder.",
)
parser.add_argument(
"--dest_root", default=None, type=str, required=True, help="Path to the destination root directory.",
)
# Optional arguments
parser.add_argument(
"--min_slice_duration", default=10.0, type=float, help="Minimum audio slice duration after processing.",
)
parser.add_argument(
"--keep_low_conf", action="store_true", help="Keep all utterances with low confidence transcripts",
)
parser.add_argument(
"--remove_noises", action="store_true", help="Removes transcripted noises such as [laughter].",
)
parser.add_argument(
"--noises_to_emoji", action="store_true", help="Converts transcripts for noises to an emoji character.",
)
args = parser.parse_args()
# Total number of files before segmenting, and train/val/test splits
NUM_FILES = 5850 + 5849
TRAIN_END_IDX = int(NUM_FILES * 0.8)
VAL_END_IDX = int(NUM_FILES * 0.9)
# Known transcription errors and their fixes (from Mozilla)
TRANSCRIPT_BUGS = {
"fe_03_00265-B-3353-3381": "correct",
"fe_03_00991-B-52739-52829": "that's one of those",
"fe_03_10282-A-34442-34484.wav": "they don't want",
"fe_03_10677-B-10104-10641": "uh my mine yeah the german shepherd "
+ "pitbull mix he snores almost as loud "
+ "as i do",
"fe_03_00027-B-39380-39405": None,
"fe_03_11487-B-3109-23406": None,
"fe_03_01326-A-30742-30793": None,
}
TRANSCRIPT_NUMBERS = {
"401k": "four o one k",
"f16": "f sixteen",
"m16": "m sixteen",
"ak47": "a k forty seven",
"v8": "v eight",
"y2k": "y two k",
"mp3": "m p three",
"vh1": "v h one",
"90210": "nine o two one o",
"espn2": "e s p n two",
"u2": "u two",
"dc3s": "d c threes",
"book 2": "book two",
"s2b": "s two b",
"3d": "three d",
}
TAG_MAP = {
"[laughter]": "🤣",
"[laugh]": "🤣",
"[noise]": "😕",
"[sigh]": "😕",
"[cough]": "😕",
"[mn]": "😕",
"[breath]": "😕",
"[lipsmack]": "😕",
"[[skip]]": "",
"[pause]": "",
"[sneeze]": "😕",
}
def __write_sample(dest, file_id, count, file_count, sample_rate, audio, duration, transcript):
"""
Writes one slice to the given target directory.
Args:
dest: the destination directory
file_id: name of the transcript/audio file for this block
count: the count of segments in the file so far
        file_count: the total number of files processed so far
        sample_rate: sample rate of the audio data
audio: audio data of the current sample
duration: audio duration of the current sample
transcript: transcript of the current sample
"""
partition = __partition_name(file_count)
audio_path = os.path.join(dest, partition, f"{file_id}_{count:03}.wav")
# Write audio
wavfile.write(audio_path, sample_rate, audio)
# Write transcript info
transcript = {
"audio_filepath": audio_path,
"duration": duration,
"text": transcript,
}
# Append to manifest
manifest_path = os.path.join(dest, f"manifest_{partition}.json")
with open(manifest_path, 'a') as f:
json.dump(transcript, f)
f.write('\n')
def __normalize(utt):
replace_table = str.maketrans(dict.fromkeys('()*;:"!&{},.-?'))
utt = (
utt.lower()
.replace('[uh]', 'uh')
.replace('[um]', 'um')
.replace('<noise>', '[noise]')
.replace('<spoken_noise>', '[vocalized-noise]')
.replace('.period', 'period')
.replace('.dot', 'dot')
.replace('-hyphen', 'hyphen')
.replace('._', ' ')
.translate(replace_table)
)
utt = re.sub(r"'([a-z]+)'", r'\1', utt) # Unquote quoted words
return utt
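# A rough illustration of what __normalize does (the example text is invented,
# shown here only to make the cleanup rules concrete):
#   __normalize("[UH] I SAID 'HELLO' <noise>.")
#   -> "uh i said hello [noise]"
# i.e. lowercasing, tag rewrites, punctuation stripping, and unquoting of
# quoted words.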
def __process_utterance(file_id, trans_path, line, keep_low_conf, rem_noises, emojify):
"""
Processes one utterance (one line of a transcript).
Args:
file_id: the ID of the transcript file
trans_path: transcript path
line: one line in the transcript file
keep_low_conf: whether to keep low confidence lines
rem_noises: whether to remove noise symbols
emojify: whether to convert noise symbols to emoji, lower precedence
"""
# Check for lines to skip (comments, empty, low confidence)
if line.startswith('#') or not line.strip() or (not keep_low_conf and '((' in line):
return None, None, None, None
# Data and sanity checks
line = line.split()
t_start, t_end = float(line[0]), float(line[1])
if (t_start < 0) or (t_end < t_start):
print(f"Invalid time: {t_start} to {t_end} in {trans_path}")
return None, None, None, None
channel = line[2]
idx = 0 if line[2] == 'A:' else 1
if channel not in ('A:', 'B:'):
print(f"Could not read channel info ({channel}) in {trans_path}")
return None, None, None, None
# Replacements as necessary
line_id = '-'.join([file_id, channel[0], str(t_start * 10), str(t_end * 10)])
content = TRANSCRIPT_BUGS.get(line_id, ' '.join(line[3:]))
if content is None:
return None, None, None, None
for tag, newtag in TRANSCRIPT_NUMBERS.items():
content = content.replace(tag, newtag)
content = __normalize(content)
if rem_noises:
for k, _ in TAG_MAP.items():
content = content.replace(k, '')
elif emojify:
for k, v in TAG_MAP.items():
content = content.replace(k, v)
return t_start, t_end, idx, content
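# Hedged example of the per-line parsing above (timestamps and text invented):
# a Fisher transcript line such as
#   "12.34 15.67 A: hello there"
# yields t_start=12.34, t_end=15.67, idx=0 (channel A) and content
# "hello there" after normalization; comment lines, empty lines and
# (optionally) low-confidence "((...))" lines return (None, None, None, None).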
def __process_one_file(
trans_path,
sample_rate,
audio_data,
file_id,
dst_root,
min_slice_duration,
file_count,
keep_low_conf,
rem_noises,
emojify,
):
"""
Creates one block of audio slices and their corresponding transcripts.
Args:
trans_path: filepath to transcript
sample_rate: sample rate of the audio
audio_data: numpy array of shape [samples, channels]
file_id: identifying label, e.g. 'fe_03_01102'
dst_root: path to destination directory
min_slice_duration: min number of seconds for an audio slice
file_count: total number of files processed so far
keep_low_conf: keep utterances with low-confidence transcripts
rem_noises: remove noise symbols
emojify: convert noise symbols into emoji characters
"""
count = 0
with open(trans_path, encoding="utf-8") as fin:
fin.readline() # Comment w/ corresponding sph filename
fin.readline() # Comment about transcriber
transcript_buffers = ['', ''] # [A buffer, B buffer]
audio_buffers = [[], []]
buffer_durations = [0.0, 0.0]
for line in fin:
t_start, t_end, idx, content = __process_utterance(
file_id, trans_path, line, keep_low_conf, rem_noises, emojify
)
if content is None or not content:
continue
duration = t_end - t_start
# Append utterance to buffer
transcript_buffers[idx] += content
audio_buffers[idx].append(
audio_data[floor(t_start * sample_rate) : ceil(t_end * sample_rate), idx,]
)
buffer_durations[idx] += duration
if buffer_durations[idx] < min_slice_duration:
transcript_buffers[idx] += ' '
else:
# Write out segment and transcript
count += 1
__write_sample(
dst_root,
file_id,
count,
file_count,
sample_rate,
np.concatenate(audio_buffers[idx], axis=0),
buffer_durations[idx],
transcript_buffers[idx],
)
# Clear buffers
transcript_buffers[idx] = ''
audio_buffers[idx] = []
buffer_durations[idx] = 0.0
# Note: We drop any shorter "scraps" at the end of the file, if
# they end up shorter than min_slice_duration.
def __partition_name(file_count):
if file_count >= VAL_END_IDX:
return "test"
elif file_count >= TRAIN_END_IDX:
return "val"
else:
return "train"
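# With the constants above (NUM_FILES = 5850 + 5849 = 11699), the resulting
# split is roughly 80/10/10 by conversation file:
#   file_count 0..9358      -> "train"  (TRAIN_END_IDX = 9359)
#   file_count 9359..10528  -> "val"    (VAL_END_IDX   = 10529)
#   file_count 10529..11698 -> "test"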
def __process_data(
audio_root, transcript_root, dst_root, min_slice_duration, file_count, keep_low_conf, rem_noises, emojify,
):
"""
Converts Fisher wav files to numpy arrays, segments audio and transcripts.
Args:
audio_root: source directory with the wav files
transcript_root: source directory with the transcript files
(can be the same as audio_root)
dst_root: where the processed and segmented files will be stored
min_slice_duration: minimum number of seconds for a slice of output
file_count: total number of files processed so far
keep_low_conf: whether or not to keep low confidence transcriptions
rem_noises: whether to remove noise symbols
emojify: whether to convert noise symbols to emoji, lower precedence
Assumes:
1. There is exactly one transcripts directory in data_folder
2. Audio files are all: <audio_root>/audio-wav/fe_03_xxxxx.wav
"""
transcript_list = glob.glob(os.path.join(transcript_root, "fe_03_p*_tran*", "data", "trans", "*", "*.txt"))
print("Found {} transcripts.".format(len(transcript_list)))
count = file_count
# Grab audio file associated with each transcript, and slice
for trans_path in tqdm(transcript_list, desc="Matching and segmenting"):
file_id, _ = os.path.splitext(os.path.basename(trans_path))
audio_path = os.path.join(audio_root, "audio_wav", file_id + ".wav")
sample_rate, audio_data = wavfile.read(audio_path)
# Create a set of segments (a block) for each file
__process_one_file(
trans_path,
sample_rate,
audio_data,
file_id,
dst_root,
min_slice_duration,
count,
keep_low_conf,
rem_noises,
emojify,
)
count += 1
return count
def main():
# Arguments to the script
audio_root = args.audio_root
transcript_root = args.transcript_root
dest_root = args.dest_root
min_slice_duration = args.min_slice_duration
keep_low_conf = args.keep_low_conf
rem_noises = args.remove_noises
emojify = args.noises_to_emoji
print(f"Expected number of files to segment: {NUM_FILES}")
    print("With an 80/10/10 split:")
print(f"Number of training files: {TRAIN_END_IDX}")
print(f"Number of validation files: {VAL_END_IDX - TRAIN_END_IDX}")
print(f"Number of test files: {NUM_FILES - VAL_END_IDX}")
if not os.path.exists(os.path.join(dest_root, 'train/')):
os.makedirs(os.path.join(dest_root, 'train/'))
os.makedirs(os.path.join(dest_root, 'val/'))
os.makedirs(os.path.join(dest_root, 'test/'))
else:
# Wipe manifest contents first
open(os.path.join(dest_root, "manifest_train.json"), 'w').close()
open(os.path.join(dest_root, "manifest_val.json"), 'w').close()
open(os.path.join(dest_root, "manifest_test.json"), 'w').close()
file_count = 0
for data_set in ['LDC2004S13-Part1', 'LDC2005S13-Part2']:
print(f"\n\nWorking on dataset: {data_set}")
file_count = __process_data(
os.path.join(audio_root, data_set),
os.path.join(transcript_root, data_set),
dest_root,
min_slice_duration,
file_count,
keep_low_conf,
rem_noises,
emojify,
)
print(f"Total file count so far: {file_count}")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/process_fisher_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import pandas as pd
from nemo.utils import logging
def main():
parser = argparse.ArgumentParser(description="Convert kaldi data folder to manifest.json")
parser.add_argument(
"--data_dir", required=True, type=str, help="data in kaldi format",
)
parser.add_argument(
"--manifest", required=True, type=str, help="path to store the manifest file",
)
parser.add_argument(
"--with_aux_data",
default=False,
action="store_true",
help="whether to include auxiliary data in the manifest",
)
args = parser.parse_args()
kaldi_folder = args.data_dir
required_data = {
"audio_filepath": os.path.join(kaldi_folder, "wav.scp"),
"duration": os.path.join(kaldi_folder, "segments"),
"text": os.path.join(kaldi_folder, "text"),
}
aux_data = {
"speaker": os.path.join(kaldi_folder, "utt2spk"),
"gender": os.path.join(kaldi_folder, "utt2gender"),
}
output_names = list(required_data.keys())
# check if required files exist
for name, file in required_data.items():
if not os.path.exists(file):
raise ValueError(f"{os.path.basename(file)} is not in {kaldi_folder}.")
# read wav.scp
wavscp = pd.read_csv(required_data["audio_filepath"], sep=" ", header=None)
if wavscp.shape[1] > 2:
logging.warning(
f"""More than two columns in 'wav.scp': {wavscp.shape[1]}.
Maybe it contains pipes? Pipe processing can be slow at runtime."""
)
wavscp = pd.read_csv(
required_data["audio_filepath"],
sep="^([^ ]+) ",
engine="python",
header=None,
usecols=[1, 2],
names=["wav_label", "audio_filepath"],
)
else:
wavscp = wavscp.rename(columns={0: "wav_label", 1: "audio_filepath"})
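    # Illustrative wav.scp lines (invented) covered by the two branches above:
    #   utt001 /data/audio/utt001.wav                   -> plain two-column case
    #   utt002 sph2pipe -f wav /data/sph/utt002.sph |   -> piped command, kept as-is
    # In both cases the first token becomes "wav_label" and the remainder of the
    # line becomes "audio_filepath".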
# read text
text = pd.read_csv(
required_data["text"], sep="^([^ ]+) ", engine="python", header=None, usecols=[1, 2], names=["label", "text"],
)
# read segments
segments = pd.read_csv(
required_data["duration"], sep=" ", header=None, names=["label", "wav_label", "offset", "end"],
)
# add offset if needed
if len(segments.offset) > len(segments.offset[segments.offset == 0.0]):
logging.info("Adding offset field.")
output_names.insert(2, "offset")
segments["duration"] = (segments.end - segments.offset).round(decimals=3)
# merge data
wav_segments_text = pd.merge(
pd.merge(segments, wavscp, how="inner", on="wav_label"), text, how="inner", on="label",
)
if args.with_aux_data:
# check if auxiliary data is present
for name, aux_file in aux_data.items():
if os.path.exists(aux_file):
logging.info(f"Adding info from '{os.path.basename(aux_file)}'.")
wav_segments_text = pd.merge(
wav_segments_text,
pd.read_csv(aux_file, sep=" ", header=None, names=["label", name]),
how="left",
on="label",
)
output_names.append(name)
else:
logging.info(f"'{os.path.basename(aux_file)}' does not exist. Skipping ...")
# write data to .json
entries = wav_segments_text[output_names].to_dict(orient="records")
with open(args.manifest, "w", encoding="utf-8") as fout:
for m in entries:
fout.write(json.dumps(m, ensure_ascii=False) + "\n")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/kaldi2json.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python get_demand_data.py --data_root=<where to put data>
#                              --data_sets=<datasets_to_download>
# where <datasets_to_download> can be: one or more of the 16 kHz noise profiles
# listed at https://zenodo.org/record/1227121#.Ygb4avXMKJk ,
# or ALL
# You can put more than one data_set comma-separated:
# --data_sets=DKITCHEN,DLIVING,NRIVER
import argparse
import glob
import json
import logging
import os
import shutil
import subprocess
import urllib.request
parser = argparse.ArgumentParser(description='DEMAND noise data download')
parser.add_argument("--data_root", required=True, default=None, type=str)
parser.add_argument("--data_sets", default="ALL", type=str)
parser.add_argument('--log', dest='log', action='store_true', default=False)
args = parser.parse_args()
URLS = {
'DKITCHEN': ("https://zenodo.org/record/1227121/files/DKITCHEN_16k.zip"),
'DLIVING': ("https://zenodo.org/record/1227121/files/DLIVING_16k.zip"),
'DWASHING': ("https://zenodo.org/record/1227121/files/DWASHING_16k.zip"),
'NFIELD': ("https://zenodo.org/record/1227121/files/NFIELD_16k.zip"),
'NPARK': ("https://zenodo.org/record/1227121/files/NPARK_16k.zip"),
'NRIVER': ("https://zenodo.org/record/1227121/files/NRIVER_16k.zip"),
'OHALLWAY': ("https://zenodo.org/record/1227121/files/OHALLWAY_16k.zip"),
'OMEETING': ("https://zenodo.org/record/1227121/files/OMEETING_16k.zip"),
'OOFFICE': ("https://zenodo.org/record/1227121/files/OOFFICE_16k.zip"),
'PCAFETER': ("https://zenodo.org/record/1227121/files/PCAFETER_16k.zip"),
'PRESTO': ("https://zenodo.org/record/1227121/files/PRESTO_16k.zip"),
'PSTATION': ("https://zenodo.org/record/1227121/files/PSTATION_16k.zip"),
'SPSQUARE': ("https://zenodo.org/record/1227121/files/SPSQUARE_16k.zip"),
'STRAFFIC': ("https://zenodo.org/record/1227121/files/STRAFFIC_16k.zip"),
'TBUS': ("https://zenodo.org/record/1227121/files/TBUS_16k.zip"),
'TCAR': ("https://zenodo.org/record/1227121/files/TCAR_16k.zip"),
'TMETRO': ("https://zenodo.org/record/1227121/files/TMETRO_16k.zip"),
}
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
source = URLS[source]
if not os.path.exists(destination):
logging.info("{0} does not exist. Downloading ...".format(destination))
urllib.request.urlretrieve(source, filename=destination + '.tmp')
os.rename(destination + '.tmp', destination)
logging.info("Downloaded {0}.".format(destination))
else:
logging.info("Destination {0} exists. Skipping.".format(destination))
return destination
def __extract_file(filepath: str, data_dir: str):
shutil.unpack_archive(filepath, data_dir)
def __create_manifest(dst_folder: str):
"""
Create manifests for the noise files
Args:
        dst_folder: directory containing the extracted wav files for one noise profile;
            the manifest is written to a sibling "manifests" directory
    Returns:
"""
# Read directory
# Get all wav file names
# create line per wav file in manifest
noise_name = os.path.basename(dst_folder)
wav_files = glob.glob(dst_folder + "/*.wav")
wav_files.sort()
os.makedirs(os.path.join(os.path.dirname(dst_folder), "manifests"), exist_ok=True)
with open(os.path.join(os.path.dirname(dst_folder), "manifests", noise_name + ".json"), "w") as mfst_f:
for wav_f in wav_files:
dur = subprocess.check_output("soxi -D {0}".format(wav_f), shell=True)
row = {"audio_filepath": wav_f, "text": "", "duration": float(dur)}
mfst_f.write(json.dumps(row) + "\n")
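# Each manifest row describes one noise file with an empty transcript; an
# illustrative row (path and duration are made up) looks like
#   {"audio_filepath": "/data/DKITCHEN/ch01.wav", "text": "", "duration": 300.0}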
def main():
data_root = args.data_root
data_sets = args.data_sets
if args.log:
logging.basicConfig(level=logging.INFO)
if not os.path.exists(data_root):
os.makedirs(data_root)
if data_sets == "ALL":
data_sets = URLS.keys()
else:
data_sets = data_sets.split(',')
for data_set in data_sets:
if data_set not in URLS.keys():
            raise ValueError(f"{data_set} is not part of the DEMAND noise database")
logging.info("\n\nWorking on: {0}".format(data_set))
filepath = os.path.join(data_root, data_set + "_16k.zip")
logging.info("Getting {0}".format(data_set))
__maybe_download_file(filepath, data_set.upper())
logging.info("Extracting {0}".format(data_set))
__extract_file(filepath, data_root)
logging.info("Processing {0}".format(data_set))
__create_manifest(os.path.join(data_root, data_set))
logging.info('Done!')
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/get_demand_data.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2020, SeanNaren. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# To convert mp3 files to wav using sox, you must have installed sox with mp3 support
# For example sudo apt-get install libsox-fmt-mp3
import argparse
import csv
import json
import logging
import multiprocessing
import os
import subprocess
import sys
import tarfile
from multiprocessing.pool import ThreadPool
from pathlib import Path
from typing import List
import sox
from sox import Transformer
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Downloads and processes Mozilla Common Voice dataset.')
parser.add_argument("--data_root", default='CommonVoice_dataset/', type=str, help="Directory to store the dataset.")
parser.add_argument('--manifest_dir', default='./', type=str, help='Output directory for manifests')
parser.add_argument("--num_workers", default=multiprocessing.cpu_count(), type=int, help="Workers to process dataset.")
parser.add_argument('--sample_rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--n_channels', default=1, type=int, help='Number of channels for output wav files')
parser.add_argument("--log", dest="log", action="store_true", default=False)
parser.add_argument("--cleanup", dest="cleanup", action="store_true", default=False)
parser.add_argument(
'--files_to_process',
nargs='+',
default=['test.tsv', 'dev.tsv', 'train.tsv'],
type=str,
help='list of *.csv file names to process',
)
parser.add_argument(
'--version',
default='cv-corpus-5.1-2020-06-22',
type=str,
    help='Version of the dataset (obtainable via https://commonvoice.mozilla.org/en/datasets)',
)
parser.add_argument(
'--language',
default='en',
type=str,
    help='Which language to download (default: english; '
    'see https://commonvoice.mozilla.org/en/datasets for more language codes)',
)
args = parser.parse_args()
COMMON_VOICE_URL = (
f"https://voice-prod-bundler-ee1969a6ce8178826482b88e843c335139bd3fb4.s3.amazonaws.com/"
"{}/{}.tar.gz".format(args.version, args.language)
)
def create_manifest(data: List[tuple], output_name: str, manifest_path: str):
output_file = Path(manifest_path) / output_name
output_file.parent.mkdir(exist_ok=True, parents=True)
with output_file.open(mode='w') as f:
for wav_path, duration, text in tqdm(data, total=len(data)):
if wav_path != '':
# skip invalid input files that could not be converted
f.write(
json.dumps({'audio_filepath': os.path.abspath(wav_path), "duration": duration, 'text': text})
+ '\n'
)
def process_files(csv_file, data_root, num_workers):
""" Read *.csv file description, convert mp3 to wav, process text.
Save results to data_root.
Args:
        csv_file: str, path to the *.tsv file with the data description
data_root: str, path to dir to save results; wav/ dir will be created
"""
wav_dir = os.path.join(data_root, 'wav/')
os.makedirs(wav_dir, exist_ok=True)
audio_clips_path = os.path.dirname(csv_file) + '/clips/'
def process(x):
file_path, text = x
file_name = os.path.splitext(os.path.basename(file_path))[0]
text = text.lower().strip()
audio_path = os.path.join(audio_clips_path, file_path)
if os.path.getsize(audio_path) == 0:
logging.warning(f'Skipping empty audio file {audio_path}')
return '', '', ''
output_wav_path = os.path.join(wav_dir, file_name + '.wav')
if not os.path.exists(output_wav_path):
tfm = Transformer()
tfm.rate(samplerate=args.sample_rate)
tfm.channels(n_channels=args.n_channels)
tfm.build(input_filepath=audio_path, output_filepath=output_wav_path)
duration = sox.file_info.duration(output_wav_path)
return output_wav_path, duration, text
logging.info('Converting mp3 to wav for {}.'.format(csv_file))
with open(csv_file) as csvfile:
reader = csv.DictReader(csvfile, delimiter='\t')
        # csv.DictReader already consumes the header row, so no extra skip is needed
data = []
for row in reader:
file_name = row['path']
# add the mp3 extension if the tsv entry does not have it
if not file_name.endswith('.mp3'):
file_name += '.mp3'
data.append((file_name, row['sentence']))
with ThreadPool(num_workers) as pool:
data = list(tqdm(pool.imap(process, data), total=len(data)))
return data
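# For reference, each element returned by process_files is a tuple of
# (wav_path, duration, text); invalid inputs come back as ('', '', '') and are
# filtered out later by create_manifest. An illustrative element (path, length
# and text are invented):
#   ("/data/CommonVoice_dataset/train/wav/common_voice_en_000001.wav", 4.7, "hello world")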
def main():
if args.log:
logging.basicConfig(level=logging.INFO)
data_root = args.data_root
os.makedirs(data_root, exist_ok=True)
target_unpacked_dir = os.path.join(data_root, "CV_unpacked")
if os.path.exists(target_unpacked_dir):
logging.info('Find existing folder {}'.format(target_unpacked_dir))
else:
logging.info("Could not find Common Voice, Downloading corpus...")
        # some dataset versions are packaged under different file names, so force the archive name
output_archive_filename = args.language + '.tar.gz'
output_archive_filename = os.path.join(data_root, output_archive_filename)
commands = [
'wget',
'--user-agent',
'"Mozilla/5.0 (Windows NT 10.0; WOW64) '
'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"',
'-O',
output_archive_filename,
f'{COMMON_VOICE_URL}',
]
commands = " ".join(commands)
subprocess.run(commands, shell=True, stderr=sys.stderr, stdout=sys.stdout, capture_output=False)
filename = f"{args.language}.tar.gz"
target_file = os.path.join(data_root, os.path.basename(filename))
os.makedirs(target_unpacked_dir, exist_ok=True)
logging.info("Unpacking corpus to {} ...".format(target_unpacked_dir))
tar = tarfile.open(target_file)
tar.extractall(target_unpacked_dir)
tar.close()
if args.cleanup:
logging.info("removing tar archive to save space")
os.remove(target_file)
folder_path = os.path.join(target_unpacked_dir, args.version + f'/{args.language}/')
if not os.path.isdir(folder_path):
# try without language
folder_path = os.path.join(target_unpacked_dir, args.version)
if not os.path.isdir(folder_path):
# try without version
folder_path = target_unpacked_dir
if not os.path.isdir(folder_path):
logging.error(f'unable to locate unpacked files in {folder_path}')
sys.exit()
for csv_file in args.files_to_process:
data = process_files(
csv_file=os.path.join(folder_path, csv_file),
data_root=os.path.join(data_root, os.path.splitext(csv_file)[0]),
num_workers=args.num_workers,
)
logging.info('Creating manifests...')
create_manifest(
data=data,
output_name=f'commonvoice_{os.path.splitext(csv_file)[0]}_manifest.json',
manifest_path=args.manifest_dir,
)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/get_commonvoice_data.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# USAGE: python get_librispeech_data.py --data_root=<where to put data>
#        --data_sets=<datasets_to_download> --num_workers=<number of parallel workers>
# where <datasets_to_download> can be: dev_clean, dev_other, test_clean,
# test_other, train_clean_100, train_clean_360, train_other_500 or ALL
# You can also put more than one data set, comma-separated:
#        --data_sets=dev_clean,train_clean_100
import argparse
import fnmatch
import functools
import json
import logging
import multiprocessing
import os
import subprocess
import tarfile
import urllib.request
from sox import Transformer
from tqdm import tqdm
parser = argparse.ArgumentParser(description="LibriSpeech Data download")
parser.add_argument("--data_root", required=True, default=None, type=str)
parser.add_argument("--data_sets", default="dev_clean", type=str)
parser.add_argument("--num_workers", default=4, type=int)
parser.add_argument("--log", dest="log", action="store_true", default=False)
args = parser.parse_args()
URLS = {
"TRAIN_CLEAN_100": ("http://www.openslr.org/resources/12/train-clean-100.tar.gz"),
"TRAIN_CLEAN_360": ("http://www.openslr.org/resources/12/train-clean-360.tar.gz"),
"TRAIN_OTHER_500": ("http://www.openslr.org/resources/12/train-other-500.tar.gz"),
"DEV_CLEAN": "http://www.openslr.org/resources/12/dev-clean.tar.gz",
"DEV_OTHER": "http://www.openslr.org/resources/12/dev-other.tar.gz",
"TEST_CLEAN": "http://www.openslr.org/resources/12/test-clean.tar.gz",
"TEST_OTHER": "http://www.openslr.org/resources/12/test-other.tar.gz",
"DEV_CLEAN_2": "https://www.openslr.org/resources/31/dev-clean-2.tar.gz",
"TRAIN_CLEAN_5": "https://www.openslr.org/resources/31/train-clean-5.tar.gz",
}
def __retrieve_with_progress(source: str, filename: str):
"""
Downloads source to destination
Displays progress bar
Args:
source: url of resource
        filename: local filepath to write to
Returns:
"""
with open(filename, "wb") as f:
response = urllib.request.urlopen(source)
total = response.length
if total is None:
            f.write(response.read())
else:
with tqdm(total=total, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
for data in response:
f.write(data)
pbar.update(len(data))
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
source = URLS[source]
if not os.path.exists(destination):
logging.info("{0} does not exist. Downloading ...".format(destination))
__retrieve_with_progress(source, filename=destination + ".tmp")
os.rename(destination + ".tmp", destination)
logging.info("Downloaded {0}.".format(destination))
else:
logging.info("Destination {0} exists. Skipping.".format(destination))
return destination
def __extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info("Not extracting. Maybe already there?")
def __process_transcript(file_path: str, dst_folder: str):
"""
Converts flac files to wav from a given transcript, capturing the metadata.
Args:
file_path: path to a source transcript with flac sources
dst_folder: path where wav files will be stored
Returns:
a list of metadata entries for processed files.
"""
entries = []
root = os.path.dirname(file_path)
with open(file_path, encoding="utf-8") as fin:
for line in fin:
id, text = line[: line.index(" ")], line[line.index(" ") + 1 :]
transcript_text = text.lower().strip()
# Convert FLAC file to WAV
flac_file = os.path.join(root, id + ".flac")
wav_file = os.path.join(dst_folder, id + ".wav")
if not os.path.exists(wav_file):
Transformer().build(flac_file, wav_file)
# check duration
duration = subprocess.check_output("soxi -D {0}".format(wav_file), shell=True)
entry = {}
entry["audio_filepath"] = os.path.abspath(wav_file)
entry["duration"] = float(duration)
entry["text"] = transcript_text
entries.append(entry)
return entries
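# Sketch of the input this parser expects (ID and text are illustrative):
# each *.trans.txt line looks like
#   "84-121123-0000 GO DO YOU HEAR"
# so "84-121123-0000" selects 84-121123-0000.flac next to the transcript, and
# the remainder, lowercased, becomes the "text" field of the manifest entry.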
def __process_data(data_folder: str, dst_folder: str, manifest_file: str, num_workers: int):
"""
Converts flac to wav and build manifests's json
Args:
data_folder: source with flac files
dst_folder: where wav files will be stored
manifest_file: where to store manifest
num_workers: number of parallel workers processing files
Returns:
"""
if not os.path.exists(dst_folder):
os.makedirs(dst_folder)
files = []
entries = []
for root, dirnames, filenames in os.walk(data_folder):
for filename in fnmatch.filter(filenames, "*.trans.txt"):
files.append(os.path.join(root, filename))
with multiprocessing.Pool(num_workers) as p:
processing_func = functools.partial(__process_transcript, dst_folder=dst_folder)
results = p.imap(processing_func, files)
for result in tqdm(results, total=len(files)):
entries.extend(result)
with open(manifest_file, "w") as fout:
for m in entries:
fout.write(json.dumps(m) + "\n")
def main():
data_root = args.data_root
data_sets = args.data_sets
num_workers = args.num_workers
if args.log:
logging.basicConfig(level=logging.INFO)
if data_sets == "ALL":
data_sets = "dev_clean,dev_other,train_clean_100,train_clean_360,train_other_500,test_clean,test_other"
if data_sets == "mini":
data_sets = "dev_clean_2,train_clean_5"
for data_set in data_sets.split(","):
logging.info("\n\nWorking on: {0}".format(data_set))
filepath = os.path.join(data_root, data_set + ".tar.gz")
logging.info("Getting {0}".format(data_set))
__maybe_download_file(filepath, data_set.upper())
logging.info("Extracting {0}".format(data_set))
__extract_file(filepath, data_root)
logging.info("Processing {0}".format(data_set))
__process_data(
os.path.join(os.path.join(data_root, "LibriSpeech"), data_set.replace("_", "-"),),
os.path.join(os.path.join(data_root, "LibriSpeech"), data_set.replace("_", "-"),) + "-processed",
os.path.join(data_root, data_set + ".json"),
num_workers=num_workers,
)
logging.info("Done!")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/get_librispeech_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
python process_slurp_data.py \
--data_dir=<directory to store the data> \
--text_key=<data to store in the 'text' field of manifests, choices=['semantics', 'transcript']> \
--suffix=<suffix to be added to manifest filenames, e.g., 'slu' or 'asr'> \
Note: use text_key=semantics for end-to-end SLU, and text_key=transcript for training ASR models on SLURP
"""
import argparse
import json
import multiprocessing
import os
import tarfile
from pathlib import Path
import librosa
import pandas as pd
import soundfile as sf
import wget
from tqdm import tqdm
from tqdm.contrib.concurrent import process_map
sampling_rate = 16000
AUDIO_URLS = [
"https://zenodo.org/record/4274930/files/slurp_real.tar.gz",
"https://zenodo.org/record/4274930/files/slurp_synth.tar.gz",
]
ANNO_URLS = [
"https://github.com/pswietojanski/slurp/raw/master/dataset/slurp/test.jsonl",
"https://github.com/pswietojanski/slurp/raw/master/dataset/slurp/devel.jsonl",
"https://github.com/pswietojanski/slurp/raw/master/dataset/slurp/train_synthetic.jsonl",
"https://github.com/pswietojanski/slurp/raw/master/dataset/slurp/train.jsonl",
]
FIELD_AUDIO = "audio_filepath"
FIELD_TEXT = "text"
FIELD_DATA_DIR = "data_dir"
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
if not os.path.exists(destination):
print(f"{destination} does not exist. Downloading ...")
wget.download(source, destination)
print(f"Downloaded {destination}.")
else:
print(f"Destination {destination} exists. Skipping.")
return destination
def __extract_all_files(filepath: str, data_dir: str):
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
def download_slurp(data_dir: str, anno_dir: str):
data_dir = Path(data_dir)
data_dir.mkdir(parents=True, exist_ok=True)
anno_dir = Path(anno_dir)
anno_dir.mkdir(parents=True, exist_ok=True)
print("Downloading and extracting audio files, this may take a long time...")
for url in AUDIO_URLS:
target_file = url.split("/")[-1]
destination = str(data_dir / Path(target_file))
print(f"Getting {target_file}")
__maybe_download_file(destination, url)
print(f"Extracting {target_file}")
__extract_all_files(destination, data_dir)
print("Downloading annotation files...")
for url in ANNO_URLS:
target_file = url.split("/")[-1]
destination = str(anno_dir / Path(target_file))
print(f"Getting {target_file}")
__maybe_download_file(destination, url)
print("Finished downloading data.")
def process_raw_annotations(anno_dir: str, text_key: str = "semantics", suffix: str = "slu"):
anno_dir = Path(anno_dir)
splits = [
"train",
"train_synthetic",
"devel",
"test",
]
id = 0
for split in splits:
tag = "_" + suffix if suffix else ""
new_filename = f"{os.path.join(anno_dir, split)}{tag}.json"
print(f"Preparing {new_filename}...")
IDs = []
slurp_id = []
audio = []
audio_format = []
audio_opts = []
semantics = []
semantics_format = []
semantics_opts = []
transcript = []
transcript_format = []
transcript_opts = []
jsonl_path = os.path.join(anno_dir, split + ".jsonl")
with open(jsonl_path, "r") as fin:
for line in fin.readlines():
line = line.strip()
if len(line) == 0:
continue
obj = json.loads(line)
sid = obj["slurp_id"]
scenario = obj["scenario"]
action = obj["action"]
sentence_annotation = obj["sentence_annotation"]
num_entities = sentence_annotation.count("[")
entities = []
for slot in range(num_entities):
type = sentence_annotation.split("[")[slot + 1].split("]")[0].split(":")[0].strip()
filler = sentence_annotation.split("[")[slot + 1].split("]")[0].split(":")[1].strip()
entities.append({"type": type.lower(), "filler": filler.lower()})
for recording in obj["recordings"]:
IDs.append(id)
slurp_id.append(sid)
if "synthetic" in split:
audio_folder = "slurp_synth/"
else:
audio_folder = "slurp_real/"
path = os.path.join(audio_folder, recording["file"])
audio.append(path)
audio_format.append("flac")
audio_opts.append(None)
transcript.append(obj["sentence"])
transcript_format.append("string")
transcript_opts.append(None)
semantics_dict = {
"scenario": scenario,
"action": action,
"entities": entities,
}
semantics_ = str(semantics_dict)
semantics.append(semantics_)
semantics_format.append("string")
semantics_opts.append(None)
id += 1
df = pd.DataFrame(
{"ID": IDs, "slurp_id": slurp_id, "audio": audio, "semantics": semantics, "transcript": transcript,}
)
if text_key not in ["transcript", "semantics"]:
text_key = "transcript"
with open(new_filename, "w") as fout:
for idx in tqdm(range(len(df))):
item = {
"id": str(df["ID"][idx]),
"slurp_id": str(df["slurp_id"][idx]),
"audio_filepath": df["audio"][idx],
"transcript": df["transcript"][idx],
"semantics": df["semantics"][idx],
"text": df[text_key][idx],
}
fout.write(json.dumps(item) + "\n")
print(f"Saved output to: {new_filename}")
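# Hedged illustration of the slot parsing above (the annotation text is invented):
# a sentence_annotation such as
#   "wake me up at [time : five am] on [date : tomorrow]"
# produces entities
#   [{"type": "time", "filler": "five am"}, {"type": "date", "filler": "tomorrow"}]
# which are stored, together with scenario and action, in the "semantics" field.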
def process(x: dict) -> dict:
if not isinstance(x[FIELD_TEXT], str):
x[FIELD_TEXT] = ''
else:
x[FIELD_TEXT] = x[FIELD_TEXT].lower().strip()
data_dir = x[FIELD_DATA_DIR]
input_file = Path(x[FIELD_AUDIO])
if not input_file.is_absolute():
input_file_path = str(data_dir / input_file)
else:
input_file_path = str(input_file)
output_file = Path(input_file.stem + ".wav")
if "slurp_real" in input_file_path:
output_dir = Path("wavs/slurp_real")
else:
output_dir = Path("wavs/slurp_synth")
output_file_path = str(data_dir / output_dir / output_file)
if not os.path.exists(output_file_path):
y, _ = librosa.load(input_file_path, sr=sampling_rate)
sf.write(output_file_path, y, sampling_rate)
y, _ = librosa.load(output_file_path, sr=sampling_rate)
x['duration'] = librosa.get_duration(y=y, sr=sampling_rate)
x[FIELD_AUDIO] = str(output_dir / output_file)
del x[FIELD_DATA_DIR]
return x
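# Roughly, for a manifest entry pointing at "slurp_real/audio-0001.flac"
# (file name invented), process() writes a 16 kHz wav to
# "<data_dir>/wavs/slurp_real/audio-0001.wav", fills in "duration", and
# rewrites "audio_filepath" to the relative path "wavs/slurp_real/audio-0001.wav".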
def load_data(manifest: str, data_dir: str):
data = []
with open(manifest, 'r') as f:
for line in tqdm(f):
item = json.loads(line)
item[FIELD_DATA_DIR] = Path(data_dir)
data.append(item)
return data
def decode_resample_slurp(data_dir: str, anno_dir: str):
wavs_dir = Path(data_dir) / Path("wavs")
wavs_dir.mkdir(parents=True, exist_ok=True)
wavs_real_dir = wavs_dir / Path("slurp_real")
wavs_real_dir.mkdir(parents=True, exist_ok=True)
wavs_synth_dir = wavs_dir / Path("slurp_synth")
wavs_synth_dir.mkdir(parents=True, exist_ok=True)
manifest_path = Path(anno_dir)
if manifest_path.is_dir():
manifest_list = list(manifest_path.glob("*.json"))
else:
manifest_list = [str(manifest_path)]
print(f"Found {len(manifest_list)} manifests to be processed.")
for manifest in manifest_list:
print(f"Processing manifest: {manifest}")
data = load_data(str(manifest), data_dir)
data_new = process_map(process, data, max_workers=multiprocessing.cpu_count(), chunksize=100)
output_file = Path(data_dir) / Path(manifest.name)
with output_file.open("w") as f:
for item in tqdm(data_new):
f.write(json.dumps(item) + '\n')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="slurp_data", help="Root directory for dataset")
parser.add_argument(
"--text_key",
type=str,
default="semantics",
help="Data to be put in the text field, choices=[semantics,transcript]",
)
parser.add_argument("--suffix", type=str, default="slu", help="Suffix to be added to the manifest filenames")
args = parser.parse_args()
data_dir = args.data_dir
anno_dir = str(Path(data_dir) / Path("raw_annotations"))
download_slurp(data_dir=data_dir, anno_dir=anno_dir)
process_raw_annotations(anno_dir=anno_dir, text_key=args.text_key, suffix=args.suffix)
decode_resample_slurp(data_dir=data_dir, anno_dir=anno_dir)
print("All done!")
| NeMo-main | scripts/dataset_processing/process_slurp_data.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
python process_vad_data.py \
--out_dir=<output path to where the generated manifest should be stored> \
--speech_data_root=<path where the speech data are stored> \
--background_data_root=<path where the background data are stored> \
--rebalance_method=<'under' or 'over' or 'fixed'> \
--log
Optional: add --demo for the tutorial demonstration; remove it if you want to use your own background noise data.
"""
import argparse
import glob
import json
import logging
import os
import tarfile
import urllib.request
import librosa
import numpy as np
import soundfile as sf
from sklearn.model_selection import train_test_split
sr = 16000
# google speech command v2
URL = "http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz"
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
if not os.path.exists(destination):
logging.info(f"{destination} does not exist. Downloading ...")
urllib.request.urlretrieve(source, filename=destination + '.tmp')
os.rename(destination + '.tmp', destination)
logging.info(f"Downloaded {destination}.")
else:
logging.info(f"Destination {destination} exists. Skipping.")
return destination
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info('Not extracting. Maybe already there?')
def __extract_all_files(filepath: str, data_root: str, data_dir: str):
if not os.path.exists(data_dir):
extract_file(filepath, data_dir)
else:
logging.info(f'Skipping extracting. Data already there {data_dir}')
def split_train_val_test(data_dir, file_type, test_size=0.1, val_size=0.1, demo=False):
X = []
if file_type == "speech":
for o in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, o)) and o.split("/")[-1] != "_background_noise_":
X.extend(glob.glob(os.path.join(data_dir, o) + '/*.wav'))
if demo:
logging.info(
f"For Demonstration, we use {int(len(X)/100)}/{len(X)} speech data. Make sure to remove --demo flag when you actually train your model!"
)
X = np.random.choice(X, int(len(X) / 100), replace=False)
else:
for o in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, o)):
X.extend(glob.glob(os.path.join(data_dir, o) + '/*.wav'))
else: # for using "_background_noise_" from google speech commands as background data
if o.endswith(".wav"):
X.append(os.path.join(data_dir, o))
X_train, X_test = train_test_split(X, test_size=test_size, random_state=1)
val_size_tmp = val_size / (1 - test_size)
X_train, X_val = train_test_split(X_train, test_size=val_size_tmp, random_state=1)
with open(os.path.join(data_dir, file_type + "_training_list.txt"), "w") as outfile:
outfile.write("\n".join(X_train))
with open(os.path.join(data_dir, file_type + "_testing_list.txt"), "w") as outfile:
outfile.write("\n".join(X_test))
with open(os.path.join(data_dir, file_type + "_validation_list.txt"), "w") as outfile:
outfile.write("\n".join(X_val))
    logging.info(f'Overall: {len(X)}, Train: {len(X_train)}, Validation: {len(X_val)}, Test: {len(X_test)}')
    logging.info(f"Finished splitting train, val and test for {file_type} and wrote the lists to files.")
def process_google_speech_train(data_dir):
X = []
for o in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, o)) and o.split("/")[-1] != "_background_noise_":
X.extend(glob.glob(os.path.join(data_dir, o) + '/*.wav'))
    short_files = [i.split(data_dir)[1] for i in X]
with open(os.path.join(data_dir, 'testing_list.txt'), 'r') as allfile:
testing_list = allfile.read().splitlines()
with open(os.path.join(data_dir, 'validation_list.txt'), 'r') as allfile:
validation_list = allfile.read().splitlines()
exist_set = set(testing_list).copy()
exist_set.update(set(validation_list))
training_list = [i for i in short_files if i not in exist_set]
with open(os.path.join(data_dir, "training_list.txt"), "w") as outfile:
outfile.write("\n".join(training_list))
logging.info(
        f'Overall: {len(X)}, Train: {len(training_list)}, Validation: {len(validation_list)}, Test: {len(testing_list)}'
)
def write_manifest(
out_dir,
files,
prefix,
manifest_name,
start=0.0,
end=None,
duration_stride=1.0,
duration_max=None,
duration_limit=100.0,
filter_long=False,
):
"""
Given a list of files, segment each file and write them to manifest with restrictions.
Args:
out_dir: directory of generated manifest
files: list of files to be processed
prefix: label of samples
manifest_name: name of generated manifest
        start: offset in seconds at which segmentation begins
        end: offset in seconds at which segmentation stops (None means segment to the end of the audio)
duration_stride: stride for segmenting audio samples
duration_max: duration for each segment
duration_limit: duration threshold for filtering out long audio samples
filter_long: boolean to determine whether to filter out long audio samples
Returns:
"""
seg_num = 0
skip_num = 0
if duration_max is None:
duration_max = 1e9
if not os.path.exists(out_dir):
        logging.info(f'Outdir {out_dir} does not exist. Creating directory.')
os.mkdir(out_dir)
output_path = os.path.join(out_dir, manifest_name + '.json')
with open(output_path, 'w') as fout:
for file in files:
label = prefix
try:
x, _sr = librosa.load(file, sr=sr)
duration = librosa.get_duration(y=x, sr=sr)
except Exception:
continue
if filter_long and duration > duration_limit:
skip_num += 1
continue
offsets = []
durations = []
if duration > duration_max:
current_offset = start
while current_offset < duration:
if end is not None and current_offset > end:
break
difference = duration - current_offset
if difference < duration_max:
break
offsets.append(current_offset)
durations.append(duration_max)
current_offset += duration_stride
else:
# Duration is not long enough! Skip
skip_num += 1
for duration, offset in zip(durations, offsets):
metadata = {
'audio_filepath': file,
'duration': duration,
'label': label,
'text': '_', # for compatibility with ASRAudioText
'offset': offset,
}
json.dump(metadata, fout)
fout.write('\n')
fout.flush()
seg_num += 1
return skip_num, seg_num, output_path
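# Worked example of the segmentation loop above (numbers are illustrative):
# with start=0.0, duration_stride=0.63 and duration_max=0.63, a 2.0 s file
# yields offsets [0.0, 0.63, 1.26], each with duration 0.63; the final ~0.11 s
# remainder is shorter than duration_max and is dropped, and files whose total
# duration does not exceed duration_max are skipped entirely (counted in skip_num).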
def load_list_write_manifest(
data_dir,
out_dir,
filename,
prefix,
start,
end,
duration_stride=1.0,
duration_max=1.0,
duration_limit=100.0,
filter_long=True,
):
filename = prefix + '_' + filename
file_path = os.path.join(data_dir, filename)
with open(file_path, 'r') as allfile:
files = allfile.read().splitlines()
manifest_name = filename.split('_list.txt')[0] + '_manifest'
skip_num, seg_num, output_path = write_manifest(
out_dir,
files,
prefix,
manifest_name,
start,
end,
duration_stride,
duration_max,
duration_limit,
filter_long=True,
)
return skip_num, seg_num, output_path
def rebalance_json(data_dir, data_json, num, prefix):
data = []
seg = 0
with open(data_json, 'r') as f:
for line in f:
data.append(json.loads(line))
filename = data_json.split('/')[-1]
fout_path = os.path.join(data_dir, prefix + "_" + filename)
if len(data) >= num:
selected_sample = np.random.choice(data, num, replace=False)
else:
selected_sample = np.random.choice(data, num, replace=True)
with open(fout_path, 'a') as fout:
for i in selected_sample:
seg += 1
json.dump(i, fout)
fout.write('\n')
fout.flush()
logging.info(f'Get {seg}/{num} to {fout_path} from {data_json}')
return fout_path
def generate_variety_noise(data_dir, filename, prefix):
curr_dir = data_dir.split("_background_noise_")[0]
silence_path = os.path.join(curr_dir, "_background_noise_more")
if not os.path.exists(silence_path):
os.mkdir(silence_path)
silence_stride = 1000 # stride = 1/16 seconds
sampling_rate = 16000
silence_files = []
rng = np.random.RandomState(0)
filename = prefix + '_' + filename
file_path = os.path.join(data_dir, filename)
with open(file_path, 'r') as allfile:
files = allfile.read().splitlines()
for file in files:
y, sr = librosa.load(path=file, sr=sampling_rate)
for i in range(
0, len(y) - sampling_rate, silence_stride * 100
        ):  # stride * 100 to generate fewer samples for the demo
file_name = "{}_{}.wav".format(file.split("/")[-1], i)
y_slice = y[i : i + sampling_rate]
magnitude = rng.uniform(0.0, 1.0)
y_slice *= magnitude
out_file_path = os.path.join(silence_path, file_name)
sf.write(out_file_path, y_slice, sr)
silence_files.append(out_file_path)
new_list_file = os.path.join(silence_path, filename)
with open(new_list_file, "w") as outfile:
outfile.write("\n".join(silence_files))
logging.info(f"Generate {len(out_file_path)} background files for {file_path}. => {new_list_file} !")
return len(silence_files)
def main():
    parser = argparse.ArgumentParser(description='Speech and background data download and preprocessing')
parser.add_argument("--out_dir", required=False, default='./manifest/', type=str)
parser.add_argument("--speech_data_root", required=True, default=None, type=str)
parser.add_argument("--background_data_root", required=True, default=None, type=str)
parser.add_argument('--test_size', required=False, default=0.1, type=float)
parser.add_argument('--val_size', required=False, default=0.1, type=float)
parser.add_argument('--window_length_in_sec', required=False, default=0.63, type=float)
parser.add_argument('--log', required=False, action='store_true')
parser.add_argument('--rebalance_method', required=False, default=None, type=str)
parser.add_argument('--demo', required=False, action='store_true')
parser.set_defaults(log=False, generate=False)
args = parser.parse_args()
if not args.rebalance_method:
rebalance = False
else:
if args.rebalance_method != 'over' and args.rebalance_method != 'under' and args.rebalance_method != 'fixed':
raise NameError("Please select a valid sampling method: over/under/fixed.")
else:
rebalance = True
if args.log:
logging.basicConfig(level=logging.DEBUG)
# Download speech data
speech_data_root = args.speech_data_root
data_set = "google_speech_recognition_v2"
speech_data_folder = os.path.join(speech_data_root, data_set)
background_data_folder = args.background_data_root
logging.info(f"Working on: {data_set}")
# Download and extract speech data
if not os.path.exists(speech_data_folder):
file_path = os.path.join(speech_data_root, data_set + ".tar.bz2")
logging.info(f"Getting {data_set}")
__maybe_download_file(file_path, URL)
logging.info(f"Extracting {data_set}")
__extract_all_files(file_path, speech_data_root, speech_data_folder)
logging.info(f"Split speech data!")
    # The dataset also provides testing.txt and validation.txt; feel free to split the data using those with process_google_speech_train instead
split_train_val_test(speech_data_folder, "speech", args.test_size, args.val_size, args.demo)
logging.info(f"Split background data!")
split_train_val_test(background_data_folder, "background", args.test_size, args.val_size)
out_dir = args.out_dir
# Process Speech manifest
logging.info(f"=== Write speech data to manifest!")
skip_num_val, speech_seg_num_val, speech_val = load_list_write_manifest(
speech_data_folder,
out_dir,
'validation_list.txt',
'speech',
0.2,
0.8,
args.window_length_in_sec,
args.window_length_in_sec,
)
skip_num_test, speech_seg_num_test, speech_test = load_list_write_manifest(
speech_data_folder, out_dir, 'testing_list.txt', 'speech', 0.2, 0.8, 0.01, args.window_length_in_sec
)
skip_num_train, speech_seg_num_train, speech_train = load_list_write_manifest(
speech_data_folder,
out_dir,
'training_list.txt',
'speech',
0.2,
0.8,
args.window_length_in_sec,
args.window_length_in_sec,
)
logging.info(f'Val: Skip {skip_num_val} samples. Get {speech_seg_num_val} segments! => {speech_val} ')
logging.info(f'Test: Skip {skip_num_test} samples. Get {speech_seg_num_test} segments! => {speech_test}')
logging.info(f'Train: Skip {skip_num_train} samples. Get {speech_seg_num_train} segments!=> {speech_train}')
# Process background manifest
# if we select to generate more background noise data
if args.demo:
logging.info("Start generating more background noise data")
generate_variety_noise(background_data_folder, 'validation_list.txt', 'background')
generate_variety_noise(background_data_folder, 'training_list.txt', 'background')
generate_variety_noise(background_data_folder, 'testing_list.txt', 'background')
background_data_folder = os.path.join(
background_data_folder.split("_background_noise_")[0], "_background_noise_more"
)
logging.info(f"=== Write background data to manifest!")
skip_num_val, background_seg_num_val, background_val = load_list_write_manifest(
background_data_folder, out_dir, 'validation_list.txt', 'background', 0, None, 0.15, args.window_length_in_sec
)
skip_num_test, background_seg_num_test, background_test = load_list_write_manifest(
background_data_folder, out_dir, 'testing_list.txt', 'background', 0, None, 0.01, args.window_length_in_sec
)
skip_num_train, background_seg_num_train, background_train = load_list_write_manifest(
background_data_folder, out_dir, 'training_list.txt', 'background', 0, None, 0.15, args.window_length_in_sec
)
logging.info(f'Val: Skip {skip_num_val} samples. Get {background_seg_num_val} segments! => {background_val}')
logging.info(f'Test: Skip {skip_num_test} samples. Get {background_seg_num_test} segments! => {background_test}')
logging.info(
f'Train: Skip {skip_num_train} samples. Get {background_seg_num_train} segments! => {background_train}'
)
min_val, max_val = min(speech_seg_num_val, background_seg_num_val), max(speech_seg_num_val, background_seg_num_val)
min_test, max_test = (
min(speech_seg_num_test, background_seg_num_test),
max(speech_seg_num_test, background_seg_num_test),
)
min_train, max_train = (
min(speech_seg_num_train, background_seg_num_train),
max(speech_seg_num_train, background_seg_num_train),
)
logging.info('Finish generating manifest!')
if rebalance:
# Random Oversampling: Randomly duplicate examples in the minority class.
# Random Undersampling: Randomly delete examples in the majority class.
if args.rebalance_method == 'under':
logging.info(f"Rebalancing number of samples in classes using {args.rebalance_method} sampling.")
logging.info(f'Val: {min_val} Test: {min_test} Train: {min_train}!')
rebalance_json(out_dir, background_val, min_val, 'balanced')
rebalance_json(out_dir, background_test, min_test, 'balanced')
rebalance_json(out_dir, background_train, min_train, 'balanced')
rebalance_json(out_dir, speech_val, min_val, 'balanced')
rebalance_json(out_dir, speech_test, min_test, 'balanced')
rebalance_json(out_dir, speech_train, min_train, 'balanced')
if args.rebalance_method == 'over':
logging.info(f"Rebalancing number of samples in classes using {args.rebalance_method} sampling.")
logging.info(f'Val: {max_val} Test: {max_test} Train: {max_train}!')
rebalance_json(out_dir, background_val, max_val, 'balanced')
rebalance_json(out_dir, background_test, max_test, 'balanced')
rebalance_json(out_dir, background_train, max_train, 'balanced')
rebalance_json(out_dir, speech_val, max_val, 'balanced')
rebalance_json(out_dir, speech_test, max_test, 'balanced')
rebalance_json(out_dir, speech_train, max_train, 'balanced')
if args.rebalance_method == 'fixed':
fixed_test, fixed_val, fixed_train = 200, 100, 500
logging.info(f"Rebalancing number of samples in classes using {args.rebalance_method} sampling.")
logging.info(f'Val: {fixed_val} Test: {fixed_test} Train: {fixed_train}!')
rebalance_json(out_dir, background_val, fixed_val, 'balanced')
rebalance_json(out_dir, background_test, fixed_test, 'balanced')
rebalance_json(out_dir, background_train, fixed_train, 'balanced')
rebalance_json(out_dir, speech_val, fixed_val, 'balanced')
rebalance_json(out_dir, speech_test, fixed_test, 'balanced')
rebalance_json(out_dir, speech_train, fixed_train, 'balanced')
else:
logging.info("Don't rebalance number of samples in classes.")
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/process_vad_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script is heavily derived from the Patter HUB5 processing script written
# by Ryan Leary
import argparse
import glob
import json
import os
import re
import subprocess
import sys
from collections import namedtuple
from math import ceil, floor
from operator import attrgetter
import numpy as np
import scipy.io.wavfile as wavfile
from tqdm import tqdm
parser = argparse.ArgumentParser(description="Prepare HUB5 data for training/eval")
parser.add_argument(
"--data_root", default=None, type=str, required=True, help="The path to the root LDC HUB5 dataset directory.",
)
parser.add_argument(
"--dest_root",
default=None,
type=str,
required=True,
help="Path to the destination root directory for processed files.",
)
# Optional arguments
parser.add_argument(
"--min_slice_duration", default=10.0, type=float, help="Minimum audio slice duration after processing.",
)
args = parser.parse_args()
StmUtterance = namedtuple(
'StmUtterance', ['filename', 'channel', 'speaker_id', 'begin', 'end', 'label', 'transcript',],
)
STM_LINE_FMT = re.compile(r"^(\w+)\s+(\w+)\s+(\w+)\s+([0-9.]+)\s+([0-9.]+)\s+(<.*>)?\s+(.+)$")
# Transcription errors and their fixes
TRANSCRIPT_BUGS = {"en_4622-B-12079-12187": "KIND OF WEIRD BUT"}
def get_utt_id(segment):
"""
Gives utterance IDs in a form like: en_4156-a-36558-37113
"""
return "{}-{}-{}-{}".format(segment.filename, segment.channel, int(segment.begin * 100), int(segment.end * 100),)
def convert_utterances(sph_path, wav_path):
"""
Converts a sphere audio file to wav.
"""
cmd = ["sph2pipe", "-f", "wav", "-p", sph_path, wav_path]
subprocess.run(cmd)
def create_wavs(data_root, dest_root):
"""
Converts the English sph files to wav using sph2pipe.
"""
sph_root = os.path.join(data_root, "hub5e_00", "english")
sph_list = glob.glob(os.path.join(sph_root, "*.sph"))
    # Iterate over each sphere file and convert it to wav
for sph_path in tqdm(sph_list, desc="Converting to wav", unit="file"):
sph_name, _ = os.path.splitext(os.path.basename(sph_path))
wav_path = os.path.join(dest_root, 'full_audio_wav', sph_name + ".wav")
cmd = ["sph2pipe", "-f", "wav", "-p", sph_path, wav_path]
subprocess.run(cmd)
def process_transcripts(dataset_root):
"""
Reads in transcripts for each audio segment and processes them.
"""
stm_path = os.path.join(dataset_root, "2000_hub5_eng_eval_tr", "reference", "hub5e00.english.000405.stm",)
results = []
chars = set()
with open(stm_path, "r") as fh:
for line in fh:
# lines with ';;' are comments
if line.startswith(";;"):
continue
if "IGNORE_TIME_SEGMENT_" in line:
continue
line = line.replace("<B_ASIDE>", "").replace("<E_ASIDE>", "")
line = line.replace("(%HESITATION)", "UH")
line = line.replace("-", "")
line = line.replace("(%UH)", "UH")
line = line.replace("(%AH)", "UH")
line = line.replace("(", "").replace(")", "")
line = line.lower()
m = STM_LINE_FMT.search(line.strip())
utt = StmUtterance(*m.groups())
# Convert begin/end times to float
utt = utt._replace(begin=float(utt.begin))
utt = utt._replace(end=float(utt.end))
# Check for utterance in dict of transcript mistakes
transcript_update = TRANSCRIPT_BUGS.get(get_utt_id(utt))
if transcript_update is not None:
utt = utt._replace(transcript=transcript_update)
results.append(utt)
chars.update(list(utt.transcript))
return results, chars
def write_one_segment(dest_root, speaker_id, count, audio, sr, duration, transcript):
"""
Writes out one segment of audio, and writes its corresponding transcript
in the manifest.
Args:
dest_root: the path to the output directory root
speaker_id: ID of the speaker, used in file naming
count: number of segments from this speaker so far
audio: the segment's audio data
sr: sample rate of the audio
duration: duration of the audio
transcript: the corresponding transcript
"""
audio_path = os.path.join(dest_root, "audio", f"{speaker_id}_{count:03}.wav")
manifest_path = os.path.join(dest_root, "manifest_hub5.json")
# Write audio
wavfile.write(audio_path, sr, audio)
# Write transcript
transcript = {
"audio_filepath": audio_path,
"duration": duration,
"text": transcript,
}
with open(manifest_path, 'a') as f:
json.dump(transcript, f)
f.write('\n')
def segment_audio(info_list, dest_root, min_slice_duration):
"""
Combines audio into >= min_slice_duration segments of the same speaker,
and writes the combined transcripts into a manifest.
Args:
info_list: list of StmUtterance objects with transcript information.
dest_root: path to output destination
min_slice_duration: min number of seconds per output audio slice
"""
info_list = sorted(info_list, key=attrgetter('speaker_id', 'begin'))
prev_id = None # For checking audio concatenation
id_count = 0
sample_rate, audio_data = None, None
transcript_buffer = ''
audio_buffer = []
buffer_duration = 0.0
# Iterate through utterances to build segments
for info in info_list:
if info.speaker_id != prev_id:
# Scrap the remainder in the buffers and start next segment
prev_id = info.speaker_id
id_count = 0
sample_rate, audio_data = wavfile.read(os.path.join(dest_root, 'full_audio_wav', info.filename + '.wav'))
transcript_buffer = ''
audio_buffer = []
buffer_duration = 0.0
# Append utterance info to buffers
transcript_buffer += info.transcript
channel = 0 if info.channel.lower() == 'a' else 1
audio_buffer.append(
audio_data[floor(info.begin * sample_rate) : ceil(info.end * sample_rate), channel,]
)
buffer_duration += info.end - info.begin
if buffer_duration < min_slice_duration:
transcript_buffer += ' '
else:
# Write out segment and transcript
id_count += 1
write_one_segment(
dest_root,
info.speaker_id,
id_count,
np.concatenate(audio_buffer, axis=0),
sample_rate,
buffer_duration,
transcript_buffer,
)
transcript_buffer = ''
audio_buffer = []
buffer_duration = 0.0
def main():
data_root = args.data_root
dest_root = args.dest_root
min_slice_duration = args.min_slice_duration
if not os.path.exists(os.path.join(dest_root, 'full_audio_wav')):
os.makedirs(os.path.join(dest_root, 'full_audio_wav'))
if not os.path.exists(os.path.join(dest_root, 'audio')):
os.makedirs(os.path.join(dest_root, 'audio'))
# Create/wipe manifest contents
open(os.path.join(dest_root, "manifest_hub5.json"), 'w').close()
# Convert full audio files from .sph to .wav
create_wavs(data_root, dest_root)
# Get each audio transcript from transcript file
info_list, chars = process_transcripts(data_root)
print("Writing out vocab file", file=sys.stderr)
with open(os.path.join(dest_root, "vocab.txt"), 'w') as fh:
for x in sorted(list(chars)):
fh.write(x + "\n")
# Segment the audio data
print("Segmenting audio and writing manifest")
segment_audio(info_list, dest_root, min_slice_duration)
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/process_hub5_data.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import logging
import os
import subprocess
import librosa
parser = argparse.ArgumentParser(description="AN4 dataset download and processing")
parser.add_argument("--data_root", required=True, default=None, type=str)
args = parser.parse_args()
def build_manifest(data_root, transcripts_path, manifest_path, wav_path):
with open(transcripts_path, 'r') as fin:
with open(manifest_path, 'w') as fout:
for line in fin:
# Lines look like this:
# <s> transcript </s> (fileID)
transcript = line[: line.find('(') - 1].lower()
transcript = transcript.replace('<s>', '').replace('</s>', '')
transcript = transcript.strip()
file_id = line[line.find('(') + 1 : -2] # e.g. "cen4-fash-b"
audio_path = os.path.join(
data_root, wav_path, file_id[file_id.find('-') + 1 : file_id.rfind('-')], file_id + '.wav',
)
duration = librosa.core.get_duration(filename=audio_path)
# Write the metadata to the manifest
metadata = {
"audio_filepath": audio_path,
"duration": duration,
"text": transcript,
}
json.dump(metadata, fout)
fout.write('\n')
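# Each manifest line written above is a single JSON object; for the hypothetical
# utterance "cen4-fash-b" it would look roughly like
# {"audio_filepath": "<data_root>/an4/wav/an4_clstk/fash/cen4-fash-b.wav", "duration": 1.2, "text": "..."}
# with the duration measured by librosa from the converted wav file.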
def main():
data_root = os.path.abspath(args.data_root)
# Convert from .sph to .wav
logging.info("Converting audio files to .wav...")
sph_list = glob.glob(os.path.join(data_root, 'an4/**/*.sph'), recursive=True)
for sph_path in sph_list:
wav_path = sph_path[:-4] + '.wav'
cmd = ['sox', sph_path, wav_path]
subprocess.run(cmd)
logging.info("Finished conversion.")
# Build manifests
logging.info("Building training manifest...")
train_transcripts = os.path.join(data_root, 'an4/etc/an4_train.transcription')
train_manifest = os.path.join(data_root, 'an4/train_manifest.json')
train_wavs = os.path.join(data_root, 'an4/wav/an4_clstk')
build_manifest(data_root, train_transcripts, train_manifest, train_wavs)
logging.info("Training manifests created.")
logging.info("Building test manifest...")
test_transcripts = os.path.join(data_root, 'an4/etc/an4_test.transcription')
test_manifest = os.path.join(data_root, 'an4/test_manifest.json')
test_wavs = os.path.join(data_root, 'an4/wav/an4test_clstk')
build_manifest(data_root, test_transcripts, test_manifest, test_wavs)
logging.info("Test manifest created.")
logging.info("Done with AN4 processing!")
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/process_an4_data.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python add_noise.py --input_manifest=<manifest file of original "clean" dataset>
# --noise_manifest=<manifest file pointing to noise data>
# --out_dir=<destination directory for noisy audio and manifests>
# --snrs=<list of snrs at which noise should be added to the audio>
# --seed=<seed for random number generator>
# --num_workers=<number of parallel workers>
# To be able to reproduce the same noisy dataset, use a fixed seed and num_workers=1
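# Example invocation (the manifest paths below are illustrative only):
# python add_noise.py --input_manifest=dev_clean.json --noise_manifest=noise.json \
#     --out_dir=./noisy_data --snrs 0 10 20 --seed=42 --num_workers=1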
import argparse
import copy
import json
import multiprocessing
import os
import random
import numpy as np
import soundfile as sf
from nemo.collections.asr.parts.preprocessing.perturb import NoisePerturbation
from nemo.collections.asr.parts.preprocessing.segment import AudioSegment
rng = None
att_factor = 0.8
save_noise = False
sample_rate = 16000
def get_out_dir_name(out_dir, input_name, noise_name, snr):
return os.path.join(out_dir, input_name, noise_name + "_" + str(snr) + "db")
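# For illustration, get_out_dir_name('/out', 'dev_clean', 'freesound', 10)
# returns '/out/dev_clean/freesound_10db' (the directory names are hypothetical).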
def create_manifest(input_manifest, noise_manifest, snrs, out_path, save_noise):
os.makedirs(os.path.join(out_path, "manifests"), exist_ok=True)
for snr in snrs:
out_dir = get_out_dir_name(
out_path,
os.path.splitext(os.path.basename(input_manifest))[0],
os.path.splitext(os.path.basename(noise_manifest))[0],
snr,
)
out_mfst = os.path.join(
os.path.join(out_path, "manifests"),
os.path.splitext(os.path.basename(input_manifest))[0]
+ "_"
+ os.path.splitext(os.path.basename(noise_manifest))[0]
+ "_"
+ str(snr)
+ "db"
+ ".json",
)
with open(input_manifest, "r") as inf, open(out_mfst, "w") as outf:
for line in inf:
row = json.loads(line.strip())
row['audio_filepath'] = os.path.join(out_dir, os.path.basename(row['audio_filepath']))
if save_noise:
file_ext = os.path.splitext(row['audio_filepath'])[1]
noise_filename = os.path.basename(row['audio_filepath']).replace(file_ext, "_noise" + file_ext)
row['noise_filepath'] = os.path.join(out_dir, noise_filename)
outf.write(json.dumps(row) + "\n")
def process_row(row):
audio_file = row['audio_filepath']
global sample_rate
data_orig = AudioSegment.from_file(audio_file, target_sr=sample_rate, offset=0)
for snr in row['snrs']:
min_snr_db = snr
max_snr_db = snr
global att_factor
perturber = NoisePerturbation(
manifest_path=row['noise_manifest'], min_snr_db=min_snr_db, max_snr_db=max_snr_db, rng=rng
)
out_dir = get_out_dir_name(
row['out_dir'],
os.path.splitext(os.path.basename(row['input_manifest']))[0],
os.path.splitext(os.path.basename(row['noise_manifest']))[0],
snr,
)
os.makedirs(out_dir, exist_ok=True)
out_f = os.path.join(out_dir, os.path.basename(audio_file))
if os.path.exists(out_f):
continue
data = copy.deepcopy(data_orig)
perturber.perturb(data)
max_level = np.max(np.abs(data.samples))
norm_factor = att_factor / max_level
new_samples = norm_factor * data.samples
sf.write(out_f, new_samples.transpose(), sample_rate)
global save_noise
if save_noise:
noise_samples = new_samples - norm_factor * data_orig.samples
out_f_ext = os.path.splitext(out_f)[1]
out_f_noise = out_f.replace(out_f_ext, "_noise" + out_f_ext)
sf.write(out_f_noise, noise_samples.transpose(), sample_rate)
def add_noise(infile, snrs, noise_manifest, out_dir, num_workers=1):
allrows = []
with open(infile, "r") as inf:
for line in inf:
row = json.loads(line.strip())
row['snrs'] = snrs
row['out_dir'] = out_dir
row['noise_manifest'] = noise_manifest
row['input_manifest'] = infile
allrows.append(row)
pool = multiprocessing.Pool(num_workers)
pool.map(process_row, allrows)
pool.close()
print('Done!')
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_manifest", type=str, required=True, help="clean test set",
)
parser.add_argument("--noise_manifest", type=str, required=True, help="path to noise manifest file")
parser.add_argument("--out_dir", type=str, required=True, help="destination directory for audio and manifests")
parser.add_argument("--snrs", type=int, nargs="+", default=[0, 10, 20, 30])
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--num_workers", default=1, type=int)
parser.add_argument("--sample_rate", default=16000, type=int)
parser.add_argument(
"--attenuation_factor",
default=0.8,
type=float,
help="Attenuation factor applied on the normalized noise-added samples before writing to wave",
)
parser.add_argument(
"--save_noise", default=False, action="store_true", help="save the noise added to the input signal"
)
args = parser.parse_args()
global sample_rate
sample_rate = args.sample_rate
global att_factor
att_factor = args.attenuation_factor
global save_noise
save_noise = args.save_noise
global rng
rng = args.seed
num_workers = args.num_workers
add_noise(args.input_manifest, args.snrs, args.noise_manifest, args.out_dir, num_workers=num_workers)
create_manifest(args.input_manifest, args.noise_manifest, args.snrs, args.out_dir, args.save_noise)
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/add_noise.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Usage:
python process_speech_commands_data.py \
--data_root=<absolute path to where the data should be stored> \
--data_version=<either 1 or 2, indicating version of the dataset> \
--class_split=<either "all" or "sub", indicates whether all 30/35 classes should be used, or the 10+2 split should be used> \
--num_processes=<number of processes to use for data preprocessing> \
--rebalance \
--log
"""
import argparse
import glob
import json
import logging
import os
import re
import tarfile
import urllib.request
from collections import defaultdict
from functools import partial
from multiprocessing import Pool
from typing import Dict, List, Set, Tuple
import librosa
import numpy as np
import soundfile
from tqdm import tqdm
URL_v1 = 'http://download.tensorflow.org/data/speech_commands_v0.01.tar.gz'
URL_v2 = 'http://download.tensorflow.org/data/speech_commands_v0.02.tar.gz'
def __maybe_download_file(destination: str, source: str) -> str:
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
Local filepath of the downloaded file
"""
if not os.path.exists(destination):
logging.info(f'{destination} does not exist. Downloading ...')
urllib.request.urlretrieve(source, filename=destination + '.tmp')
os.rename(destination + '.tmp', destination)
logging.info(f'Downloaded {destination}.')
else:
logging.info(f'Destination {destination} exists. Skipping.')
return destination
def __extract_all_files(filepath: str, data_dir: str):
if not os.path.exists(data_dir):
extract_file(filepath, data_dir)
else:
logging.info(f'Skipping extracting. Data already there {data_dir}')
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info('Not extracting. Maybe already there?')
def __get_mp_chunksize(dataset_size: int, num_processes: int) -> int:
"""
Returns the number of chunks to split the dataset into for multiprocessing.
Args:
dataset_size: size of the dataset
num_processes: number of processes to use for multiprocessing
Returns:
Number of chunks to split the dataset into for multiprocessing
"""
chunksize = dataset_size // num_processes
return chunksize if chunksize > 0 else 1
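# Worked example: __get_mp_chunksize(10000, 8) returns 1250, so each worker receives
# batches of 1250 items from pool.imap; with fewer items than processes the floor of 1
# keeps imap from being handed a chunksize of 0.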
def __construct_filepaths(
all_files: List[str],
valset_uids: Set[str],
testset_uids: Set[str],
class_split: str,
class_subset: List[str],
pattern: str,
) -> dict:
"""
Prepares the filepaths for the dataset.
Args:
all_files: list of all files in the dataset
valset_uids: set of uids of files in the validation set
testset_uids: set of uids of files in the test set
class_split: whether to use all classes as distinct labels, or to use
10 classes subset and rest of the classes as noise or background
class_subset: list of classes to consider if `class_split` is set to `sub`
        pattern: regex pattern to match the file names in the dataset
    Returns:
        Dictionary with per-label counts and filepaths, the unknown-class val/test
        filepaths, and the train/val/test splits
    """
label_count = defaultdict(int)
label_filepaths = defaultdict(list)
unknown_val_filepaths = []
unknown_test_filepaths = []
train, val, test = [], [], []
for entry in all_files:
r = re.match(pattern, entry)
if r:
label, uid = r.group(2), r.group(3)
if label == '_background_noise_' or label == 'silence':
continue
if class_split == 'sub' and label not in class_subset:
label = 'unknown'
if uid in valset_uids:
unknown_val_filepaths.append((label, entry))
elif uid in testset_uids:
unknown_test_filepaths.append((label, entry))
if uid not in valset_uids and uid not in testset_uids:
label_count[label] += 1
label_filepaths[label].append((label, entry))
if label == 'unknown':
continue
if uid in valset_uids:
val.append((label, entry))
elif uid in testset_uids:
test.append((label, entry))
else:
train.append((label, entry))
return {
'label_count': label_count,
'label_filepaths': label_filepaths,
'unknown_val_filepaths': unknown_val_filepaths,
'unknown_test_filepaths': unknown_test_filepaths,
'train': train,
'val': val,
'test': test,
}
def __construct_silence_set(
rng: np.random.RandomState, sampling_rate: int, silence_stride: int, data_folder: str, background_noise: str
) -> List[str]:
"""
Creates silence files given a background noise.
Args:
rng: Random state for random number generator
sampling_rate: sampling rate of the audio
silence_stride: stride for creating silence files
data_folder: folder containing the silence directory
background_noise: filepath of the background noise
Returns:
List of filepaths of silence files
"""
silence_files = []
if '.wav' in background_noise:
y, sr = librosa.load(background_noise, sr=sampling_rate)
for i in range(0, len(y) - sampling_rate, silence_stride):
file_path = f'silence/{os.path.basename(background_noise)[:-4]}_{i}.wav'
y_slice = y[i : i + sampling_rate] * rng.uniform(0.0, 1.0)
out_file_path = os.path.join(data_folder, file_path)
soundfile.write(out_file_path, y_slice, sr)
silence_files.append(('silence', out_file_path))
return silence_files
def __rebalance_files(max_count: int, label_filepath: Tuple[str, List[tuple]]) -> Tuple[str, List[str], int]:
"""
Rebalance the number of samples for a class.
Args:
max_count: maximum number of samples for a class
label_filepath: list of filepaths for a class
Returns:
Rebalanced list of filepaths along with the label name and the number of samples
"""
command, samples = label_filepath
filepaths = [sample[1] for sample in samples]
rng = np.random.RandomState(0)
filepaths = np.asarray(filepaths)
num_samples = len(filepaths)
if num_samples < max_count:
difference = max_count - num_samples
duplication_ids = rng.choice(num_samples, difference, replace=True)
filepaths = np.append(filepaths, filepaths[duplication_ids], axis=0)
return command, filepaths, num_samples
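# Sketch of the duplication step with hypothetical counts: a class holding 1500 clips
# rebalanced against max_count=3000 draws 1500 extra indices with replacement from its
# own files, so the returned filepath array has 3000 entries while num_samples still
# reports the original 1500 for logging.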
def __prepare_metadata(skip_duration: bool, sample: Tuple[str, str]) -> str:
"""
Creates the manifest entry for a file.
Args:
skip_duration: Whether to skip the computation of duration
sample: Tuple of label and filepath
Returns:
        JSON-serialized manifest entry of the file
"""
label, audio_path = sample
return json.dumps(
{
'audio_filepath': audio_path,
'duration': 0.0 if skip_duration else librosa.core.get_duration(filename=audio_path),
'command': label,
}
)
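# A resulting manifest line (values illustrative) looks like
# {"audio_filepath": "yes/0a2b400e_nohash_0.wav", "duration": 1.0, "command": "yes"}
# with duration forced to 0.0 when skip_duration is set.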
def __process_data(
data_folder: str,
dst_folder: str,
num_processes: int = 1,
rebalance: bool = False,
class_split: str = 'all',
skip_duration: bool = False,
):
"""
Processes the data and generates the manifests.
Args:
data_folder: source with wav files and validation / test lists
dst_folder: where manifest files will be stored
num_processes: number of processes
rebalance: rebalance the classes to have same number of samples
class_split: whether to use all classes as distinct labels, or to use
10 classes subset and rest of the classes as noise or background
skip_duration: Bool whether to skip duration computation. Use this only for
colab notebooks where knowing duration is not necessary for demonstration
"""
os.makedirs(dst_folder, exist_ok=True)
# Used for 10 classes + silence + unknown class setup - Only used when class_split is 'sub'
class_subset = ['yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go']
pattern = re.compile(r'(.+\/)?(\w+)\/([^_]+)_.+wav')
all_files = glob.glob(os.path.join(data_folder, '*/*wav'))
# Get files in the validation set
valset_uids = set()
with open(os.path.join(data_folder, 'validation_list.txt')) as fin:
for line in fin:
r = re.match(pattern, line)
if r:
valset_uids.add(r.group(3))
# Get files in the test set
testset_uids = set()
with open(os.path.join(data_folder, 'testing_list.txt')) as fin:
for line in fin:
r = re.match(pattern, line)
if r:
testset_uids.add(r.group(3))
logging.info('Validation and test set lists extracted')
filepath_info = __construct_filepaths(all_files, valset_uids, testset_uids, class_split, class_subset, pattern)
label_count = filepath_info['label_count']
label_filepaths = filepath_info['label_filepaths']
unknown_val_filepaths = filepath_info['unknown_val_filepaths']
unknown_test_filepaths = filepath_info['unknown_test_filepaths']
train = filepath_info['train']
val = filepath_info['val']
test = filepath_info['test']
logging.info('Prepared filepaths for dataset')
pool = Pool(num_processes)
# Add silence and unknown class label samples
if class_split == 'sub':
        logging.info('Performing 10+2 class subsplit')
silence_path = os.path.join(data_folder, 'silence')
os.makedirs(silence_path, exist_ok=True)
silence_stride = 1000 # 0.0625 second stride
sampling_rate = 16000
folder = os.path.join(data_folder, '_background_noise_')
silence_files = []
rng = np.random.RandomState(0)
background_noise_files = [os.path.join(folder, x) for x in os.listdir(folder)]
silence_set_fn = partial(__construct_silence_set, rng, sampling_rate, silence_stride, data_folder)
for silence_flist in tqdm(
pool.imap(
silence_set_fn, background_noise_files, __get_mp_chunksize(len(background_noise_files), num_processes)
),
total=len(background_noise_files),
desc='Constructing silence set',
):
silence_files.extend(silence_flist)
rng = np.random.RandomState(0)
rng.shuffle(silence_files)
logging.info(f'Constructed silence set of {len(silence_files)}')
# Create the splits
rng = np.random.RandomState(0)
silence_split = 0.1
unknown_split = 0.1
# train split
num_total_samples = sum([label_count[cls] for cls in class_subset])
num_silence_samples = int(np.ceil(silence_split * num_total_samples))
# initialize sample
label_count['silence'] = 0
label_filepaths['silence'] = []
for silence_id in range(num_silence_samples):
label_count['silence'] += 1
label_filepaths['silence'].append(silence_files[silence_id])
train.extend(label_filepaths['silence'])
# Update train unknown set
unknown_train_samples = label_filepaths['unknown']
rng.shuffle(unknown_train_samples)
unknown_size = int(np.ceil(unknown_split * num_total_samples))
label_count['unknown'] = unknown_size
label_filepaths['unknown'] = unknown_train_samples[:unknown_size]
train.extend(label_filepaths['unknown'])
logging.info('Train set prepared')
# val set silence
num_val_samples = len(val)
num_silence_samples = int(np.ceil(silence_split * num_val_samples))
val_idx = label_count['silence'] + 1
for silence_id in range(num_silence_samples):
val.append(silence_files[val_idx + silence_id])
# Update val unknown set
rng.shuffle(unknown_val_filepaths)
unknown_size = int(np.ceil(unknown_split * num_val_samples))
val.extend(unknown_val_filepaths[:unknown_size])
logging.info('Validation set prepared')
# test set silence
num_test_samples = len(test)
num_silence_samples = int(np.ceil(silence_split * num_test_samples))
test_idx = val_idx + num_silence_samples + 1
for silence_id in range(num_silence_samples):
test.append(silence_files[test_idx + silence_id])
# Update test unknown set
rng.shuffle(unknown_test_filepaths)
unknown_size = int(np.ceil(unknown_split * num_test_samples))
test.extend(unknown_test_filepaths[:unknown_size])
logging.info('Test set prepared')
max_command = None
max_count = -1
for command, count in label_count.items():
if command == 'unknown':
continue
if count > max_count:
max_count = count
max_command = command
if rebalance:
logging.info(f'Command with maximum number of samples = {max_command} with {max_count} samples')
logging.info(f'Rebalancing dataset by duplicating classes with less than {max_count} samples...')
rebalance_fn = partial(__rebalance_files, max_count)
for command, filepaths, num_samples in tqdm(
pool.imap(rebalance_fn, label_filepaths.items(), __get_mp_chunksize(len(label_filepaths), num_processes)),
total=len(label_filepaths),
desc='Rebalancing dataset',
):
if num_samples < max_count:
logging.info(f'Extended class label {command} from {num_samples} samples to {len(filepaths)} samples')
label_filepaths[command] = [(command, filepath) for filepath in filepaths]
del train
train = []
for label, samples in label_filepaths.items():
train.extend(samples)
manifests = [
('train_manifest.json', train),
('validation_manifest.json', val),
('test_manifest.json', test),
]
metadata_fn = partial(__prepare_metadata, skip_duration)
for manifest_filename, dataset in manifests:
num_files = len(dataset)
logging.info(f'Preparing manifest : {manifest_filename} with #{num_files} files')
manifest = [
metadata
for metadata in tqdm(
pool.imap(metadata_fn, dataset, __get_mp_chunksize(len(dataset), num_processes)),
total=num_files,
desc=f'Preparing {manifest_filename}',
)
]
with open(os.path.join(dst_folder, manifest_filename), 'w') as fout:
for metadata in manifest:
fout.write(metadata + '\n')
logging.info(f'Finished construction of manifest. Path: {os.path.join(dst_folder, manifest_filename)}')
pool.close()
if skip_duration:
logging.info(
f'\n<<NOTE>> Duration computation was skipped for demonstration purposes on Colaboratory.\n'
f'In order to replicate paper results and properly perform data augmentation, \n'
f'please recompute the manifest file without the `--skip_duration` flag !\n'
)
def main():
parser = argparse.ArgumentParser(description='Google Speech Commands Data download and preprocessing')
parser.add_argument('--data_root', required=True, help='Root directory for storing data')
parser.add_argument(
'--data_version',
required=True,
default=1,
type=int,
choices=[1, 2],
help='Version of the speech commands dataset to download',
)
parser.add_argument(
'--class_split', default='all', choices=['all', 'sub'], help='Whether to consider all classes or only a subset'
)
parser.add_argument('--num_processes', default=1, type=int, help='Number of processes')
parser.add_argument('--rebalance', action='store_true', help='Rebalance the number of samples in each class')
parser.add_argument('--skip_duration', action='store_true', help='Skip computing duration of audio files')
parser.add_argument('--log', action='store_true', help='Generate logs')
args = parser.parse_args()
if args.log:
logging.basicConfig(level=logging.DEBUG)
data_root = args.data_root
data_set = f'google_speech_recognition_v{args.data_version}'
data_folder = os.path.join(data_root, data_set)
logging.info(f'Working on: {data_set}')
URL = URL_v1 if args.data_version == 1 else URL_v2
# Download and extract
if not os.path.exists(data_folder):
file_path = os.path.join(data_root, data_set + '.tar.bz2')
logging.info(f'Getting {data_set}')
__maybe_download_file(file_path, URL)
logging.info(f'Extracting {data_set}')
__extract_all_files(file_path, data_folder)
logging.info(f'Processing {data_set}')
__process_data(
data_folder,
data_folder,
num_processes=args.num_processes,
rebalance=args.rebalance,
class_split=args.class_split,
skip_duration=args.skip_duration,
)
logging.info('Done!')
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/process_speech_commands_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# downloads the training/eval set for VoxConverse.
import argparse
import logging
import os
import zipfile
from pathlib import Path
import wget
from nemo.collections.asr.parts.utils.manifest_utils import create_manifest
dev_url = "https://www.robots.ox.ac.uk/~vgg/data/voxconverse/data/voxconverse_dev_wav.zip"
test_url = "https://www.robots.ox.ac.uk/~vgg/data/voxconverse/data/voxconverse_test_wav.zip"
rttm_annotations_url = "https://github.com/joonson/voxconverse/archive/refs/heads/master.zip"
def extract_file(filepath: Path, data_dir: Path):
try:
with zipfile.ZipFile(str(filepath), 'r') as zip_ref:
zip_ref.extractall(str(data_dir))
except Exception:
logging.info("Not extracting. Maybe already there?")
def _generate_manifest(data_root: Path, audio_path: Path, rttm_path: Path, manifest_output_path: Path):
audio_list = str(data_root / 'audio_file.txt')
rttm_list = str(data_root / 'rttm_file.txt')
with open(audio_list, 'w') as f:
        f.write('\n'.join([str(os.path.join(audio_path, x)) for x in os.listdir(audio_path)]))
with open(rttm_list, 'w') as f:
f.write('\n'.join([str(os.path.join(rttm_path, x)) for x in os.listdir(rttm_path)]))
create_manifest(
audio_list, str(manifest_output_path), rttm_path=rttm_list,
)
def main():
parser = argparse.ArgumentParser(description="VoxConverse Data download")
parser.add_argument("--data_root", default='./', type=str)
args = parser.parse_args()
data_root = Path(args.data_root)
data_root.mkdir(exist_ok=True, parents=True)
test_path = data_root / os.path.basename(test_url)
dev_path = data_root / os.path.basename(dev_url)
rttm_path = data_root / os.path.basename(rttm_annotations_url)
if not os.path.exists(test_path):
test_path = wget.download(test_url, str(data_root))
if not os.path.exists(dev_path):
dev_path = wget.download(dev_url, str(data_root))
if not os.path.exists(rttm_path):
rttm_path = wget.download(rttm_annotations_url, str(data_root))
extract_file(test_path, data_root / 'test/')
extract_file(dev_path, data_root / 'dev/')
extract_file(rttm_path, data_root)
_generate_manifest(
data_root=data_root,
audio_path=os.path.abspath(data_root / 'test/voxconverse_test_wav/'),
rttm_path=os.path.abspath(data_root / 'voxconverse-master/test/'),
manifest_output_path=data_root / 'test_manifest.json',
)
_generate_manifest(
data_root=data_root,
audio_path=os.path.abspath(data_root / 'dev/audio/'),
rttm_path=os.path.abspath(data_root / 'voxconverse-master/dev/'),
manifest_output_path=data_root / 'dev_manifest.json',
)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/speaker_tasks/get_voxconverse.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# USAGE: python get_hi-mia_data.py --data_root=<where to put data>
import argparse
import json
import logging as _logging
import os
import tarfile
import urllib.request
from glob import glob
import librosa as l
from sklearn.model_selection import StratifiedShuffleSplit
from tqdm import tqdm
parser = argparse.ArgumentParser(description="HI-MIA Data download")
parser.add_argument("--data_root", required=True, default=None, type=str)
parser.add_argument("--log_level", default=20, type=int)
args = parser.parse_args()
logging = _logging.getLogger(__name__)
logging.addHandler(_logging.StreamHandler())
logging.setLevel(args.log_level)
URL = {
"dev": "http://www.openslr.org/resources/85/dev.tar.gz",
"test": "http://www.openslr.org/resources/85/test.tar.gz",
"train": "http://www.openslr.org/resources/85/train.tar.gz",
}
def __retrieve_with_progress(source: str, filename: str):
"""
Downloads source to destination
Displays progress bar
Args:
source: url of resource
destination: local filepath
Returns:
"""
with open(filename, "wb") as f:
response = urllib.request.urlopen(source)
total = response.length
if total is None:
            f.write(response.read())
else:
with tqdm(total=total, unit="B", unit_scale=True, unit_divisor=1024) as pbar:
for data in response:
f.write(data)
pbar.update(len(data))
def __maybe_download_file(destination: str, source: str):
"""
Downloads source to destination if it doesn't exist.
If exists, skips download
Args:
destination: local filepath
source: url of resource
Returns:
"""
source = URL[source]
if not os.path.exists(destination) and not os.path.exists(os.path.splitext(destination)[0]):
logging.info("{0} does not exist. Downloading ...".format(destination))
__retrieve_with_progress(source, filename=destination + ".tmp")
os.rename(destination + ".tmp", destination)
logging.info("Downloaded {0}.".format(destination))
elif os.path.exists(destination):
logging.info("Destination {0} exists. Skipping.".format(destination))
elif os.path.exists(os.path.splitext(destination)[0]):
logging.warning(
"Assuming extracted folder %s contains the extracted files from %s. Will not download.",
os.path.basename(destination),
destination,
)
return destination
def __extract_all_files(filepath: str, data_root: str, data_dir: str):
if not os.path.exists(data_dir):
extract_file(filepath, data_root)
audio_dir = os.path.join(data_dir, "wav")
for subfolder, _, filelist in os.walk(audio_dir):
for ftar in filelist:
extract_file(os.path.join(subfolder, ftar), subfolder)
else:
logging.info("Skipping extracting. Data already there %s" % data_dir)
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath, encoding='utf-8')
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info("Not extracting. Maybe already there?")
def __remove_tarred_files(filepath: str, data_dir: str):
if os.path.exists(data_dir) and os.path.isfile(filepath):
logging.info("Deleting %s" % filepath)
os.remove(filepath)
def write_file(name, lines, idx):
with open(name, "w") as fout:
for i in idx:
dic = lines[i]
json.dump(dic, fout)
fout.write("\n")
logging.info("wrote %s", name)
def __process_data(data_folder: str, data_set: str):
"""
To generate manifest
Args:
data_folder: source with wav files
Returns:
"""
fullpath = os.path.abspath(data_folder)
filelist = glob(fullpath + "/**/*.wav", recursive=True)
out = os.path.join(fullpath, data_set + "_all.json")
utt2spk = os.path.join(fullpath, "utt2spk")
utt2spk_file = open(utt2spk, "w")
    id = -2  # index of the speaker id component in the file path
if os.path.exists(out):
logging.warning(
"%s already exists and is assumed to be processed. If not, please delete %s and rerun this script",
out,
out,
)
return
speakers = []
lines = []
with open(out, "w") as outfile:
for line in tqdm(filelist):
line = line.strip()
y, sr = l.load(line, sr=None)
if sr != 16000:
y, sr = l.load(line, sr=16000)
l.output.write_wav(line, y, sr)
dur = l.get_duration(y=y, sr=sr)
if data_set == "test":
speaker = line.split("/")[-1].split(".")[0].split("_")[0]
else:
speaker = line.split("/")[id]
speaker = list(speaker)
speaker = "".join(speaker)
speakers.append(speaker)
meta = {"audio_filepath": line, "duration": float(dur), "label": speaker}
lines.append(meta)
json.dump(meta, outfile)
outfile.write("\n")
utt2spk_file.write(line.split("/")[-1] + "\t" + speaker + "\n")
utt2spk_file.close()
if data_set != "test":
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.1, random_state=42)
for train_idx, test_idx in sss.split(speakers, speakers):
print(len(train_idx))
out = os.path.join(fullpath, "train.json")
write_file(out, lines, train_idx)
out = os.path.join(fullpath, "dev.json")
write_file(out, lines, test_idx)
def main():
data_root = args.data_root
for data_set in URL.keys():
# data_set = 'data_aishell'
logging.info("\n\nWorking on: {0}".format(data_set))
file_path = os.path.join(data_root, data_set + ".tgz")
logging.info("Getting {0}".format(data_set))
__maybe_download_file(file_path, data_set)
logging.info("Extracting {0}".format(data_set))
data_folder = os.path.join(data_root, data_set)
__extract_all_files(file_path, data_root, data_folder)
__remove_tarred_files(file_path, data_folder)
logging.info("Processing {0}".format(data_set))
__process_data(data_folder, data_set)
logging.info("Done!")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/speaker_tasks/get_hi-mia_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Download the AMI test dataset used to evaluate Speaker Diarization
# More information here: https://groups.inf.ed.ac.uk/ami/corpus/
# USAGE: python get_ami_data.py
import argparse
import os
from nemo.collections.asr.parts.utils.manifest_utils import create_manifest
rttm_url = "https://raw.githubusercontent.com/BUTSpeechFIT/AMI-diarization-setup/main/only_words/rttms/{}/{}.rttm"
uem_url = "https://raw.githubusercontent.com/BUTSpeechFIT/AMI-diarization-setup/main/uems/{}/{}.uem"
list_url = "https://raw.githubusercontent.com/BUTSpeechFIT/AMI-diarization-setup/main/lists/{}.meetings.txt"
audio_types = ['Mix-Headset', 'Array1-01']
# these two IDs in the train set are missing download links for Array1-01.
# We exclude them as a result.
not_found_ids = ['IS1007d', 'IS1003b']
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Download the AMI Corpus Dataset for Speaker Diarization")
parser.add_argument(
"--test_manifest_filepath",
help="path to output test manifest file",
type=str,
default='AMI_test_manifest.json',
)
parser.add_argument(
"--dev_manifest_filepath", help="path to output dev manifest file", type=str, default='AMI_dev_manifest.json',
)
parser.add_argument(
"--train_manifest_filepath",
help="path to output train manifest file",
type=str,
default='AMI_train_manifest.json',
)
parser.add_argument("--data_root", help="path to output data directory", type=str, default="ami_dataset")
args = parser.parse_args()
data_path = os.path.abspath(args.data_root)
os.makedirs(data_path, exist_ok=True)
for manifest_path, split in (
(args.test_manifest_filepath, 'test'),
(args.dev_manifest_filepath, 'dev'),
(args.train_manifest_filepath, 'train'),
):
split_path = os.path.join(data_path, split)
audio_path = os.path.join(split_path, "audio")
os.makedirs(split_path, exist_ok=True)
rttm_path = os.path.join(split_path, "rttm")
uem_path = os.path.join(split_path, "uem")
os.system(f"wget -P {split_path} {list_url.format(split)}")
with open(os.path.join(split_path, f"{split}.meetings.txt")) as f:
ids = f.read().strip().split('\n')
for id in [file_id for file_id in ids if file_id not in not_found_ids]:
for audio_type in audio_types:
audio_type_path = os.path.join(audio_path, audio_type)
os.makedirs(audio_type_path, exist_ok=True)
os.system(
f"wget -P {audio_type_path} https://groups.inf.ed.ac.uk/ami/AMICorpusMirror//amicorpus/{id}/audio/{id}.{audio_type}.wav"
)
rttm_download = rttm_url.format(split, id)
os.system(f"wget -P {rttm_path} {rttm_download}")
uem_download = uem_url.format(split, id)
os.system(f"wget -P {uem_path} {uem_download}")
rttm_files_path = os.path.join(split_path, 'rttm_files.txt')
with open(rttm_files_path, 'w') as f:
f.write('\n'.join(os.path.join(rttm_path, p) for p in os.listdir(rttm_path)))
uem_files_path = os.path.join(split_path, 'uem_files.txt')
with open(uem_files_path, 'w') as f:
f.write('\n'.join(os.path.join(uem_path, p) for p in os.listdir(uem_path)))
for audio_type in audio_types:
audio_type_path = os.path.join(audio_path, audio_type)
audio_files_path = os.path.join(split_path, f'audio_files_{audio_type}.txt')
with open(audio_files_path, 'w') as f:
f.write('\n'.join(os.path.join(audio_type_path, p) for p in os.listdir(audio_type_path)))
audio_type_manifest_path = manifest_path.replace('.json', f'.{audio_type}.json')
create_manifest(
audio_files_path, audio_type_manifest_path, rttm_path=rttm_files_path, uem_path=uem_files_path
)
| NeMo-main | scripts/dataset_processing/speaker_tasks/get_ami_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# downloads the training/eval set for AISHELL Diarization.
# The training dataset is around 170 GiB; to skip it, pass the --skip_train flag.
import argparse
import glob
import logging
import os
import tarfile
from pathlib import Path
import wget
from sox import Transformer
from nemo.collections.asr.parts.utils.manifest_utils import create_manifest
train_url = "https://www.openslr.org/resources/111/train_{}.tar.gz"
train_datasets = ["S", "M", "L"]
eval_url = "https://www.openslr.org/resources/111/test.tar.gz"
def extract_file(filepath: str, data_dir: str):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
logging.info("Not extracting. Maybe already there?")
def __process_data(dataset_url: str, dataset_path: Path, manifest_output_path: Path):
os.makedirs(dataset_path, exist_ok=True)
tar_file_path = os.path.join(dataset_path, os.path.basename(dataset_url))
if not os.path.exists(tar_file_path):
wget.download(dataset_url, tar_file_path)
extract_file(tar_file_path, str(dataset_path))
wav_path = dataset_path / 'converted_wav/'
extracted_dir = Path(tar_file_path).stem.replace('.tar', '')
flac_path = dataset_path / (extracted_dir + '/wav/')
__process_flac_audio(flac_path, wav_path)
audio_files = [os.path.join(os.path.abspath(wav_path), file) for file in os.listdir(str(wav_path))]
rttm_files = glob.glob(str(dataset_path / (extracted_dir + '/TextGrid/*.rttm')))
rttm_files = [os.path.abspath(file) for file in rttm_files]
audio_list = dataset_path / 'audio_files.txt'
rttm_list = dataset_path / 'rttm_files.txt'
with open(audio_list, 'w') as f:
f.write('\n'.join(audio_files))
with open(rttm_list, 'w') as f:
f.write('\n'.join(rttm_files))
create_manifest(
str(audio_list), manifest_output_path, rttm_path=str(rttm_list),
)
def __process_flac_audio(flac_path, wav_path):
os.makedirs(wav_path, exist_ok=True)
flac_files = os.listdir(flac_path)
for flac_file in flac_files:
# Convert FLAC file to WAV
id = Path(flac_file).stem
wav_file = os.path.join(wav_path, id + ".wav")
if not os.path.exists(wav_file):
Transformer().build(os.path.join(flac_path, flac_file), wav_file)
def main():
parser = argparse.ArgumentParser(description="Aishell Data download")
parser.add_argument("--data_root", default='./', type=str)
parser.add_argument("--output_manifest_path", default='aishell_diar_manifest.json', type=str)
parser.add_argument("--skip_train", help="skip downloading the training dataset", action="store_true")
args = parser.parse_args()
data_root = Path(args.data_root)
data_root.mkdir(exist_ok=True, parents=True)
if not args.skip_train:
for tag in train_datasets:
dataset_url = train_url.format(tag)
dataset_path = data_root / f'{tag}/'
manifest_output_path = data_root / f'train_{tag}_manifest.json'
__process_data(
dataset_url=dataset_url, dataset_path=dataset_path, manifest_output_path=manifest_output_path
)
# create test dataset
dataset_path = data_root / f'eval/'
manifest_output_path = data_root / f'eval_manifest.json'
__process_data(dataset_url=eval_url, dataset_path=dataset_path, manifest_output_path=manifest_output_path)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/speaker_tasks/get_aishell_diarization_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from argparse import ArgumentParser
from collections import defaultdict
"""
Converts Chinese Polyphones with Pinyin (CPP) data to .json manifest format for Chinese HeteronymClassificationModel training.
The Chinese dataset can be found here:
https://github.com/kakaobrain/g2pM#the-cpp-dataset
Usage
# prepare manifest
mkdir -p ./cpp_manifest
git clone https://github.com/kakaobrain/g2pM.git
python3 export_zh_cpp_data_to_manifest.py --data_folder g2pM/data/ --output_folder ./cpp_manifest
"""
def parse_args():
parser = ArgumentParser()
parser.add_argument('--data_folder', help="Path to data folder with the CPP data files", type=str, required=True)
parser.add_argument(
"--output_folder", help="Path to data folder with output .json file to store the data", type=str, required=True
)
return parser.parse_args()
def convert_cpp_data_to_manifest(data_folder: str, output_folder: str):
"""
Convert CPP data to .json manifest
Args:
data_folder: data_folder that contains data files
output_folder: path to output folder
"""
wordid_dict = defaultdict(set)
for key in ['train', 'dev', 'test']:
output_manifest = f"{output_folder}/{key}.json"
sent_file = f"{data_folder}/{key}.sent"
label_file = f"{data_folder}/{key}.lb"
with open(output_manifest, "w") as f_out, open(sent_file, 'r') as f_sent, open(label_file, 'r') as f_label:
lines_sent, lines_label = f_sent.readlines(), f_label.readlines()
lines_label = [line.strip('\n') for line in lines_label]
lines_idx = [line.index('\u2581') for line in lines_sent]
lines_sent = [line.strip('\n').replace('\u2581', '') for line in lines_sent]
for i, index in enumerate(lines_idx):
wordid_dict[lines_sent[i][index]].add(lines_label[i])
for i, sent in enumerate(lines_sent):
start, end = lines_idx[i], lines_idx[i] + 1
heteronym_span = sent[start:end]
entry = {
"text_graphemes": sent,
"start_end": [start, start + 1],
"heteronym_span": heteronym_span,
"word_id": f"{heteronym_span}_{lines_label[i]}",
}
f_out.write(json.dumps(entry, ensure_ascii=False) + "\n")
print(f"Data saved at {output_manifest}")
word_id_file = f"{output_folder}/wordid.tsv"
with open(word_id_file, 'w') as f_wordid:
f_wordid.write(f"homograph\twordid\tlabel\tpronunciation\n")
for key, pronunciations in wordid_dict.items():
for value in pronunciations:
f_wordid.write(f"{key}\t{key}_{value}\tNone\t{value}\n")
if __name__ == '__main__':
args = parse_args()
convert_cpp_data_to_manifest(args.data_folder, args.output_folder)
| NeMo-main | scripts/dataset_processing/g2p/export_zh_cpp_data_to_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2012-2013 Kyle Gorman <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# syllabify.py: prosodic parsing of ARPABET entries
# source: https://github.com/kylebgorman/syllabify
from itertools import chain
## constants
SLAX = {
"IH1",
"IH2",
"EH1",
"EH2",
"AE1",
"AE2",
"AH1",
"AH2",
"UH1",
"UH2",
}
VOWELS = {
"IY1",
"IY2",
"IY0",
"EY1",
"EY2",
"EY0",
"AA1",
"AA2",
"AA0",
"ER1",
"ER2",
"ER0",
"AW1",
"AW2",
"AW0",
"AO1",
"AO2",
"AO0",
"AY1",
"AY2",
"AY0",
"OW1",
"OW2",
"OW0",
"OY1",
"OY2",
"OY0",
"IH0",
"EH0",
"AE0",
"AH0",
"UH0",
"UW1",
"UW2",
"UW0",
"UW",
"IY",
"EY",
"AA",
"ER",
"AW",
"AO",
"AY",
"OW",
"OY",
"UH",
"IH",
"EH",
"AE",
"AH",
"UH",
} | SLAX
## licit medial onsets
O2 = {
("P", "R"),
("T", "R"),
("K", "R"),
("B", "R"),
("D", "R"),
("G", "R"),
("F", "R"),
("TH", "R"),
("P", "L"),
("K", "L"),
("B", "L"),
("G", "L"),
("F", "L"),
("S", "L"),
("K", "W"),
("G", "W"),
("S", "W"),
("S", "P"),
("S", "T"),
("S", "K"),
("HH", "Y"), # "clerihew"
("R", "W"),
}
O3 = {("S", "T", "R"), ("S", "K", "L"), ("T", "R", "W")} # "octroi"
# This does not represent anything like a complete list of onsets, but
# merely those that need to be maximized in medial position.
def syllabify(pron, alaska_rule=True):
"""
Syllabifies a CMU dictionary (ARPABET) word string
# Alaska rule:
>>> pprint(syllabify('AH0 L AE1 S K AH0'.split())) # Alaska
'-AH0-.L-AE1-S.K-AH0-'
>>> pprint(syllabify('AH0 L AE1 S K AH0'.split(), 0)) # Alaska
'-AH0-.L-AE1-.S K-AH0-'
# huge medial onsets:
>>> pprint(syllabify('M IH1 N S T R AH0 L'.split())) # minstrel
'M-IH1-N.S T R-AH0-L'
>>> pprint(syllabify('AA1 K T R W AA0 R'.split())) # octroi
'-AA1-K.T R W-AA0-R'
# destressing
>>> pprint(destress(syllabify('M IH1 L AH0 T EH2 R IY0'.split())))
'M-IH-.L-AH-.T-EH-.R-IY-'
# normal treatment of 'j':
>>> pprint(syllabify('M EH1 N Y UW0'.split())) # menu
'M-EH1-N.Y-UW0-'
>>> pprint(syllabify('S P AE1 N Y AH0 L'.split())) # spaniel
'S P-AE1-N.Y-AH0-L'
>>> pprint(syllabify('K AE1 N Y AH0 N'.split())) # canyon
'K-AE1-N.Y-AH0-N'
>>> pprint(syllabify('M IH0 N Y UW2 EH1 T'.split())) # minuet
'M-IH0-N.Y-UW2-.-EH1-T'
>>> pprint(syllabify('JH UW1 N Y ER0'.split())) # junior
'JH-UW1-N.Y-ER0-'
>>> pprint(syllabify('K L EH R IH HH Y UW'.split())) # clerihew
'K L-EH-.R-IH-.HH Y-UW-'
# nuclear treatment of 'j'
>>> pprint(syllabify('R EH1 S K Y UW0'.split())) # rescue
'R-EH1-S.K-Y UW0-'
>>> pprint(syllabify('T R IH1 B Y UW0 T'.split())) # tribute
'T R-IH1-B.Y-UW0-T'
>>> pprint(syllabify('N EH1 B Y AH0 L AH0'.split())) # nebula
'N-EH1-B.Y-AH0-.L-AH0-'
>>> pprint(syllabify('S P AE1 CH UH0 L AH0'.split())) # spatula
'S P-AE1-.CH-UH0-.L-AH0-'
>>> pprint(syllabify('AH0 K Y UW1 M AH0 N'.split())) # acumen
'-AH0-K.Y-UW1-.M-AH0-N'
>>> pprint(syllabify('S AH1 K Y AH0 L IH0 N T'.split())) # succulent
'S-AH1-K.Y-AH0-.L-IH0-N T'
>>> pprint(syllabify('F AO1 R M Y AH0 L AH0'.split())) # formula
'F-AO1 R-M.Y-AH0-.L-AH0-'
>>> pprint(syllabify('V AE1 L Y UW0'.split())) # value
'V-AE1-L.Y-UW0-'
# everything else
>>> pprint(syllabify('N AO0 S T AE1 L JH IH0 K'.split())) # nostalgic
'N-AO0-.S T-AE1-L.JH-IH0-K'
>>> pprint(syllabify('CH ER1 CH M AH0 N'.split())) # churchmen
'CH-ER1-CH.M-AH0-N'
>>> pprint(syllabify('K AA1 M P AH0 N S EY2 T'.split())) # compensate
'K-AA1-M.P-AH0-N.S-EY2-T'
>>> pprint(syllabify('IH0 N S EH1 N S'.split())) # inCENSE
'-IH0-N.S-EH1-N S'
>>> pprint(syllabify('IH1 N S EH2 N S'.split())) # INcense
'-IH1-N.S-EH2-N S'
>>> pprint(syllabify('AH0 S EH1 N D'.split())) # ascend
'-AH0-.S-EH1-N D'
>>> pprint(syllabify('R OW1 T EY2 T'.split())) # rotate
'R-OW1-.T-EY2-T'
>>> pprint(syllabify('AA1 R T AH0 S T'.split())) # artist
'-AA1 R-.T-AH0-S T'
>>> pprint(syllabify('AE1 K T ER0'.split())) # actor
'-AE1-K.T-ER0-'
>>> pprint(syllabify('P L AE1 S T ER0'.split())) # plaster
'P L-AE1-S.T-ER0-'
>>> pprint(syllabify('B AH1 T ER0'.split())) # butter
'B-AH1-.T-ER0-'
>>> pprint(syllabify('K AE1 M AH0 L'.split())) # camel
'K-AE1-.M-AH0-L'
>>> pprint(syllabify('AH1 P ER0'.split())) # upper
'-AH1-.P-ER0-'
>>> pprint(syllabify('B AH0 L UW1 N'.split())) # balloon
'B-AH0-.L-UW1-N'
>>> pprint(syllabify('P R OW0 K L EY1 M'.split())) # proclaim
'P R-OW0-.K L-EY1-M'
>>> pprint(syllabify('IH0 N S EY1 N'.split())) # insane
'-IH0-N.S-EY1-N'
>>> pprint(syllabify('IH0 K S K L UW1 D'.split())) # exclude
'-IH0-K.S K L-UW1-D'
"""
## main pass
mypron = list(pron)
nuclei = []
onsets = []
i = -1
for (j, seg) in enumerate(mypron):
if seg in VOWELS:
nuclei.append([seg])
onsets.append(mypron[i + 1 : j]) # actually interludes, r.n.
i = j
codas = [mypron[i + 1 :]]
## resolve disputes and compute coda
for i in range(1, len(onsets)):
coda = []
# boundary cases
if len(onsets[i]) > 1 and onsets[i][0] == "R":
nuclei[i - 1].append(onsets[i].pop(0))
if len(onsets[i]) > 2 and onsets[i][-1] == "Y":
nuclei[i].insert(0, onsets[i].pop())
if len(onsets[i]) > 1 and alaska_rule and nuclei[i - 1][-1] in SLAX and onsets[i][0] == "S":
coda.append(onsets[i].pop(0))
# onset maximization
depth = 1
if len(onsets[i]) > 1:
if tuple(onsets[i][-2:]) in O2:
depth = 3 if tuple(onsets[i][-3:]) in O3 else 2
for j in range(len(onsets[i]) - depth):
coda.append(onsets[i].pop(0))
# store coda
codas.insert(i - 1, coda)
    ## verify that all segments are included in the output
output = list(zip(onsets, nuclei, codas)) # in Python3 zip is a generator
flat_output = list(chain.from_iterable(chain.from_iterable(output)))
if flat_output != mypron:
raise ValueError(f"could not syllabify {mypron}, got {flat_output}")
return output
def pprint(syllab):
"""
Pretty-print a syllabification
"""
return ".".join("-".join(" ".join(p) for p in syl) for syl in syllab)
def destress(syllab):
"""
Generate a syllabification with nuclear stress information removed
"""
syls = []
for (onset, nucleus, coda) in syllab:
nuke = [p[:-1] if p[-1] in {"0", "1", "2"} else p for p in nucleus]
syls.append((onset, nuke, coda))
return syls
if __name__ == "__main__":
import doctest
doctest.testmod()
| NeMo-main | scripts/dataset_processing/g2p/syllabify.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os
from argparse import ArgumentParser
from typing import Dict
from syllabify import syllabify
"""
Usage:
cd NeMo/scripts && python dataset_processing/g2p/convert_cmu_arpabet_to_ipa.py
"""
def parse_args():
parser = ArgumentParser("ARPABET to IPA conversion sctipt")
parser.add_argument(
'--cmu_arpabet',
help="Path to CMU ARPABET dictionary file",
type=str,
default="tts_dataset_files/cmudict-0.7b_nv22.10",
)
parser.add_argument("--ipa_out", help="Path to save IPA version of the dictionary", type=str, required=True)
parser.add_argument(
"--mapping",
help="ARPABET to IPA phoneme mapping file",
type=str,
default="tts_dataset_files/cmudict-arpabet_to_ipa_nv22.10.tsv",
)
return parser.parse_args()
def convert_arp_to_ipa(arp_to_ipa_dict: Dict[str, str], arp_input: str, remove_space: bool = False) -> str:
"""
Converts ARPABET phoneme to IPA based on arp_to_ipa_dict mapping
Args:
arp_to_ipa_dict: ARPABET to IPA phonemes mapping
arp_input: ARPABET input
remove_space: set to TRUE to remove spaces between IPA phonemes
Returns:
input word in IPA form
"""
primary_stress = "ˈ"
secondary_stress = "ˌ"
stress_dict = {"0": "", "1": primary_stress, "2": secondary_stress}
word_ipa = ""
phonemes = arp_input.split()
# split ARPABET phoneme input into syllables,
# e.g. syllabify(["HH", "AH0", "L", "OW1"]) -> [(['HH'], ['AH0'], []), (['L'], ['OW1'], [])]
syllables = syllabify(phonemes)
for syl_idx, syll in enumerate(syllables):
syll_stress = ""
syll_ipa = ""
# syll is a tuple of lists of phonemes, here we flatten it and get rid of empty entries,
# e.g. (['HH'], ['AH0'], []) -> ['HH', 'AH0']
syll = [x for x in itertools.chain.from_iterable(syll)]
for phon_idx, phon in enumerate(syll):
if phon[-1].isdigit():
syll_stress = phon[-1]
if syll_stress not in stress_dict:
raise ValueError(f"{syll_stress} unknown")
syll_stress = stress_dict[syll_stress]
# some phonemes are followed by a digit that represents stress, e.g., `AH0`
if phon not in arp_to_ipa_dict and phon[-1].isdigit():
phon = phon[:-1]
if phon not in arp_to_ipa_dict:
raise ValueError(f"|{phon}| phoneme not found in |{arp_input}|")
else:
ipa_phone = arp_to_ipa_dict[phon]
syll_ipa += ipa_phone + " "
word_ipa += " " + syll_stress + syll_ipa.strip()
word_ipa = word_ipa.strip()
if remove_space:
word_ipa = word_ipa.replace(" ", "")
return word_ipa
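# Minimal usage sketch for convert_arp_to_ipa (the mapping values below are illustrative;
# the real IPA symbols come from the --mapping TSV file):
#   arp_to_ipa = {"HH": "h", "AH": "ə", "L": "l", "OW": "oʊ"}
#   convert_arp_to_ipa(arp_to_ipa, "HH AH0 L OW1", remove_space=True)  # -> "həˈloʊ"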
def _get_arpabet_to_ipa_mapping(arp_ipa_map_file: str) -> Dict[str, str]:
"""
arp_ipa_map_file: Arpabet to IPA phonemes mapping
"""
arp_to_ipa = {}
with open(arp_ipa_map_file, "r", encoding="utf-8") as f:
for line in f:
arp, ipa = line.strip().split("\t")
arp_to_ipa[arp] = ipa
return arp_to_ipa
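# The mapping file read above is expected to contain one tab-separated "ARPABET<TAB>IPA" pair
# per line, e.g. (illustrative rows): "AA\tɑ", "HH\th", "OW\toʊ".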
def convert_cmu_arpabet_to_ipa(arp_ipa_map_file: str, arp_dict_file: str, output_ipa_file: str):
"""
Converts CMU ARPABET-based dictionary to IPA.
Args:
arp_ipa_map_file: ARPABET to IPA phoneme mapping file
arp_dict_file: path to ARPABET version of CMU dictionary
output_ipa_file: path to output IPA version of CMU dictionary
"""
arp_to_ipa_dict = _get_arpabet_to_ipa_mapping(arp_ipa_map_file)
with open(arp_dict_file, "r", encoding="utf-8") as f_arp, open(output_ipa_file, "w", encoding="utf-8") as f_ipa:
for line in f_arp:
if line.startswith(";;;"):
f_ipa.write(line)
else:
# First, split the line at " #" if there are comments in the dictionary file following the mapping entries.
# Next, split at default " " separator.
graphemes, phonemes = line.split(" #")[0].strip().split(" ")
ipa_form = convert_arp_to_ipa(arp_to_ipa_dict, phonemes, remove_space=True)
f_ipa.write(f"{graphemes} {ipa_form}\n")
print(f"IPA version of {os.path.abspath(arp_dict_file)} saved in {os.path.abspath(output_ipa_file)}")
if __name__ == "__main__":
args = parse_args()
convert_cmu_arpabet_to_ipa(args.mapping, args.cmu_arpabet, args.ipa_out)
| NeMo-main | scripts/dataset_processing/g2p/convert_cmu_arpabet_to_ipa.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
from argparse import ArgumentParser
from glob import glob
from typing import List, Tuple
from tqdm import tqdm
"""
Converts WikiHomograph data to .json manifest format for HeteronymClassificationModel training.
The WikiHomograph dataset can be found here:
https://github.com/google-research-datasets/WikipediaHomographData
"""
def parse_args():
parser = ArgumentParser()
parser.add_argument('--data_folder', help="Path to data folder with .tsv files", type=str, required=True)
parser.add_argument("--output", help="Path to output .json file to store the data", type=str, required=True)
return parser.parse_args()
def read_wikihomograph_file(file: str) -> Tuple[List[str], List[List[int]], List[str], List[str]]:
"""
Reads .tsv file from WikiHomograph dataset,
e.g. https://github.com/google-research-datasets/WikipediaHomographData/blob/master/data/eval/live.tsv
Args:
file: path to .tsv file
Returns:
sentences: Text.
start_end_indices: Start and end indices of the homograph in the sentence.
heteronyms: Target heteronyms for each sentence
word_ids: Word_ids corresponding to each heteronym, i.e. label.
"""
excluded_sentences = 0
sentences = []
start_end_indices = []
heteronyms = []
word_ids = []
with open(file, "r", encoding="utf-8") as f:
tsv_file = csv.reader(f, delimiter="\t")
for i, line in enumerate(tsv_file):
if i == 0:
continue
heteronym, wordid, sentence, start, end = line
start, end = int(start), int(end)
sentence, start, end = correct_wikihomograph_data(sentence, start, end)
heteronym_span = sentence[start:end]
if heteronym_span.lower() != heteronym:
if sentence.lower().count(heteronym) == 1:
start = sentence.lower().index(heteronym)
end = start + len(heteronym)
heteronym_span = sentence[start:end].lower()
assert heteronym == heteronym_span.lower()
else:
excluded_sentences += 1
raise ValueError(f"heteronym {heteronym} != heteronym_span {heteronym_span} in {sentence}")
heteronyms.append(heteronym)
start_end_indices.append([start, end])
sentences.append(sentence)
word_ids.append(wordid)
return sentences, start_end_indices, heteronyms, word_ids
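# For reference, a (hypothetical) data row of a WikiHomograph .tsv file has the form
# homograph<TAB>wordid<TAB>sentence<TAB>start<TAB>end, e.g.
#   live    live_vrb    They live nearby.    5    9
# which matches the unpacking in the loop above.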
def correct_wikihomograph_data(sentence: str, start: int = None, end: int = None):
"""
Correct indices for WikiHomograph data
Args:
sentence: sentence
start: start index of homograph
end: end index of homograph
"""
corrections = {
"It is traditionally composed of 85–99% tin, mixed with copper, antimony, bismuth, and sometimes lead, although the use of lead is less common today.": [
96,
100,
],
"B₁₀₅ can be conceptually divided into a B₄₈ fragment and B₂₈-B-B₂₈ (B₅₇) fragment.": [44, 52],
"Pierrefonds Airport on Réunion recorded just 18 mm (0.71 in) of rainfall from November to January, a record minimum.": [
101,
107,
],
"Consort Chen Farong (陳法容) was an imperial consort during the Chinese dynasty Liu Song.": [42, 49],
"Unlike TiO₂, which features six-coordinate Ti in all phases, monoclinic zirconia consists of seven-coordinate zirconium centres.": [
32,
42,
],
"Its area is 16 km², its approximate length is 10 km, and its approximate width is 3 km.": [24, 35],
"The conjugate momentum to X has the expressionwhere the pᵢ are the momentum functions conjugate to the coordinates.": [
86,
95,
],
"Furthermore 17β-HSD1 levels positively correlate with E2 and negatively correlate with DHT levels in breast cancer cells.": [
39,
48,
],
"Electric car buyers get a €4,000 (US$4,520) discount while buyers of plug-in hybrid vehicles get a discount of €3,000 (US$3,390).": [
99,
107,
],
}
if sentence in corrections:
start, end = corrections[sentence]
sentence = sentence.replace("2014Coordinate", "2014 Coordinate") # for normalized data for G2P OOV models
sentence = sentence.replace("AAA", "triple A") # for normalized data for G2P OOV models
return sentence, start, end
def convert_wikihomograph_data_to_manifest(data_folder: str, output_manifest: str):
"""
Convert WikiHomograph data to .json manifest
Args:
data_folder: data_folder that contains .tsv files
output_manifest: path to output file
"""
with open(output_manifest, "w") as f_out:
for file in tqdm(glob(f"{data_folder}/*.tsv")):
sentences, start_end_indices, heteronyms, word_ids = read_wikihomograph_file(file)
for i, sent in enumerate(sentences):
start, end = start_end_indices[i]
heteronym_span = sent[start:end]
entry = {
"text_graphemes": sent,
"start_end": [start, end],
"heteronym_span": heteronym_span,
"word_id": word_ids[i],
}
f_out.write(json.dumps(entry, ensure_ascii=False) + "\n")
print(f"Data saved at {output_manifest}")
if __name__ == '__main__':
args = parse_args()
convert_wikihomograph_data_to_manifest(args.data_folder, args.output)
| NeMo-main | scripts/dataset_processing/g2p/export_wikihomograph_data_to_manifest.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from hydra.utils import instantiate
from tqdm import tqdm
from nemo.core.config import hydra_runner
def get_pitch_stats(pitch_list):
pitch_tensor = torch.cat(pitch_list)
pitch_mean, pitch_std = pitch_tensor.mean().item(), pitch_tensor.std().item()
pitch_min, pitch_max = pitch_tensor.min().item(), pitch_tensor.max().item()
print(f"PITCH_MEAN={pitch_mean}, PITCH_STD={pitch_std}")
print(f"PITCH_MIN={pitch_min}, PITCH_MAX={pitch_max}")
def preprocess_ds_for_fastpitch_align(dataloader):
pitch_list = []
for batch in tqdm(dataloader, total=len(dataloader)):
audios, audio_lengths, tokens, tokens_lengths, align_prior_matrices, pitches, pitches_lengths, *_ = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
get_pitch_stats(pitch_list)
def preprocess_ds_for_mixer_tts_x(dataloader):
pitch_list = []
for batch in tqdm(dataloader, total=len(dataloader)):
(
audios,
audio_lengths,
tokens,
tokens_lengths,
align_prior_matrices,
pitches,
pitches_lengths,
lm_tokens,
) = batch
pitch = pitches.squeeze(0)
pitch_list.append(pitch[pitch != 0])
get_pitch_stats(pitch_list)
CFG_NAME2FUNC = {
"ds_for_fastpitch_align": preprocess_ds_for_fastpitch_align,
"ds_for_mixer_tts": preprocess_ds_for_fastpitch_align,
"ds_for_mixer_tts_x": preprocess_ds_for_mixer_tts_x,
}
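# Typical invocation (a sketch; override names must match keys in the chosen ds_conf YAML):
#   python extract_sup_data.py --config-path ljspeech/ds_conf --config-name ds_for_fastpitch_align \
#       manifest_filepath=<data_root>/train_manifest.json sup_data_path=<data_root>/sup_data
# The printed PITCH_MEAN/PITCH_STD values are typically copied into the FastPitch training config.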
@hydra_runner(config_path='ljspeech/ds_conf', config_name='ds_for_fastpitch_align')
def main(cfg):
dataset = instantiate(cfg.dataset)
dataloader = torch.utils.data.DataLoader(
dataset=dataset,
batch_size=1,
collate_fn=dataset._collate_fn,
num_workers=cfg.get("dataloader_params", {}).get("num_workers", 4),
)
print(f"Processing {cfg.manifest_filepath}:")
CFG_NAME2FUNC[cfg.name](dataloader)
if __name__ == '__main__':
main() # noqa pylint: disable=no-value-for-parameter
| NeMo-main | scripts/dataset_processing/tts/extract_sup_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to preprocess audio before TTS model training.
It can be configured to do several processing steps such as silence trimming, volume normalization,
and duration filtering.
These can be done separately through multiple executions of the script, or all at once to avoid saving
too many copies of the same audio.
Most of these can also be done by the TTS data loader at training time, but doing them ahead of time
lets us implement more complex processing, validate the correctness of the output, and save on compute time.
$ python <nemo_root_path>/scripts/dataset_processing/tts/preprocess_audio.py \
--input_manifest="<data_root_path>/manifest.json" \
--output_manifest="<data_root_path>/manifest_processed.json" \
--input_audio_dir="<data_root_path>/audio" \
--output_audio_dir="<data_root_path>/audio_processed" \
--num_workers=1 \
--trim_config_path="<nemo_root_path>/examples/tts/conf/trim/energy.yaml" \
--output_sample_rate=22050 \
--output_format=flac \
--volume_level=0.95 \
--min_duration=0.5 \
--max_duration=20.0 \
--filter_file="filtered.txt"
"""
import argparse
import os
from pathlib import Path
from typing import Tuple
import librosa
import soundfile as sf
from hydra.utils import instantiate
from joblib import Parallel, delayed
from omegaconf import OmegaConf
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
from nemo.collections.tts.parts.preprocessing.audio_trimming import AudioTrimmer
from nemo.collections.tts.parts.utils.tts_dataset_utils import get_abs_rel_paths, normalize_volume
from nemo.utils import logging
def get_args():
parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Preprocess audio for TTS model training.",
)
parser.add_argument(
"--input_manifest", required=True, type=Path, help="Path to input training manifest.",
)
parser.add_argument(
"--input_audio_dir", required=True, type=Path, help="Path to base directory with audio files.",
)
parser.add_argument(
"--output_manifest", required=True, type=Path, help="Path to output training manifest with processed audio.",
)
parser.add_argument(
"--output_audio_dir", required=True, type=Path, help="Path to output directory for audio files.",
)
parser.add_argument(
"--overwrite_audio",
action=argparse.BooleanOptionalAction,
help="Whether to reprocess and overwrite existing audio files in output_audio_dir.",
)
parser.add_argument(
"--overwrite_manifest",
action=argparse.BooleanOptionalAction,
help="Whether to overwrite the output manifest file if it exists.",
)
parser.add_argument(
"--num_workers", default=1, type=int, help="Number of parallel threads to use. If -1 all CPUs are used."
)
parser.add_argument(
"--trim_config_path",
required=False,
type=Path,
help="Path to config file for nemo.collections.tts.data.audio_trimming.AudioTrimmer",
)
parser.add_argument(
"--max_entries", default=0, type=int, help="If provided, maximum number of entries in the manifest to process."
)
parser.add_argument(
"--output_sample_rate", default=0, type=int, help="If provided, rate to resample the audio to."
)
parser.add_argument(
"--output_format",
default="wav",
type=str,
help="If provided, format output audio will be saved as. If not provided, will keep original format.",
)
parser.add_argument(
"--volume_level", default=0.0, type=float, help="If provided, peak volume to normalize audio to."
)
parser.add_argument(
"--min_duration", default=0.0, type=float, help="If provided, filter out utterances shorter than min_duration."
)
parser.add_argument(
"--max_duration", default=0.0, type=float, help="If provided, filter out utterances longer than max_duration."
)
parser.add_argument(
"--filter_file",
required=False,
type=Path,
help="If provided, output filter_file will contain list of " "utterances filtered out.",
)
args = parser.parse_args()
return args
def _process_entry(
entry: dict,
input_audio_dir: Path,
output_audio_dir: Path,
overwrite_audio: bool,
audio_trimmer: AudioTrimmer,
output_sample_rate: int,
output_format: str,
volume_level: float,
) -> Tuple[dict, float, float]:
audio_filepath = Path(entry["audio_filepath"])
audio_path, audio_path_rel = get_abs_rel_paths(input_path=audio_filepath, base_path=input_audio_dir)
if not output_format:
output_format = audio_path.suffix
output_path = output_audio_dir / audio_path_rel
output_path = output_path.with_suffix(output_format)
output_path.parent.mkdir(exist_ok=True, parents=True)
if output_path.exists() and not overwrite_audio:
original_duration = librosa.get_duration(path=audio_path)
output_duration = librosa.get_duration(path=output_path)
else:
audio, sample_rate = librosa.load(audio_path, sr=None)
original_duration = librosa.get_duration(y=audio, sr=sample_rate)
if audio_trimmer is not None:
audio, start_i, end_i = audio_trimmer.trim_audio(
audio=audio, sample_rate=int(sample_rate), audio_id=str(audio_path)
)
if output_sample_rate:
audio = librosa.resample(y=audio, orig_sr=sample_rate, target_sr=output_sample_rate)
sample_rate = output_sample_rate
if volume_level:
audio = normalize_volume(audio, volume_level=volume_level)
if audio.size > 0:
sf.write(file=output_path, data=audio, samplerate=sample_rate)
output_duration = librosa.get_duration(y=audio, sr=sample_rate)
else:
output_duration = 0.0
entry["duration"] = round(output_duration, 2)
if os.path.isabs(audio_filepath):
entry["audio_filepath"] = str(output_path)
else:
output_filepath = audio_path_rel.with_suffix(output_format)
entry["audio_filepath"] = str(output_filepath)
return entry, original_duration, output_duration
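# Path-handling note for _process_entry: an audio_filepath that is relative to --input_audio_dir
# keeps its relative form (with the new suffix) in the output manifest, while an absolute input
# path is replaced by the absolute path of the processed file under --output_audio_dir.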
def main():
args = get_args()
input_manifest_path = args.input_manifest
output_manifest_path = args.output_manifest
input_audio_dir = args.input_audio_dir
output_audio_dir = args.output_audio_dir
overwrite_audio = args.overwrite_audio
overwrite_manifest = args.overwrite_manifest
num_workers = args.num_workers
max_entries = args.max_entries
output_sample_rate = args.output_sample_rate
output_format = args.output_format
volume_level = args.volume_level
min_duration = args.min_duration
max_duration = args.max_duration
filter_file = args.filter_file
if output_manifest_path.exists():
if overwrite_manifest:
print(f"Will overwrite existing manifest path: {output_manifest_path}")
else:
raise ValueError(f"Manifest path already exists: {output_manifest_path}")
if args.trim_config_path:
audio_trimmer_config = OmegaConf.load(args.trim_config_path)
audio_trimmer = instantiate(audio_trimmer_config)
else:
audio_trimmer = None
if output_format:
if output_format.upper() not in sf.available_formats():
raise ValueError(f"Unsupported output audio format: {output_format}")
output_format = f".{output_format}"
output_audio_dir.mkdir(exist_ok=True, parents=True)
entries = read_manifest(input_manifest_path)
if max_entries:
entries = entries[:max_entries]
# 'threading' backend is required when parallelizing torch models.
job_outputs = Parallel(n_jobs=num_workers, backend='threading')(
delayed(_process_entry)(
entry=entry,
input_audio_dir=input_audio_dir,
output_audio_dir=output_audio_dir,
overwrite_audio=overwrite_audio,
audio_trimmer=audio_trimmer,
output_sample_rate=output_sample_rate,
output_format=output_format,
volume_level=volume_level,
)
for entry in tqdm(entries)
)
output_entries = []
filtered_entries = []
original_durations = 0.0
output_durations = 0.0
for output_entry, original_duration, output_duration in job_outputs:
original_durations += original_duration
if (
output_duration == 0.0
or (min_duration and output_duration < min_duration)
or (max_duration and output_duration > max_duration)
):
if output_duration != original_duration:
output_entry["original_duration"] = original_duration
filtered_entries.append(output_entry)
continue
output_durations += output_duration
output_entries.append(output_entry)
write_manifest(output_path=output_manifest_path, target_manifest=output_entries, ensure_ascii=False)
if filter_file:
write_manifest(output_path=str(filter_file), target_manifest=filtered_entries, ensure_ascii=False)
logging.info(f"Duration of original audio: {original_durations / 3600:.2f} hours")
logging.info(f"Duration of processed audio: {output_durations / 3600:.2f} hours")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/preprocess_audio.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script generates mel spectrograms from a FastPitch model checkpoint. Please see general usage below. It runs
on GPUs by default, but you can add `--num-workers 5 --cpu` as an option to run on CPUs.
$ python scripts/dataset_processing/tts/generate_mels.py \
--fastpitch-model-ckpt ./models/fastpitch/multi_spk/FastPitch--val_loss\=1.4473-epoch\=209.ckpt \
    --input-json-manifests /home/xueyang/HUI-Audio-Corpus-German-clean/test_manifest_text_normed_phonemes.json \
--output-json-manifest-root /home/xueyang/experiments/multi_spk_tts_de
"""
import argparse
import json
from pathlib import Path
import numpy as np
import soundfile as sf
import torch
from joblib import Parallel, delayed
from tqdm import tqdm
from nemo.collections.tts.models import FastPitchModel
from nemo.collections.tts.parts.utils.tts_dataset_utils import (
BetaBinomialInterpolator,
beta_binomial_prior_distribution,
)
from nemo.utils import logging
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Generate mel spectrograms with pretrained FastPitch model, and create manifests for finetuning Hifigan.",
)
parser.add_argument(
"--fastpitch-model-ckpt",
required=True,
type=Path,
help="Specify a full path of a fastpitch model checkpoint with the suffix of either .ckpt or .nemo.",
)
parser.add_argument(
"--input-json-manifests",
nargs="+",
required=True,
type=Path,
help="Specify a full path of a JSON manifest. You could add multiple manifests.",
)
parser.add_argument(
"--output-json-manifest-root",
required=True,
type=Path,
help="Specify a full path of output root that would contain new manifests.",
)
parser.add_argument(
"--num-workers",
default=-1,
type=int,
help="Specify the max number of concurrently Python workers processes. "
"If -1 all CPUs are used. If 1 no parallel computing is used.",
)
parser.add_argument("--cpu", action='store_true', default=False, help="Generate mel spectrograms using CPUs.")
args = parser.parse_args()
return args
def __load_wav(audio_file):
with sf.SoundFile(audio_file, 'r') as f:
samples = f.read(dtype='float32')
return samples.transpose()
def __generate_mels(entry, spec_model, device, use_beta_binomial_interpolator, mel_root):
    # Generate spectrograms (we need to use ground-truth alignment for correct matching between audio and mels)
audio = __load_wav(entry["audio_filepath"])
audio = torch.from_numpy(audio).unsqueeze(0).to(device)
audio_len = torch.tensor(audio.shape[1], dtype=torch.long, device=device).unsqueeze(0)
if spec_model.fastpitch.speaker_emb is not None and "speaker" in entry:
speaker = torch.tensor([entry['speaker']]).to(device)
else:
speaker = None
with torch.no_grad():
if "normalized_text" in entry:
text = spec_model.parse(entry["normalized_text"], normalize=False)
else:
text = spec_model.parse(entry['text'])
text_len = torch.tensor(text.shape[-1], dtype=torch.long, device=device).unsqueeze(0)
spect, spect_len = spec_model.preprocessor(input_signal=audio, length=audio_len)
# Generate attention prior and spectrogram inputs for HiFi-GAN
if use_beta_binomial_interpolator:
beta_binomial_interpolator = BetaBinomialInterpolator()
attn_prior = (
torch.from_numpy(beta_binomial_interpolator(spect_len.item(), text_len.item()))
.unsqueeze(0)
.to(text.device)
)
else:
attn_prior = (
torch.from_numpy(beta_binomial_prior_distribution(text_len.item(), spect_len.item()))
.unsqueeze(0)
.to(text.device)
)
spectrogram = spec_model.forward(
text=text, input_lens=text_len, spec=spect, mel_lens=spect_len, attn_prior=attn_prior, speaker=speaker,
)[0]
save_path = mel_root / f"{Path(entry['audio_filepath']).stem}.npy"
np.save(save_path, spectrogram[0].to('cpu').numpy())
entry["mel_filepath"] = str(save_path)
return entry
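# Note: the spectrogram saved above is the FastPitch prediction computed with ground-truth mels and
# an alignment prior (see the comment at the top of this function), so it stays time-aligned with
# the original audio, which is what HiFi-GAN finetuning expects.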
def main():
args = get_args()
ckpt_path = args.fastpitch_model_ckpt
input_manifest_filepaths = args.input_json_manifests
output_json_manifest_root = args.output_json_manifest_root
mel_root = output_json_manifest_root / "mels"
mel_root.mkdir(exist_ok=True, parents=True)
# load pretrained FastPitch model checkpoint
suffix = ckpt_path.suffix
if suffix == ".nemo":
spec_model = FastPitchModel.restore_from(ckpt_path).eval()
elif suffix == ".ckpt":
spec_model = FastPitchModel.load_from_checkpoint(ckpt_path).eval()
else:
raise ValueError(f"Unsupported suffix: {suffix}")
if not args.cpu:
spec_model.cuda()
device = spec_model.device
use_beta_binomial_interpolator = spec_model.cfg.train_ds.dataset.get("use_beta_binomial_interpolator", False)
for manifest in input_manifest_filepaths:
logging.info(f"Processing {manifest}.")
entries = []
with open(manifest, "r") as fjson:
for line in fjson:
entries.append(json.loads(line.strip()))
if device == "cpu":
new_entries = Parallel(n_jobs=args.num_workers)(
delayed(__generate_mels)(entry, spec_model, device, use_beta_binomial_interpolator, mel_root)
for entry in entries
)
else:
new_entries = []
for entry in tqdm(entries):
new_entry = __generate_mels(entry, spec_model, device, use_beta_binomial_interpolator, mel_root)
new_entries.append(new_entry)
mel_manifest_path = output_json_manifest_root / f"{manifest.stem}_mel{manifest.suffix}"
with open(mel_manifest_path, "w") as fmel:
for entry in new_entries:
fmel.write(json.dumps(entry) + "\n")
logging.info(f"Processing {manifest} is complete --> {mel_manifest_path}")
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/generate_mels.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to preprocess text before TTS model training. This is needed mainly for text normalization,
which is slow to rerun during training.
The output manifest will be the same as the input manifest but with final text stored in the 'normalized_text' field.
$ python <nemo_root_path>/scripts/dataset_processing/tts/preprocess_text.py \
--input_manifest="<data_root_path>/manifest.json" \
--output_manifest="<data_root_path>/manifest_processed.json" \
--normalizer_config_path="<nemo_root_path>/examples/tts/conf/text/normalizer_en.yaml" \
--lower_case=True \
--num_workers=4 \
    --joblib_batch_size=16
"""
import argparse
from pathlib import Path
from hydra.utils import instantiate
from joblib import Parallel, delayed
from nemo_text_processing.text_normalization.normalize import Normalizer
from omegaconf import OmegaConf
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Process and normalize text data.",
)
parser.add_argument(
"--input_manifest", required=True, type=Path, help="Path to input training manifest.",
)
parser.add_argument(
"--output_manifest", required=True, type=Path, help="Path to output training manifest with processed text.",
)
parser.add_argument(
"--overwrite",
action=argparse.BooleanOptionalAction,
help="Whether to overwrite the output manifest file if it exists.",
)
parser.add_argument(
"--text_key", default="text", type=str, help="Input text field to normalize.",
)
parser.add_argument(
"--normalized_text_key", default="normalized_text", type=str, help="Output field to save normalized text to.",
)
parser.add_argument(
"--lower_case", action=argparse.BooleanOptionalAction, help="Whether to convert the final text to lower case.",
)
parser.add_argument(
"--normalizer_config_path",
required=False,
type=Path,
help="Path to config file for nemo_text_processing.text_normalization.normalize.Normalizer.",
)
parser.add_argument(
"--num_workers", default=1, type=int, help="Number of parallel threads to use. If -1 all CPUs are used."
)
parser.add_argument(
"--joblib_batch_size", type=int, help="Batch size for joblib workers. Defaults to 'auto' if not provided."
)
parser.add_argument(
"--max_entries", default=0, type=int, help="If provided, maximum number of entries in the manifest to process."
)
args = parser.parse_args()
return args
def _process_entry(
entry: dict,
normalizer: Normalizer,
text_key: str,
normalized_text_key: str,
lower_case: bool,
lower_case_norm: bool,
) -> dict:
text = entry[text_key]
if normalizer is not None:
if lower_case_norm:
text = text.lower()
text = normalizer.normalize(text, punct_pre_process=True, punct_post_process=True)
if lower_case:
text = text.lower()
entry[normalized_text_key] = text
return entry
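# Illustrative effect of the normalization above (a sketch only; the exact output depends on the
# Normalizer grammar, its input_case, and the lower_case flags):
#   "Mix 2 cups of flour." -> "mix two cups of flour."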
def main():
args = get_args()
input_manifest_path = args.input_manifest
output_manifest_path = args.output_manifest
text_key = args.text_key
normalized_text_key = args.normalized_text_key
lower_case = args.lower_case
num_workers = args.num_workers
batch_size = args.joblib_batch_size
max_entries = args.max_entries
overwrite = args.overwrite
if output_manifest_path.exists():
if overwrite:
print(f"Will overwrite existing manifest path: {output_manifest_path}")
else:
raise ValueError(f"Manifest path already exists: {output_manifest_path}")
if args.normalizer_config_path:
normalizer_config = OmegaConf.load(args.normalizer_config_path)
normalizer = instantiate(normalizer_config)
lower_case_norm = normalizer.input_case == "lower_cased"
else:
normalizer = None
lower_case_norm = False
entries = read_manifest(input_manifest_path)
if max_entries:
entries = entries[:max_entries]
if not batch_size:
batch_size = 'auto'
output_entries = Parallel(n_jobs=num_workers, batch_size=batch_size)(
delayed(_process_entry)(
entry=entry,
normalizer=normalizer,
text_key=text_key,
normalized_text_key=normalized_text_key,
lower_case=lower_case,
lower_case_norm=lower_case_norm,
)
for entry in tqdm(entries)
)
write_manifest(output_path=output_manifest_path, target_manifest=output_entries, ensure_ascii=False)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/preprocess_text.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script computes speaker-level statistics, such as pitch mean & standard deviation, for a given
TTS training manifest.
This script should be run after extract_sup_data.py as it uses the precomputed supplemental features.
$ python <nemo_root_path>/scripts/dataset_processing/tts/compute_speaker_stats.py \
--manifest_path=<data_root_path>/fastpitch_manifest.json \
--sup_data_path=<data_root_path>/sup_data \
--pitch_stats_path=<data_root_path>/pitch_stats.json
"""
import argparse
import json
import os
from collections import defaultdict
from pathlib import Path
from typing import List, Tuple
import torch
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
from nemo.collections.tts.parts.utils.tts_dataset_utils import get_base_dir
from nemo.collections.tts.torch.tts_data_types import Pitch
from nemo.utils import logging
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Compute speaker level pitch statistics.",
)
parser.add_argument(
"--manifest_path", required=True, type=Path, help="Path to training manifest.",
)
parser.add_argument(
"--sup_data_path", default=Path("sup_data"), type=Path, help="Path to base directory with supplementary data.",
)
parser.add_argument(
"--pitch_stats_path",
default=Path("pitch_stats.json"),
type=Path,
help="Path to output JSON file with speaker pitch statistics.",
)
args = parser.parse_args()
return args
def _compute_stats(values: List[torch.Tensor]) -> Tuple[float, float]:
values_tensor = torch.cat(values, dim=0)
mean = values_tensor.mean().item()
std = values_tensor.std(dim=0).item()
return mean, std
def _get_sup_data_filepath(manifest_entry: dict, audio_dir: Path, sup_data_dir: Path) -> Path:
"""
Get the absolute path of a supplementary data type for the input manifest entry.
Example: audio_filepath "<audio_dir>/speaker1/audio1.wav" becomes "<sup_data_dir>/speaker1_audio1.pt"
Args:
manifest_entry: Manifest entry dictionary.
audio_dir: base directory where audio is stored.
sup_data_dir: base directory where supplementary data is stored.
Returns:
Path to the supplementary data file.
"""
audio_path = Path(manifest_entry["audio_filepath"])
rel_audio_path = audio_path.relative_to(audio_dir)
rel_sup_data_path = rel_audio_path.with_suffix(".pt")
sup_data_filename = str(rel_sup_data_path).replace(os.sep, "_")
sup_data_filepath = sup_data_dir / sup_data_filename
return sup_data_filepath
def main():
args = get_args()
manifest_path = args.manifest_path
sup_data_path = args.sup_data_path
pitch_stats_path = args.pitch_stats_path
pitch_data_path = Path(os.path.join(sup_data_path, Pitch.name))
if not os.path.exists(pitch_data_path):
raise ValueError(
f"Pitch directory {pitch_data_path} does not exist. Make sure 'sup_data_path' is correct "
f"and that you have computed the pitch using extract_sup_data.py"
)
entries = read_manifest(manifest_path)
audio_paths = [entry["audio_filepath"] for entry in entries]
base_dir = get_base_dir(audio_paths)
global_pitch_values = []
speaker_pitch_values = defaultdict(list)
for entry in tqdm(entries):
pitch_path = _get_sup_data_filepath(manifest_entry=entry, audio_dir=base_dir, sup_data_dir=pitch_data_path)
if not os.path.exists(pitch_path):
logging.warning(f"Unable to find pitch file for {entry}")
continue
pitch = torch.load(pitch_path)
# Filter out non-speech frames
pitch = pitch[pitch != 0]
global_pitch_values.append(pitch)
if "speaker" in entry:
speaker_id = entry["speaker"]
speaker_pitch_values[speaker_id].append(pitch)
global_pitch_mean, global_pitch_std = _compute_stats(global_pitch_values)
pitch_stats = {"default": {"pitch_mean": global_pitch_mean, "pitch_std": global_pitch_std}}
for speaker_id, pitch_values in speaker_pitch_values.items():
pitch_mean, pitch_std = _compute_stats(pitch_values)
pitch_stats[speaker_id] = {"pitch_mean": pitch_mean, "pitch_std": pitch_std}
with open(pitch_stats_path, 'w', encoding="utf-8") as stats_f:
json.dump(pitch_stats, stats_f, indent=4)
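    # Shape of the resulting pitch_stats.json (values below are illustrative only):
    # {
    #     "default": {"pitch_mean": 212.3, "pitch_std": 38.7},
    #     "speaker_0": {"pitch_mean": 205.9, "pitch_std": 41.2}
    # }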
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/compute_speaker_stats.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is a helper for resynthesizing a TTS dataset using a pretrained text-to-spectrogram model.
The goal of resynthesis (as opposed to text-to-speech) is to reuse as many ground-truth features as possible from the existing speech data.
For example, for resynthesis we want to keep the ground-truth pitch and durations instead of the ones predicted by the model.
The results are to be used for some other task: vocoder finetuning, spectrogram enhancer training, etc.
Let's say we have the following toy dataset:
/dataset/manifest.json
/dataset/1/foo.wav
/dataset/2/bar.wav
/dataset/sup_data/pitch/1_foo.pt
/dataset/sup_data/pitch/2_bar.pt
manifest.json has two entries for "/dataset/1/foo.wav" and "/dataset/2/bar.wav"
(sup_data folder contains pitch files precomputed during training a FastPitch model on this dataset.)
(If you lost your sup_data, don't worry: we use the TTSDataset class, so they will be created on the fly.)
Our script call is
$ python scripts/dataset_processing/tts/resynthesize_dataset.py \
--model-path ./models/fastpitch/multi_spk/FastPitch--val_loss\=1.4473-epoch\=209.ckpt \
--input-json-manifest "/dataset/manifest.json" \
--input-sup-data-path "/dataset/sup_data/" \
--output-folder "/output/" \
--device "cuda:0" \
--batch-size 1 \
--num-workers 1
Then we get output dataset with following directory structure:
/output/manifest_mel.json
/output/mels/foo.npy
/output/mels/foo_gt.npy
/output/mels/bar.npy
/output/mels/bar_gt.npy
/output/manifest_mel.json has the same entries as /dataset/manifest.json but with new fields for spectrograms.
"mel_filepath" is path to the resynthesized spectrogram .npy, "mel_gt_filepath" is path to ground-truth spectrogram .npy
The output structure is similar to that of the generate_mels.py script, for compatibility reasons.
"""
import argparse
import itertools
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, Iterable, Iterator, List
import numpy as np
import torch
from omegaconf import DictConfig, OmegaConf
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest, write_manifest
from nemo.collections.tts.models import FastPitchModel
from nemo.collections.tts.models.base import SpectrogramGenerator
from nemo.collections.tts.parts.utils.helpers import process_batch, to_device_recursive
def chunks(iterable: Iterable, size: int) -> Iterator[List]:
# chunks([1, 2, 3, 4, 5], size=2) -> [[1, 2], [3, 4], [5]]
# assumes iterable does not have any `None`s
args = [iter(iterable)] * size
for chunk in itertools.zip_longest(*args, fillvalue=None):
chunk = list(item for item in chunk if item is not None)
if chunk:
yield chunk
def load_model(path: Path, device: torch.device) -> SpectrogramGenerator:
model = None
if path.suffix == ".nemo":
model = SpectrogramGenerator.restore_from(path, map_location=device)
elif path.suffix == ".ckpt":
model = SpectrogramGenerator.load_from_checkpoint(path, map_location=device)
else:
raise ValueError(f"Unknown checkpoint type {path.suffix} ({path})")
return model.eval().to(device)
@dataclass
class TTSDatasetResynthesizer:
"""
Reuses internals of a SpectrogramGenerator to resynthesize dataset using ground truth features.
Default setup is FastPitch with learned alignment.
If your use case requires different setup, you can either contribute to this script or subclass this class.
"""
model: SpectrogramGenerator
device: torch.device
@torch.no_grad()
def resynthesize_batch(self, batch: Dict[str, Any]) -> Dict[str, Any]:
"""
Resynthesizes a single batch.
Takes a dict with main data and sup data.
Outputs a dict with model outputs.
"""
if not isinstance(self.model, FastPitchModel):
raise NotImplementedError(
"This script supports only FastPitch. Please implement resynthesizing routine for your desired model."
)
batch = to_device_recursive(batch, self.device)
mels, mel_lens = self.model.preprocessor(input_signal=batch["audio"], length=batch["audio_lens"])
reference_audio = batch.get("reference_audio", None)
reference_audio_len = batch.get("reference_audio_lens", None)
reference_spec, reference_spec_len = None, None
if reference_audio is not None:
reference_spec, reference_spec_len = self.model.preprocessor(
input_signal=reference_audio, length=reference_audio_len
)
outputs_tuple = self.model.forward(
text=batch["text"],
durs=None,
pitch=batch["pitch"],
speaker=batch.get("speaker"),
pace=1.0,
spec=mels,
attn_prior=batch.get("attn_prior"),
mel_lens=mel_lens,
input_lens=batch["text_lens"],
reference_spec=reference_spec,
reference_spec_lens=reference_spec_len,
)
names = self.model.fastpitch.output_types.keys()
return {"spec": mels, "mel_lens": mel_lens, **dict(zip(names, outputs_tuple))}
def resynthesized_batches(self) -> Iterator[Dict[str, Any]]:
"""
Returns a generator of resynthesized batches.
Each returned batch is a dict containing main data, sup data, and model output
"""
self.model.setup_training_data(self.model._cfg["train_ds"])
for batch_tuple in iter(self.model._train_dl):
batch = process_batch(batch_tuple, sup_data_types_set=self.model._train_dl.dataset.sup_data_types)
yield self.resynthesize_batch(batch)
def prepare_paired_mel_spectrograms(
model_path: Path,
input_json_manifest: Path,
input_sup_data_path: Path,
output_folder: Path,
device: torch.device,
batch_size: int,
num_workers: int,
):
model = load_model(model_path, device)
dataset_config_overrides = {
"dataset": {
"manifest_filepath": str(input_json_manifest.absolute()),
"sup_data_path": str(input_sup_data_path.absolute()),
},
"dataloader_params": {"batch_size": batch_size, "num_workers": num_workers, "shuffle": False},
}
model._cfg.train_ds = OmegaConf.merge(model._cfg.train_ds, DictConfig(dataset_config_overrides))
resynthesizer = TTSDatasetResynthesizer(model, device)
input_manifest = read_manifest(input_json_manifest)
output_manifest = []
output_json_manifest = output_folder / f"{input_json_manifest.stem}_mel{input_json_manifest.suffix}"
output_mels_folder = output_folder / "mels"
output_mels_folder.mkdir(exist_ok=True, parents=True)
for batch, batch_manifest in tqdm(
zip(resynthesizer.resynthesized_batches(), chunks(input_manifest, size=batch_size)), desc="Batch #"
):
pred_mels = batch["spect"].cpu() # key from fastpitch.output_types
true_mels = batch["spec"].cpu() # key from code above
mel_lens = batch["mel_lens"].cpu().flatten() # key from code above
for i, (manifest_entry, length) in enumerate(zip(batch_manifest, mel_lens.tolist())):
print(manifest_entry["audio_filepath"])
filename = Path(manifest_entry["audio_filepath"]).stem
# note that lengths match
pred_mel = pred_mels[i, :, :length].clone().numpy()
true_mel = true_mels[i, :, :length].clone().numpy()
pred_mel_path = output_mels_folder / f"{filename}.npy"
true_mel_path = output_mels_folder / f"{filename}_gt.npy"
np.save(pred_mel_path, pred_mel)
np.save(true_mel_path, true_mel)
new_manifest_entry = {
**manifest_entry,
"mel_filepath": str(pred_mel_path),
"mel_gt_filepath": str(true_mel_path),
}
output_manifest.append(new_manifest_entry)
write_manifest(output_json_manifest, output_manifest, ensure_ascii=False)
def argument_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Resynthesize TTS dataset using a pretrained text-to-spectrogram model",
)
parser.add_argument(
"--model-path", required=True, type=Path, help="Path to a checkpoint (either .nemo or .ckpt)",
)
parser.add_argument(
"--input-json-manifest", required=True, type=Path, help="Path to the input JSON manifest",
)
parser.add_argument(
"--input-sup-data-path", required=True, type=Path, help="sup_data_path for the JSON manifest",
)
parser.add_argument(
"--output-folder",
required=True,
type=Path,
help="Path to the output folder. Will contain updated manifest and mels/ folder with spectrograms in .npy files",
)
parser.add_argument("--device", required=True, type=torch.device, help="Device ('cpu', 'cuda:0', ...)")
parser.add_argument("--batch-size", required=True, type=int, help="Batch size in the DataLoader")
parser.add_argument("--num-workers", required=True, type=int, help="Num workers in the DataLoader")
return parser
if __name__ == "__main__":
arguments = argument_parser().parse_args()
prepare_paired_mel_spectrograms(**vars(arguments))
| NeMo-main | scripts/dataset_processing/tts/resynthesize_dataset.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script computes features for TTS models prior to training, such as pitch and energy.
The resulting features will be stored in the provided 'feature_dir'.
$ python <nemo_root_path>/scripts/dataset_processing/tts/compute_features.py \
--feature_config_path=<nemo_root_path>/examples/tts/conf/features/feature_22050.yaml \
--manifest_path=<data_root_path>/manifest.json \
--audio_dir=<data_root_path>/audio \
--feature_dir=<data_root_path>/features \
--num_workers=1
"""
import argparse
from pathlib import Path
from hydra.utils import instantiate
from joblib import Parallel, delayed
from omegaconf import OmegaConf
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Compute TTS features.",
)
parser.add_argument(
"--feature_config_path", required=True, type=Path, help="Path to feature config file.",
)
parser.add_argument(
"--manifest_path", required=True, type=Path, help="Path to training manifest.",
)
parser.add_argument(
"--audio_dir", required=True, type=Path, help="Path to base directory with audio data.",
)
parser.add_argument(
"--feature_dir", required=True, type=Path, help="Path to directory where feature data will be stored.",
)
parser.add_argument(
"--num_workers", default=1, type=int, help="Number of parallel threads to use. If -1 all CPUs are used."
)
args = parser.parse_args()
return args
def main():
args = get_args()
feature_config_path = args.feature_config_path
manifest_path = args.manifest_path
audio_dir = args.audio_dir
feature_dir = args.feature_dir
num_workers = args.num_workers
if not manifest_path.exists():
raise ValueError(f"Manifest {manifest_path} does not exist.")
if not audio_dir.exists():
raise ValueError(f"Audio directory {audio_dir} does not exist.")
feature_config = OmegaConf.load(feature_config_path)
feature_config = instantiate(feature_config)
featurizers = feature_config.featurizers
entries = read_manifest(manifest_path)
for feature_name, featurizer in featurizers.items():
print(f"Computing: {feature_name}")
Parallel(n_jobs=num_workers)(
delayed(featurizer.save)(manifest_entry=entry, audio_dir=audio_dir, feature_dir=feature_dir,)
for entry in tqdm(entries)
)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/compute_features.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script takes a list of TTS manifests and creates a JSON mapping the input speaker names to
unique indices for multi-speaker TTS training.
To ensure that speaker names are unique across datasets, it is recommended that you prepend the speaker
names in your manifest with the name of the dataset.
$ python <nemo_root_path>/scripts/dataset_processing/tts/create_speaker_map.py \
--manifest_path=manifest1.json \
--manifest_path=manifest2.json \
--speaker_map_path=speakers.json
Example output:
{
"vctk_p225": 0,
"vctk_p226": 1,
"vctk_p227": 2,
...
}
"""
import argparse
import json
from pathlib import Path
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Create mapping from speaker names to numerical speaker indices.",
)
parser.add_argument(
"--manifest_path", required=True, type=Path, action="append", help="Path to training manifest(s).",
)
parser.add_argument(
"--speaker_map_path", required=True, type=Path, help="Path for output speaker index JSON",
)
parser.add_argument(
"--overwrite",
action=argparse.BooleanOptionalAction,
help="Whether to overwrite the output speaker file if it exists.",
)
args = parser.parse_args()
return args
def main():
args = get_args()
manifest_paths = args.manifest_path
speaker_map_path = args.speaker_map_path
overwrite = args.overwrite
for manifest_path in manifest_paths:
if not manifest_path.exists():
raise ValueError(f"Manifest {manifest_path} does not exist.")
if speaker_map_path.exists():
if overwrite:
print(f"Will overwrite existing speaker path: {speaker_map_path}")
else:
raise ValueError(f"Speaker path already exists: {speaker_map_path}")
speaker_set = set()
for manifest_path in manifest_paths:
entries = read_manifest(manifest_path)
for entry in entries:
speaker = str(entry["speaker"])
speaker_set.add(speaker)
speaker_list = list(speaker_set)
speaker_list.sort()
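    # Sorting before enumerating keeps the speaker-to-index assignment deterministic across runs.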
speaker_index_map = {speaker_list[i]: i for i in range(len(speaker_list))}
with open(speaker_map_path, 'w', encoding="utf-8") as stats_f:
json.dump(speaker_index_map, stats_f, indent=4)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/create_speaker_map.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is to compute global and speaker-level feature statistics for a given TTS training manifest.
This script should be run after compute_features.py as it loads the precomputed feature data.
$ python <nemo_root_path>/scripts/dataset_processing/tts/compute_feature_stats.py \
--feature_config_path=<nemo_root_path>/examples/tts/conf/features/feature_22050.yaml
--manifest_path=<data_root_path>/manifest1.json \
--manifest_path=<data_root_path>/manifest2.json \
--audio_dir=<data_root_path>/audio1 \
--audio_dir=<data_root_path>/audio2 \
--feature_dir=<data_root_path>/features1 \
--feature_dir=<data_root_path>/features2 \
--stats_path=<data_root_path>/feature_stats.json
The output dictionary will contain the feature statistics for every speaker, as well as a "default" entry
with the global statistics.
For example:
{
"default": {
"pitch_mean": 100.0,
"pitch_std": 50.0,
"energy_mean": 7.5,
"energy_std": 4.5
},
"speaker1": {
"pitch_mean": 105.0,
"pitch_std": 45.0,
"energy_mean": 7.0,
"energy_std": 5.0
},
"speaker2": {
"pitch_mean": 110.0,
"pitch_std": 30.0,
"energy_mean": 5.0,
"energy_std": 2.5
}
}
"""
import argparse
import json
from collections import defaultdict
from pathlib import Path
from typing import List, Tuple
import torch
from hydra.utils import instantiate
from omegaconf import OmegaConf
from tqdm import tqdm
from nemo.collections.asr.parts.utils.manifest_utils import read_manifest
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter, description="Compute TTS feature statistics.",
)
parser.add_argument(
"--feature_config_path", required=True, type=Path, help="Path to feature config file.",
)
parser.add_argument(
"--manifest_path", required=True, type=Path, action="append", help="Path(s) to training manifest.",
)
parser.add_argument(
"--audio_dir", required=True, type=Path, action="append", help="Path(s) to base directory with audio data.",
)
parser.add_argument(
"--feature_dir",
required=True,
type=Path,
action="append",
help="Path(s) to directory where feature data was stored.",
)
parser.add_argument(
"--feature_names", default="pitch,energy", type=str, help="Comma separated list of features to process.",
)
parser.add_argument(
"--mask_field",
default="voiced_mask",
type=str,
help="If provided, stat computation will ignore non-masked frames.",
)
parser.add_argument(
"--stats_path",
default=Path("feature_stats.json"),
type=Path,
help="Path to output JSON file with dataset feature statistics.",
)
parser.add_argument(
"--overwrite",
action=argparse.BooleanOptionalAction,
help="Whether to overwrite the output stats file if it exists.",
)
args = parser.parse_args()
return args
def _compute_stats(values: List[torch.Tensor]) -> Tuple[float, float]:
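    # Concatenate per-utterance frame values into one tensor and reduce to a scalar mean/std.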
values_tensor = torch.cat(values, dim=0)
mean = values_tensor.mean().item()
std = values_tensor.std(dim=0).item()
return mean, std
def main():
args = get_args()
feature_config_path = args.feature_config_path
manifest_paths = args.manifest_path
audio_dirs = args.audio_dir
feature_dirs = args.feature_dir
feature_name_str = args.feature_names
mask_field = args.mask_field
stats_path = args.stats_path
overwrite = args.overwrite
if not (len(manifest_paths) == len(audio_dirs) == len(feature_dirs)):
raise ValueError(
f"Need same number of manifest, audio_dir, and feature_dir. Received: "
f"{len(manifest_paths)}, "
f"{len(audio_dirs)}, "
f"{len(feature_dirs)}"
)
for (manifest_path, audio_dir, feature_dir) in zip(manifest_paths, audio_dirs, feature_dirs):
if not manifest_path.exists():
raise ValueError(f"Manifest {manifest_path} does not exist.")
if not audio_dir.exists():
raise ValueError(f"Audio directory {audio_dir} does not exist.")
if not feature_dir.exists():
raise ValueError(
f"Feature directory {feature_dir} does not exist. "
f"Please check that the path is correct and that you ran compute_features.py"
)
if stats_path.exists():
if overwrite:
print(f"Will overwrite existing stats path: {stats_path}")
else:
raise ValueError(f"Stats path already exists: {stats_path}")
feature_config = OmegaConf.load(feature_config_path)
feature_config = instantiate(feature_config)
featurizer_dict = feature_config.featurizers
print(f"Found featurizers for {list(featurizer_dict.keys())}.")
featurizers = featurizer_dict.values()
feature_names = feature_name_str.split(",")
# For each feature, we have a dictionary mapping speaker IDs to a list containing all features
# for that speaker
feature_stats = {name: defaultdict(list) for name in feature_names}
for (manifest_path, audio_dir, feature_dir) in zip(manifest_paths, audio_dirs, feature_dirs):
entries = read_manifest(manifest_path)
for entry in tqdm(entries):
speaker = entry["speaker"]
entry_dict = {}
for featurizer in featurizers:
feature_dict = featurizer.load(manifest_entry=entry, audio_dir=audio_dir, feature_dir=feature_dir)
entry_dict.update(feature_dict)
if mask_field:
mask = entry_dict[mask_field]
else:
mask = None
for feature_name in feature_names:
values = entry_dict[feature_name]
if mask is not None:
values = values[mask]
feature_stat_dict = feature_stats[feature_name]
feature_stat_dict["default"].append(values)
feature_stat_dict[speaker].append(values)
stat_dict = defaultdict(dict)
for feature_name in feature_names:
mean_key = f"{feature_name}_mean"
std_key = f"{feature_name}_std"
feature_stat_dict = feature_stats[feature_name]
for speaker_id, values in feature_stat_dict.items():
speaker_mean, speaker_std = _compute_stats(values)
stat_dict[speaker_id][mean_key] = speaker_mean
stat_dict[speaker_id][std_key] = speaker_std
with open(stats_path, 'w', encoding="utf-8") as stats_f:
json.dump(stat_dict, stats_f, indent=4)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/compute_feature_stats.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
import shutil
import urllib.request
from pathlib import Path
import pandas as pd
from joblib import Parallel, delayed
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.utils import logging
# full corpus.
URLS_FULL = {
"Bernd_Ungerer": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_full/Bernd_Ungerer.zip",
"Eva_K": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_full/Eva_K.zip",
"Friedrich": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_full/Friedrich.zip",
"Hokuspokus": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_full/Hokuspokus.zip",
"Karlsson": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_full/Karlsson.zip",
"others": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_full/others.zip",
}
URL_STATS_FULL = "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/datasetStatistic.zip"
# the clean subset of the full corpus.
URLS_CLEAN = {
"Bernd_Ungerer": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_clean/Bernd_Ungerer_Clean.zip",
"Eva_K": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_clean/Eva_K_Clean.zip",
"Friedrich": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_clean/Friedrich_Clean.zip",
"Hokuspokus": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_clean/Hokuspokus_Clean.zip",
"Karlsson": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_clean/Karlsson_Clean.zip",
"others": "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/dataset_clean/others_Clean.zip",
}
URL_STATS_CLEAN = "https://opendata.iisys.de/opendata/Datasets/HUI-Audio-Corpus-German/datasetStatisticClean.zip"
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Download HUI-Audio-Corpus-German and create manifests with predefined split. "
"Please check details about the corpus in https://github.com/iisys-hof/HUI-Audio-Corpus-German.",
)
parser.add_argument("--data-root", required=True, type=Path, help="where the resulting dataset will reside.")
parser.add_argument("--manifests-root", required=True, type=Path, help="where the manifests files will reside.")
parser.add_argument("--set-type", default="clean", choices=["full", "clean"], type=str)
parser.add_argument("--min-duration", default=0.1, type=float)
parser.add_argument("--max-duration", default=15, type=float)
parser.add_argument(
"--num-workers",
default=-1,
type=int,
help="Specify the max number of concurrently Python workers processes. "
"If -1 all CPUs are used. If 1 no parallel computing is used.",
)
parser.add_argument(
"--normalize-text",
default=False,
action='store_true',
help="Normalize original text and add a new entry 'normalized_text' to .json file if True.",
)
parser.add_argument(
"--val-num-utts-per-speaker",
default=1,
type=int,
help="Specify the number of utterances for each speaker in val split. All speakers are covered.",
)
parser.add_argument(
"--test-num-utts-per-speaker",
default=1,
type=int,
help="Specify the number of utterances for each speaker in test split. All speakers are covered.",
)
parser.add_argument(
"--seed-for-ds-split",
default=100,
type=float,
help="Seed for deterministic split of train/dev/test, NVIDIA's default is 100",
)
args = parser.parse_args()
return args
def __maybe_download_file(source_url, destination_path):
if not destination_path.exists():
logging.info(f"Downloading data: {source_url} --> {destination_path}")
tmp_file_path = destination_path.with_suffix(".tmp")
urllib.request.urlretrieve(source_url, filename=tmp_file_path)
tmp_file_path.rename(destination_path)
else:
logging.info(f"Skipped downloading data because it exists: {destination_path}")
def __extract_file(filepath, data_dir):
logging.info(f"Unzipping data: {filepath} --> {data_dir}")
shutil.unpack_archive(filepath, data_dir)
logging.info(f"Unzipping data is complete: {filepath}.")
def __save_json(json_file, dict_list):
logging.info(f"Saving JSON split to {json_file}.")
with open(json_file, "w") as f:
for d in dict_list:
f.write(json.dumps(d) + "\n")
def __process_data(
dataset_path, stat_path_root, speaker_id, min_duration, max_duration, val_size, test_size, seed_for_ds_split,
):
logging.info(f"Preparing JSON split for speaker {speaker_id}.")
# parse statistic.txt
stat_path = stat_path_root / "statistic.txt"
with open(stat_path, 'r') as fstat:
lines = fstat.readlines()
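        # statistic.txt is a fixed-layout report: the utterance count is taken from its 5th line
        # and the total duration (in seconds) from its 10th line.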
num_utts = int(lines[4].strip().split()[-1])
hours = round(float(lines[9].strip().split()[-1]) / 3600.0, 2)
# parse overview.csv to generate JSON splits.
overview_path = stat_path_root / "overview.csv"
entries = []
with open(overview_path, 'r') as foverview:
# Let's skip the header
foverview.readline()
for line in tqdm(foverview):
file_stem, duration, *_, text = line.strip().split("|")
duration = float(duration)
# file_stem -> dir_name (e.g. maerchen_01_f000051 -> maerchen)
dir_name = "_".join(file_stem.split("_")[:-2])
audio_path = dataset_path / dir_name / "wavs" / f"{file_stem}.wav"
if min_duration <= duration <= max_duration:
entry = {
"audio_filepath": str(audio_path),
"duration": duration,
"text": text,
"speaker": speaker_id,
}
entries.append(entry)
random.Random(seed_for_ds_split).shuffle(entries)
train_size = len(entries) - val_size - test_size
if train_size <= 0:
logging.warning(f"Skipped speaker {speaker_id}. Not enough data for train, val and test.")
train, val, test, is_skipped = [], [], [], True
else:
logging.info(f"Preparing JSON split for speaker {speaker_id} is complete.")
train, val, test, is_skipped = (
entries[:train_size],
entries[train_size : train_size + val_size],
entries[train_size + val_size :],
False,
)
return {
"train": train,
"val": val,
"test": test,
"is_skipped": is_skipped,
"hours": hours,
"num_utts": num_utts,
}
def __text_normalization(json_file, num_workers=-1):
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True,
}
text_normalizer = Normalizer(
lang="de", input_case="cased", overwrite_cache=True, cache_dir=str(json_file.parent / "cache_dir"),
)
def normalizer_call(x):
return text_normalizer.normalize(x, **text_normalizer_call_kwargs)
def add_normalized_text(line_dict):
normalized_text = normalizer_call(line_dict["text"])
line_dict.update({"normalized_text": normalized_text})
return line_dict
logging.info(f"Normalizing text for {json_file}.")
with open(json_file, 'r', encoding='utf-8') as fjson:
lines = fjson.readlines()
# Note: you need to verify which backend works well on your cluster.
# backend="loky" is fine on multi-core Ubuntu OS; backend="threading" on Slurm.
dict_list = Parallel(n_jobs=num_workers)(
delayed(add_normalized_text)(json.loads(line)) for line in tqdm(lines)
)
json_file_text_normed = json_file.parent / f"{json_file.stem}_text_normed{json_file.suffix}"
with open(json_file_text_normed, 'w', encoding="utf-8") as fjson_norm:
for dct in dict_list:
fjson_norm.write(json.dumps(dct) + "\n")
logging.info(f"Normalizing text is complete: {json_file} --> {json_file_text_normed}")
def main():
args = get_args()
data_root = args.data_root
manifests_root = args.manifests_root
set_type = args.set_type
dataset_root = data_root / f"HUI-Audio-Corpus-German-{set_type}"
dataset_root.mkdir(parents=True, exist_ok=True)
if set_type == "full":
data_source = URLS_FULL
stats_source = URL_STATS_FULL
elif set_type == "clean":
data_source = URLS_CLEAN
stats_source = URL_STATS_CLEAN
else:
raise ValueError(f"Unknown {set_type}. Please choose either clean or full.")
# download and unzip dataset stats
zipped_stats_path = dataset_root / Path(stats_source).name
__maybe_download_file(stats_source, zipped_stats_path)
__extract_file(zipped_stats_path, dataset_root)
# download datasets
# Note: you need to verify which backend works well on your cluster.
# backend="loky" is fine on multi-core Ubuntu OS; backend="threading" on Slurm.
Parallel(n_jobs=args.num_workers)(
delayed(__maybe_download_file)(data_url, dataset_root / Path(data_url).name)
for _, data_url in data_source.items()
)
# unzip datasets
# Note: you need to verify which backend works well on your cluster.
# backend="loky" is fine on multi-core Ubuntu OS; backend="threading" on Slurm.
Parallel(n_jobs=args.num_workers)(
delayed(__extract_file)(dataset_root / Path(data_url).name, dataset_root)
for _, data_url in data_source.items()
)
# generate json files for train/val/test splits
stats_path_root = dataset_root / Path(stats_source).stem / "speacker"
entries_train, entries_val, entries_test = [], [], []
speaker_entries = []
num_speakers = 0
for child in stats_path_root.iterdir():
if child.is_dir():
speaker = child.name
num_speakers += 1
speaker_stats_root = stats_path_root / speaker
speaker_data_path = dataset_root / speaker
logging.info(f"Processing Speaker: {speaker}")
results = __process_data(
speaker_data_path,
speaker_stats_root,
num_speakers,
args.min_duration,
args.max_duration,
args.val_num_utts_per_speaker,
args.test_num_utts_per_speaker,
args.seed_for_ds_split,
)
entries_train.extend(results["train"])
entries_val.extend(results["val"])
entries_test.extend(results["test"])
speaker_entry = {
"speaker_name": speaker,
"speaker_id": num_speakers,
"hours": results["hours"],
"num_utts": results["num_utts"],
"is_skipped": results["is_skipped"],
}
speaker_entries.append(speaker_entry)
# shuffle in place across multiple speakers
random.Random(args.seed_for_ds_split).shuffle(entries_train)
random.Random(args.seed_for_ds_split).shuffle(entries_val)
random.Random(args.seed_for_ds_split).shuffle(entries_test)
# save speaker stats.
df = pd.DataFrame.from_records(speaker_entries)
df.sort_values(by="hours", ascending=False, inplace=True)
spk2id_file_path = manifests_root / "spk2id.csv"
df.to_csv(spk2id_file_path, index=False)
logging.info(f"Saving Speaker to ID mapping to {spk2id_file_path}.")
# save json splits.
train_json = manifests_root / "train_manifest.json"
val_json = manifests_root / "val_manifest.json"
test_json = manifests_root / "test_manifest.json"
__save_json(train_json, entries_train)
__save_json(val_json, entries_val)
__save_json(test_json, entries_test)
# normalize text if requested. New json file, train_manifest_text_normed.json, will be generated.
if args.normalize_text:
__text_normalization(train_json, args.num_workers)
__text_normalization(val_json, args.num_workers)
__text_normalization(test_json, args.num_workers)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/hui_acg/get_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import tarfile
import urllib.request
from pathlib import Path
import sox
import wget
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser(description='Download LJSpeech and create manifests with predefined split')
parser.add_argument("--data-root", required=True, type=Path)
    parser.add_argument(
        '--whitelist-path',
        type=str,
        default=None,
        help="Path to lj_speech.tsv (extracted from the dataset readme). If not provided, the file is "
        "downloaded from https://github.com/NVIDIA/NeMo-text-processing/blob/main/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv",
    )
args = parser.parse_args()
return args
URL = "https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2"
FILELIST_BASE = 'https://raw.githubusercontent.com/NVIDIA/tacotron2/master/filelists'
def __maybe_download_file(source_url, destination_path):
if not destination_path.exists():
tmp_file_path = destination_path.with_suffix('.tmp')
urllib.request.urlretrieve(source_url, filename=str(tmp_file_path))
tmp_file_path.rename(destination_path)
def __extract_file(filepath, data_dir):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
print(f"Error while extracting {filepath}. Already extracted?")
def __process_data(data_root, whitelist_path):
if whitelist_path is None:
wget.download(
"https://raw.githubusercontent.com/NVIDIA/NeMo-text-processing/main/nemo_text_processing/text_normalization/en/data/whitelist/lj_speech.tsv",
out=str(data_root),
)
whitelist_path = data_root / "lj_speech.tsv"
text_normalizer = Normalizer(
lang="en",
input_case="cased",
whitelist=whitelist_path,
overwrite_cache=True,
cache_dir=data_root / "cache_dir",
)
text_normalizer_call_kwargs = {"punct_pre_process": True, "punct_post_process": True}
normalizer_call = lambda x: text_normalizer.normalize(x, **text_normalizer_call_kwargs)
# Create manifests (based on predefined NVIDIA's split)
filelists = ['train', 'val', 'test']
for split in tqdm(filelists):
# Download file list if necessary
filelist_path = data_root / f"ljs_audio_text_{split}_filelist.txt"
if not filelist_path.exists():
wget.download(f"{FILELIST_BASE}/ljs_audio_text_{split}_filelist.txt", out=str(data_root))
manifest_target = data_root / f"{split}_manifest.json"
with open(manifest_target, 'w') as f_out:
with open(filelist_path, 'r') as filelist:
print(f"\nCreating {manifest_target}...")
for line in tqdm(filelist):
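                    # Filelist lines follow the NVIDIA tacotron2 layout, e.g. "DUMMY/LJ001-0001.wav|<transcript>";
                    # the fixed slice offsets below rely on that layout.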
basename = line[6:16]
text = line[21:].strip()
norm_text = normalizer_call(text)
# Make sure corresponding wavfile exists
wav_path = data_root / 'wavs' / f"{basename}.wav"
assert wav_path.exists(), f"{wav_path} does not exist!"
entry = {
'audio_filepath': str(wav_path),
'duration': sox.file_info.duration(wav_path),
'text': text,
'normalized_text': norm_text,
}
f_out.write(json.dumps(entry) + '\n')
def main():
args = get_args()
tarred_data_path = args.data_root / "LJSpeech-1.1.tar.bz2"
__maybe_download_file(URL, tarred_data_path)
__extract_file(str(tarred_data_path), str(args.data_root))
data_root = args.data_root / "LJSpeech-1.1"
whitelist_path = args.whitelist_path
__process_data(data_root, whitelist_path)
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/tts/ljspeech/get_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import os
import random
import subprocess
from pathlib import Path
import numpy as np
from nemo_text_processing.text_normalization.normalize import Normalizer
from opencc import OpenCC
def get_args():
parser = argparse.ArgumentParser(
description='Prepare SF_bilingual dataset and create manifests with predefined split'
)
parser.add_argument(
"--data-root",
type=Path,
help="where the dataset will reside",
default="./DataChinese/sf_bilingual_speech_zh_en_vv1/SF_bilingual/",
)
parser.add_argument(
"--manifests-path", type=Path, help="where the resulting manifests files will reside", default="./"
)
parser.add_argument("--val-size", default=0.01, type=float, help="eval set split")
parser.add_argument("--test-size", default=0.01, type=float, help="test set split")
parser.add_argument(
"--seed-for-ds-split",
default=100,
type=float,
help="Seed for deterministic split of train/dev/test, NVIDIA's default is 100",
)
args = parser.parse_args()
return args
def __process_transcript(file_path: str):
# Create zh-TW to zh-simplify converter
cc = OpenCC('t2s')
# Create normalizer
text_normalizer = Normalizer(
lang="zh", input_case="cased", overwrite_cache=True, cache_dir=str(file_path / "cache_dir"),
)
text_normalizer_call_kwargs = {"punct_pre_process": True, "punct_post_process": True}
normalizer_call = lambda x: text_normalizer.normalize(x, **text_normalizer_call_kwargs)
entries = []
i = 0
with open(file_path / "text_SF.txt", encoding="utf-8") as fin:
for line in fin:
content = line.split()
wav_name, text = content[0], "".join(content[1:])
wav_name = wav_name.replace(u'\ufeff', '')
# WAR: change DL to SF, e.g. real wave file com_SF_ce2727.wav, wav name in text_SF
# com_DL_ce2727. It would be fixed through the dataset in the future.
wav_name = wav_name.replace('DL', 'SF')
wav_file = file_path / "wavs" / (wav_name + ".wav")
assert os.path.exists(wav_file), f"{wav_file} not found!"
duration = subprocess.check_output(f"soxi -D {wav_file}", shell=True)
simplified_text = cc.convert(text)
normalized_text = normalizer_call(simplified_text)
entry = {
'audio_filepath': os.path.abspath(wav_file),
'duration': float(duration),
'text': text,
'normalized_text': normalized_text,
}
i += 1
entries.append(entry)
return entries
def __process_data(dataset_path, val_size, test_size, seed_for_ds_split, manifests_dir):
entries = __process_transcript(dataset_path)
random.Random(seed_for_ds_split).shuffle(entries)
train_size = 1.0 - val_size - test_size
train_entries, validate_entries, test_entries = np.split(
entries, [int(len(entries) * train_size), int(len(entries) * (train_size + val_size))]
)
assert len(train_entries) > 0, "Not enough data for train, val and test"
def save(p, data):
with open(p, 'w') as f:
for d in data:
f.write(json.dumps(d) + '\n')
save(manifests_dir / "train_manifest.json", train_entries)
save(manifests_dir / "val_manifest.json", validate_entries)
save(manifests_dir / "test_manifest.json", test_entries)
def main():
args = get_args()
dataset_root = args.data_root
dataset_root.mkdir(parents=True, exist_ok=True)
__process_data(
dataset_root, args.val_size, args.test_size, args.seed_for_ds_split, args.manifests_path,
)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/sfbilingual/get_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# USAGE: python get_data.py --data-root=<where to put data> --data-sets=<datasets_to_download> --num-workers=<number of parallel workers>
# where <datasets_to_download> can be: dev_clean, dev_other, test_clean,
# test_other, train_clean_100, train_clean_360, train_other_500, ALL,
# or mini (equivalent to dev_clean,train_clean_100).
# You can also pass more than one data set, comma-separated:
# --data-sets=dev_clean,train_clean_100
import argparse
import fnmatch
import functools
import json
import multiprocessing
import os
import subprocess
import tarfile
import urllib.request
from pathlib import Path
from tqdm import tqdm
parser = argparse.ArgumentParser(description='Download LibriTTS and create manifests')
parser.add_argument("--data-root", required=True, type=Path)
parser.add_argument("--data-sets", default="dev_clean", type=str)
parser.add_argument("--num-workers", default=4, type=int)
args = parser.parse_args()
URLS = {
'TRAIN_CLEAN_100': "https://www.openslr.org/resources/60/train-clean-100.tar.gz",
'TRAIN_CLEAN_360': "https://www.openslr.org/resources/60/train-clean-360.tar.gz",
'TRAIN_OTHER_500': "https://www.openslr.org/resources/60/train-other-500.tar.gz",
'DEV_CLEAN': "https://www.openslr.org/resources/60/dev-clean.tar.gz",
'DEV_OTHER': "https://www.openslr.org/resources/60/dev-other.tar.gz",
'TEST_CLEAN': "https://www.openslr.org/resources/60/test-clean.tar.gz",
'TEST_OTHER': "https://www.openslr.org/resources/60/test-other.tar.gz",
}
def __maybe_download_file(source_url, destination_path):
if not destination_path.exists():
tmp_file_path = destination_path.with_suffix('.tmp')
urllib.request.urlretrieve(source_url, filename=str(tmp_file_path))
tmp_file_path.rename(destination_path)
def __extract_file(filepath, data_dir):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
print(f"Error while extracting {filepath}. Already extracted?")
def __process_transcript(file_path: str):
entries = []
with open(file_path, encoding="utf-8") as fin:
text = fin.readlines()[0].strip()
# TODO(oktai15): add normalized text via Normalizer/NormalizerWithAudio
wav_file = file_path.replace(".normalized.txt", ".wav")
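    # LibriTTS lays files out as <root>/<speaker>/<chapter>/<utterance>.normalized.txt,
    # so the speaker ID is the third-from-last path component.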
speaker_id = file_path.split('/')[-3]
assert os.path.exists(wav_file), f"{wav_file} not found!"
duration = subprocess.check_output(f"soxi -D {wav_file}", shell=True)
entry = {
'audio_filepath': os.path.abspath(wav_file),
'duration': float(duration),
'text': text,
'speaker': int(speaker_id),
}
entries.append(entry)
return entries
def __process_data(data_folder, manifest_file, num_workers):
files = []
entries = []
for root, dirnames, filenames in os.walk(data_folder):
# we will use normalized text provided by the original dataset
for filename in fnmatch.filter(filenames, '*.normalized.txt'):
files.append(os.path.join(root, filename))
with multiprocessing.Pool(num_workers) as p:
processing_func = functools.partial(__process_transcript)
results = p.imap(processing_func, files)
for result in tqdm(results, total=len(files)):
entries.extend(result)
with open(manifest_file, 'w') as fout:
for m in entries:
fout.write(json.dumps(m) + '\n')
def main():
data_root = args.data_root
data_sets = args.data_sets
num_workers = args.num_workers
if data_sets == "ALL":
data_sets = "dev_clean,dev_other,train_clean_100,train_clean_360,train_other_500,test_clean,test_other"
if data_sets == "mini":
data_sets = "dev_clean,train_clean_100"
for data_set in data_sets.split(','):
filepath = data_root / f"{data_set}.tar.gz"
print(f"Downloading data for {data_set}...")
__maybe_download_file(URLS[data_set.upper()], filepath)
print("Extracting...")
__extract_file(str(filepath), str(data_root))
print("Processing and building manifest.")
__process_data(
str(data_root / "LibriTTS" / data_set.replace("_", "-")),
str(data_root / "LibriTTS" / f"{data_set}.json"),
num_workers=num_workers,
)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/libritts/get_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import json
import re
import tarfile
import urllib.request
from pathlib import Path
from tqdm import tqdm
def get_args():
parser = argparse.ArgumentParser(description='Download HiFiTTS and create manifests with predefined split')
parser.add_argument(
"--data-root",
required=True,
type=Path,
        help='Directory into which to download and extract dataset. {data-root}/hi_fi_tts_v0 will be created.',
)
parser.add_argument(
'--split',
type=str,
default='all',
        help='Choose to generate manifests for all splits or one of (train, dev, test); note that this will still download the full dataset.',
)
args = parser.parse_args()
return args
URL = "https://us.openslr.org/resources/109/hi_fi_tts_v0.tar.gz"
def __maybe_download_file(source_url, destination_path):
if not destination_path.exists():
tmp_file_path = destination_path.with_suffix('.tmp')
urllib.request.urlretrieve(source_url, filename=str(tmp_file_path))
tmp_file_path.rename(destination_path)
def __extract_file(filepath, data_dir):
try:
tar = tarfile.open(filepath)
tar.extractall(data_dir)
tar.close()
except Exception:
print(f"Error while extracting {filepath}. Already extracted?")
def __process_data(data_root, filelists):
# Create manifests (based on predefined NVIDIA's split)
for split in tqdm(filelists):
manifest_target = data_root / f"{split}_manifest.json"
print(f"Creating manifest for {split}.")
entries = []
for manifest_src in glob.glob(str(data_root / f"*_{split}.json")):
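            # Source manifests are named like "<speaker_id>_manifest_<quality>_<split>.json";
            # pull the speaker ID and audio quality out of the filename.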
try:
                search_res = re.search(r'.*/([0-9]+)_manifest_([a-z]+)_.*\.json', manifest_src)
speaker_id = search_res.group(1)
audio_quality = search_res.group(2)
except Exception:
print(f"Failed to find speaker id or audio quality for {manifest_src}, check formatting.")
continue
with open(manifest_src, 'r') as f_in:
for input_json_entry in f_in:
data = json.loads(input_json_entry)
# Make sure corresponding wavfile exists
wav_path = data_root / data['audio_filepath']
assert wav_path.exists(), f"{wav_path} does not exist!"
entry = {
'audio_filepath': data['audio_filepath'],
'duration': data['duration'],
'text': data['text'],
'normalized_text': data['text_normalized'],
'speaker': int(speaker_id),
# Audio_quality is either clean or other.
# The clean set includes recordings with high sound-to-noise ratio and wide bandwidth.
# The books with noticeable noise or narrow bandwidth are included in the other subset.
# Note: some speaker_id's have both clean and other audio quality.
'audio_quality': audio_quality,
}
entries.append(entry)
with open(manifest_target, 'w') as f_out:
for m in entries:
f_out.write(json.dumps(m) + '\n')
def main():
args = get_args()
    split = ['train', 'dev', 'test'] if args.split == 'all' else [args.split]
tarred_data_path = args.data_root / "hi_fi_tts_v0.tar.gz"
__maybe_download_file(URL, tarred_data_path)
__extract_file(str(tarred_data_path), str(args.data_root))
data_root = args.data_root / "hi_fi_tts_v0"
__process_data(data_root, split)
if __name__ == '__main__':
main()
| NeMo-main | scripts/dataset_processing/tts/hifitts/get_data.py |
# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script is used to generate JSON manifests for mel-generator model training. The usage is below.
$ python scripts/dataset_processing/tts/thorsten_neutral/get_data.py \
--data-root ~/experiments/thorsten_neutral \
--manifests-root ~/experiments/thorsten_neutral \
--data-version "22_10" \
--min-duration 0.1 \
--normalize-text
"""
import argparse
import json
import random
import shutil
import subprocess
import urllib.request
from pathlib import Path
from joblib import Parallel, delayed
from nemo_text_processing.text_normalization.normalize import Normalizer
from tqdm import tqdm
from nemo.utils import logging
# Thorsten Müller published two neural voice datasets, 21.02 and 22.10.
THORSTEN_NEUTRAL = {
"21_02": {
"url": "https://zenodo.org/record/5525342/files/thorsten-neutral_v03.tgz?download=1",
"dir_name": "thorsten-de_v03",
"metadata": ["metadata.csv"],
},
"22_10": {
"url": "https://zenodo.org/record/7265581/files/ThorstenVoice-Dataset_2022.10.zip?download=1",
"dir_name": "ThorstenVoice-Dataset_2022.10",
"metadata": ["metadata_train.csv", "metadata_dev.csv", "metadata_test.csv"],
},
}
def get_args():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description="Download Thorsten Müller's neutral voice dataset and create manifests with predefined split. "
"Thorsten Müller published two neural voice datasets, 21.02 and 22.10, where 22.10 provides better "
"audio quality. Please choose one of the two for your TTS models. Details about the dataset are "
"in https://github.com/thorstenMueller/Thorsten-Voice.",
)
parser.add_argument("--data-root", required=True, type=Path, help="where the resulting dataset will reside.")
parser.add_argument("--manifests-root", required=True, type=Path, help="where the manifests files will reside.")
parser.add_argument("--data-version", default="22_10", choices=["21_02", "22_10"], type=str)
parser.add_argument("--min-duration", default=0.1, type=float)
parser.add_argument("--max-duration", default=float('inf'), type=float)
parser.add_argument("--val-size", default=100, type=int)
parser.add_argument("--test-size", default=100, type=int)
parser.add_argument(
"--num-workers",
default=-1,
type=int,
help="Specify the max number of concurrent Python worker processes. "
"If -1 all CPUs are used. If 1 no parallel computing is used.",
)
parser.add_argument(
"--normalize-text",
default=False,
action='store_true',
help="Normalize original text and add a new entry 'normalized_text' to .json file if True.",
)
parser.add_argument(
"--seed-for-ds-split",
default=100,
type=float,
help="Seed for deterministic split of train/dev/test, NVIDIA's default is 100.",
)
args = parser.parse_args()
return args
def __maybe_download_file(source_url, destination_path):
if not destination_path.exists():
logging.info(f"Downloading data: {source_url} --> {destination_path}")
tmp_file_path = destination_path.with_suffix(".tmp")
urllib.request.urlretrieve(source_url, filename=tmp_file_path)
tmp_file_path.rename(destination_path)
else:
logging.info(f"Skipped downloading data because it exists: {destination_path}")
def __extract_file(filepath, data_dir):
logging.info(f"Unzipping data: {filepath} --> {data_dir}")
shutil.unpack_archive(filepath, data_dir)
logging.info(f"Unzipping data is complete: {filepath}.")
def __save_json(json_file, dict_list):
logging.info(f"Saving JSON split to {json_file}.")
with open(json_file, "w") as f:
for d in dict_list:
f.write(json.dumps(d) + "\n")
def __text_normalization(json_file, num_workers=-1):
text_normalizer_call_kwargs = {
"punct_pre_process": True,
"punct_post_process": True,
}
text_normalizer = Normalizer(
lang="de", input_case="cased", overwrite_cache=True, cache_dir=str(json_file.parent / "cache_dir"),
)
def normalizer_call(x):
return text_normalizer.normalize(x, **text_normalizer_call_kwargs)
def add_normalized_text(line_dict):
normalized_text = normalizer_call(line_dict["text"])
line_dict.update({"normalized_text": normalized_text})
return line_dict
logging.info(f"Normalizing text for {json_file}.")
with open(json_file, 'r', encoding='utf-8') as fjson:
lines = fjson.readlines()
# Note: you need to verify which backend works well on your cluster.
# backend="loky" is fine on multi-core Ubuntu OS; backend="threading" on Slurm.
dict_list = Parallel(n_jobs=num_workers)(
delayed(add_normalized_text)(json.loads(line)) for line in tqdm(lines)
)
json_file_text_normed = json_file.parent / f"{json_file.stem}_text_normed{json_file.suffix}"
with open(json_file_text_normed, 'w', encoding="utf-8") as fjson_norm:
for dct in dict_list:
fjson_norm.write(json.dumps(dct) + "\n")
logging.info(f"Normalizing text is complete: {json_file} --> {json_file_text_normed}")
def __process_data(
unzipped_dataset_path, metadata, min_duration, max_duration, val_size, test_size, seed_for_ds_split
):
logging.info("Preparing JSON train/val/test splits.")
entries = list()
not_found_wavs = list()
wrong_duration_wavs = list()
for metadata_fname in metadata:
meta_file = unzipped_dataset_path / metadata_fname
with open(meta_file, 'r') as fmeta:
for line in tqdm(fmeta):
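                # Metadata rows are pipe-separated; only the wav file stem (1st field) and the
                # raw transcript (2nd field) are used here.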
items = line.strip().split('|')
wav_file_stem, text = items[0], items[1]
wav_file = unzipped_dataset_path / "wavs" / f"{wav_file_stem}.wav"
# skip audios if they do not exist.
if not wav_file.exists():
not_found_wavs.append(wav_file)
logging.warning(f"Skipping {wav_file}: it is not found.")
continue
# skip audios if their duration is out of range.
duration = subprocess.check_output(f"soxi -D {wav_file}", shell=True)
duration = float(duration)
if min_duration <= duration <= max_duration:
entry = {
'audio_filepath': str(wav_file),
'duration': duration,
'text': text,
}
entries.append(entry)
elif duration < min_duration:
wrong_duration_wavs.append(wav_file)
logging.warning(f"Skipping {wav_file}: it is too short, less than {min_duration} seconds.")
continue
else:
wrong_duration_wavs.append(wav_file)
logging.warning(f"Skipping {wav_file}: it is too long, greater than {max_duration} seconds.")
continue
random.Random(seed_for_ds_split).shuffle(entries)
train_size = len(entries) - val_size - test_size
if train_size <= 0:
raise ValueError("Not enough data for the train split.")
logging.info("Preparing JSON train/val/test splits is complete.")
train, val, test = (
entries[:train_size],
entries[train_size : train_size + val_size],
entries[train_size + val_size :],
)
return train, val, test, not_found_wavs, wrong_duration_wavs
def main():
args = get_args()
data_root = args.data_root
manifests_root = args.manifests_root
data_version = args.data_version
dataset_root = data_root / f"ThorstenVoice-Dataset-{data_version}"
dataset_root.mkdir(parents=True, exist_ok=True)
# download and extract dataset
dataset_url = THORSTEN_NEUTRAL[data_version]["url"]
zipped_dataset_path = dataset_root / Path(dataset_url).name.split("?")[0]
__maybe_download_file(dataset_url, zipped_dataset_path)
__extract_file(zipped_dataset_path, dataset_root)
# generate train/dev/test splits
unzipped_dataset_path = dataset_root / THORSTEN_NEUTRAL[data_version]["dir_name"]
entries_train, entries_val, entries_test, not_found_wavs, wrong_duration_wavs = __process_data(
unzipped_dataset_path=unzipped_dataset_path,
metadata=THORSTEN_NEUTRAL[data_version]["metadata"],
min_duration=args.min_duration,
max_duration=args.max_duration,
val_size=args.val_size,
test_size=args.test_size,
seed_for_ds_split=args.seed_for_ds_split,
)
# save json splits.
train_json = manifests_root / "train_manifest.json"
val_json = manifests_root / "val_manifest.json"
test_json = manifests_root / "test_manifest.json"
__save_json(train_json, entries_train)
__save_json(val_json, entries_val)
__save_json(test_json, entries_test)
# save skipped audios that are not found into a file.
if len(not_found_wavs) > 0:
skipped_not_found_file = manifests_root / "skipped_not_found_wavs.list"
with open(skipped_not_found_file, "w") as f_notfound:
for line in not_found_wavs:
f_notfound.write(f"{line}\n")
# save skipped audios that are too short or too long into a file.
if len(wrong_duration_wavs) > 0:
skipped_wrong_duration_file = manifests_root / "skipped_wrong_duration_wavs.list"
with open(skipped_wrong_duration_file, "w") as f_wrong_dur:
for line in wrong_duration_wavs:
f_wrong_dur.write(f"{line}\n")
# normalize text if requested. New json file, train_manifest_text_normed.json, will be generated.
if args.normalize_text:
__text_normalization(train_json, args.num_workers)
__text_normalization(val_json, args.num_workers)
__text_normalization(test_json, args.num_workers)
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/tts/thorsten_neutral/get_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
from tqdm import tqdm
"""
Dataset preprocessing script for the Financial Phrase Bank Sentiment dataset:
https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
data-dir: (str) The unziped directory where the Financial PhraseBank dataset was downloaded, files will be saved here
file-name: (str) Name of the input file you want to process
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'financial_phrase_bank' for
example, the files would be saved as financial_phrase_bank_train.jsonl, financial_phrase_bank_val.jsonl, and
financial_phrase_bank_test.jsonl
make-ground-truth: (bool) If true, test files will include labels, if false, test files will not include labels
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
    train-percent: (float) Percentage of data that should be used for the train split. The val and test splits will be made
by splitting the remaining data evenly.
Saves train, val, and test files for the Financial PhraseBank dataset.
An example of the processed output written to file:
{"taskname": "sentiment", "sentence": "In the Baltic countries , sales fell by 42.6 % .", "label": " negative"}
{"taskname": "sentiment", "sentence": "Danske Bank is Denmark 's largest bank with 3.5 million customers .", "label": " neutral"}
{"taskname": "sentiment", "sentence": "The total value of the deliveries is some EUR65m .", "label": " neutral"}
{"taskname": "sentiment", "sentence": "Operating profit margin increased from 11.2 % to 11.7 % .", "label": " positive"}
{"taskname": "sentiment", "sentence": "It will also strengthen Ruukki 's offshore business .", "label": " positive"}
{"taskname": "sentiment", "sentence": "Sanoma News ' advertising sales decreased by 22 % during the year .", "label": " negative"}
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default="data/FinancialPhraseBank-v1.0")
parser.add_argument("--file-name", type=str, default="Sentences_AllAgree.txt")
parser.add_argument("--save-name-base", type=str, default="financial_phrase_bank")
parser.add_argument("--make-ground-truth", action='store_true')
parser.add_argument("--random-seed", type=int, default=1234)
parser.add_argument("--train-percent", type=float, default=0.8)
args = parser.parse_args()
data = open(f"{args.data_dir}/{args.file_name}", "r", encoding="ISO-8859-1").readlines()
save_name_base = f"{args.data_dir}/{args.save_name_base}"
process_data(data, save_name_base, args.train_percent, args.random_seed, args.make_ground_truth)
def process_data(data, save_name_base, train_percent, random_seed, make_ground_truth=False):
random.seed(random_seed)
random.shuffle(data)
data_total = len(data)
train_total = int(data_total * train_percent)
val_total = (data_total - train_total) // 2
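    # Whatever remains after the train split is divided evenly between val and test.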
train_set = data[0:train_total]
val_set = data[train_total : train_total + val_total]
test_set = data[train_total + val_total :]
gen_file(train_set, save_name_base, 'train')
gen_file(val_set, save_name_base, 'val')
gen_file(test_set, save_name_base, 'test', make_ground_truth)
def gen_file(data, save_name_base, split_type, make_ground_truth=False):
save_path = f"{save_name_base}_{split_type}.jsonl"
print(f"Saving {split_type} split to {save_path}")
with open(save_path, 'w') as save_file:
for line in tqdm(data):
example_json = {"taskname": "sentiment"}
sent, label = line.split('@')
sent = sent.strip()
label = label.strip()
example_json["sentence"] = sent
            # Don't want labels in the test set
if split_type != "test" or make_ground_truth:
example_json["label"] = " " + label
save_file.write(json.dumps(example_json) + '\n')
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/nlp/financial_phrase_bank/prompt_learning_financial_phrase_bank_preprocessing.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import shutil
from nemo.collections.nlp.data.data_utils.data_preprocessing import DATABASE_EXISTS_TMP, if_exist, write_files
from nemo.utils import logging
def copy_input_files(infold):
"""
Put training files in convenient place for conversion to our format.
Args:
infold: location of an original fold of the dataset (in the sense of k-fold cross validation)
"""
our_infold = infold + "/dataset"
if os.path.exists(our_infold + "/trainset") and os.path.exists(our_infold + "/testset"):
logging.info("Input folders exists")
return
logging.info(f"Copying files to input folder: {our_infold}")
os.makedirs(infold, exist_ok=True)
old_infold = (
infold + '/CrossValidation/autoGeneFromRealAnno/autoGene_2018_03_22-13_01_25_169/CrossValidation/KFold_1'
)
if not os.path.exists(our_infold + "/trainset"):
shutil.copytree(old_infold + '/trainset', our_infold + '/trainset')
if not os.path.exists(our_infold + "/testset"):
shutil.copytree(old_infold + '/testset/csv', our_infold + '/testset')
def get_intents(infold):
""" Get list of intents from file names. """
intents = [f[:-4] for f in os.listdir(infold)]
intents.sort()
logging.info(f'Found {len(intents)} intents')
return intents
def get_intent_queries(infold, intent_names, mode):
""" Get list of queries with their corresponding intent number. """
intent_queries = ['sentence\tlabel\n']
for index, intent in enumerate(intent_names):
queries = open(f'{infold}/{mode}set/{intent}.csv', 'r', encoding='utf-8').readlines()
for query in queries[1:]:
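            # Rows are semicolon-separated; the 5th field holds the query text, whose surrounding
            # quote characters are stripped below.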
phrases = query.split(";")
intent_query = phrases[4][1:-1] + "\t" + str(index)
intent_queries.append(intent_query)
return intent_queries
def get_slots(infold, modes):
"""
Find a list of unique slot types in training and testing data.
We use a single slot type name both for starting and continuation tokens (not using B-, I- notation).
"""
slots = set()
for mode in modes:
path = f'{infold}/{mode}set'
for filename in os.listdir(path):
lines = open(f'{path}/{filename}', 'r', encoding='utf-8').readlines()
for line in lines[1:]:
query = line.split(";")[3]
                slot_phrases = re.findall(r'\[.*?\]', query)
for slot_phrase in slot_phrases:
slot = slot_phrase.split(" : ")[0][1:]
slots.add(slot)
slots = sorted(slots)
slots.append("O")
logging.info(f'Found {len(slots)} slot types')
return slots
def get_slot_queries(infold, slot_dict, mode, intent_names):
"""
Convert each word in a query to corresponding slot number.
Args:
infold: fold of the data
slot_dict: dict containing slot-names to positions
mode: train, validation or test
intent_names: list of intents
"""
slot_queries = []
outside_slot = len(slot_dict) - 1
# keep the same order of files/queries as for intents
for intent in intent_names:
lines = open(f'{infold}/{mode}set/{intent}.csv', 'r', encoding='utf-8').readlines()
for line in lines[1:]:
slot_query = ""
query = line.split(";")[3]
words = query.split(" ")
current_slot = outside_slot
for word in words:
if word[0] == "[":
current_slot = slot_dict[word[1:]]
elif word[0] == ":":
continue
else:
slot_query += str(current_slot) + " "
if word[-1] == ']':
current_slot = outside_slot
slot_queries.append(slot_query.strip())
return slot_queries
def process_assistant(infold, outfold, modes=['train', 'test']):
"""
https://github.com/xliuhw/NLU-Evaluation-Data - this dataset includes
about 25 thousand examples with 66 various multi-domain intents and 57 entity types.
"""
if if_exist(outfold, [f'{mode}_slots.tsv' for mode in modes]):
logging.info(DATABASE_EXISTS_TMP.format('robot', outfold))
return outfold
logging.info(f'Processing assistant commands dataset and store at {outfold}')
os.makedirs(outfold, exist_ok=True)
# copy train/test files to the convenient directory to work with
copy_input_files(infold)
infold += "/dataset"
# get list of intents from train folder (test folder supposed to be the same)
intent_names = get_intents(infold + "/trainset")
write_files(intent_names, f'{outfold}/dict.intents.csv')
# get all train and test queries with their intent
for mode in modes:
intent_queries = get_intent_queries(infold, intent_names, mode)
write_files(intent_queries, f'{outfold}/{mode}.tsv')
# get list of all unique slots in training and testing files
slot_types = get_slots(infold, modes)
write_files(slot_types, f'{outfold}/dict.slots.csv')
# create files of slot queries
slot_dict = {k: v for v, k in enumerate(slot_types)}
for mode in modes:
slot_queries = get_slot_queries(infold, slot_dict, mode, intent_names)
write_files(slot_queries, f'{outfold}/{mode}_slots.tsv')
| NeMo-main | scripts/dataset_processing/nlp/intent_and_slot/assistant_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import random
from assistant_utils import process_assistant
from tqdm import tqdm
"""
Dataset preprocessing script for the Assistant dataset: https://github.com/xliuhw/NLU-Evaluation-Data/archive/master.zip
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
source-dir: (str) The unzipped directory where the assistant dataset was downloaded
nemo-format-dir: (str) The directory where intermediate preprocessed files will be saved
output-dir: (str) The directory where the final train, val, and test files will be saved
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'assistant' for
example, the files would be saved as assistant_train.jsonl, assistant_val.jsonl, and assistant_test.jsonl
make-ground-truth: (bool) If true, test files will include answers; if false, they will not
include-options: (bool) If true, all intent and slot options will be added to the jsonl file under the key names
'intent options' and 'slot_options'. This will be added in addition to 'taskname', 'utterance', and 'label'.
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
Saves train, val, and test files for the assistant dataset.
Example Output format (with include-options = False):
{"taskname": "intent_and_slot", "utterance": "who was john dillinger", "label": "\nIntent: qa_factoid\nSlots: person(john dillinger)"}
{"taskname": "intent_and_slot", "utterance": "can you play my favorite music", "label": "\nIntent: play_music\nSlots: None"}
{"taskname": "intent_and_slot", "utterance": "is adele going to go on tour", "label": "\nIntent: qa_factoid\nSlots: artist_name(adele)"}
{"taskname": "intent_and_slot", "utterance": "will the temperature be in the today", "label": "\nIntent: weather_query\nSlots: weather_descriptor(temperature), date(today)"}
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--source-dir", type=str, default="data/assistant/NLU-Evaluation-Data-master")
parser.add_argument("--nemo-format-dir", type=str, default="data/assistant/nemo-format")
parser.add_argument("--output-dir", type=str, default="data/assistant")
parser.add_argument("--save-name-base", type=str, default="assistant")
parser.add_argument("--make-ground-truth", action='store_true')
parser.add_argument("--include-options", action='store_true')
parser.add_argument("--random-seed", type=int, default=42)
args = parser.parse_args()
random.seed(args.random_seed)
process_assistant(args.source_dir, args.nemo_format_dir, modes=["train", "test"])
intent_dict = open(f"{args.nemo_format_dir}/dict.intents.csv").readlines()
slot_dict = open(f"{args.nemo_format_dir}/dict.slots.csv").readlines()
# Convert train set to prompt learning format
train_utterance_lines = open(f"{args.nemo_format_dir}/train.tsv").readlines()[1:]
train_slot_lines = open(f"{args.nemo_format_dir}/train_slots.tsv").readlines()
train_examples = list(zip(train_utterance_lines, train_slot_lines))
random.shuffle(train_examples)
train_utterance_lines, train_slot_lines = zip(*train_examples)
train_save_path = f"{args.output_dir}/{args.save_name_base}_train.jsonl"
process_data_for_prompt_learning(
train_utterance_lines, train_slot_lines, intent_dict, slot_dict, train_save_path, args.include_options,
)
# Split test set into validation and test sets
test_utterance_lines = open(f"{args.nemo_format_dir}/test.tsv").readlines()[1:]
test_slot_lines = open(f"{args.nemo_format_dir}/test_slots.tsv").readlines()
val_half = len(test_utterance_lines) // 2
test_examples = list(zip(test_utterance_lines, test_slot_lines))
random.shuffle(test_examples)
test_utterance_lines, test_slot_lines = zip(*test_examples)
# Convert val set to prompt learning format
val_utterance_lines = test_utterance_lines[:val_half]
val_slot_lines = test_slot_lines[:val_half]
val_save_path = f"{args.output_dir}/{args.save_name_base}_val.jsonl"
process_data_for_prompt_learning(
val_utterance_lines, val_slot_lines, intent_dict, slot_dict, val_save_path, args.include_options,
)
# Convert test set to prompt learning format
test_utterance_lines = test_utterance_lines[val_half:]
test_slot_lines = test_slot_lines[val_half:]
test_save_path = f"{args.output_dir}/{args.save_name_base}_test.jsonl"
process_data_for_prompt_learning(
test_utterance_lines,
test_slot_lines,
intent_dict,
slot_dict,
test_save_path,
args.include_options,
make_ground_truth=args.make_ground_truth,
)
def process_data_for_prompt_learning(
utterance_lines, slot_lines, intent_dict, slot_dict, save_path, include_options, make_ground_truth=False
):
"""
Formats each line in the utterance file as a json object
with intent and slot labels.
"""
save_file = open(save_path, "w")
print(f"Saving data to {save_path}")
# List all possible intent and slot labels
if include_options:
all_intents = ", ".join([intent.strip() for intent in intent_dict])
all_slots = ", ".join([slot.strip() for slot in slot_dict])
# all_labels = f"possible intents: {all_intents}\n\npossible slots: {all_slots}\n\n"
for line_idx, line in enumerate(tqdm(utterance_lines)):
# Get utterance and intent label
utterance, intent_label_idx = line.split("\t")
intent_label_idx = int(intent_label_idx.strip())
intent_label = intent_dict[intent_label_idx].strip()
slot_line = slot_lines[line_idx].strip().split()
# Get and format all slot labels for the utterance
slot_labels = get_slots(slot_line, utterance, slot_dict)
if include_options:
example_json = {
"taskname": "intent_and_slot",
"intent options": all_intents,
"slot_options": all_slots,
"utterance": utterance,
}
else:
example_json = {
"taskname": "intent_and_slot",
"utterance": utterance,
}
# Don't want test examples to have labels
if "_test" not in save_path or make_ground_truth:
example_json["label"] = f"\nIntent: {intent_label}\nSlots: {slot_labels}"
save_file.write(json.dumps(example_json) + "\n")
def get_slots(slot_line, utterance, slot_dict):
"""
Formats slot labels for an utterance. Ensures that multiword
slot labels are grouped together. For example, the words
'birthday party' should be grouped together under the
same event_name label like event_name(birthday party)
instead of event_name(birthday), event_name(party).
"""
# Get slots and their labels
utterance_words = utterance.split()
slots_and_labels = []
prev_slot_label = 'O'
prev_word_idx = 0
current_word = ""
if len(utterance_words) != len(slot_line):
slot_line = slot_line[1:]
for word_idx, slot_label_idx in enumerate(slot_line):
word = utterance_words[word_idx]
slot_label = slot_dict[int(slot_label_idx)].strip()
# Only care about words with labels
if slot_label != 'O':
# Keep multiword answers together
if prev_slot_label == slot_label and prev_word_idx == word_idx - 1:
current_word += " " + word
# Previous answer has ended and a new one is starting
else:
if current_word != "":
slots_and_labels.append(f"{prev_slot_label}({current_word})")
current_word = word
prev_word_idx = word_idx
prev_slot_label = slot_label.strip()
# Add last labeled word to list of slots and labels if the utterance is over
if current_word != "" and prev_slot_label != 'O':
slots_and_labels.append(f"{prev_slot_label}({current_word})")
# Format slot labels
if not slots_and_labels:
slot_labels = "None"
else:
slot_labels = ", ".join(slots_and_labels)
return slot_labels
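# Hedged example (illustrative only, not called by the script): with a hypothetical slot
# dictionary where index 0 is 'O' and index 1 is 'event_name', the function above groups the
# two labelled words of "wake me for the birthday party" into a single slot entry.
def _example_multiword_grouping():
    example_slot_dict = ["O\n", "event_name\n"]  # mimics lines read from dict.slots.csv
    return get_slots(["0", "0", "0", "0", "1", "1"], "wake me for the birthday party", example_slot_dict)
    # expected result: "event_name(birthday party)"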
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/nlp/intent_and_slot/prompt_learning_assistant_preprocessing.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
import pandas as pd
def convert_atis_multi_label(source_dir: str, target_dir: str, mode: str) -> None:
"""
Converts single-label ATIS NeMo data to multi-label data. Previously,
labels in ATIS mapped multi-labels to a single index rather than to separate indices.
Args:
source_dir: directory that stored original nemo files
target_dir: directory to store multi-label nemo files
mode: specifies the name of the dataset i.e, train, test, dev
Returns:
None
"""
data = pd.read_csv(f'{source_dir}/{mode}.tsv', sep='\t')
# Get the original intent dictionary
old_intents_file = f'{source_dir}/dict.intents.csv'
new_intents_file = f'{target_dir}/dict.intents.csv'
intent_labels = []
with open(old_intents_file, "r") as input_file:
old_intents = input_file.read().splitlines()
with open(new_intents_file, "r") as input_file:
new_intents = input_file.read().splitlines()
for index, intent in data.iterrows():
temp_dict = {}
temp_dict['sentence'] = intent['sentence']
old_label = old_intents[int(intent['label'])]
values = [old_label]
if '+' in old_label:
values = old_label.split('+')
for index, label in enumerate(new_intents):
if label in values:
if 'label' not in temp_dict:
temp_dict['label'] = f"{index}"
else:
temp_dict['label'] = f"{temp_dict['label']},{index}"
intent_labels.append(temp_dict)
multi_intent_df = pd.DataFrame(intent_labels)
multi_intent_df.to_csv(f'{target_dir}/{mode}.tsv', sep='\t', index=False)
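# Hedged illustration (intent names are made up, not taken from the dataset): the loop above
# re-expresses an old combined label against the new single-label dictionary, with the indices
# emitted in dictionary order. A minimal sketch of that mapping:
def _example_split_combined_label(old_label="atis_flight+atis_airfare",
                                  new_intents=("atis_airfare", "atis_flight")):
    """Returns "0,1" for the hypothetical inputs above."""
    values = old_label.split('+') if '+' in old_label else [old_label]
    return ",".join(str(i) for i, label in enumerate(new_intents) if label in values)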
def convert_intent_dictionary(source_dir: str, target_dir: str) -> None:
"""
Converts original intent dictionary containing labels that represented multiple labels into
dictionary with only single labels. Example: if index 5 was referring to label "a+b", it is no longer
a label in the new intent dictionary. Only labels "a" and "b" are included within the new dictionary
Args:
source_dir: directory that stored original nemo files
target_dir: directory to store multi-label nemo files
Returns:
None
"""
os.makedirs(target_dir, exist_ok=True)
source_file = os.path.join(source_dir, "dict.intents.csv")
target_file = os.path.join(target_dir, "dict.intents.csv")
with open(source_file, "r") as input_file:
orig_intents = input_file.read().splitlines()
with open(target_file, "w") as output_file:
for line in orig_intents:
if "+" not in line:
output_file.write(f"{line}\n")
if __name__ == "__main__":
# Parse the command-line arguments.
parser = argparse.ArgumentParser(description="Process and convert datasets into NeMo\'s format.")
parser.add_argument(
"--source_data_dir", required=True, type=str, help='path to the folder containing the dataset files'
)
parser.add_argument("--target_data_dir", required=True, type=str, help='path to save the processed dataset')
args = parser.parse_args()
source_dir = args.source_data_dir
target_dir = args.target_data_dir
shutil.copyfile(f'{source_dir}/test.tsv', f'{source_dir}/dev.tsv')
convert_intent_dictionary(f'{source_dir}', f'{target_dir}')
convert_atis_multi_label(f'{source_dir}', f'{target_dir}', 'train')
convert_atis_multi_label(f'{source_dir}', f'{target_dir}', 'dev')
shutil.copyfile(f'{source_dir}/dict.slots.csv', f'{target_dir}/dict.slots.csv')
shutil.copyfile(f'{source_dir}/train_slots.tsv', f'{target_dir}/train_slots.tsv')
shutil.copyfile(f'{source_dir}/test_slots.tsv', f'{target_dir}/dev_slots.tsv')
| NeMo-main | scripts/dataset_processing/nlp/intent_and_slot/convert_datasets.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import os
import random
import shutil
import pandas as pd
def augment_nemo_data(source_dir: str, target_dir: str, link_string: str, num_mixed: int) -> None:
"""
Augments training data to include more multi-label utterances by combining utterances.
Args:
source_dir: directory that contains nemo-format files
target_dir: directory to store the newly transformed files
num_mixed: the number of additional combined examples per class combination
link_string: the string concatenated in between two utterances
Raises:
ValueError: dict.slots.csv must contain 'O' as one of the labels
"""
os.makedirs(target_dir, exist_ok=True)
train_df = pd.read_csv(f'{source_dir}/train.tsv', sep="\t")
# Filler Slots
slots_df = pd.read_csv(f'{source_dir}/train_slots.tsv', sep="\t", header=None)
slots_df.columns = ["slots"]
# Get Slots Dictionary
slot_file = f'{source_dir}/dict.slots.csv'
with open(slot_file, "r") as f:
slot_lines = f.read().splitlines()
dataset = list(slot_lines)
if "O" not in dataset:
raise ValueError("dict.slots.csv must contain 'O' as one of the labels")
# Find the index that contains the 'O' slot
o_slot_index = dataset.index('O')
labels = train_df.columns[1:]
actual_labels = train_df[labels].values.tolist()
sentences = train_df['sentence'].values.tolist()
# Set of all existing labels
all_labels = set(map(lambda labels: tuple(labels), actual_labels))
label_indices = []
for label in all_labels:
label_indices.append([i for i, x in enumerate(actual_labels) if tuple(x) == label])
series_list = []
slots_list = []
for i in range(len(label_indices)):
for j in range(i + 1, len(label_indices)):
first_class_indices = label_indices[i]
second_class_indices = label_indices[j]
combined_list = list(itertools.product(first_class_indices, second_class_indices))
combined_list = random.sample(combined_list, min(num_mixed, len(combined_list)))
for index, index2 in combined_list:
sentence1 = sentences[index]
sentence2 = sentences[index2]
labels1 = set(actual_labels[index][0].split(','))
labels2 = set(actual_labels[index2][0].split(','))
slots1 = slots_df["slots"][index]
slots2 = slots_df["slots"][index2]
combined_labels = ",".join(sorted(labels1.union(labels2)))
combined_sentences = f"{sentence1}{link_string} {sentence2}"
combined_lst = [combined_sentences] + [combined_labels]
combined_slots = f"{slots1} {o_slot_index} {slots2}"
series_list.append(combined_lst)
slots_list.append(combined_slots)
new_df = pd.DataFrame(series_list, columns=train_df.columns)
new_slots_df = pd.DataFrame(slots_list, columns=slots_df.columns)
train_df = train_df.append(new_df)
slots_df = slots_df.append(new_slots_df)
train_df = train_df.reset_index(drop=True)
slots_df = slots_df.reset_index(drop=True)
train_df.to_csv(f'{target_dir}/train.tsv', sep="\t", index=False)
slots_df.to_csv(f'{target_dir}/train_slots.tsv', sep="\t", index=False, header=False)
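# Hedged sketch of the augmentation above (sentences and indices are hypothetical): combining
# "turn on the lights" (label "0", slots "1 1 1 2") with "play some jazz" (label "3", slots
# "4 4 5") using an empty link_string yields the new row "turn on the lights play some jazz"
# with label "0,3" and slots "1 1 1 2 <o_slot_index> 4 4 5", where <o_slot_index> is the
# position of 'O' in dict.slots.csv.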
if __name__ == "__main__":
# Parse the command-line arguments.
parser = argparse.ArgumentParser(description="Process and convert datasets into NeMo\'s format.")
parser.add_argument(
"--source_data_dir", required=True, type=str, help='path to the folder containing the dataset files'
)
parser.add_argument("--target_data_dir", required=True, type=str, help='path to save the processed dataset')
parser.add_argument("--num_mixed", type=int, default=100, help='Number of training examples per class to mix')
parser.add_argument("--link_string", type=str, default="", help='string used to concatenate')
args = parser.parse_args()
source_dir = args.source_data_dir
target_dir = args.target_data_dir
num_mixed = args.num_mixed
link_string = args.link_string
augment_nemo_data(f'{source_dir}', f'{target_dir}', link_string, num_mixed)
shutil.copyfile(f'{source_dir}/dict.intents.csv', f'{target_dir}/dict.intents.csv')
shutil.copyfile(f'{source_dir}/dict.slots.csv', f'{target_dir}/dict.slots.csv')
shutil.copyfile(f'{source_dir}/dev.tsv', f'{target_dir}/dev.tsv')
shutil.copyfile(f'{source_dir}/dev_slots.tsv', f'{target_dir}/dev_slots.tsv')
| NeMo-main | scripts/dataset_processing/nlp/intent_and_slot/augment_training_data.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import shutil
from os.path import exists
from assistant_utils import process_assistant
from nemo.collections.nlp.data.data_utils.data_preprocessing import (
DATABASE_EXISTS_TMP,
MODE_EXISTS_TMP,
create_dataset,
get_dataset,
get_vocab,
if_exist,
)
from nemo.utils import logging
def ids2text(ids, vocab):
"""
Map list of ids of words in utterance to utterance
"""
return ' '.join([vocab[int(id_)] for id_ in ids])
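# Tiny illustration (hypothetical vocabulary): with vocab = ['BOS', 'what', 'flights', 'EOS'],
# ids2text(['1', '2'], vocab) returns "what flights".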
def process_atis(infold, outfold, modes=['train', 'test'], do_lower_case=False):
"""
Process ATIS dataset found at https://www.kaggle.com/siddhadev/atis-dataset-from-ms-cntk
Args:
infold: location for input fold of data
outfold: location for output fold of data
modes: dataset splits to process
do_lower_case: whether to lowercase the input utterances
"""
vocab = get_vocab(f'{infold}/atis.dict.vocab.csv')
if if_exist(outfold, [f'{mode}.tsv' for mode in modes]):
logging.info(DATABASE_EXISTS_TMP.format('ATIS', outfold))
return outfold
logging.info(f'Processing ATIS dataset and storing at {outfold}.')
os.makedirs(outfold, exist_ok=True)
outfiles = {}
for mode in modes:
outfiles[mode] = open(os.path.join(outfold, mode + '.tsv'), 'w', encoding='utf-8')
outfiles[mode].write('sentence\tlabel\n')
outfiles[mode + '_slots'] = open(f'{outfold}/{mode}_slots.tsv', 'w', encoding='utf-8')
queries = open(f'{infold}/atis.{mode}.query.csv', 'r', encoding='utf-8').readlines()
intents = open(f'{infold}/atis.{mode}.intent.csv', 'r', encoding='utf-8').readlines()
slots = open(f'{infold}/atis.{mode}.slots.csv', 'r', encoding='utf-8').readlines()
for i, query in enumerate(queries):
sentence = ids2text(query.strip().split()[1:-1], vocab)
if do_lower_case:
sentence = sentence.lower()
outfiles[mode].write(f'{sentence}\t{intents[i].strip()}\n')
slot = ' '.join(slots[i].strip().split()[1:-1])
outfiles[mode + '_slots'].write(slot + '\n')
shutil.copyfile(f'{infold}/atis.dict.intent.csv', f'{outfold}/dict.intents.csv')
shutil.copyfile(f'{infold}/atis.dict.slots.csv', f'{outfold}/dict.slots.csv')
for mode in modes:
outfiles[mode].close()
def process_snips(infold, outfold, do_lower_case, modes=['train', 'test'], dev_split=0.1):
"""
Process snips dataset
Args:
infold: location for input fold of data
outfold: location for output fold of data
do_lower_case: whether to lowercase the input utterances
modes: dataset splits to process
dev_split: proportion of train samples to put into dev set
"""
if not os.path.exists(infold):
link = 'https://github.com/snipsco/spoken-language-understanding-research-datasets'
raise ValueError(f'Data not found at {infold}. ' f'You may request to download the SNIPS dataset from {link}.')
exist = True
for dataset in ['light', 'speak', 'all']:
if if_exist(f'{outfold}/{dataset}', [f'{mode}.tsv' for mode in modes]):
logging.info(DATABASE_EXISTS_TMP.format('SNIPS-' + dataset, outfold))
else:
exist = False
if exist:
return outfold
logging.info(f'Processing SNIPS dataset and storing at folders "speak", "light" and "all" under {outfold}.')
logging.info(
f'Processing and importing "smart-speaker-en-close-field" -> "speak" and "smart-lights-en-close-field" -> "light".'
)
os.makedirs(outfold, exist_ok=True)
speak_dir = 'smart-speaker-en-close-field'
light_dir = 'smart-lights-en-close-field'
light_files = [f'{infold}/{light_dir}/dataset.json']
speak_files = [f'{infold}/{speak_dir}/training_dataset.json']
speak_files.append(f'{infold}/{speak_dir}/test_dataset.json')
light_train, light_dev, light_slots, light_intents = get_dataset(light_files, dev_split)
speak_train, speak_dev, speak_slots, speak_intents = get_dataset(speak_files)
create_dataset(light_train, light_dev, light_slots, light_intents, do_lower_case, f'{outfold}/light')
create_dataset(speak_train, speak_dev, speak_slots, speak_intents, do_lower_case, f'{outfold}/speak')
create_dataset(
light_train + speak_train,
light_dev + speak_dev,
light_slots | speak_slots,
light_intents | speak_intents,
do_lower_case,
f'{outfold}/all',
)
def process_jarvis_datasets(
infold, outfold, modes=['train', 'test', 'dev'], do_lower_case=False, ignore_prev_intent=False
):
"""
Process and convert Jarvis datasets into NeMo's BIO format
Args:
infold: location for input fold of data
outfold: location for output fold of data
modes: dataset splits to process
do_lower_case: whether to lowercase the input utterances
ignore_prev_intent: whether to ignore the intent from the previous turn when predicting the intent of the current turn
"""
dataset_name = "jarvis"
if if_exist(outfold, ['dict.intents.csv', 'dict.slots.csv']):
logging.info(DATABASE_EXISTS_TMP.format(dataset_name, outfold))
return outfold
logging.info(f'Processing {dataset_name} dataset and storing at {outfold}')
os.makedirs(outfold, exist_ok=True)
outfiles = {}
intents_list = {}
slots_list = {}
slots_list_all = {}
outfiles['dict_intents'] = open(f'{outfold}/dict.intents.csv', 'w', encoding='utf-8')
outfiles['dict_slots'] = open(f'{outfold}/dict.slots.csv', 'w', encoding='utf-8')
outfiles['dict_slots'].write('O\n')
slots_list["O"] = 0
slots_list_all["O"] = 0
for mode in modes:
if if_exist(outfold, [f'{mode}.tsv']):
logging.info(MODE_EXISTS_TMP.format(mode, dataset_name, outfold, mode))
continue
if not if_exist(infold, [f'{mode}.tsv']):
logging.info(f'{mode} mode of {dataset_name}' f' is skipped as it was not found.')
continue
outfiles[mode] = open(os.path.join(outfold, mode + '.tsv'), 'w', encoding='utf-8')
outfiles[mode].write('sentence\tlabel\n')
outfiles[mode + '_slots'] = open(f'{outfold}/{mode}_slots.tsv', 'w', encoding='utf-8')
queries = open(f'{infold}/{mode}.tsv', 'r', encoding='utf-8').readlines()
for i, query in enumerate(queries):
line_splits = query.strip().split("\t")
if len(line_splits) == 3:
intent_str, slot_tags_str, sentence = line_splits
else:
intent_str, sentence = line_splits
slot_tags_str = ""
if intent_str not in intents_list:
intents_list[intent_str] = len(intents_list)
outfiles['dict_intents'].write(f'{intent_str}\n')
if ignore_prev_intent:
start_token = 2
else:
start_token = 1
if do_lower_case:
sentence = sentence.lower()
sentence_cld = " ".join(sentence.strip().split()[start_token:-1])
outfiles[mode].write(f'{sentence_cld}\t' f'{str(intents_list[intent_str])}\n')
slot_tags_list = []
if slot_tags_str.strip():
slot_tags = slot_tags_str.strip().split(",")
for st in slot_tags:
if not st.strip():
continue
[start_i, end_i, slot_name] = st.strip().split(":")
slot_tags_list.append([int(start_i), int(end_i), slot_name])
if slot_name not in slots_list:
slots_list[slot_name] = len(slots_list)
slots_list_all[f'B-{slot_name}'] = len(slots_list_all)
slots_list_all[f'I-{slot_name}'] = len(slots_list_all)
outfiles['dict_slots'].write(f'B-{slot_name}\n')
outfiles['dict_slots'].write(f'I-{slot_name}\n')
slot_tags_list.sort(key=lambda x: x[0])
slots = []
processed_index = 0
for tag_start, tag_end, tag_str in slot_tags_list:
if tag_start > processed_index:
words_list = sentence[processed_index:tag_start].strip().split()
slots.extend([str(slots_list_all['O'])] * len(words_list))
words_list = sentence[tag_start:tag_end].strip().split()
slots.append(str(slots_list_all[f'B-{tag_str}']))
slots.extend([str(slots_list_all[f'I-{tag_str}'])] * (len(words_list) - 1))
processed_index = tag_end
if processed_index < len(sentence):
words_list = sentence[processed_index:].strip().split()
slots.extend([str(slots_list_all['O'])] * len(words_list))
slots = slots[1:-1]
slot = ' '.join(slots)
outfiles[mode + '_slots'].write(slot + '\n')
outfiles[mode + '_slots'].close()
outfiles[mode].close()
outfiles['dict_slots'].close()
outfiles['dict_intents'].close()
return outfold
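# Hedged worked example (a made-up line in the assumed Jarvis layout "intent\tslot_tags\tsentence",
# where slot tags are character offsets "start:end:name" and the sentence carries boundary
# tokens at both ends): for the line
#   set_alarm\t15:22:time\tBOS wake me at nine am EOS
# the sentence written to train.tsv is "wake me at nine am\t0" (first and last tokens dropped),
# and the slots line becomes "0 0 0 1 2" -- 'O' for the first three words, then B-time and
# I-time for "nine am", with the boundary positions trimmed away by slots[1:-1].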
if __name__ == "__main__":
# Parse the command-line arguments.
parser = argparse.ArgumentParser(description="Process and convert datasets into NeMo\'s format.")
parser.add_argument(
"--dataset_name", required=True, type=str, choices=['atis', 'snips', 'jarvis', 'assistant'],
)
parser.add_argument(
"--source_data_dir", required=True, type=str, help='path to the folder containing the dataset files'
)
parser.add_argument("--target_data_dir", required=True, type=str, help='path to save the processed dataset')
parser.add_argument("--do_lower_case", action='store_true')
parser.add_argument(
"--ignore_prev_intent",
action='store_true',
help='ignores previous intent while importing datasets in jarvis\'s format',
)
args = parser.parse_args()
dataset_name = args.dataset_name
source_dir = args.source_data_dir
target_dir = args.target_data_dir
if not exists(source_dir):
raise FileNotFoundError(f"{source_dir} does not exist.")
if dataset_name == 'atis':
process_atis(infold=source_dir, outfold=target_dir, do_lower_case=args.do_lower_case)
elif dataset_name == 'snips':
process_snips(infold=source_dir, outfold=target_dir, do_lower_case=args.do_lower_case)
elif dataset_name == 'jarvis':
process_jarvis_datasets(
infold=source_dir,
outfold=target_dir,
modes=["train", "test", "dev"],
do_lower_case=args.do_lower_case,
ignore_prev_intent=args.ignore_prev_intent,
)
elif dataset_name == 'assistant':
process_assistant(infold=source_dir, outfold=target_dir)
else:
raise ValueError(f'Dataset {dataset_name} is not supported.')
| NeMo-main | scripts/dataset_processing/nlp/intent_and_slot/import_datasets.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
from tqdm import tqdm
"""
Dataset preprocessing script for the SQuAD dataset: https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json
Converts the dataset into a jsonl format that can be used for p-tuning/prompt tuning in NeMo.
Inputs:
data-dir: (str) The directory where the squad dataset was downloaded, files will be saved here
train-file: (str) Name of train set file, either train-v1.1.json or train-v2.0.json
dev-file: (str) Name of dev set file, either dev-v1.1.json or dev-v2.0.json
save-name-base: (str) The base name for each of the train, val, and test files. If save-name-base were 'squad' for
example, the files would be saved as squad_train.jsonl, squad_val.jsonl, and squad_test.jsonl
include-topic-name: Whether to include the topic name for the paragraph in the data json. See the squad explanation
below for more context on what is meant by 'topic name'.
random-seed: (int) Random seed for repeatable shuffling of train/val/test splits.
Saves train, val, and test files for the SQuAD dataset. The val and test splits are the same data, because the given test
split lacks ground truth answers.
An example of the processed output written to file:
{
"taskname": "squad",
"context": "Red is the traditional color of warning and danger. In the Middle Ages, a red flag announced that the defenders of a town or castle would fight to defend it, and a red flag hoisted by a warship meant they would show no mercy to their enemy. In Britain, in the early days of motoring, motor cars had to follow a man with a red flag who would warn horse-drawn vehicles, before the Locomotives on Highways Act 1896 abolished this law. In automobile races, the red flag is raised if there is danger to the drivers. In international football, a player who has made a serious violation of the rules is shown a red penalty card and ejected from the game.",
"question": "What did a red flag signal in the Middle Ages?",
"answer": " defenders of a town or castle would fight to defend it"
},
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--data-dir", type=str, default=".")
parser.add_argument("--train-file", type=str, default="train-v1.1.json")
parser.add_argument("--dev-file", type=str, default="dev-v1.1.json")
parser.add_argument("--save-name-base", type=str, default="squad")
parser.add_argument("--include-topic-name", action='store_true')
parser.add_argument("--random-seed", type=int, default=1234)
parser.add_argument("--sft-format", action='store_true')
args = parser.parse_args()
train_data_dict = json.load(open(f"{args.data_dir}/{args.train_file}"))
dev_data_dict = json.load(open(f"{args.data_dir}/{args.dev_file}"))
train_data = train_data_dict['data']
val_data = dev_data_dict['data']
save_name_base = f"{args.data_dir}/{args.save_name_base}"
process_data(train_data, val_data, save_name_base, args.include_topic_name, args.sft_format)
def process_data(train_data, val_data, save_name_base, include_topic, sft_format):
train_set = extract_questions(train_data, include_topic, sft_format, split="train")
val_set = extract_questions(val_data, include_topic, sft_format, split="val")
test_set = extract_questions(val_data, include_topic, sft_format, split="test")
gen_file(train_set, save_name_base, 'train', sft_format)
gen_file(val_set, save_name_base, 'val', sft_format)
gen_file(test_set, save_name_base, 'test', sft_format, make_ground_truth=True)
gen_file(test_set, save_name_base, 'test', sft_format, make_ground_truth=False)
def extract_questions(data, include_topic, sft_format, split):
processed_data = []
# Iterate over topics, want to keep them separate in train/val/test splits
for question_group in data:
processed_topic_data = []
topic = question_group['title']
questions = question_group['paragraphs']
# Iterate over paragraphs related to topics
for qa_group in questions:
context = qa_group['context']
qas = qa_group['qas']
# Iterate over questions about paragraph
for qa in qas:
question = qa['question']
try:
# Dev set has multiple right answers. Want all possible answers in test split ground truth
if split == "test":
answers = [qa['answers'][i]['text'] for i in range(len(qa['answers']))]
# Choose one answer from dev set if making validation split, train set only has one answer
else:
answers = qa['answers'][0]["text"]
except IndexError:
continue
if sft_format:
example_json = {
"input": f"User: Context:{context} Question:{question}\n\nAssistant:",
"output": answers,
}
else:
example_json = {"taskname": "squad", "context": context, "question": question, "answer": answers}
if include_topic:
example_json["topic"] = topic
processed_topic_data.append(example_json)
processed_data.extend(processed_topic_data)
return processed_data
def gen_file(data, save_name_base, split_type, sft_format, make_ground_truth=False):
save_path = f"{save_name_base}_{split_type}.jsonl"
if make_ground_truth:
save_path = f"{save_name_base}_{split_type}_ground_truth.jsonl"
print(f"Saving {split_type} split to {save_path}")
with open(save_path, 'w') as save_file:
for example_json in tqdm(data):
# Don't want labels in the test set
if split_type == "test" and not make_ground_truth:
if sft_format:
example_json["output"] = ""
else:
del example_json["answer"]
save_file.write(json.dumps(example_json) + '\n')
if __name__ == "__main__":
main()
| NeMo-main | scripts/dataset_processing/nlp/squad/prompt_learning_squad_preprocessing.py |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This script can be used to preprocess the Spoken Wikipedia corpus before running ctc-segmentation.
The input folder consists of subfolders with the following structure
├── <Name of Wikipedia article>
│ ├── aligned.swc
│ ├── audiometa.txt
│ ├── audio.ogg
│ ├── info.json
│ ├── wiki.html
│ ├── wiki.txt
│ └── wiki.xml
## The destination folder will contain enumerated .ogg and .txt files like this:
├── audio
| ├── 1.ogg
| ├── 2.ogg
| ...
└── text
├── 1.txt
├── 2.txt
...
"""
import argparse
import os
import re
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_folder", required=True, type=str, help="Input folder in which each subfolder contains an article"
)
parser.add_argument(
"--destination_folder", required=True, type=str, help="Destination folder with audio and text subfolder"
)
args = parser.parse_args()
def replace_diacritics(text):
text = re.sub(r"[éèëēêęěė]", "e", text)
text = re.sub(r"[ãâāáäăâàąåạả]", "a", text)
text = re.sub(r"[úūüùưûů]", "u", text)
text = re.sub(r"[ôōóöõòő]", "o", text)
text = re.sub(r"[ćçč]", "c", text)
text = re.sub(r"[ïīíîıì]", "i", text)
text = re.sub(r"[ñńňņ]", "n", text)
text = re.sub(r"[țť]", "t", text)
text = re.sub(r"[łľ]", "l", text)
text = re.sub(r"[żžź]", "z", text)
text = re.sub(r"[ğ]", "g", text)
text = re.sub(r"[ř]", "r", text)
text = re.sub(r"[ý]", "y", text)
text = re.sub(r"[æ]", "ae", text)
text = re.sub(r"[œ]", "oe", text)
text = re.sub(r"[șşšś]", "s", text)
return text
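# Small usage sketch (illustrative only): replace_diacritics("café São Paulo naïve") returns
# "cafe Sao Paulo naive"; upper-case accented characters are not covered by the patterns above
# and pass through unchanged.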
def get_audio(name, n):
"""
Copies .ogg file. If there are several .ogg files, concatenates them.
Args:
name - name of folder within Spoken Wikipedia
n - integer that will serve as output file name, e.g. if n=1, file 1.ogg will be created
"""
audio_path = os.path.join(args.input_folder, name, "audio.ogg")
if not os.path.exists(audio_path):
## Some folders have multiple .ogg files, so we need to first combine them into one file. Example:
## |── Universe
## │ ├── aligned.swc
## │ ├── audio1.ogg
## │ ├── audio2.ogg
## │ ├── audio3.ogg
## │ ├── audio4.ogg
## │ ├── audiometa.txt
## │ ├── info.json
## │ ├── wiki.html
## │ ├── wiki.txt
## │ └── wiki.xml
multiple_ogg_files = []
for i in range(1, 5):
path = os.path.join(args.input_folder, name, "audio" + str(i) + ".ogg")
if os.path.exists(path):
multiple_ogg_files.append(path)
else:
break
if len(multiple_ogg_files) == 0:
return
elif len(multiple_ogg_files) == 1:
os.system("cp \"" + multiple_ogg_files[0] + "\" \"" + audio_path + "\"")
else:
tmp_file_name = "ffmeg_inputs.txt"
print("tmp_file_name=", tmp_file_name)
with open(tmp_file_name, "w", encoding="utf-8") as tmp_file:
for path in multiple_ogg_files:
tmp_file.write("file '" + path + "'\n")
cmd = "ffmpeg -f concat -i \"" + tmp_file_name + "\" -c copy \"" + audio_path + "\""
print(cmd)
os.system(cmd)
output_audio_path = args.destination_folder + "/audio/" + str(n) + ".ogg"
os.system("cp \"" + audio_path + "\" " + output_audio_path)
def get_text(name, n):
"""
Cleans wiki.txt.
Args:
name - name of folder within Spoken Wikipedia
n - integer that will serve as output file name, e.g. if n=1, file 1.txt will be created
"""
# Then we need to clean the text
out_text = open(args.destination_folder + "/text/" + str(n) + ".txt", "w", encoding="utf-8")
with open(args.input_folder + "/" + name + "/wiki.txt", "r", encoding="utf-8") as f:
for line in f:
do_break = False
line2 = line.strip()
ref_parts = line2.split("<ref")
for idx, s in enumerate(ref_parts):
if idx != 0:
s = "<ref" + s
if s.startswith("[[Image") and s.endswith("]]"):
continue
if s.startswith("[[File") and s.endswith("]]"):
continue
if s.startswith(":"):
continue
if s.startswith("{|") or s.startswith("|}") or s.startswith("|") or s.startswith("!"):
continue
if s.startswith("{{") and (s.endswith("}}") or "}}" not in s):
continue
if s.startswith("{{pp-move"):
continue
s = re.sub(r"\[\[Image\:[^\]]+\]\]", r"", s)
s = re.sub(r"\[\[File\:[^\]]+\]\]", r"", s)
s = re.sub(r"\[http[^\]]+\]", r"", s)
s = re.sub(r"<math>[^<>]+</math>", r"", s)
s = re.sub(r"<!\-\-.+\-\->", r"", s) # <!--DashBot--> can be inside <ref>
s = re.sub(r"<ref>.+</ref>", r"", s)
s = re.sub(r"<ref .+</ref>", r"", s)
s = re.sub(r"<ref[^<>]+/>", r"", s)
s = re.sub(r"<[^ <>]+>", r"", s) # <sub>, <sup>, </u>
if (
re.match(r"== *Notes *==", s)
or re.match(r"== *References *==", s)
or re.match(r"== *External links *==", s)
or re.match(r"== *See also *==", s)
):
do_break = True
break
s = re.sub(r"{{convert\|(\d+)\|(\w+)\|[^}]+}}", r"\g<1> \g<2>", s) # {{convert|7600|lb|kg}}
s = re.sub(r"{{cquote\|", r"", s)
s = re.sub(r"{{[^{}]+}}", r"", s)
s = s.replace("{{", "").replace("}}", "")
s = re.sub(r"(lang[^()]+)", r"", s) # (lang-bn...)
s = re.sub(r"==+", r"", s)
s = re.sub(r"''+", r" ", s) # remove multiple quotes
s = re.sub(r" '", r" ", s) # remove quote at the beginning
s = re.sub(r"' ", r" ", s) # remove quote at the end
s = re.sub(r"[…\*]", r" ", s)
s = re.sub(r"\\u....", r" ", s) # remove unicode
s = re.sub(r"&[^ ;&]+;", r"", s) # —
s = replace_diacritics(s)
s = re.sub(r"\[\[[^\]]+\|([^\]]+)\]\]", r"\g<1>", s) # if several variants, take the last one
s = re.sub(r"\[\[([^\]]+)\]\]", r"\g<1>", s)
out_text.write(s + "\n")
if do_break:
break
out_text.close()
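# Hedged illustration of the cleanup above (a hypothetical wiki.txt line): an input such as
#   The '''universe''' is all of [[Space|space]] and [[time]].<ref>Some source</ref>
# comes out roughly as "The  universe  is all of space and time." -- the bold markers are
# replaced by spaces, piped links keep their display text, plain links keep their target, and
# the <ref>...</ref> part is split off and reduced to an empty line.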
if __name__ == "__main__":
n = 0
for name in os.listdir(args.input_folder):
n += 1
if not os.path.exists(args.input_folder + "/" + name + "/wiki.txt"):
print("wiki.txt does not exist in " + name)
continue
get_audio(name, n)
get_text(name, n)
| NeMo-main | scripts/dataset_processing/spoken_wikipedia/preprocess.py |
# Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Script to build and install decoder package.
It is used by scripts/asr_language_modeling/ngram_lm/install_beamsearch_decoders.sh to install
KenLM and OpenSeq2Seq decoder.
You can set the order of KenLM model by changing -DKENLM_MAX_ORDER=10 argument.
"""
from __future__ import absolute_import, division, print_function
import argparse
import distutils.ccompiler
import glob
import multiprocessing.pool
import os
import platform
import sys
from setuptools import Extension, distutils, setup
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--num_processes", default=1, type=int, help="Number of cpu processes to build package. (default: %(default)d)"
)
args = parser.parse_known_args()
# reconstruct sys.argv to pass to setup below
sys.argv = [sys.argv[0]] + args[1]
# monkey-patch for parallel compilation
# See: https://stackoverflow.com/a/13176803
def parallelCCompile(
self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None,
):
# those lines are copied from distutils.ccompiler.CCompiler directly
macros, objects, extra_postargs, pp_opts, build = self._setup_compile(
output_dir, macros, include_dirs, sources, depends, extra_postargs
)
cc_args = self._get_cc_args(pp_opts, debug, extra_preargs)
# parallel code
def _single_compile(obj):
try:
src, ext = build[obj]
except KeyError:
return
self._compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
# convert to list, imap is evaluated on-demand
thread_pool = multiprocessing.pool.ThreadPool(args[0].num_processes)
list(thread_pool.imap(_single_compile, objects))
return objects
def compile_test(header, library):
dummy_path = os.path.join(os.path.dirname(__file__), "dummy")
command = (
"bash -c \"g++ -include "
+ header
+ " -l"
+ library
+ " -x c++ - <<<'int main() {}' -o "
+ dummy_path
+ " >/dev/null 2>/dev/null && rm "
+ dummy_path
+ " 2>/dev/null\""
)
return os.system(command) == 0
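# For reference, compile_test('zlib.h', 'z') expands to a shell command roughly like
#   g++ -include zlib.h -lz -x c++ - <<<'int main() {}' -o <dummy>
# and returns True only when that compile-and-link step succeeds, i.e. when both the header
# and the library are available on the build machine.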
# hack compile to support parallel compiling
distutils.ccompiler.CCompiler.compile = parallelCCompile
FILES = glob.glob('kenlm/util/*.cc') + glob.glob('kenlm/lm/*.cc') + glob.glob('kenlm/util/double-conversion/*.cc')
FILES += glob.glob('openfst-1.6.3/src/lib/*.cc')
FILES = [fn for fn in FILES if not (fn.endswith('main.cc') or fn.endswith('test.cc') or fn.endswith('unittest.cc'))]
LIBS = ['stdc++']
if platform.system() != 'Darwin':
LIBS.append('rt')
ARGS = ['-O3', '-DNDEBUG', '-DKENLM_MAX_ORDER=10', '-std=c++11']
if compile_test('zlib.h', 'z'):
ARGS.append('-DHAVE_ZLIB')
LIBS.append('z')
if compile_test('bzlib.h', 'bz2'):
ARGS.append('-DHAVE_BZLIB')
LIBS.append('bz2')
if compile_test('lzma.h', 'lzma'):
ARGS.append('-DHAVE_XZLIB')
LIBS.append('lzma')
os.system('swig -python -c++ ./decoders.i')
decoders_module = [
Extension(
name='_swig_decoders',
sources=FILES + glob.glob('*.cxx') + glob.glob('*.cpp'),
language='c++',
include_dirs=['.', 'kenlm', 'openfst-1.6.3/src/include', 'ThreadPool',],
libraries=LIBS,
extra_compile_args=ARGS,
)
]
setup(
name='ctc_decoders',
version='1.1',
description="""CTC decoders""",
ext_modules=decoders_module,
py_modules=['ctc_decoders', 'swig_decoders'],
)
| NeMo-main | scripts/installers/setup_os2s_decoders.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script responsible for generation of a JSON file containing list of modules of a given collection. """
import argparse
import importlib
import inspect
import json
import os
import nemo
from nemo.utils import logging
def process_member(name, obj, module_list):
""" Helper function processing the passed object and, if ok, adding a record to the module list.
Args:
name: name of the member
obj: member (class/function etc.)
module_list: list of modules that (probably) will be expanded.
"""
# It is not a class - skip it.
if not inspect.isclass(obj):
return
# Check inheritance - we know that all our datasets/modules/losses inherit from Serialization,
# Btw. Serialization is also required by this script.
if not issubclass(obj, nemo.core.Serialization):
return
logging.info(" * Processing `{}`".format(str(obj)))
module_list.append(
{
"name": name,
"cls": str(obj),
# Temporary solution: mockup arguments.
"arguments": [
"jasper",
"activation",
"feat_in",
"normalization_mode",
"residual_mode",
"norm_groups",
"conv_mask",
"frame_splicing",
"init_mode",
],
# Temporary solution: mockup input types.
"input_types": {
"audio_signal": "axes: (batch, dimension, time); elements_type: MelSpectrogramType",
"length": "axes: (batch,); elements_type: LengthType",
},
# Temporary solution: mockup output types.
"output_types": {
"encoder_output": "axes: (batch, dimension, time); elements_type: AcousticEncodedRepresentation"
},
}
)
def main():
""" Main function analysing the indicated NeMo collection and generating a JSON file with module descriptions. """
# Parse filename.
parser = argparse.ArgumentParser()
parser.add_argument('--collection', help='ID of the collection', type=str)
parser.add_argument('--filename', help='Name of the output JSON file', type=str, default="modules.json")
args = parser.parse_args()
# Get collections directory.
collections_dir = os.path.dirname(nemo.collections.__file__)
logging.info('Analysing collections in `{}`'.format(collections_dir))
# Generate list of NeMo collections - from the list of collection subfolders.
collections = {}
for sub_dir in os.listdir(collections_dir):
# Skip cache.
if sub_dir == "__pycache__":
continue
# Check if it is a directory.
if os.path.isdir(os.path.join(collections_dir, sub_dir)):
collections[sub_dir] = "nemo.collections." + sub_dir
# Check the collection.
if args.collection not in collections.keys():
logging.error("Coudn't process the incidated `{}` collection".format(args.collection))
logging.info(
"Please select one of the existing collections using `--collection [{}]`".format("|".join(collections))
)
exit(-1)
# Load the collection specification.
collection_spec = importlib.util.find_spec(collections[args.collection])
if collection_spec is None:
logging.error("Failed to load the `{}` collection".format(val))
# Import the module from the module specification.
collection = importlib.util.module_from_spec(collection_spec)
collection_spec.loader.exec_module(collection)
module_list = []
# Iterate over the packages in the indicated collection.
logging.info("Analysing the `{}` collection".format(args.collection))
try:  # Datasets in the 'data' package
logging.info("Analysing the 'data' package")
for name, obj in inspect.getmembers(collection.data):
process_member(name, obj, module_list)
except AttributeError as e:
logging.info(" * No datasets found")
try:  # Datasets in the 'datasets' package
logging.info("Analysing the 'datasets' package")
for name, obj in inspect.getmembers(collection.datasets):
process_member(name, obj, module_list)
except AttributeError as e:
logging.info(" * No datasets found")
try: # Modules
logging.info("Analysing the 'modules' package")
for name, obj in inspect.getmembers(collection.modules):
process_member(name, obj, module_list)
except AttributeError as e:
logging.info(" * No modules found")
try: # Losses
logging.info("Analysing the 'losses' package")
for name, obj in inspect.getmembers(collection.losses):
process_member(name, obj, module_list)
except AttributeError as e:
logging.info(" * No losses found")
# Add prefix - only for default name.
filename = args.filename if args.filename != "modules.json" else args.collection + "_" + args.filename
# Export to JSON.
with open(filename, 'w', encoding='utf-8') as outfile:
json.dump(module_list, outfile)
logging.info(
'Finished analysis of the `{}` collection, results exported to `{}`.'.format(args.collection, filename)
)
if __name__ == '__main__':
main()
| NeMo-main | external/get_modules.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script responsible for generation of a JSON file with list of NeMo collections. """
import argparse
import importlib
import json
import os
import nemo
from nemo.utils import logging
def process_collection(id, col):
""" Helper function processing the collection.
Args:
id: (short) name of the collection.
col: a collection (python module).
"""
return {
"id": id,
"name": col.__name__,
"description": col.__description__,
"version": col.__version__,
"author": col.__author__,
}
def main():
""" Main function generating a JSON file with list of NeMo collections. """
# Parse filename.
parser = argparse.ArgumentParser()
parser.add_argument('--filename', help='Name of the output JSON file', type=str, default="collections.json")
args = parser.parse_args()
# Get collections directory.
collections_dir = os.path.dirname(nemo.collections.__file__)
logging.info('Analysing collections in `{}`'.format(collections_dir))
# Generate list of NeMo collections - from the list of collection subfolders.
collections = {}
for sub_dir in os.listdir(collections_dir):
# Skip cache.
if sub_dir == "__pycache__":
continue
# Check if it is a directory.
if os.path.isdir(os.path.join(collections_dir, sub_dir)):
collections[sub_dir] = "nemo.collections." + sub_dir
output_list = []
# Iterate over all collections.
for key, val in collections.items():
# Try to get module specification.
module_spec = importlib.util.find_spec(val)
if module_spec is None:
logging.warning(" * Failed to process `{}`".format(val))
else:
try:
# Import the module from the module specification.
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
# Add to list.
output_list.append(process_collection(key, module))
logging.info(" * Processed `{}`".format(val))
except AttributeError:
logging.warning(" * Failed to process `{}`".format(val))
# Export to JSON.
with open(args.filename, 'w', encoding='utf-8') as outfile:
json.dump(output_list, outfile)
logging.info('Finished the analysis, results exported to `{}`.'.format(args.filename))
if __name__ == '__main__':
main()
| NeMo-main | external/get_collections.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Setup for pip package."""
import codecs
import os
import sys
import subprocess
from setuptools import setup
from setuptools import Extension
sys.path.insert(0, os.path.abspath(os.path.join("nvtx_plugins"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("nvtx_plugins", "python"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("nvtx_plugins", "python", "nvtx"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("nvtx_plugins", "python", "nvtx", "plugins"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("nvtx_plugins", "python", "nvtx", "plugins", "tf"))) # Important
from package_info import __contact_emails__
from package_info import __contact_names__
from package_info import __description__
from package_info import __download_url__
from package_info import __homepage__
from package_info import __keywords__
from package_info import __license__
from package_info import __package_name__
from package_info import __repository_url__
from package_info import __version__
from setup_utils import custom_build_ext
def run_cmd(command):
ps = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
return ps.communicate()[0].decode('utf-8').strip()
def get_tf_pkgname():
for pkg_name in ["tensorflow-gpu", "tf-nightly-gpu"]:
if pkg_name in run_cmd("pip freeze | grep %s" % pkg_name):
return pkg_name
return "tensorflow" # Default if not found
def req_file(filename, folder="requirements"):
with open(os.path.join(folder, filename)) as f:
content = f.readlines()
# you may also want to remove whitespace characters
# Example: `\n` at the end of each line
return [x.strip() for x in content]
install_requires = req_file("requirements.txt") + [get_tf_pkgname()]
extras_require = {
'test': req_file("requirements_test.txt"),
}
tests_requirements = extras_require["test"]
tensorflow_nvtx_lib = Extension(
'nvtx.plugins.tf.lib.nvtx_ops',
sources=[
'nvtx_plugins/cc/nvtx_ops.cc',
'nvtx_plugins/cc/nvtx_kernels.cc',
],
undef_macros=["NDEBUG"],
extra_compile_args=['-lnvToolsExt'],
extra_link_args=['-lnvToolsExt']
)
# =================== Reading Readme file as TXT files ===================
if os.path.exists('README.rst'):
# codec is used for consistent encoding
long_description = codecs.open(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.rst'),
'r', 'utf-8'
).read()
long_description = long_description.replace(
"docs/images/",
"https://github.com/NVIDIA/nvtx-plugins/raw/master/docs/images/"
)
else:
long_description = 'See ' + __homepage__
setup(
name=__package_name__,
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=__version__,
description=__description__,
long_description=long_description,
# The project's main homepage.
url=__repository_url__,
download_url=__download_url__,
# Author details
author=__contact_names__,
author_email=__contact_emails__,
# maintainer Details
maintainer=__contact_names__,
maintainer_email=__contact_emails__,
# The licence under which the project is released
license=__license__,
classifiers=[
# How mature is this project? Common values are
# 1 - Planning
# 2 - Pre-Alpha
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
# 6 - Mature
# 7 - Inactive
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Intended Audience :: Information Technology',
# Indicate what your project relates to
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: Apache Software License',
# Supported python versions
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
# Additional Setting
'Environment :: Console',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
],
cmdclass={'build_ext': lambda dist: custom_build_ext(dist, tensorflow_nvtx_lib)},
ext_modules=[tensorflow_nvtx_lib],
# Add in any packaged data.
include_package_data=True,
packages=['nvtx.plugins.tf', 'nvtx.plugins.tf.keras'],
package_dir={'': 'nvtx_plugins/python'},
# Contained modules and scripts.
install_requires=install_requires,
extras_require=extras_require,
setup_requires=['pytest-runner'],
zip_safe=False,
# PyPI package information.
keywords=__keywords__,
)
| nvtx-plugins-master | setup.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Code Inspired by: https://github.com/horovod/horovod/blob/master/setup.py
#
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# Modified by NVIDIA to fit our requirements
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import re
import subprocess
import sys
import textwrap
import traceback
from contextlib import contextmanager
from distutils.errors import CompileError
from distutils.errors import DistutilsError
from distutils.errors import DistutilsPlatformError
from distutils.errors import LinkError
from distutils.sysconfig import customize_compiler
from distutils.version import LooseVersion
from setuptools.command.build_ext import build_ext
__all__ = ["custom_build_ext"]
# determining if the system has cmake installed
try:
subprocess.check_output(['cmake', '--version'])
have_cmake = True
except (FileNotFoundError, subprocess.CalledProcessError):
have_cmake = False
def check_tf_version():
try:
import tensorflow as tf
if LooseVersion(tf.__version__) < LooseVersion('1.1.0'):
raise DistutilsPlatformError(
'Your TensorFlow version %s is outdated. '
'NVTX Plugins requires tensorflow>=1.1.0' % tf.__version__
)
except ImportError:
raise DistutilsPlatformError(
'import tensorflow failed, is it installed?\n\n%s' % traceback.format_exc()
)
except AttributeError:
# This means that tf.__version__ was not exposed, which makes it *REALLY* old.
raise DistutilsPlatformError(
'Your TensorFlow version is outdated. NVTX Plugins requires tensorflow>=1.1.0'
)
def build_cmake(build_ext, ext, prefix, plugin_ext=None, options=None):
cmake_bin = 'cmake'
# All statically linked libraries will be placed here
lib_output_dir = os.path.abspath(os.path.join(build_ext.build_temp, 'lib', prefix))
if not os.path.exists(lib_output_dir):
os.makedirs(lib_output_dir)
if plugin_ext:
plugin_ext.library_dirs += [lib_output_dir]
if options:
options['LIBRARY_DIRS'] += [lib_output_dir]
extdir = os.path.abspath(os.path.dirname(build_ext.get_ext_fullpath(ext.name)))
# config = 'Debug' if build_ext.debug else 'Release'
config = 'Release'
cmake_args = [
'-DCMAKE_BUILD_TYPE=' + config,
'-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(config.upper(), extdir),
'-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY_{}={}'.format(config.upper(),
lib_output_dir),
]
cmake_build_args = [
'--config', config,
'--', '-j4',
]
# Keep temp build files within a unique subdirectory
build_temp = os.path.abspath(os.path.join(build_ext.build_temp, ext.name))
if not os.path.exists(build_temp):
os.makedirs(build_temp)
# Config and build the extension
try:
subprocess.check_call([cmake_bin, ext.cmake_lists_dir] + cmake_args, cwd=build_temp)
subprocess.check_call([cmake_bin, '--build', '.'] + cmake_build_args, cwd=build_temp)
except OSError as e:
raise RuntimeError('CMake failed: {}'.format(str(e)))
# Add the library so the plugin will link against it during compilation
if plugin_ext:
plugin_ext.libraries += [ext.name]
if options:
options['LIBRARIES'] += [ext.name]
def remove_offensive_gcc_compiler_options(compiler_version):
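    # gcc releases older than 4.9 do not understand some hardening flags injected by
    # newer distro Python builds (e.g. -fstack-protector-strong, -Wdate-time), so
    # those flags are stripped or downgraded below before building the extension.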
offensive_replacements = dict()
if compiler_version < LooseVersion('4.9'):
offensive_replacements = {
'-Wdate-time': '',
'-fstack-protector-strong': '-fstack-protector'
}
if offensive_replacements:
from sysconfig import get_config_var
cflags = get_config_var('CONFIGURE_CFLAGS')
cppflags = get_config_var('CONFIGURE_CPPFLAGS')
ldshared = get_config_var('LDSHARED')
for k, v in offensive_replacements.items():
cflags = cflags.replace(k, v)
cppflags = cppflags.replace(k, v)
ldshared = ldshared.replace(k, v)
return cflags, cppflags, ldshared
# Use defaults
return None, None, None
# def check_avx_supported():
# try:
# flags_output = subprocess.check_output(
# 'gcc -march=native -E -v - </dev/null 2>&1 | grep cc1',
# shell=True, universal_newlines=True).strip()
# flags = shlex.split(flags_output)
# return '+f16c' in flags and '+avx' in flags
# except subprocess.CalledProcessError:
#         # Fallback to non-AVX if we were not able to get flag information.
# return False
def get_cpp_flags(build_ext):
last_err = None
default_flags = ['-std=c++11', '-fPIC', '-O2', '-Wall']
# avx_flags = ['-mf16c', '-mavx'] if check_avx_supported() else []
avx_flags = []
flags_to_try = [
default_flags,
default_flags + ['-stdlib=libc++']
]
if avx_flags:
flags_to_try.append(default_flags + avx_flags)
flags_to_try.append(default_flags + ['-stdlib=libc++'] + avx_flags)
for cpp_flags in flags_to_try:
try:
test_compile(
build_ext, 'test_cpp_flags',
extra_compile_preargs=cpp_flags,
code=textwrap.dedent(
'''\
#include <unordered_map>
void test() {
}
'''
)
)
return cpp_flags
except (CompileError, LinkError):
last_err = 'Unable to determine C++ compilation flags (see error above).'
except Exception:
last_err = 'Unable to determine C++ compilation flags. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_link_flags(build_ext):
last_err = None
libtool_flags = ['-Wl,-exported_symbols_list']
ld_flags = []
flags_to_try = [ld_flags, libtool_flags]
for link_flags in flags_to_try:
try:
test_compile(build_ext, 'test_link_flags',
extra_link_preargs=link_flags,
code=textwrap.dedent('''\
void test() {
}
'''))
return link_flags
except (CompileError, LinkError):
last_err = 'Unable to determine C++ link flags (see error above).'
except Exception:
last_err = 'Unable to determine C++ link flags. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_cuda_dirs(build_ext, cpp_flags):
cuda_include_dirs = []
cuda_lib_dirs = []
cuda_home = os.environ.get('CUDA_HOME')
cuda_lib = os.environ.get('CUDA_LIB')
cuda_include = os.environ.get('CUDA_INCLUDE')
if cuda_home and os.path.exists(cuda_home):
for _dir in ['%s/include' % cuda_home]:
if os.path.exists(_dir):
cuda_include_dirs.append(_dir)
for _dir in ['%s/lib' % cuda_home, '%s/lib64' % cuda_home]:
if os.path.exists(_dir):
cuda_lib_dirs.append(_dir)
if cuda_include and os.path.exists(cuda_include) and cuda_include not in cuda_include_dirs:
cuda_include_dirs.append(cuda_include)
if cuda_lib and os.path.exists(cuda_lib) and cuda_lib not in cuda_lib_dirs:
cuda_lib_dirs.append(cuda_lib)
if not cuda_include_dirs and not cuda_lib_dirs:
# default to /usr/local/cuda
cuda_include_dirs += ['/usr/local/cuda/include']
cuda_lib_dirs += ['/usr/local/cuda/lib', '/usr/local/cuda/lib64']
try:
test_compile(
build_ext,
'test_cuda',
libraries=['cudart'],
include_dirs=cuda_include_dirs,
library_dirs=cuda_lib_dirs,
extra_compile_preargs=cpp_flags,
code=textwrap.dedent(
'''\
#include <cuda_runtime.h>
void test() {
cudaSetDevice(0);
}
'''
)
)
except (CompileError, LinkError):
raise DistutilsPlatformError(
'CUDA library was not found (see error above).\n'
'Please specify correct CUDA location with the CUDA_HOME '
'environment variable or combination of CUDA_INCLUDE and '
'CUDA_LIB environment variables.\n\n'
'CUDA_HOME - path where CUDA include and lib directories can be found\n'
'CUDA_INCLUDE - path to CUDA include directory\n'
'CUDA_LIB - path to CUDA lib directory'
)
return cuda_include_dirs, cuda_lib_dirs
def get_common_options(build_ext):
cpp_flags = get_cpp_flags(build_ext)
link_flags = get_link_flags(build_ext)
have_cuda = True
MACROS = []
INCLUDES = []
SOURCES = []
COMPILE_FLAGS = cpp_flags
LINK_FLAGS = link_flags
LIBRARY_DIRS = []
LIBRARIES = []
if have_cuda:
cuda_include_dirs, cuda_lib_dirs = get_cuda_dirs(build_ext, cpp_flags)
MACROS += [('HAVE_CUDA', '1')]
INCLUDES += cuda_include_dirs
LIBRARY_DIRS += cuda_lib_dirs
LIBRARIES += ['cudart']
return dict(
MACROS=MACROS,
INCLUDES=INCLUDES,
SOURCES=SOURCES,
COMPILE_FLAGS=COMPILE_FLAGS,
LINK_FLAGS=LINK_FLAGS,
LIBRARY_DIRS=LIBRARY_DIRS,
LIBRARIES=LIBRARIES
)
# run the customize_compiler
class custom_build_ext(build_ext):
def __init__(self, dist, tf_libs):
if isinstance(tf_libs, (list, tuple)):
self._tf_libs = tf_libs
else:
self._tf_libs = [tf_libs]
super(custom_build_ext, self).__init__(dist)
def build_extensions(self):
options = get_common_options(self)
built_plugins = []
for extension in self._tf_libs:
try:
build_tf_extension(self, extension, options)
built_plugins.append(True)
except:
print("===========================================================================================")
print(
'INFO: Unable to build TensorFlow plugin, will skip it.\n\n%s' % traceback.format_exc(),
file=sys.stderr
)
print("===========================================================================================")
built_plugins.append(False)
if not built_plugins[-1]:
raise DistutilsError('TensorFlow plugin: `%s` failed to build. Aborting.' % extension.name)
if not any(built_plugins):
raise DistutilsError('No plugin was built. See errors above.')
def build_tf_extension(build_ext, tf_lib, options):
check_tf_version()
tf_compile_flags, tf_link_flags = get_tf_flags(build_ext, options['COMPILE_FLAGS'])
tf_lib.define_macros = options['MACROS'] + tf_lib.define_macros
tf_lib.include_dirs = options['INCLUDES'] + tf_lib.include_dirs
tf_lib.sources = options['SOURCES'] + tf_lib.sources
tf_lib.extra_compile_args = options['COMPILE_FLAGS'] + tf_compile_flags + tf_lib.extra_compile_args
tf_lib.extra_link_args = options['LINK_FLAGS'] + tf_link_flags + tf_lib.extra_link_args
tf_lib.library_dirs = options['LIBRARY_DIRS'] + tf_lib.library_dirs
tf_lib.libraries = options['LIBRARIES'] + tf_lib.libraries
cc_compiler = cxx_compiler = None
if not sys.platform.startswith('linux'):
raise EnvironmentError("Only Linux Systems are supported")
if not os.getenv('CC') and not os.getenv('CXX'):
# Determine g++ version compatible with this TensorFlow installation
import tensorflow as tf
if hasattr(tf, 'version'):
# Since TensorFlow 1.13.0
tf_compiler_version = LooseVersion(tf.version.COMPILER_VERSION)
else:
tf_compiler_version = LooseVersion(tf.COMPILER_VERSION)
if tf_compiler_version.version[0] == 4:
# g++ 4.x is ABI-incompatible with g++ 5.x+ due to std::function change
# See: https://github.com/tensorflow/tensorflow/issues/27067
maximum_compiler_version = LooseVersion('5')
else:
maximum_compiler_version = LooseVersion('999')
# Find the compatible compiler of the highest version
compiler_version = LooseVersion('0')
for candidate_cxx_compiler, candidate_compiler_version in find_gxx_compiler_in_path():
if tf_compiler_version <= candidate_compiler_version < maximum_compiler_version:
candidate_cc_compiler = find_matching_gcc_compiler_in_path(candidate_compiler_version)
if candidate_cc_compiler and candidate_compiler_version > compiler_version:
cc_compiler = candidate_cc_compiler
cxx_compiler = candidate_cxx_compiler
compiler_version = candidate_compiler_version
else:
print("===========================================================================================")
print(
'INFO: Compiler %s (version %s) is not usable for this TensorFlow '
                    'installation. Requires g++ (version >=%s, <%s).' %
(candidate_cxx_compiler, candidate_compiler_version, tf_compiler_version, maximum_compiler_version)
)
print("===========================================================================================")
if cc_compiler:
print("===========================================================================================")
print('INFO: Compilers %s and %s (version %s) selected for TensorFlow plugin build.' % (
cc_compiler, cxx_compiler, compiler_version
))
print("===========================================================================================")
else:
raise DistutilsPlatformError(
'Could not find compiler compatible with this TensorFlow installation.\n'
'Please check the NVTX-Plugins Github Repository for recommended compiler versions.\n'
'To force a specific compiler version, set CC and CXX environment variables.'
)
cflags, cppflags, ldshared = remove_offensive_gcc_compiler_options(compiler_version)
try:
with env(CC=cc_compiler, CXX=cxx_compiler, CFLAGS=cflags, CPPFLAGS=cppflags, LDSHARED=ldshared):
customize_compiler(build_ext.compiler)
try:
build_ext.compiler.compiler.remove("-DNDEBUG")
except (AttributeError, ValueError):
pass
try:
build_ext.compiler.compiler_so.remove("-DNDEBUG")
except (AttributeError, ValueError):
pass
try:
build_ext.compiler.compiler_so.remove("-Wstrict-prototypes")
except (AttributeError, ValueError):
pass
try:
build_ext.compiler.linker_so.remove("-Wl,-O1")
except (AttributeError, ValueError):
pass
build_ext.build_extension(tf_lib)
finally:
# Revert to the default compiler settings
customize_compiler(build_ext.compiler)
@contextmanager
def env(**kwargs):
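    """Temporarily set environment variables for the duration of the ``with`` block.
    Arguments whose value is ``None`` are ignored; previous values (or their absence)
    are restored when the block exits.
    """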
# ignore args with None values
for k in list(kwargs.keys()):
if kwargs[k] is None:
del kwargs[k]
# backup environment
backup = {}
for k in kwargs.keys():
backup[k] = os.environ.get(k)
# set new values & yield
for k, v in kwargs.items():
os.environ[k] = v
try:
yield
finally:
# restore environment
for k in kwargs.keys():
if backup[k] is not None:
os.environ[k] = backup[k]
else:
del os.environ[k]
def get_tf_include_dirs():
import tensorflow as tf
tf_inc = tf.sysconfig.get_include()
return [tf_inc, '%s/external/nsync/public' % tf_inc]
def get_tf_lib_dirs():
import tensorflow as tf
tf_lib = tf.sysconfig.get_lib()
return [tf_lib]
def test_compile(build_ext, name, code, libraries=None, include_dirs=None, library_dirs=None, macros=None,
extra_compile_preargs=None, extra_link_preargs=None):
test_compile_dir = os.path.join(build_ext.build_temp, 'test_compile')
if not os.path.exists(test_compile_dir):
os.makedirs(test_compile_dir)
source_file = os.path.join(test_compile_dir, '%s.cc' % name)
with open(source_file, 'w') as f:
f.write(code)
compiler = build_ext.compiler
[object_file] = compiler.object_filenames([source_file])
shared_object_file = compiler.shared_object_filename(name, output_dir=test_compile_dir)
try:
build_ext.compiler.compiler_so.remove("-Wstrict-prototypes")
except (AttributeError, ValueError):
pass
compiler.compile(
[source_file],
extra_preargs=extra_compile_preargs,
include_dirs=include_dirs,
macros=macros
)
compiler.link_shared_object(
[object_file],
shared_object_file,
libraries=libraries,
library_dirs=library_dirs,
extra_preargs=extra_link_preargs
)
return shared_object_file
def get_tf_libs(build_ext, lib_dirs, cpp_flags):
for tf_libs in [['tensorflow_framework'], []]:
try:
lib_file = test_compile(
build_ext,
'test_tensorflow_libs',
library_dirs=lib_dirs,
libraries=tf_libs,
extra_compile_preargs=cpp_flags,
code=textwrap.dedent('''\
void test() {
}
'''))
from tensorflow.python.framework import load_library
load_library.load_op_library(lib_file)
return tf_libs
except (CompileError, LinkError):
last_err = 'Unable to determine -l link flags to use with TensorFlow (see error above).'
except Exception:
last_err = 'Unable to determine -l link flags to use with TensorFlow. Last error:\n\n%s' % \
traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_abi(build_ext, include_dirs, lib_dirs, libs, cpp_flags):
cxx11_abi_macro = '_GLIBCXX_USE_CXX11_ABI'
for cxx11_abi in ['0', '1']:
try:
lib_file = test_compile(build_ext, 'test_tensorflow_abi',
macros=[(cxx11_abi_macro, cxx11_abi)],
include_dirs=include_dirs,
library_dirs=lib_dirs,
libraries=libs,
extra_compile_preargs=cpp_flags,
code=textwrap.dedent('''\
#include <string>
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
void test() {
auto ignore = tensorflow::strings::StrCat("a", "b");
}
''')
)
from tensorflow.python.framework import load_library
load_library.load_op_library(lib_file)
return cxx11_abi_macro, cxx11_abi
except (CompileError, LinkError):
last_err = 'Unable to determine CXX11 ABI to use with TensorFlow (see error above).'
except Exception:
last_err = 'Unable to determine CXX11 ABI to use with TensorFlow. ' \
'Last error:\n\n%s' % traceback.format_exc()
raise DistutilsPlatformError(last_err)
def get_tf_flags(build_ext, cpp_flags):
import tensorflow as tf
try:
return tf.sysconfig.get_compile_flags(), tf.sysconfig.get_link_flags()
except AttributeError:
# fallback to the previous logic
tf_include_dirs = get_tf_include_dirs()
tf_lib_dirs = get_tf_lib_dirs()
tf_libs = get_tf_libs(build_ext, tf_lib_dirs, cpp_flags)
tf_abi = get_tf_abi(build_ext, tf_include_dirs,
tf_lib_dirs, tf_libs, cpp_flags)
compile_flags = []
for include_dir in tf_include_dirs:
compile_flags.append('-I%s' % include_dir)
if tf_abi:
compile_flags.append('-D%s=%s' % tf_abi)
link_flags = []
for lib_dir in tf_lib_dirs:
link_flags.append('-L%s' % lib_dir)
for lib in tf_libs:
link_flags.append('-l%s' % lib)
return compile_flags, link_flags
def determine_gcc_version(compiler):
try:
compiler_macros = subprocess.check_output(
'%s -dM -E - </dev/null' % compiler,
shell=True,
universal_newlines=True
).split('\n')
for m in compiler_macros:
version_match = re.match('^#define __VERSION__ "(.*?)"$', m)
if version_match:
return LooseVersion(version_match.group(1))
print("===========================================================================================")
print('INFO: Unable to determine version of the gcc compiler %s.' % compiler)
print("===========================================================================================")
except subprocess.CalledProcessError:
print("===========================================================================================")
print('INFO: Unable to determine version of the gcc compiler %s.\n%s' % (compiler, traceback.format_exc()))
print("===========================================================================================")
return None
def determine_nvcc_version(compiler):
try:
nvcc_macros = [
_l for _l in subprocess.check_output(
'%s --version </dev/null' % compiler,
shell=True,
universal_newlines=True
).split('\n')
if _l != ''
][-1]
nvcc_version = nvcc_macros.split(", ")[-1][1:]
return LooseVersion(nvcc_version)
except subprocess.CalledProcessError:
print("===========================================================================================")
print('INFO: Unable to determine version of the nvcc compiler %s.\n%s' % (compiler, traceback.format_exc()))
print("===========================================================================================")
return None
def enumerate_binaries_in_path():
for path_dir in os.getenv('PATH', '').split(':'):
if os.path.isdir(path_dir):
for bin_file in sorted(os.listdir(path_dir)):
yield path_dir, bin_file
def find_matching_gcc_compiler_in_path(gxx_compiler_version):
for path_dir, bin_file in enumerate_binaries_in_path():
if re.match('^gcc(?:-\\d+(?:\\.\\d+)*)?$', bin_file):
# gcc, or gcc-7, gcc-4.9, or gcc-4.8.5
compiler = os.path.join(path_dir, bin_file)
compiler_version = determine_gcc_version(compiler)
if compiler_version == gxx_compiler_version:
return compiler
print("===========================================================================================")
print('INFO: Unable to find gcc compiler (version %s).' % gxx_compiler_version)
print("===========================================================================================")
return None
def find_gxx_compiler_in_path():
compilers = []
for path_dir, bin_file in enumerate_binaries_in_path():
if re.match('^g\\+\\+(?:-\\d+(?:\\.\\d+)*)?$', bin_file):
# g++, or g++-7, g++-4.9, or g++-4.8.5
compiler = os.path.join(path_dir, bin_file)
compiler_version = determine_gcc_version(compiler)
if compiler_version:
compilers.append((compiler, compiler_version))
if not compilers:
print("===========================================================================================")
print('INFO: Unable to find any gxx compiler.')
print("===========================================================================================")
return compilers
def find_nvcc_compiler_in_path():
for path_dir, bin_file in enumerate_binaries_in_path():
if bin_file == 'nvcc':
compiler = os.path.join(path_dir, bin_file)
compiler_version = determine_nvcc_version(compiler)
return compiler, compiler_version
else:
print("===========================================================================================")
print('INFO: Unable to find any nvcc compiler.')
print("===========================================================================================")
| nvtx-plugins-master | setup_utils.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| nvtx-plugins-master | nvtx_plugins/python/nvtx/__init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/__init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modifications copyright (C) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
#
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sysconfig
from tensorflow.python.framework import load_library as _load_library
from tensorflow.python.platform import resource_loader
__all__ = ["get_ext_suffix", "load_library"]
# Source: https://github.com/horovod/horovod/blob/abc3d88544/horovod/tensorflow/mpi_ops.py#L33
def load_library(name):
"""Loads a .so file containing the specified operators.
Args:
name: The name of the .so file to load.
Raises:
      NotFoundError: if the .so file could not be loaded.
"""
filename = resource_loader.get_path_to_datafile(name)
library = _load_library.load_op_library(filename)
return library
def get_ext_suffix():
"""Determine library extension for various versions of Python."""
ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
if ext_suffix:
return ext_suffix
ext_suffix = sysconfig.get_config_var('SO')
if ext_suffix:
return ext_suffix
return '.so'
def get_extension_full_path(pkg_path, *args):
assert len(args) >= 1
dir_path = os.path.join(os.path.dirname(pkg_path), *args[:-1])
full_path = os.path.join(dir_path, args[-1] + get_ext_suffix())
return full_path
def check_extension(ext_name, ext_env_var, pkg_path, *args):
full_path = get_extension_full_path(pkg_path, *args)
if not os.path.exists(full_path):
raise ImportError(
'NVTX-Plugins %s has not been built. If this is not expected, please reinstall to debug the build error' %
ext_name
) | nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/ext_utils.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAJOR = 0
MINOR = 1
PATCH = 8
PRE_RELEASE = ''
# Use the following formatting: (major, minor, patch, pre-release)
VERSION = (MAJOR, MINOR, PATCH, PRE_RELEASE)
__shortversion__ = '.'.join(map(str, VERSION[:3]))
__version__ = '.'.join(map(str, VERSION[:3])) + ''.join(VERSION[3:])
__package_name__ = 'nvtx-plugins'
__contact_names__ = 'Ahmad Kiswani, Roni Forte, Jonathan Dekhtiar, Yaki Tebeka'
__contact_emails__ = '[email protected], [email protected], [email protected], [email protected]'
__homepage__ = 'https://github.com/NVIDIA/nvtx-plugins/'
__repository_url__ = 'https://github.com/NVIDIA/nvtx-plugins/'
__download_url__ = 'https://github.com/NVIDIA/nvtx-plugins/'
__description__ = 'Python bindings for NVTX'
__license__ = 'Apache2'
__keywords__ = 'deep learning, machine learning, gpu, nvtx, nvidia, tensorflow, tf'
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/package_info.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
# TODO(ahmadki): support domain names
# TODO(ahmadki): move nvtx functionality to nvtx.plugins module ?
class BaseCallback(object):
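    """Shared helper that opens and closes NVTX ranges through ``libnvToolsExt``.
    ``marker_ids`` maps each marker message to a stack of active range ids, so
    repeated or nested ranges with the same message are closed in LIFO order.
    """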
def __init__(self):
# TODO(ahmadki): try except OSError
self.libnvtx = ctypes.cdll.LoadLibrary('libnvToolsExt.so')
self.marker_ids = {}
def open_marker(self, message):
if self.marker_ids.get(message, None) is None:
self.marker_ids[message] = []
marker = self.libnvtx.nvtxRangeStartW(message)
self.marker_ids[message].append(marker)
def close_marker(self, message):
if self.marker_ids.get(message, None) is not None:
self.libnvtx.nvtxRangeEnd(self.marker_ids[message].pop())
if len(self.marker_ids[message]) == 0:
del self.marker_ids[message]
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/base_callbacks.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NVTX Plugins"""
from .package_info import __shortversion__
from .package_info import __version__
from .package_info import __package_name__
from .package_info import __contact_names__
from .package_info import __contact_emails__
from .package_info import __homepage__
from .package_info import __repository_url__
from .package_info import __download_url__
from .package_info import __description__
from .package_info import __license__
from .package_info import __keywords__
import nvtx.plugins.tf.ops
import nvtx.plugins.tf.estimator
import nvtx.plugins.tf.keras
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/__init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wrapt
import tensorflow as tf
from tensorflow.python.framework import ops
from nvtx.plugins.tf.ext_utils import load_library
from nvtx.plugins.tf.ext_utils import get_ext_suffix
__all__ = ['nvtx_tf_ops', 'start', 'end', 'trace']
nvtx_tf_ops = load_library('lib/nvtx_ops' + get_ext_suffix())
def _maybe_convert_list_to_tensor(inputs):
inputs_were_processed = False
if isinstance(inputs, (list, tuple)) and \
all([isinstance(x, tf.Tensor) for x in inputs]):
inputs = tf.stack(inputs, axis=0, name="nvtx_trace_inputs")
inputs_were_processed = True
assert isinstance(inputs, tf.Tensor)
return inputs, inputs_were_processed
@ops.RegisterGradient('NvtxStart')
def _nvtx_start_grad(op, grad, marker_id, domain_handle):
# grad_message and grad_domain_name are not used
if not isinstance(marker_id, tf.Tensor) and marker_id is None:
raise RuntimeError('Error in nvtx range %s. '
'Make sure all nvtx ranges are closed' % op.name)
grad, null_grad = nvtx_tf_ops.nvtx_end(inputs=grad,
marker_id=marker_id, domain_handle=domain_handle,
grad_message=op.inputs[2], grad_domain_name=op.inputs[3])
return [grad, null_grad, None, None]
@ops.RegisterGradient('NvtxEnd')
def _nvtx_end_grad(op, grad, null_grad):
grad, marker_id, domain_handle = nvtx_tf_ops.nvtx_start(
inputs=grad, null_input=1.,
message=op.inputs[3], domain_name=op.inputs[4])
return [grad, marker_id, domain_handle, None, None]
def start(inputs, message, domain_name=None,
grad_message=None, grad_domain_name=None,
trainable=False, enabled=True, name=None):
"""An identity operation with a side effect of opening an NVTX marker.
Note:
The :func:`ops.start <start>` and :func:`ops.end <end>` operations
must be used in pairs.
Example:
.. highlight:: python
.. code-block:: python
x, nvtx_context = nvtx.plugins.tf.ops.start(x, message='Dense 1-3',
domain_name='Forward', grad_domain_name='Gradient')
x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_1')
x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')
x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_3')
x = nvtx.plugins.tf.ops.end(x, nvtx_context)
Arguments:
inputs: A ``Tensor`` object that is passed to ``output``.
message: A ``string`` message to be associated with this marker.
domain_name: An optional ``string`` domain name to be associated with
this marker. If not provided the default NVTX domain will be used.
grad_message: An optional ``string`` message to be associated with
the op gradient. If not provided ``message`` will be used.
grad_domain_name: An optional ``string`` domain name to be associated
with this marker gradient. If not provided ``domain_name`` will
be used.
trainable: ``bool``, if ``True`` will make this op
trainable. Used when this is the first operation in the graph to
prevent an open ended marker during gradient calculation.
enabled: ``bool``, if ``False`` the nvtx marker will be disabled.
name: An optional `string` name for the operation.
Returns:
``tuple``:
- output: The inputs ``Tensor``.
- nvtx_context: ``list``, NVTX context associated with this op and passed to :func:`ops.end <end>`. ``None`` if ``enabled=False``.
"""
if not enabled:
return inputs, None
domain_name = domain_name or ''
grad_message = grad_message or message
grad_domain_name = grad_domain_name or domain_name or ''
null_input = 1.
if trainable:
with tf.compat.v1.variable_scope("nvtx", reuse=tf.compat.v1.AUTO_REUSE):
null_input = tf.compat.v1.get_variable('null_input', shape=(),
dtype=tf.float32,
initializer=tf.zeros_initializer,
trainable=True)
inputs, should_unstack = _maybe_convert_list_to_tensor(inputs)
inputs, marker_id, domain_handle = nvtx_tf_ops.nvtx_start(
inputs=inputs, null_input=null_input,
message=message, domain_name=domain_name, name=name)
if should_unstack:
inputs = tf.unstack(inputs, axis=0)
return inputs, (marker_id, domain_handle, grad_message, grad_domain_name)
def end(inputs, nvtx_context, name=None):
"""An identity operation with a side effect of closing an NVTX marker.
Note:
The :func:`ops.start <start>` and :func:`ops.end <end>` operations
must be used in pairs.
Example:
.. highlight:: python
.. code-block:: python
x, nvtx_context = nvtx.plugins.tf.ops.start(x, message='Dense 1-3',
domain_name='Forward', grad_domain_name='Gradient')
x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_1')
x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')
x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_3')
x = nvtx.plugins.tf.ops.end(x, nvtx_context)
Arguments:
inputs: A ``Tensor`` object that will be passed to ``output``.
nvtx_context: ``list``, NVTX context received from
:func:`ops.start <start>` If `None` the marker will be disabled.
name: An optional ``string`` name for the operation.
Returns:
The inputs ``Tensor``.
"""
if nvtx_context is None:
return inputs
marker_id, domain_handle, grad_message, grad_domain_name = nvtx_context
inputs, should_unstack = _maybe_convert_list_to_tensor(inputs)
output, null_output = nvtx_tf_ops.nvtx_end(inputs=inputs,
marker_id=marker_id, domain_handle=domain_handle,
grad_message=grad_message, grad_domain_name=grad_domain_name, name=name
)
if should_unstack:
output = tf.unstack(output, axis=0)
return output
def trace(message, domain_name=None,
grad_message=None, grad_domain_name=None,
trainable=False, enabled=True, name=None):
"""An identity function decorator with a side effect of adding NVTX marker.
Note:
The decorator expects the wrapped function to take the input ``Tensor``
as the first argument or to be named ``inputs``, and to return a single
``Tensor``.
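    Example:
        .. highlight:: python
        .. code-block:: python
            # Illustrative sketch: message, domain, and layer names are placeholders.
            @nvtx.plugins.tf.ops.trace(message='Dense Block', domain_name='Forward',
                                       grad_domain_name='Gradient')
            def dense_block(x):
                x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_1')
                x = tf.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')
                return x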
Arguments:
message: A ``string`` message to be associated with this marker.
domain_name: An optional ``string`` domain name to be associated with
this marker. If not provided the default NVTX domain will be used.
grad_message: An optional ``string`` message to be associated with
the op gradient. If not provided `message` will be used.
grad_domain_name: An optional ``string`` domain name to be associated
with this marker gradient. If not provided ``domain_name`` will
be used.
trainable: ``bool``, if ``True`` will make this op
trainable. Used when this is the first operation in the graph to
prevent an open ended marker during gradient calculation.
enabled: ``bool``, if ``False`` the nvtx marker will be disabled.
name: An optional ``string`` name for the operation.
"""
@wrapt.decorator
def func_wrapper(wrapped, instance, args, kwargs):
try:
inputs = kwargs["inputs"] if "inputs" in kwargs else args[0]
except:
raise ValueError("The input tensor must be the first argument"
" or named `inputs`")
inputs, should_unstack = _maybe_convert_list_to_tensor(inputs)
start_name = '{}_start'.format(name) if name else None
end_name = '{}_end'.format(name) if name else None
inputs, nvtx_context = start(inputs=inputs,
message=message, domain_name=domain_name,
grad_message=grad_message, grad_domain_name=grad_domain_name,
enabled=enabled, trainable=trainable, name=start_name
)
if should_unstack:
inputs = tf.unstack(inputs, axis=0)
if "inputs" in kwargs:
kwargs["inputs"] = inputs
else:
args = [inputs] + list(args[1:])
output = wrapped(*args, **kwargs)
output = end(inputs=output, nvtx_context=nvtx_context, name=end_name)
return output
return func_wrapper
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/ops.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from nvtx.plugins.tf.base_callbacks import BaseCallback
class NVTXHook(BaseCallback, tf.estimator.SessionRunHook):
"""Hook that adds NVTX markers to a TensorFlow session.
Arguments:
skip_n_steps: ``int``, skips adding markers for the first N
``session.run()`` calls.
name: ``string``, a marker name for the session.
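    Example:
        .. highlight:: python
        .. code-block:: python
            # Illustrative sketch: ``train_op`` is a placeholder for your training op.
            nvtx_hook = NVTXHook(skip_n_steps=1, name='Train')
            with tf.compat.v1.train.MonitoredSession(hooks=[nvtx_hook]) as sess:
                sess.run(train_op)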
"""
def __init__(self, skip_n_steps=0, name=None):
super(NVTXHook, self).__init__()
self.name = name
self.step_counter = 0
self.skip_n_steps = skip_n_steps
self.iteration_message = 'step {iter}'
def begin(self):
self.step_counter = 0
if self.name:
self.open_marker(self.name)
def before_run(self, run_context):
if self.step_counter >= self.skip_n_steps:
self.open_marker(self.iteration_message.format(iter=self.step_counter))
def after_run(self, run_context, run_values):
if self.step_counter >= self.skip_n_steps:
self.close_marker(self.iteration_message.format(iter=self.step_counter))
self.step_counter += 1
def end(self, session):
if self.name:
self.close_marker(self.name)
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/estimator.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/keras/__init__.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras callback.
"""
import tensorflow as tf
from nvtx.plugins.tf.base_callbacks import BaseCallback
class NVTXCallback(BaseCallback, tf.keras.callbacks.Callback):
"""Callback that adds NVTX markers to a keras session.
"""
def __init__(self, **kwargs):
super(NVTXCallback, self).__init__(**kwargs)
self.epoch_message = 'epoch {epoch}'
self.batch_message = 'batch {batch}'
def on_epoch_begin(self, epoch, logs=None):
self.open_marker(self.epoch_message.format(epoch=epoch))
def on_epoch_end(self, epoch, logs=None):
self.close_marker(self.epoch_message.format(epoch=epoch))
def on_train_batch_begin(self, batch, logs=None):
self.open_marker(self.batch_message.format(batch=batch))
def on_train_batch_end(self, batch, logs=None):
self.close_marker(self.batch_message.format(batch=batch))
def on_test_batch_begin(self, batch, logs=None):
self.open_marker(self.batch_message.format(batch=batch))
def on_test_batch_end(self, batch, logs=None):
self.close_marker(self.batch_message.format(batch=batch))
def on_predict_batch_begin(self, batch, logs=None):
self.open_marker(self.batch_message.format(batch=batch))
def on_predict_batch_end(self, batch, logs=None):
self.close_marker(self.batch_message.format(batch=batch))
def on_train_begin(self, logs=None):
self.open_marker('Train')
def on_train_end(self, logs=None):
self.close_marker('Train')
def on_test_begin(self, logs=None):
self.open_marker('Test')
def on_test_end(self, logs=None):
self.close_marker('Test')
def on_predict_begin(self, logs=None):
self.open_marker('Predict')
def on_predict_end(self, logs=None):
self.close_marker('Predict')
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/keras/callbacks.py |
# ! /usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keras layers.
"""
from tensorflow.keras.layers import Layer
from nvtx.plugins.tf.ops import nvtx_tf_ops
class NVTXStart(Layer):
"""An identity layer with a side effect of opening an NVTX marker.
Note:
The :func:`NVTXStart <NVTXStart>` and :func:`NVTXEnd <NVTXEnd>` layers
must be used in pairs.
Example:
.. highlight:: python
.. code-block:: python
x, marker_id, domain_id = NVTXStart(message='Dense',
domain_name='forward')(x)
x = Dense(1024, activation='relu')(x)
x = NVTXEnd(grad_message='Dense grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
Arguments:
message: A ``string`` message to be associated with this layer.
domain_name: An optional ``string`` domain name to be associated with
this layer. If not provided the default NVTX domain will be used.
trainable: ``bool``, if ``True`` will make this layer trainable.
Used when this is the first layer in the graph to
prevent an open ended marker during gradient calculation.
name: An optional ``string`` name for the layer.
Input shape:
A ``Tensor`` object that is passed to ``output``.
Output shape:
``list`` of length 3:
- output: The inputs ``Tensor``.
- marker_id: ``int64 Tensor``, sent to :func:`NVTXEnd <NVTXEnd>`.
- domain_handle: ``int64 Tensor``. sent to :func:`NVTXEnd <NVTXEnd>`.
"""
def __init__(self, message, domain_name=None,
trainable=False, **kwargs):
super(NVTXStart, self).__init__(**kwargs)
self.message = message
self.domain_name = domain_name or ''
self.trainable = trainable
def build(self, input_shape):
self.null_input = 1.
if self.trainable:
self.null_input = self.add_weight(name='null_input', shape=(),
trainable=True, dtype='float32')
super(NVTXStart, self).build(input_shape)
def call(self, x):
x, marker_id, domain_handle = nvtx_tf_ops.nvtx_start(inputs=x,
message=self.message, domain_name=self.domain_name,
null_input=self.null_input)
return [x, marker_id, domain_handle]
def compute_output_shape(self, input_shape):
return [input_shape, (), ()]
class NVTXEnd(Layer):
"""An identity layer with a side effect of closing an NVTX marker.
Note:
The :func:`NVTXStart <NVTXStart>` and :func:`NVTXEnd <NVTXEnd>` layers
must be used in pairs.
Example:
.. highlight:: python
.. code-block:: python
x, marker_id, domain_id = NVTXStart(message='Dense',
domain_name='forward')(x)
x = Dense(1024, activation='relu')(x)
x = NVTXEnd(grad_message='Dense grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
Arguments:
grad_message: An optional ``string`` message to be associated with
the op gradient. If not provided an empty message will be used.
grad_domain_name: An optional ``string`` domain name to be associated
with this marker gradient. If not provided the default domain name
will be used.
name: An optional ``string`` name for the layer.
Input shape:
``list`` of length 3:
- inputs: The input ``Tensor``.
- marker_id: ``int64 Tensor`` from :func:`NVTXStart <NVTXStart>`.
- domain_handle: ``int64 Tensor`` from :func:`NVTXStart <NVTXStart>`.
Output shape:
A ``Tensor`` with ``inputs`` shape.
"""
def __init__(self, grad_message=None, grad_domain_name=None, **kwargs):
super(NVTXEnd, self).__init__(**kwargs)
self.grad_message = grad_message or ''
self.grad_domain_name = grad_domain_name or ''
def build(self, input_shape):
super(NVTXEnd, self).build(input_shape)
def call(self, x):
assert isinstance(x, list) and (len(x) == 3)
inputs, marker_id, domain_handle = x
output, _ = nvtx_tf_ops.nvtx_end(inputs=inputs, marker_id=marker_id,
domain_handle=domain_handle,
grad_message=self.grad_message,
grad_domain_name=self.grad_domain_name)
return output
def compute_output_shape(self, input_shape):
assert isinstance(input_shape, list)
return [input_shape[0], ()]
| nvtx-plugins-master | nvtx_plugins/python/nvtx/plugins/tf/keras/layers.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pytest
from tests.base import CustomTestCase
class TensorflowSessionTestCase(CustomTestCase):
JOB_NAME = "tf_session_example"
def test_execution(self):
self.assertTrue(self.run_command(TensorflowSessionTestCase.JOB_NAME))
@pytest.mark.run(after='test_execution')
def test_report_is_compliant(self):
import nvtx
import sys
# ['/usr/local/lib/python3.6/dist-packages/nvtx']
print(nvtx.__path__._path, file=sys.stderr)
reference_count = -1
range_names = [
"Dense 1",
"Dense 1 grad",
"Dense 2",
"Dense 2 grad",
"Dense 3",
"Dense 3 grad",
"Dense 4",
"Dense 4 grad",
"Dense 5",
"Dense 5 grad",
"Dense Block",
"Dense Block grad"
]
with self.open_db(TensorflowSessionTestCase.JOB_NAME) as conn:
for range in range_names:
try:
count, avg_exec_time = self.query_report(
conn,
range_name=range
)
if reference_count < 0:
# At least 500 steps should be processed
self.assertGreater(count, 500)
reference_count = count
continue
# The profile could start & end in the middle of one step.
                    # Hence we check for a range instead of a strict equality.
self.assertGreaterEqual(count, reference_count - 1)
self.assertLessEqual(count, reference_count + 1)
except AssertionError as e:
raise AssertionError("Issue with range: %s" % range) from e
count, _ = self.query_report(
conn,
range_name="Train",
filter_negative_start=False
)
self.assertEqual(count, 1)
count, _ = self.query_report(conn, range_name="step %")
self.assertGreaterEqual(count, reference_count - 1)
self.assertLessEqual(count, reference_count + 1)
if __name__ == '__main__':
unittest.main()
| nvtx-plugins-master | tests/test_tensorflow_session.py |
| nvtx-plugins-master | tests/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import pytest
from tests.base import CustomTestCase
class KerasTestCase(CustomTestCase):
JOB_NAME = "keras_example"
def test_execution(self):
self.assertTrue(self.run_command(KerasTestCase.JOB_NAME))
@pytest.mark.run(after='test_execution')
def test_report_is_compliant(self):
import nvtx
import sys
# ['/usr/local/lib/python3.6/dist-packages/nvtx']
print(nvtx.__path__._path, file=sys.stderr)
reference_count = -1
range_names = [
"Dense 1",
"Dense 1 grad",
"Dense 2",
"Dense 2 grad",
"Dense 3",
"Dense 3 grad",
"Dense 4",
"Dense 4 grad",
"Dense 5",
"Dense 5 grad"
]
with self.open_db(KerasTestCase.JOB_NAME) as conn:
for range in range_names:
count, avg_exec_time = self.query_report(conn, range_name=range)
if reference_count < 0:
# At least 500 steps should be processed
self.assertGreater(count, 500)
reference_count = count
else:
# The profile could start & end in the middle of one step.
                    # Hence we check for a range instead of a strict equality.
self.assertGreaterEqual(count, reference_count - 1)
self.assertLessEqual(count, reference_count + 1)
count, _ = self.query_report(
conn,
range_name="Train",
filter_negative_start=False
)
self.assertEqual(count, 1)
count, _ = self.query_report(
conn,
range_name="epoch 0",
filter_negative_start=False
)
self.assertEqual(count, 1)
count, _ = self.query_report(conn, range_name="batch %")
self.assertGreaterEqual(count, reference_count - 1)
self.assertLessEqual(count, reference_count + 1)
if __name__ == '__main__':
unittest.main()
| nvtx-plugins-master | tests/test_keras.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sqlite3
import subprocess
import sys
import unittest
from abc import abstractmethod
from abc import ABCMeta
from contextlib import contextmanager
__all__ = [
'CustomTestCase',
]
SUCCESS_CODE = 0
SIGKILL_CODE = 9
class CustomTestCase(unittest.TestCase, metaclass=ABCMeta):
@abstractmethod
def JOB_NAME(self):
pass
def run_command(self, job_name):
for ext in ["qdrep", "sqlite"]:
try:
os.remove("examples/%s.%s" % (job_name, ext))
except FileNotFoundError:
pass
def exec_cmd(cmd):
            # Pipe stdout/stderr so the error message below can include the command
            # output; without pipes, communicate() returns (None, None).
            command_proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            stdout, stderr = command_proc.communicate()
            return_code = command_proc.returncode
            if return_code not in [SUCCESS_CODE, SIGKILL_CODE]:
                sys.tracebacklimit = 0
raise RuntimeError(
"\n##################################################\n"
"[*] STDOUT:{error_stdout}\n"
"[*] STERR:{error_stderr}\n"
"[*] command launched: `{command}`\n"
"##################################################\n".format(
error_stdout=stdout.decode("utf-8"),
error_stderr=stderr.decode("utf-8"),
command=" ".join(cmd)
)
)
return True
modified_command = [
'nsys',
'profile',
'--delay=10',
'--duration=30',
'--sample=cpu',
'--trace=nvtx,cuda',
'--output=examples/%s' % job_name,
'--force-overwrite=true',
'--stop-on-exit=true',
'--kill=sigkill'
]
py_command = "python examples/%s.py" % job_name
run_command = modified_command + py_command.split(" ")
print("Command Executed: %s" % (" ".join(run_command)), file=sys.stderr)
self.assertTrue(exec_cmd(run_command))
base_path = "examples/%s." % job_name
self.assertTrue(os.path.exists(base_path + "qdrep"))
command_export = [
"nsys-exporter",
"--export-sqlite",
"--input-file=examples/%s.qdrep" % job_name,
"--output-file=examples/%s.sqlite" % job_name
]
self.assertTrue(exec_cmd(command_export))
self.assertTrue(os.path.exists(base_path + "sqlite"))
return True
@staticmethod
@contextmanager
def open_db(db_file):
""" create a database connection to the SQLite database
specified by the db_file
:param db_file: database file
:return: Connection object or None
"""
filepath = os.path.join("examples", db_file + ".sqlite")
conn = None
try:
conn = sqlite3.connect(filepath)
except Exception as e:
print(e)
yield conn
conn.close()
def query_report(self, conn, range_name, filter_negative_start=True):
filter_negative_start_query = "AND `start` > 0 "
cur = conn.cursor()
cur.execute(
"SELECT "
"count(*), "
"avg(`end` - `start`) as `avg_exec_time` "
"FROM NVTX_EVENTS "
"WHERE "
"`text` LIKE '{range_name}' "
"{filter_qry}".format(
range_name=range_name,
filter_qry=(
filter_negative_start_query
if filter_negative_start else
""
)
)
)
return cur.fetchone()
| nvtx-plugins-master | tests/base.py |
# -*- coding: utf-8 -*-
#
# Sphinx RTD theme demo documentation build configuration file, created by
# sphinx-quickstart on Sun Nov 3 11:56:36 2013.
#
# This file is executed with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath("../")) # Important
sys.path.insert(0, os.path.abspath(os.path.join("..", "nvtx_plugins"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("..", "nvtx_plugins", "python"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("..", "nvtx_plugins", "python", "nvtx"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("..", "nvtx_plugins", "python", "nvtx", "plugins"))) # Important
sys.path.insert(0, os.path.abspath(os.path.join("..", "nvtx_plugins", "python", "nvtx", "plugins", "tf"))) # Important
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.ifconfig',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon', # support numpy and google docstrings
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.httpdomain',
'recommonmark', # Markdown
]
# TODO(ahmadki): bug https://github.com/sphinx-doc/sphinx/issues/5995 prevents
# us from using mock imports with python < 3.7, thus nvtx_plugins must
# be installed to generate docs
# mock imports
autodoc_mock_imports = [
'tensorflow',
]
# Do not warn about external images (status badges in README.rst)
suppress_warnings = ['image.nonlocal_uri']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = {
'.rst': 'restructuredtext',
'.txt': 'markdown',
'.md': 'markdown',
}
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'NVTX Plugins for Deep Learning'
copyright = u'Nvidia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
from package_info import __version__
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['requirements.txt', 'venv/*']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
intersphinx_mapping = {'rtd': ('https://docs.readthedocs.io/en/latest/', None)}
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nvidia_theme'
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'display_version': True,
'project_version': __version__,
'project_name': 'NVTX Plugins for Deep Learning',
'logo_path': None,
'logo_only': True,
}
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'NVTX Plugins Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = html_theme_options["logo_path"]
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SphinxRTDthemedemodoc'
| nvtx-plugins-master | docs/conf.py |
"""Sphinx ReadTheDocs theme.
From https://github.com/ryan-roemer/sphinx-bootstrap-theme.
"""
from os import path
__version__ = '0.1.0a1'
__version_full__ = __version__
def get_html_theme_path():
"""Return list of HTML theme paths."""
cur_dir = path.abspath(path.dirname(path.dirname(__file__)))
return cur_dir
# See http://www.sphinx-doc.org/en/stable/theming.html#distribute-your-theme-as-a-python-package
def setup(app):
app.add_html_theme('sphinx_nvidia_theme', path.abspath(path.dirname(__file__)))
| nvtx-plugins-master | docs/nvidia_theme/__init__.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
import nvtx.plugins.tf as nvtx_tf
from nvtx.plugins.tf.estimator import NVTXHook
ENABLE_NVTX = True
TRAINING_STEPS = 5000
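# Yields `steps` mini-batches; at the start of every epoch the example indices
# are reshuffled and truncated to a whole number of batches.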
def batch_generator(features, labels, batch_size, steps):
dataset_len = len(labels)
idxs = list(range(dataset_len))
idxs_trunc = None
steps_per_epoch = dataset_len // batch_size
for step in range(steps):
start_idx = batch_size * (step % steps_per_epoch)
end_idx = batch_size * ((step + 1) % steps_per_epoch)
end_idx = end_idx if end_idx != 0 else (steps_per_epoch * batch_size)
if step % (steps_per_epoch) == 0:
np.random.shuffle(idxs)
idxs_trunc = idxs[0:batch_size * steps_per_epoch]
x_batch = np.array([features[j] for j in idxs_trunc[start_idx:end_idx]])
y_batch = np.array([labels[j] for j in idxs_trunc[start_idx:end_idx]])
y_batch = np.expand_dims(y_batch, axis=1)
yield x_batch, y_batch
# Option 1: use decorators
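# The trace() decorator below wraps the whole function in one NVTX range, while the
# explicit nvtx_tf.ops.start()/end() pairs inside it nest a range around each dense
# layer; `message` names the forward range and `grad_message` the backward one.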
@nvtx_tf.ops.trace(message='Dense Block', grad_message='Dense Block grad',
domain_name='Forward', grad_domain_name='Gradient',
enabled=ENABLE_NVTX, trainable=True)
def DenseBinaryClassificationNet(inputs):
x = inputs
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 1',
grad_message='Dense 1 grad', domain_name='Forward',
grad_domain_name='Gradient', trainable=True, enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_1')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 2', grad_message='Dense 2 grad',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 3', grad_message='Dense 3 grad',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 512, activation=tf.nn.relu, name='dense_3')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 4', grad_message='Dense 4 grad',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 512, activation=tf.nn.relu, name='dense_4')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 5', grad_message='Dense 5 grad',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 1, activation=None, name='dense_5')
x = nvtx_tf.ops.end(x, nvtx_context)
predictions = x
return predictions
tf.compat.v1.disable_eager_execution()
# Load Dataset
dataset = np.loadtxt('examples/pima-indians-diabetes.data.csv', delimiter=',')
features = dataset[:, 0:8]
labels = dataset[:, 8]
# tf Graph Inputs
features_plh = tf.compat.v1.placeholder('float', [None, 8])
labels_plh = tf.compat.v1.placeholder('float', [None, 1])
logits = DenseBinaryClassificationNet(inputs=features_plh)
loss = tf.math.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels_plh))
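# Note: tf.compat.v1.metrics.accuracy() returns a (value, update_op) pair, so taking
# reduce_mean over the pair also runs the update op on every sess.run call.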
acc = tf.math.reduce_mean(tf.compat.v1.metrics.accuracy(labels=labels_plh, predictions=tf.round(tf.nn.sigmoid(logits))))
optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9, use_nesterov=True).minimize(loss)
# Initialize variables. Local variables must also be initialized for tf.compat.v1.metrics.*
init_g = tf.compat.v1.global_variables_initializer()
init_l = tf.compat.v1.local_variables_initializer()
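# NVTXHook adds NVTX ranges around the monitored session's training steps (skipping
# the first one) so each iteration shows up in an Nsight Systems timeline.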
nvtx_callback = NVTXHook(skip_n_steps=1, name='Train')
# Start training
with tf.compat.v1.train.MonitoredSession(hooks=[nvtx_callback]) as sess:
sess.run([init_g, init_l])
# Run graph
for step, (x, y) in enumerate(batch_generator(features, labels, batch_size=128, steps=TRAINING_STEPS)):
_, loss_, acc_ = sess.run(
[optimizer, loss, acc],
feed_dict={features_plh: x, labels_plh: y}
)
if step % 100 == 0:
print('Step: %04d, loss=%f acc=%f' % (step, loss_, acc_))
print('\nFinal loss=%f acc=%f' % (loss_, acc_))
print('Optimization Finished!')
| nvtx-plugins-master | examples/tf_session_example.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import tensorflow as tf
import nvtx.plugins.tf as nvtx_tf
from nvtx.plugins.tf.estimator import NVTXHook
ENABLE_NVTX = True
NUM_EPOCHS = 200
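# Yields the same feature batch twice so that both model inputs receive identical
# data in this toy multi-input example.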
def batch_generator(features, labels, batch_size=128):
dataset_len = len(labels)
idxs = list(range(dataset_len))
np.random.shuffle(idxs)
for i in range(dataset_len // batch_size):
features_batch = [features[j] for j in idxs[batch_size*i:batch_size*(i+1)]]
label_batch = [labels[j] for j in idxs[batch_size*i:batch_size*(i+1)]]
label_batch = np.expand_dims(label_batch, axis=1)
yield (features_batch, features_batch), label_batch
# Option 1: use decorators
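# Unlike the single-input example, nvtx_tf.ops.start()/end() receive a tuple/list of
# tensors here, so a single NVTX range can span both branches of the network.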
@nvtx_tf.ops.trace(message='Dense Block', domain_name='Forward',
grad_domain_name='Gradient', enabled=ENABLE_NVTX, trainable=True)
def DenseBinaryClassificationNet(inputs):
x1, x2 = inputs
(x1, x2), nvtx_context = nvtx_tf.ops.start(inputs=(x1, x2), message='Dense 1',
domain_name='Forward', grad_domain_name='Gradient',
trainable=True, enabled=ENABLE_NVTX)
net1 = tf.compat.v1.layers.dense(x1, 1024, activation=tf.nn.relu, name='dense_1_1')
net2 = tf.compat.v1.layers.dense(x2, 1024, activation=tf.nn.relu, name='dense_1_2')
net1, net2 = nvtx_tf.ops.end([net1, net2], nvtx_context)
x = tf.concat([net1, net2], axis=-1)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 2',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 1024, activation=tf.nn.relu, name='dense_2')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 3',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 512, activation=tf.nn.relu, name='dense_3')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 4',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 512, activation=tf.nn.relu, name='dense_4')
x = nvtx_tf.ops.end(x, nvtx_context)
x, nvtx_context = nvtx_tf.ops.start(x, message='Dense 5',
domain_name='Forward', grad_domain_name='Gradient', enabled=ENABLE_NVTX)
x = tf.compat.v1.layers.dense(x, 1, activation=None, name='dense_5')
x = nvtx_tf.ops.end(x, nvtx_context)
predictions = x
probs = tf.sigmoid(x)
return predictions, probs
tf.compat.v1.disable_eager_execution()
# Load Dataset
dataset = np.loadtxt('examples/pima-indians-diabetes.data.csv', delimiter=',')
features = dataset[:, 0:8]
labels = dataset[:, 8]
# tf Graph Inputs
features_plh_1 = tf.compat.v1.placeholder('float', [None, 8])
features_plh_2 = tf.compat.v1.placeholder('float', [None, 8])
labels_plh = tf.compat.v1.placeholder('float', [None, 1])
logits, probs = DenseBinaryClassificationNet(inputs=(features_plh_1, features_plh_2))
loss = tf.math.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels_plh))
acc = tf.math.reduce_mean(tf.compat.v1.metrics.accuracy(labels=labels_plh, predictions=tf.round(tf.nn.sigmoid(logits))))
optimizer = tf.compat.v1.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9, use_nesterov=True).minimize(loss)
# Initialize variables. Local variables must also be initialized for tf.compat.v1.metrics.*
init_g = tf.compat.v1.global_variables_initializer()
init_l = tf.compat.v1.local_variables_initializer()
nvtx_callback = NVTXHook(skip_n_steps=1, name='Train')
# Start training
with tf.compat.v1.train.MonitoredSession(hooks=[nvtx_callback]) as sess:
sess.run([init_g, init_l])
# Run graph
for epoch in range(NUM_EPOCHS):
for (x1, x2), y in batch_generator(features, labels, batch_size=128):
optimizer_, loss_, acc_ = sess.run(
[optimizer, loss, acc],
feed_dict={
features_plh_1: x1,
features_plh_2: x2,
labels_plh: y
}
)
print('Epoch: %d. loss=%f acc=%f' % (epoch+1, loss_, acc_))
print('Optimization Finished!')
| nvtx-plugins-master | examples/tf_session_multi_ins_outs_example.py |
# -*- coding: utf-8 -*-
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
from tensorflow.keras import optimizers
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Dense
from nvtx.plugins.tf.keras.layers import NVTXStart, NVTXEnd
from nvtx.plugins.tf.keras.callbacks import NVTXCallback
TRAINING_STEPS = 5000
# load pima indians dataset
dataset = np.loadtxt('examples/pima-indians-diabetes.data.csv', delimiter=',')
features = dataset[:, 0:8]
labels = dataset[:, 8]
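# NVTXStart returns the pass-through tensor plus marker/domain ids, which NVTXEnd
# consumes to close the range; grad_message/grad_domain_name label the matching
# range emitted during the backward pass.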
def DenseBinaryClassificationNet(input_shape=(8,)):
inputs = Input(input_shape)
x = inputs
x, marker_id, domain_id = NVTXStart(message='Dense 1',
domain_name='forward',
trainable=True)(x)
x = Dense(1024, activation='relu')(x)
x = NVTXEnd(grad_message='Dense 1 grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
x, marker_id, domain_id = NVTXStart(message='Dense 2',
domain_name='forward')(x)
x = Dense(1024, activation='relu')(x)
x = NVTXEnd(grad_message='Dense 2 grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
x, marker_id, domain_id = NVTXStart(message='Dense 3',
domain_name='forward')(x)
x = Dense(512, activation='relu')(x)
x = NVTXEnd(grad_message='Dense 3 grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
x, marker_id, domain_id = NVTXStart(message='Dense 4',
domain_name='forward')(x)
x = Dense(512, activation='relu')(x)
x = NVTXEnd(grad_message='Dense 4 grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
x, marker_id, domain_id = NVTXStart(message='Dense 5',
domain_name='forward')(x)
x = Dense(1, activation='sigmoid')(x)
x = NVTXEnd(grad_message='Dense 5 grad',
grad_domain_name='backwards')([x, marker_id, domain_id])
predictions = x
model = Model(inputs=inputs, outputs=predictions)
return model
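# NVTXCallback emits NVTX ranges from the Keras callback hooks while model.fit() runs.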
nvtx_callback = NVTXCallback()
model = DenseBinaryClassificationNet()
sgd = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,
loss='binary_crossentropy',
metrics=['accuracy'])
model.fit(
features,
labels,
batch_size=128,
callbacks=[nvtx_callback],
epochs=1,
steps_per_epoch=TRAINING_STEPS
)
| nvtx-plugins-master | examples/keras_example.py |