import torch
import numpy as np
from torch.utils.data.sampler import Sampler
class DistributedSampler(Sampler):
def __init__(self, dataset, batch_size, world_size, rank):
"""
Constructor for the DistributedSampler.
:param dataset: dataset
:param batch_size: local batch size
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
self.dataset = dataset
self.world_size = world_size
self.rank = rank
self.epoch = 0
self.batch_size = batch_size
self.global_batch_size = batch_size * world_size
self.data_len = len(self.dataset)
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def distribute_batches(self, indices):
"""
Assigns batches to workers.
        Consecutive ranks receive consecutive batches.
:param indices: torch.tensor with batch indices
"""
assert len(indices) == self.num_samples
indices = indices.view(-1, self.batch_size)
indices = indices[self.rank::self.world_size].contiguous()
indices = indices.view(-1)
indices = indices.tolist()
assert len(indices) == self.num_samples // self.world_size
return indices
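    # Worked example (an illustrative note, not part of the original file):
    # with batch_size=2 and world_size=2, indices [0..7] form local batches
    # [[0,1], [2,3], [4,5], [6,7]]; rank 0 keeps [0,1] and [4,5], while
    # rank 1 keeps [2,3] and [6,7].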
def reshuffle_batches(self, indices, rng):
"""
Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator
"""
indices = indices.view(-1, self.global_batch_size)
num_batches = indices.shape[0]
order = torch.randperm(num_batches, generator=rng)
indices = indices[order, :]
indices = indices.view(-1)
return indices
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
# generate permutation
        indices = torch.randperm(self.data_len, generator=g)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# assign batches to workers
indices = self.distribute_batches(indices)
return iter(indices)
def set_epoch(self, epoch):
"""
Sets current epoch index.
Epoch index is used to seed RNG in __iter__() function.
:param epoch: index of current epoch
"""
self.epoch = epoch
def __len__(self):
return self.num_samples // self.world_size
class BucketingSampler(DistributedSampler):
def __init__(self, dataset, batch_size, num_buckets, world_size, rank):
"""
Bucketing sampler with approx. equally-sized buckets.
:param dataset: dataset
:param batch_size: local batch size
:param num_buckets: number of buckets
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
super().__init__(dataset, batch_size, world_size, rank)
self.num_buckets = num_buckets
len_ids = np.argsort([sample['duration'] for sample in dataset.samples])
self.buckets = [torch.from_numpy(t)
for t in np.array_split(len_ids, num_buckets)]
def __iter__(self):
g = torch.Generator()
g.manual_seed(self.epoch)
global_bsz = self.global_batch_size
indices = []
for bid in range(self.num_buckets):
# random shuffle within current bucket
perm = torch.randperm(len(self.buckets[bid]), generator=g)
bucket_indices = self.buckets[bid][perm]
# add samples from current bucket to indices for current epoch
indices.append(bucket_indices)
indices = torch.cat(indices)
# make indices evenly divisible by global batch size
length = len(indices) // global_bsz * global_bsz
indices = indices[:length]
assert len(indices) % self.global_batch_size == 0
# perform global reshuffle of all global batches
indices = self.reshuffle_batches(indices, g)
# distribute batches to individual workers
indices = self.distribute_batches(indices)
return iter(indices)
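# A minimal usage sketch (an illustrative addition, not part of the original
# file). It assumes a dataset exposing `samples`, a list of dicts with a
# 'duration' key, which is what BucketingSampler.__init__ reads.
if __name__ == '__main__':
    from torch.utils.data import DataLoader

    class _ToyDataset:
        """Stand-in dataset with the `samples`/'duration' layout assumed above."""
        def __init__(self, n):
            self.samples = [{'duration': float(i)} for i in range(n)]

        def __len__(self):
            return len(self.samples)

        def __getitem__(self, idx):
            return self.samples[idx]['duration']

    ds = _ToyDataset(64)
    # rank/world_size are hard-coded here; a real run would take them
    # from torch.distributed
    sampler = BucketingSampler(ds, batch_size=4, num_buckets=2,
                               world_size=2, rank=0)
    loader = DataLoader(ds, batch_size=4, sampler=sampler)
    for epoch in range(2):
        sampler.set_epoch(epoch)  # reseed the shuffle every epoch
        for batch in loader:
            pass  # training step would go here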
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/sampler.py (DeepLearningExamples-master) ---
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
from collections import OrderedDict
import torch
import torch.distributed as dist
from .metrics import word_error_rate
def print_once(msg):
if not dist.is_initialized() or dist.get_rank() == 0:
print(msg)
def add_ctc_blank(symbols):
return symbols + ['<BLANK>']
def ctc_decoder_predictions_tensor(tensor, labels):
"""
    Takes output of the greedy CTC decoder and applies the CTC decoding
    algorithm: collapses repeated symbols and removes the blank symbol.
    Args:
        tensor: model output tensor
        labels: A list of labels
    Returns:
        A list of decoded hypotheses, one per batch element
"""
blank_id = len(labels) - 1
hypotheses = []
labels_map = {i: labels[i] for i in range(len(labels))}
prediction_cpu_tensor = tensor.long().cpu()
# iterate over batch
for ind in range(prediction_cpu_tensor.shape[0]):
prediction = prediction_cpu_tensor[ind].numpy().tolist()
# CTC decoding procedure
decoded_prediction = []
        previous = blank_id  # start from the id of the blank symbol
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
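# Worked example (an illustrative note, not part of the original file): with
# labels = ['a', 'b', '<BLANK>'] (blank_id = 2), the frame-wise argmax
# sequence [2, 0, 0, 2, 1, 1] decodes to "ab": repeats are merged and blanks
# dropped. A blank between equal symbols keeps both, e.g. [0, 2, 0] -> "aa".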
def greedy_wer(preds, tgt, tgt_lens, labels):
"""
    Takes output of the greedy CTC decoder, decodes it, and computes the WER
    against the reference transcripts.
    Args:
        preds: model output tensor (greedy decoder predictions)
        tgt: tensor with reference transcripts
        tgt_lens: tensor with lengths of reference transcripts
        labels: A list of labels
    Returns:
        word error rate, an example hypothesis, and an example reference
"""
with torch.no_grad():
references = gather_transcripts([tgt], [tgt_lens], labels)
hypotheses = ctc_decoder_predictions_tensor(preds, labels)
wer, _, _ = word_error_rate(hypotheses, references)
return wer, hypotheses[0], references[0]
def gather_losses(losses_list):
return [torch.mean(torch.stack(losses_list))]
def gather_predictions(predictions_list, labels):
results = []
for prediction in predictions_list:
results += ctc_decoder_predictions_tensor(prediction, labels=labels)
return results
def gather_transcripts(transcript_list, transcript_len_list, labels):
results = []
labels_map = {i: labels[i] for i in range(len(labels))}
# iterate over workers
for txt, lens in zip(transcript_list, transcript_len_list):
for t, l in zip(txt.long().cpu(), lens.long().cpu()):
t = list(t.numpy())
results.append(''.join([labels_map[c] for c in t[:l]]))
return results
def process_evaluation_batch(tensors, global_vars, labels):
"""
    Processes results of an iteration and saves them in global_vars
    Args:
        tensors: dictionary with results of an evaluation iteration, e.g. loss, predictions, transcript, and output
        global_vars: dictionary where processed results of the iteration are saved
labels: A list of labels
"""
for kv, v in tensors.items():
if kv.startswith('loss'):
global_vars['EvalLoss'] += gather_losses(v)
elif kv.startswith('predictions'):
global_vars['preds'] += gather_predictions(v, labels)
elif kv.startswith('transcript_length'):
transcript_len_list = v
elif kv.startswith('transcript'):
transcript_list = v
elif kv.startswith('output'):
global_vars['logits'] += v
global_vars['txts'] += gather_transcripts(
transcript_list, transcript_len_list, labels)
def process_evaluation_epoch(aggregates, tag=None):
"""
    Processes results from each worker at the end of evaluation and combines them into the final result
Args:
aggregates: dictionary containing information of entire evaluation
Return:
wer: final word error rate
loss: final loss
"""
if 'losses' in aggregates:
eloss = torch.mean(torch.stack(aggregates['losses'])).item()
else:
eloss = None
hypotheses = aggregates['preds']
references = aggregates['txts']
wer, scores, num_words = word_error_rate(hypotheses, references)
multi_gpu = dist.is_initialized()
if multi_gpu:
if eloss is not None:
eloss /= dist.get_world_size()
eloss_tensor = torch.tensor(eloss).cuda()
dist.all_reduce(eloss_tensor)
eloss = eloss_tensor.item()
scores_tensor = torch.tensor(scores).cuda()
dist.all_reduce(scores_tensor)
scores = scores_tensor.item()
num_words_tensor = torch.tensor(num_words).cuda()
dist.all_reduce(num_words_tensor)
num_words = num_words_tensor.item()
wer = scores * 1.0 / num_words
return wer, eloss
def num_weights(module):
return sum(p.numel() for p in module.parameters() if p.requires_grad)
class Checkpointer(object):
def __init__(self, save_dir, model_name, keep_milestones=[100, 200, 300]):
self.save_dir = save_dir
self.keep_milestones = keep_milestones
self.model_name = model_name
tracked = [
            (int(re.search(r'epoch(\d+)_', f).group(1)), f)
for f in glob.glob(f'{save_dir}/{self.model_name}_epoch*_checkpoint.pt')]
tracked = sorted(tracked, key=lambda t: t[0])
self.tracked = OrderedDict(tracked)
def save(self, model, ema_model, optimizer, scaler, epoch, step, best_wer,
is_best=False):
"""Saves model checkpoint for inference/resuming training.
Args:
model: the model, optionally wrapped by DistributedDataParallel
ema_model: model with averaged weights, can be None
            optimizer: optimizer
            scaler: gradient scaler used for mixed-precision training
epoch (int): epoch during which the model is saved
step (int): number of steps since beginning of training
best_wer (float): lowest recorded WER on the dev set
is_best (bool, optional): set name of checkpoint to 'best'
and overwrite the previous one
"""
rank = 0
if dist.is_initialized():
dist.barrier()
rank = dist.get_rank()
if rank != 0:
return
# Checkpoint already saved
if not is_best and epoch in self.tracked:
return
unwrap_ddp = lambda model: getattr(model, 'module', model)
state = {
'epoch': epoch,
'step': step,
'best_wer': best_wer,
'state_dict': unwrap_ddp(model).state_dict(),
'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,
'optimizer': optimizer.state_dict(),
'scaler': scaler.state_dict(),
}
if is_best:
fpath = os.path.join(
self.save_dir, f"{self.model_name}_best_checkpoint.pt")
else:
fpath = os.path.join(
self.save_dir, f"{self.model_name}_epoch{epoch}_checkpoint.pt")
print_once(f"Saving {fpath}...")
torch.save(state, fpath)
if not is_best:
# Remove old checkpoints; keep milestones and the last two
self.tracked[epoch] = fpath
            for e in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
                try:
                    os.remove(self.tracked[e])
                except OSError:
                    pass
                del self.tracked[e]
    def last_checkpoint(self):
        tracked = list(self.tracked.values())
        if len(tracked) >= 1:
            try:
                torch.load(tracked[-1], map_location='cpu')
                return tracked[-1]
            except Exception:
                print_once(f'Last checkpoint {tracked[-1]} appears corrupted.')
        # Fall back to the second-to-last checkpoint if the last is unreadable
        if len(tracked) >= 2:
            return tracked[-2]
        return None
def load(self, fpath, model, ema_model, optimizer, scaler, meta):
print_once(f'Loading model from {fpath}')
checkpoint = torch.load(fpath, map_location="cpu")
unwrap_ddp = lambda model: getattr(model, 'module', model)
state_dict = checkpoint['state_dict']
unwrap_ddp(model).load_state_dict(state_dict, strict=True)
if ema_model is not None:
if checkpoint.get('ema_state_dict') is not None:
key = 'ema_state_dict'
else:
key = 'state_dict'
print_once('WARNING: EMA weights not found in the checkpoint.')
print_once('WARNING: Initializing EMA model with regular params.')
state_dict = checkpoint[key]
unwrap_ddp(ema_model).load_state_dict(state_dict, strict=True)
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
meta['start_epoch'] = checkpoint.get('epoch')
meta['best_wer'] = checkpoint.get('best_wer', meta['best_wer'])
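# A minimal save/load round-trip sketch (an illustrative addition, not part
# of the original file); the tiny model, optimizer and disabled AMP scaler
# stand in for the real training objects, and '/tmp' is an assumed save dir.
if __name__ == '__main__':
    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scaler = torch.cuda.amp.GradScaler(enabled=False)  # works without a GPU
    ckpt = Checkpointer(save_dir='/tmp', model_name='demo')
    ckpt.save(model, None, optimizer, scaler, epoch=1, step=100, best_wer=0.25)
    meta = {'best_wer': float('inf')}  # load() reads and updates these keys
    last = ckpt.last_checkpoint()
    if last is not None:
        ckpt.load(last, model, None, optimizer, scaler, meta)
        print(meta['start_epoch'], meta['best_wer'])  # -> 1 0.25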
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/helpers.py (DeepLearningExamples-master) ---
# Copyright (c) 2017 Keith Ito
""" from https://github.com/keithito/tacotron """
import re
import string
from . import cleaners
def _clean_text(text, cleaner_names, *args):
for name in cleaner_names:
        cleaner = getattr(cleaners, name, None)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text, *args)
return text
def punctuation_map(labels):
# Punctuation to remove
punctuation = string.punctuation
punctuation = punctuation.replace("+", "")
punctuation = punctuation.replace("&", "")
# TODO We might also want to consider:
# @ -> at
# # -> number, pound, hashtag
# ~ -> tilde
# _ -> underscore
# % -> percent
# If a punctuation symbol is inside our vocab, we do not remove from text
for l in labels:
punctuation = punctuation.replace(l, "")
# Turn all punctuation to whitespace
table = str.maketrans(punctuation, " " * len(punctuation))
return table
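# Example (an illustrative note, not in the original file): if the labels
# contain the apostrophe, it survives while other punctuation becomes
# whitespace:
#   table = punctuation_map([' ', "'"] + list('abcdefghijklmnopqrstuvwxyz'))
#   "don't stop!".translate(table)  ->  "don't stop "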
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/text/__init__.py (DeepLearningExamples-master) ---
# Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" from https://github.com/keithito/tacotron
Modified to add support for time and slight tweaks to _expand_number
"""
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
_time_re = re.compile(r'([0-9]{1,2}):([0-9]{2})')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
if int(m.group(0)[0]) == 0:
return _inflect.number_to_words(m.group(0), andword='', group=1)
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
    # Add check for phone numbers and other large numbers
elif num > 1000000000 and num % 10000 != 0:
return _inflect.number_to_words(num, andword='', group=1)
else:
return _inflect.number_to_words(num, andword='')
def _expand_time(m):
mins = int(m.group(2))
if mins == 0:
return _inflect.number_to_words(m.group(1))
return " ".join([_inflect.number_to_words(m.group(1)), _inflect.number_to_words(m.group(2))])
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
    # Expand times before bare numbers, so that patterns like "10:30" are
    # still intact when _time_re runs
    text = re.sub(_time_re, _expand_time, text)
    text = re.sub(_number_re, _expand_number, text)
return text
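# Expected behaviour (an illustrative note, not in the original file; exact
# wording depends on the installed `inflect` version):
#   normalize_numbers('$10.50')  ->  'ten dollars, fifty cents'
#   normalize_numbers('in 1995') ->  'in nineteen ninety-five'
#   normalize_numbers('10:30')   ->  'ten thirty'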
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/text/numbers.py (DeepLearningExamples-master) ---
# Copyright (c) 2017 Keith Ito
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify
_characters. See TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/text/symbols.py (DeepLearningExamples-master) ---
# Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" from https://github.com/keithito/tacotron
Modified to add punctuation removal
"""
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .numbers import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecoder(text)
def remove_punctuation(text, table):
text = text.translate(table)
text = re.sub(r'&', " and ", text)
text = re.sub(r'\+', " plus ", text)
return text
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text, table=None):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
if table is not None:
text = remove_punctuation(text, table)
text = collapse_whitespace(text)
return text
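# A usage sketch (an illustrative addition, not in the original file);
# `punctuation_map` lives in the package __init__ one level up:
#   from common.text import punctuation_map
#   labels = [' ', "'"] + list('abcdefghijklmnopqrstuvwxyz')
#   english_cleaners("Dr. Smith paid $10.50!", table=punctuation_map(labels))
#   ->  roughly 'doctor smith paid ten dollars fifty cents '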
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/text/cleaners.py (DeepLearningExamples-master) ---
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Sindre Sorhus <[email protected]> (https://sindresorhus.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/sindresorhus/transliterate/blob/main/replacements.js
#
replacements = [
# German umlauts
['ß', 'ss'],
['ẞ', 'Ss'],
['ä', 'ae'],
['Ä', 'Ae'],
['ö', 'oe'],
['Ö', 'Oe'],
['ü', 'ue'],
['Ü', 'Ue'],
# Latin
['À', 'A'],
['Á', 'A'],
['Â', 'A'],
['Ã', 'A'],
['Ä', 'Ae'],
['Å', 'A'],
['Æ', 'AE'],
['Ç', 'C'],
['È', 'E'],
['É', 'E'],
['Ê', 'E'],
['Ë', 'E'],
['Ì', 'I'],
['Í', 'I'],
['Î', 'I'],
['Ï', 'I'],
['Ð', 'D'],
['Ñ', 'N'],
['Ò', 'O'],
['Ó', 'O'],
['Ô', 'O'],
['Õ', 'O'],
['Ö', 'Oe'],
['Ő', 'O'],
['Ø', 'O'],
['Ù', 'U'],
['Ú', 'U'],
['Û', 'U'],
['Ü', 'Ue'],
['Ű', 'U'],
['Ý', 'Y'],
['Þ', 'TH'],
['ß', 'ss'],
['à', 'a'],
['á', 'a'],
['â', 'a'],
['ã', 'a'],
['ä', 'ae'],
['å', 'a'],
['æ', 'ae'],
['ç', 'c'],
['è', 'e'],
['é', 'e'],
['ê', 'e'],
['ë', 'e'],
['ì', 'i'],
['í', 'i'],
['î', 'i'],
['ï', 'i'],
['ð', 'd'],
['ñ', 'n'],
['ò', 'o'],
['ó', 'o'],
['ô', 'o'],
['õ', 'o'],
['ö', 'oe'],
['ő', 'o'],
['ø', 'o'],
['ù', 'u'],
['ú', 'u'],
['û', 'u'],
['ü', 'ue'],
['ű', 'u'],
['ý', 'y'],
['þ', 'th'],
['ÿ', 'y'],
['ẞ', 'SS'],
# Vietnamese
['à', 'a'],
['À', 'A'],
['á', 'a'],
['Á', 'A'],
['â', 'a'],
['Â', 'A'],
['ã', 'a'],
['Ã', 'A'],
['è', 'e'],
['È', 'E'],
['é', 'e'],
['É', 'E'],
['ê', 'e'],
['Ê', 'E'],
['ì', 'i'],
['Ì', 'I'],
['í', 'i'],
['Í', 'I'],
['ò', 'o'],
['Ò', 'O'],
['ó', 'o'],
['Ó', 'O'],
['ô', 'o'],
['Ô', 'O'],
['õ', 'o'],
['Õ', 'O'],
['ù', 'u'],
['Ù', 'U'],
['ú', 'u'],
['Ú', 'U'],
['ý', 'y'],
['Ý', 'Y'],
['ă', 'a'],
['Ă', 'A'],
['Đ', 'D'],
['đ', 'd'],
['ĩ', 'i'],
['Ĩ', 'I'],
['ũ', 'u'],
['Ũ', 'U'],
['ơ', 'o'],
['Ơ', 'O'],
['ư', 'u'],
['Ư', 'U'],
['ạ', 'a'],
['Ạ', 'A'],
['ả', 'a'],
['Ả', 'A'],
['ấ', 'a'],
['Ấ', 'A'],
['ầ', 'a'],
['Ầ', 'A'],
['ẩ', 'a'],
['Ẩ', 'A'],
['ẫ', 'a'],
['Ẫ', 'A'],
['ậ', 'a'],
['Ậ', 'A'],
['ắ', 'a'],
['Ắ', 'A'],
['ằ', 'a'],
['Ằ', 'A'],
['ẳ', 'a'],
['Ẳ', 'A'],
['ẵ', 'a'],
['Ẵ', 'A'],
['ặ', 'a'],
['Ặ', 'A'],
['ẹ', 'e'],
['Ẹ', 'E'],
['ẻ', 'e'],
['Ẻ', 'E'],
['ẽ', 'e'],
['Ẽ', 'E'],
['ế', 'e'],
['Ế', 'E'],
['ề', 'e'],
['Ề', 'E'],
['ể', 'e'],
['Ể', 'E'],
['ễ', 'e'],
['Ễ', 'E'],
['ệ', 'e'],
['Ệ', 'E'],
['ỉ', 'i'],
['Ỉ', 'I'],
['ị', 'i'],
['Ị', 'I'],
['ọ', 'o'],
['Ọ', 'O'],
['ỏ', 'o'],
['Ỏ', 'O'],
['ố', 'o'],
['Ố', 'O'],
['ồ', 'o'],
['Ồ', 'O'],
['ổ', 'o'],
['Ổ', 'O'],
['ỗ', 'o'],
['Ỗ', 'O'],
['ộ', 'o'],
['Ộ', 'O'],
['ớ', 'o'],
['Ớ', 'O'],
['ờ', 'o'],
['Ờ', 'O'],
['ở', 'o'],
['Ở', 'O'],
['ỡ', 'o'],
['Ỡ', 'O'],
['ợ', 'o'],
['Ợ', 'O'],
['ụ', 'u'],
['Ụ', 'U'],
['ủ', 'u'],
['Ủ', 'U'],
['ứ', 'u'],
['Ứ', 'U'],
['ừ', 'u'],
['Ừ', 'U'],
['ử', 'u'],
['Ử', 'U'],
['ữ', 'u'],
['Ữ', 'U'],
['ự', 'u'],
['Ự', 'U'],
['ỳ', 'y'],
['Ỳ', 'Y'],
['ỵ', 'y'],
['Ỵ', 'Y'],
['ỷ', 'y'],
['Ỷ', 'Y'],
['ỹ', 'y'],
['Ỹ', 'Y'],
# Arabic
['ء', 'e'],
['آ', 'a'],
['أ', 'a'],
['ؤ', 'w'],
['إ', 'i'],
['ئ', 'y'],
['ا', 'a'],
['ب', 'b'],
['ة', 't'],
['ت', 't'],
['ث', 'th'],
['ج', 'j'],
['ح', 'h'],
['خ', 'kh'],
['د', 'd'],
['ذ', 'dh'],
['ر', 'r'],
['ز', 'z'],
['س', 's'],
['ش', 'sh'],
['ص', 's'],
['ض', 'd'],
['ط', 't'],
['ظ', 'z'],
['ع', 'e'],
['غ', 'gh'],
['ـ', '_'],
['ف', 'f'],
['ق', 'q'],
['ك', 'k'],
['ل', 'l'],
['م', 'm'],
['ن', 'n'],
['ه', 'h'],
['و', 'w'],
['ى', 'a'],
['ي', 'y'],
['َ', 'a'],
['ُ', 'u'],
['ِ', 'i'],
['٠', '0'],
['١', '1'],
['٢', '2'],
['٣', '3'],
['٤', '4'],
['٥', '5'],
['٦', '6'],
['٧', '7'],
['٨', '8'],
['٩', '9'],
# Persian / Farsi
['چ', 'ch'],
['ک', 'k'],
['گ', 'g'],
['پ', 'p'],
['ژ', 'zh'],
['ی', 'y'],
['۰', '0'],
['۱', '1'],
['۲', '2'],
['۳', '3'],
['۴', '4'],
['۵', '5'],
['۶', '6'],
['۷', '7'],
['۸', '8'],
['۹', '9'],
# Pashto
['ټ', 'p'],
['ځ', 'z'],
['څ', 'c'],
['ډ', 'd'],
['ﺫ', 'd'],
['ﺭ', 'r'],
['ړ', 'r'],
['ﺯ', 'z'],
['ږ', 'g'],
['ښ', 'x'],
['ګ', 'g'],
['ڼ', 'n'],
['ۀ', 'e'],
['ې', 'e'],
['ۍ', 'ai'],
# Urdu
['ٹ', 't'],
['ڈ', 'd'],
['ڑ', 'r'],
['ں', 'n'],
['ہ', 'h'],
['ھ', 'h'],
['ے', 'e'],
# Russian
['А', 'A'],
['а', 'a'],
['Б', 'B'],
['б', 'b'],
['В', 'V'],
['в', 'v'],
['Г', 'G'],
['г', 'g'],
['Д', 'D'],
['д', 'd'],
['ъе', 'ye'],
['Ъе', 'Ye'],
['ъЕ', 'yE'],
['ЪЕ', 'YE'],
['Е', 'E'],
['е', 'e'],
['Ё', 'Yo'],
['ё', 'yo'],
['Ж', 'Zh'],
['ж', 'zh'],
['З', 'Z'],
['з', 'z'],
['И', 'I'],
['и', 'i'],
['ый', 'iy'],
['Ый', 'Iy'],
['ЫЙ', 'IY'],
['ыЙ', 'iY'],
['Й', 'Y'],
['й', 'y'],
['К', 'K'],
['к', 'k'],
['Л', 'L'],
['л', 'l'],
['М', 'M'],
['м', 'm'],
['Н', 'N'],
['н', 'n'],
['О', 'O'],
['о', 'o'],
['П', 'P'],
['п', 'p'],
['Р', 'R'],
['р', 'r'],
['С', 'S'],
['с', 's'],
['Т', 'T'],
['т', 't'],
['У', 'U'],
['у', 'u'],
['Ф', 'F'],
['ф', 'f'],
['Х', 'Kh'],
['х', 'kh'],
['Ц', 'Ts'],
['ц', 'ts'],
['Ч', 'Ch'],
['ч', 'ch'],
['Ш', 'Sh'],
['ш', 'sh'],
['Щ', 'Sch'],
['щ', 'sch'],
['Ъ', ''],
['ъ', ''],
['Ы', 'Y'],
['ы', 'y'],
['Ь', ''],
['ь', ''],
['Э', 'E'],
['э', 'e'],
['Ю', 'Yu'],
['ю', 'yu'],
['Я', 'Ya'],
['я', 'ya'],
# Romanian
['ă', 'a'],
['Ă', 'A'],
['ș', 's'],
['Ș', 'S'],
['ț', 't'],
['Ț', 'T'],
['ţ', 't'],
['Ţ', 'T'],
# Turkish
['ş', 's'],
['Ş', 'S'],
['ç', 'c'],
['Ç', 'C'],
['ğ', 'g'],
['Ğ', 'G'],
['ı', 'i'],
['İ', 'I'],
# Armenian
['ա', 'a'],
['Ա', 'A'],
['բ', 'b'],
['Բ', 'B'],
['գ', 'g'],
['Գ', 'G'],
['դ', 'd'],
['Դ', 'D'],
['ե', 'ye'],
['Ե', 'Ye'],
['զ', 'z'],
['Զ', 'Z'],
['է', 'e'],
['Է', 'E'],
['ը', 'y'],
['Ը', 'Y'],
['թ', 't'],
['Թ', 'T'],
['ժ', 'zh'],
['Ժ', 'Zh'],
['ի', 'i'],
['Ի', 'I'],
['լ', 'l'],
['Լ', 'L'],
['խ', 'kh'],
['Խ', 'Kh'],
['ծ', 'ts'],
['Ծ', 'Ts'],
['կ', 'k'],
['Կ', 'K'],
['հ', 'h'],
['Հ', 'H'],
['ձ', 'dz'],
['Ձ', 'Dz'],
['ղ', 'gh'],
['Ղ', 'Gh'],
['ճ', 'tch'],
['Ճ', 'Tch'],
['մ', 'm'],
['Մ', 'M'],
['յ', 'y'],
['Յ', 'Y'],
['ն', 'n'],
['Ն', 'N'],
['շ', 'sh'],
['Շ', 'Sh'],
['ո', 'vo'],
['Ո', 'Vo'],
['չ', 'ch'],
['Չ', 'Ch'],
['պ', 'p'],
['Պ', 'P'],
['ջ', 'j'],
['Ջ', 'J'],
['ռ', 'r'],
['Ռ', 'R'],
['ս', 's'],
['Ս', 'S'],
['վ', 'v'],
['Վ', 'V'],
['տ', 't'],
['Տ', 'T'],
['ր', 'r'],
['Ր', 'R'],
['ց', 'c'],
['Ց', 'C'],
['ու', 'u'],
['ՈՒ', 'U'],
['Ու', 'U'],
['փ', 'p'],
['Փ', 'P'],
['ք', 'q'],
['Ք', 'Q'],
['օ', 'o'],
['Օ', 'O'],
['ֆ', 'f'],
['Ֆ', 'F'],
['և', 'yev'],
# Georgian
['ა', 'a'],
['ბ', 'b'],
['გ', 'g'],
['დ', 'd'],
['ე', 'e'],
['ვ', 'v'],
['ზ', 'z'],
['თ', 't'],
['ი', 'i'],
['კ', 'k'],
['ლ', 'l'],
['მ', 'm'],
['ნ', 'n'],
['ო', 'o'],
['პ', 'p'],
['ჟ', 'zh'],
['რ', 'r'],
['ს', 's'],
['ტ', 't'],
['უ', 'u'],
['ფ', 'ph'],
['ქ', 'q'],
['ღ', 'gh'],
['ყ', 'k'],
['შ', 'sh'],
['ჩ', 'ch'],
['ც', 'ts'],
['ძ', 'dz'],
['წ', 'ts'],
['ჭ', 'tch'],
['ხ', 'kh'],
['ჯ', 'j'],
['ჰ', 'h'],
# Czech
['č', 'c'],
['ď', 'd'],
['ě', 'e'],
['ň', 'n'],
['ř', 'r'],
['š', 's'],
['ť', 't'],
['ů', 'u'],
['ž', 'z'],
['Č', 'C'],
['Ď', 'D'],
['Ě', 'E'],
['Ň', 'N'],
['Ř', 'R'],
['Š', 'S'],
['Ť', 'T'],
['Ů', 'U'],
['Ž', 'Z'],
# Dhivehi
['ހ', 'h'],
['ށ', 'sh'],
['ނ', 'n'],
['ރ', 'r'],
['ބ', 'b'],
['ޅ', 'lh'],
['ކ', 'k'],
['އ', 'a'],
['ވ', 'v'],
['މ', 'm'],
['ފ', 'f'],
['ދ', 'dh'],
['ތ', 'th'],
['ލ', 'l'],
['ގ', 'g'],
['ޏ', 'gn'],
['ސ', 's'],
['ޑ', 'd'],
['ޒ', 'z'],
['ޓ', 't'],
['ޔ', 'y'],
['ޕ', 'p'],
['ޖ', 'j'],
['ޗ', 'ch'],
['ޘ', 'tt'],
['ޙ', 'hh'],
['ޚ', 'kh'],
['ޛ', 'th'],
['ޜ', 'z'],
['ޝ', 'sh'],
['ޞ', 's'],
['ޟ', 'd'],
['ޠ', 't'],
['ޡ', 'z'],
['ޢ', 'a'],
['ޣ', 'gh'],
['ޤ', 'q'],
['ޥ', 'w'],
['ަ', 'a'],
['ާ', 'aa'],
['ި', 'i'],
['ީ', 'ee'],
['ު', 'u'],
['ޫ', 'oo'],
['ެ', 'e'],
['ޭ', 'ey'],
['ޮ', 'o'],
['ޯ', 'oa'],
['ް', ''],
# Greek
['α', 'a'],
['β', 'v'],
['γ', 'g'],
['δ', 'd'],
['ε', 'e'],
['ζ', 'z'],
['η', 'i'],
['θ', 'th'],
['ι', 'i'],
['κ', 'k'],
['λ', 'l'],
['μ', 'm'],
['ν', 'n'],
['ξ', 'ks'],
['ο', 'o'],
['π', 'p'],
['ρ', 'r'],
['σ', 's'],
['τ', 't'],
['υ', 'y'],
['φ', 'f'],
['χ', 'x'],
['ψ', 'ps'],
['ω', 'o'],
['ά', 'a'],
['έ', 'e'],
['ί', 'i'],
['ό', 'o'],
['ύ', 'y'],
['ή', 'i'],
['ώ', 'o'],
['ς', 's'],
['ϊ', 'i'],
['ΰ', 'y'],
['ϋ', 'y'],
['ΐ', 'i'],
['Α', 'A'],
['Β', 'B'],
['Γ', 'G'],
['Δ', 'D'],
['Ε', 'E'],
['Ζ', 'Z'],
['Η', 'I'],
['Θ', 'TH'],
['Ι', 'I'],
['Κ', 'K'],
['Λ', 'L'],
['Μ', 'M'],
['Ν', 'N'],
['Ξ', 'KS'],
['Ο', 'O'],
['Π', 'P'],
['Ρ', 'R'],
['Σ', 'S'],
['Τ', 'T'],
['Υ', 'Y'],
['Φ', 'F'],
['Χ', 'X'],
['Ψ', 'PS'],
['Ω', 'O'],
['Ά', 'A'],
['Έ', 'E'],
['Ί', 'I'],
['Ό', 'O'],
['Ύ', 'Y'],
['Ή', 'I'],
['Ώ', 'O'],
['Ϊ', 'I'],
['Ϋ', 'Y'],
# Disabled as it conflicts with German and Latin.
# Hungarian
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ö', 'o'],
# ['Ö', 'O'],
# ['ü', 'u'],
# ['Ü', 'U'],
# ['ű', 'u'],
# ['Ű', 'U'],
# Latvian
['ā', 'a'],
['ē', 'e'],
['ģ', 'g'],
['ī', 'i'],
['ķ', 'k'],
['ļ', 'l'],
['ņ', 'n'],
['ū', 'u'],
['Ā', 'A'],
['Ē', 'E'],
['Ģ', 'G'],
['Ī', 'I'],
['Ķ', 'K'],
['Ļ', 'L'],
['Ņ', 'N'],
['Ū', 'U'],
['č', 'c'],
['š', 's'],
['ž', 'z'],
['Č', 'C'],
['Š', 'S'],
['Ž', 'Z'],
# Lithuanian
['ą', 'a'],
['č', 'c'],
['ę', 'e'],
['ė', 'e'],
['į', 'i'],
['š', 's'],
['ų', 'u'],
['ū', 'u'],
['ž', 'z'],
['Ą', 'A'],
['Č', 'C'],
['Ę', 'E'],
['Ė', 'E'],
['Į', 'I'],
['Š', 'S'],
['Ų', 'U'],
['Ū', 'U'],
# Macedonian
['Ќ', 'Kj'],
['ќ', 'kj'],
['Љ', 'Lj'],
['љ', 'lj'],
['Њ', 'Nj'],
['њ', 'nj'],
['Тс', 'Ts'],
['тс', 'ts'],
# Polish
['ą', 'a'],
['ć', 'c'],
['ę', 'e'],
['ł', 'l'],
['ń', 'n'],
['ś', 's'],
['ź', 'z'],
['ż', 'z'],
['Ą', 'A'],
['Ć', 'C'],
['Ę', 'E'],
['Ł', 'L'],
['Ń', 'N'],
['Ś', 'S'],
['Ź', 'Z'],
['Ż', 'Z'],
# Disabled as it conflicts with Vietnamese.
# Serbian
# ['љ', 'lj'],
# ['њ', 'nj'],
# ['Љ', 'Lj'],
# ['Њ', 'Nj'],
# ['đ', 'dj'],
# ['Đ', 'Dj'],
# ['ђ', 'dj'],
# ['ј', 'j'],
# ['ћ', 'c'],
# ['џ', 'dz'],
# ['Ђ', 'Dj'],
# ['Ј', 'j'],
# ['Ћ', 'C'],
# ['Џ', 'Dz'],
# Disabled as it conflicts with German and Latin.
# Slovak
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ľ', 'l'],
# ['ĺ', 'l'],
# ['ŕ', 'r'],
# ['Ľ', 'L'],
# ['Ĺ', 'L'],
# ['Ŕ', 'R'],
# Disabled as it conflicts with German and Latin.
# Swedish
# ['å', 'o'],
# ['Å', 'o'],
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ë', 'e'],
# ['Ë', 'E'],
# ['ö', 'o'],
# ['Ö', 'O'],
# Ukrainian
['Є', 'Ye'],
['І', 'I'],
['Ї', 'Yi'],
['Ґ', 'G'],
['є', 'ye'],
['і', 'i'],
['ї', 'yi'],
['ґ', 'g'],
# Dutch
['IJ', 'IJ'],
['ij', 'ij'],
# Danish
# ['Æ', 'Ae'],
# ['Ø', 'Oe'],
# ['Å', 'Aa'],
# ['æ', 'ae'],
# ['ø', 'oe'],
# ['å', 'aa']
# Currencies
['¢', 'c'],
['¥', 'Y'],
['߿', 'b'],
['৳', 't'],
['૱', 'Bo'],
['฿', 'B'],
['₠', 'CE'],
['₡', 'C'],
['₢', 'Cr'],
['₣', 'F'],
['₥', 'm'],
['₦', 'N'],
['₧', 'Pt'],
['₨', 'Rs'],
['₩', 'W'],
['₫', 's'],
['€', 'E'],
['₭', 'K'],
['₮', 'T'],
['₯', 'Dp'],
['₰', 'S'],
['₱', 'P'],
['₲', 'G'],
['₳', 'A'],
['₴', 'S'],
['₵', 'C'],
['₶', 'tt'],
['₷', 'S'],
['₸', 'T'],
['₹', 'R'],
['₺', 'L'],
['₽', 'P'],
['₿', 'B'],
['﹩', '$'],
['¢', 'c'],
['¥', 'Y'],
['₩', 'W'],
# Latin
['𝐀', 'A'],
['𝐁', 'B'],
['𝐂', 'C'],
['𝐃', 'D'],
['𝐄', 'E'],
['𝐅', 'F'],
['𝐆', 'G'],
['𝐇', 'H'],
['𝐈', 'I'],
['𝐉', 'J'],
['𝐊', 'K'],
['𝐋', 'L'],
['𝐌', 'M'],
['𝐍', 'N'],
['𝐎', 'O'],
['𝐏', 'P'],
['𝐐', 'Q'],
['𝐑', 'R'],
['𝐒', 'S'],
['𝐓', 'T'],
['𝐔', 'U'],
['𝐕', 'V'],
['𝐖', 'W'],
['𝐗', 'X'],
['𝐘', 'Y'],
['𝐙', 'Z'],
['𝐚', 'a'],
['𝐛', 'b'],
['𝐜', 'c'],
['𝐝', 'd'],
['𝐞', 'e'],
['𝐟', 'f'],
['𝐠', 'g'],
['𝐡', 'h'],
['𝐢', 'i'],
['𝐣', 'j'],
['𝐤', 'k'],
['𝐥', 'l'],
['𝐦', 'm'],
['𝐧', 'n'],
['𝐨', 'o'],
['𝐩', 'p'],
['𝐪', 'q'],
['𝐫', 'r'],
['𝐬', 's'],
['𝐭', 't'],
['𝐮', 'u'],
['𝐯', 'v'],
['𝐰', 'w'],
['𝐱', 'x'],
['𝐲', 'y'],
['𝐳', 'z'],
['𝐴', 'A'],
['𝐵', 'B'],
['𝐶', 'C'],
['𝐷', 'D'],
['𝐸', 'E'],
['𝐹', 'F'],
['𝐺', 'G'],
['𝐻', 'H'],
['𝐼', 'I'],
['𝐽', 'J'],
['𝐾', 'K'],
['𝐿', 'L'],
['𝑀', 'M'],
['𝑁', 'N'],
['𝑂', 'O'],
['𝑃', 'P'],
['𝑄', 'Q'],
['𝑅', 'R'],
['𝑆', 'S'],
['𝑇', 'T'],
['𝑈', 'U'],
['𝑉', 'V'],
['𝑊', 'W'],
['𝑋', 'X'],
['𝑌', 'Y'],
['𝑍', 'Z'],
['𝑎', 'a'],
['𝑏', 'b'],
['𝑐', 'c'],
['𝑑', 'd'],
['𝑒', 'e'],
['𝑓', 'f'],
['𝑔', 'g'],
['𝑖', 'i'],
['𝑗', 'j'],
['𝑘', 'k'],
['𝑙', 'l'],
['𝑚', 'm'],
['𝑛', 'n'],
['𝑜', 'o'],
['𝑝', 'p'],
['𝑞', 'q'],
['𝑟', 'r'],
['𝑠', 's'],
['𝑡', 't'],
['𝑢', 'u'],
['𝑣', 'v'],
['𝑤', 'w'],
['𝑥', 'x'],
['𝑦', 'y'],
['𝑧', 'z'],
['𝑨', 'A'],
['𝑩', 'B'],
['𝑪', 'C'],
['𝑫', 'D'],
['𝑬', 'E'],
['𝑭', 'F'],
['𝑮', 'G'],
['𝑯', 'H'],
['𝑰', 'I'],
['𝑱', 'J'],
['𝑲', 'K'],
['𝑳', 'L'],
['𝑴', 'M'],
['𝑵', 'N'],
['𝑶', 'O'],
['𝑷', 'P'],
['𝑸', 'Q'],
['𝑹', 'R'],
['𝑺', 'S'],
['𝑻', 'T'],
['𝑼', 'U'],
['𝑽', 'V'],
['𝑾', 'W'],
['𝑿', 'X'],
['𝒀', 'Y'],
['𝒁', 'Z'],
['𝒂', 'a'],
['𝒃', 'b'],
['𝒄', 'c'],
['𝒅', 'd'],
['𝒆', 'e'],
['𝒇', 'f'],
['𝒈', 'g'],
['𝒉', 'h'],
['𝒊', 'i'],
['𝒋', 'j'],
['𝒌', 'k'],
['𝒍', 'l'],
['𝒎', 'm'],
['𝒏', 'n'],
['𝒐', 'o'],
['𝒑', 'p'],
['𝒒', 'q'],
['𝒓', 'r'],
['𝒔', 's'],
['𝒕', 't'],
['𝒖', 'u'],
['𝒗', 'v'],
['𝒘', 'w'],
['𝒙', 'x'],
['𝒚', 'y'],
['𝒛', 'z'],
['𝒜', 'A'],
['𝒞', 'C'],
['𝒟', 'D'],
['𝒢', 'g'],
['𝒥', 'J'],
['𝒦', 'K'],
['𝒩', 'N'],
['𝒪', 'O'],
['𝒫', 'P'],
['𝒬', 'Q'],
['𝒮', 'S'],
['𝒯', 'T'],
['𝒰', 'U'],
['𝒱', 'V'],
['𝒲', 'W'],
['𝒳', 'X'],
['𝒴', 'Y'],
['𝒵', 'Z'],
['𝒶', 'a'],
['𝒷', 'b'],
['𝒸', 'c'],
['𝒹', 'd'],
['𝒻', 'f'],
['𝒽', 'h'],
['𝒾', 'i'],
['𝒿', 'j'],
    ['𝓀', 'k'],
['𝓁', 'l'],
['𝓂', 'm'],
['𝓃', 'n'],
['𝓅', 'p'],
['𝓆', 'q'],
['𝓇', 'r'],
['𝓈', 's'],
['𝓉', 't'],
['𝓊', 'u'],
['𝓋', 'v'],
['𝓌', 'w'],
['𝓍', 'x'],
['𝓎', 'y'],
['𝓏', 'z'],
['𝓐', 'A'],
['𝓑', 'B'],
['𝓒', 'C'],
['𝓓', 'D'],
['𝓔', 'E'],
['𝓕', 'F'],
['𝓖', 'G'],
['𝓗', 'H'],
['𝓘', 'I'],
['𝓙', 'J'],
['𝓚', 'K'],
['𝓛', 'L'],
['𝓜', 'M'],
['𝓝', 'N'],
['𝓞', 'O'],
['𝓟', 'P'],
['𝓠', 'Q'],
['𝓡', 'R'],
['𝓢', 'S'],
['𝓣', 'T'],
['𝓤', 'U'],
['𝓥', 'V'],
['𝓦', 'W'],
['𝓧', 'X'],
['𝓨', 'Y'],
['𝓩', 'Z'],
['𝓪', 'a'],
['𝓫', 'b'],
['𝓬', 'c'],
['𝓭', 'd'],
['𝓮', 'e'],
['𝓯', 'f'],
['𝓰', 'g'],
['𝓱', 'h'],
['𝓲', 'i'],
['𝓳', 'j'],
['𝓴', 'k'],
['𝓵', 'l'],
['𝓶', 'm'],
['𝓷', 'n'],
['𝓸', 'o'],
['𝓹', 'p'],
['𝓺', 'q'],
['𝓻', 'r'],
['𝓼', 's'],
['𝓽', 't'],
['𝓾', 'u'],
['𝓿', 'v'],
['𝔀', 'w'],
['𝔁', 'x'],
['𝔂', 'y'],
['𝔃', 'z'],
['𝔄', 'A'],
['𝔅', 'B'],
['𝔇', 'D'],
['𝔈', 'E'],
['𝔉', 'F'],
['𝔊', 'G'],
['𝔍', 'J'],
['𝔎', 'K'],
['𝔏', 'L'],
['𝔐', 'M'],
['𝔑', 'N'],
['𝔒', 'O'],
['𝔓', 'P'],
['𝔔', 'Q'],
['𝔖', 'S'],
['𝔗', 'T'],
['𝔘', 'U'],
['𝔙', 'V'],
['𝔚', 'W'],
['𝔛', 'X'],
['𝔜', 'Y'],
['𝔞', 'a'],
['𝔟', 'b'],
['𝔠', 'c'],
['𝔡', 'd'],
['𝔢', 'e'],
['𝔣', 'f'],
['𝔤', 'g'],
['𝔥', 'h'],
['𝔦', 'i'],
['𝔧', 'j'],
['𝔨', 'k'],
['𝔩', 'l'],
['𝔪', 'm'],
['𝔫', 'n'],
['𝔬', 'o'],
['𝔭', 'p'],
['𝔮', 'q'],
['𝔯', 'r'],
['𝔰', 's'],
['𝔱', 't'],
['𝔲', 'u'],
['𝔳', 'v'],
['𝔴', 'w'],
['𝔵', 'x'],
['𝔶', 'y'],
['𝔷', 'z'],
['𝔸', 'A'],
['𝔹', 'B'],
['𝔻', 'D'],
['𝔼', 'E'],
['𝔽', 'F'],
['𝔾', 'G'],
['𝕀', 'I'],
['𝕁', 'J'],
['𝕂', 'K'],
['𝕃', 'L'],
['𝕄', 'M'],
['𝕆', 'N'],
['𝕊', 'S'],
['𝕋', 'T'],
['𝕌', 'U'],
['𝕍', 'V'],
['𝕎', 'W'],
['𝕏', 'X'],
['𝕐', 'Y'],
['𝕒', 'a'],
['𝕓', 'b'],
['𝕔', 'c'],
['𝕕', 'd'],
['𝕖', 'e'],
['𝕗', 'f'],
['𝕘', 'g'],
['𝕙', 'h'],
['𝕚', 'i'],
['𝕛', 'j'],
['𝕜', 'k'],
['𝕝', 'l'],
['𝕞', 'm'],
['𝕟', 'n'],
['𝕠', 'o'],
['𝕡', 'p'],
['𝕢', 'q'],
['𝕣', 'r'],
['𝕤', 's'],
['𝕥', 't'],
['𝕦', 'u'],
['𝕧', 'v'],
['𝕨', 'w'],
['𝕩', 'x'],
['𝕪', 'y'],
['𝕫', 'z'],
['𝕬', 'A'],
['𝕭', 'B'],
['𝕮', 'C'],
['𝕯', 'D'],
['𝕰', 'E'],
['𝕱', 'F'],
['𝕲', 'G'],
['𝕳', 'H'],
['𝕴', 'I'],
['𝕵', 'J'],
['𝕶', 'K'],
['𝕷', 'L'],
['𝕸', 'M'],
['𝕹', 'N'],
['𝕺', 'O'],
['𝕻', 'P'],
['𝕼', 'Q'],
['𝕽', 'R'],
['𝕾', 'S'],
['𝕿', 'T'],
['𝖀', 'U'],
['𝖁', 'V'],
['𝖂', 'W'],
['𝖃', 'X'],
['𝖄', 'Y'],
['𝖅', 'Z'],
['𝖆', 'a'],
['𝖇', 'b'],
['𝖈', 'c'],
['𝖉', 'd'],
['𝖊', 'e'],
['𝖋', 'f'],
['𝖌', 'g'],
['𝖍', 'h'],
['𝖎', 'i'],
['𝖏', 'j'],
['𝖐', 'k'],
['𝖑', 'l'],
['𝖒', 'm'],
['𝖓', 'n'],
['𝖔', 'o'],
['𝖕', 'p'],
['𝖖', 'q'],
['𝖗', 'r'],
['𝖘', 's'],
['𝖙', 't'],
['𝖚', 'u'],
['𝖛', 'v'],
['𝖜', 'w'],
['𝖝', 'x'],
['𝖞', 'y'],
['𝖟', 'z'],
['𝖠', 'A'],
['𝖡', 'B'],
['𝖢', 'C'],
['𝖣', 'D'],
['𝖤', 'E'],
['𝖥', 'F'],
['𝖦', 'G'],
['𝖧', 'H'],
['𝖨', 'I'],
['𝖩', 'J'],
['𝖪', 'K'],
['𝖫', 'L'],
['𝖬', 'M'],
['𝖭', 'N'],
['𝖮', 'O'],
['𝖯', 'P'],
['𝖰', 'Q'],
['𝖱', 'R'],
['𝖲', 'S'],
['𝖳', 'T'],
['𝖴', 'U'],
['𝖵', 'V'],
['𝖶', 'W'],
['𝖷', 'X'],
['𝖸', 'Y'],
['𝖹', 'Z'],
['𝖺', 'a'],
['𝖻', 'b'],
['𝖼', 'c'],
['𝖽', 'd'],
['𝖾', 'e'],
['𝖿', 'f'],
['𝗀', 'g'],
['𝗁', 'h'],
['𝗂', 'i'],
['𝗃', 'j'],
['𝗄', 'k'],
['𝗅', 'l'],
['𝗆', 'm'],
['𝗇', 'n'],
['𝗈', 'o'],
['𝗉', 'p'],
['𝗊', 'q'],
['𝗋', 'r'],
['𝗌', 's'],
['𝗍', 't'],
['𝗎', 'u'],
['𝗏', 'v'],
['𝗐', 'w'],
['𝗑', 'x'],
['𝗒', 'y'],
['𝗓', 'z'],
['𝗔', 'A'],
['𝗕', 'B'],
['𝗖', 'C'],
['𝗗', 'D'],
['𝗘', 'E'],
['𝗙', 'F'],
['𝗚', 'G'],
['𝗛', 'H'],
['𝗜', 'I'],
['𝗝', 'J'],
['𝗞', 'K'],
['𝗟', 'L'],
['𝗠', 'M'],
['𝗡', 'N'],
['𝗢', 'O'],
['𝗣', 'P'],
['𝗤', 'Q'],
['𝗥', 'R'],
['𝗦', 'S'],
['𝗧', 'T'],
['𝗨', 'U'],
['𝗩', 'V'],
['𝗪', 'W'],
['𝗫', 'X'],
['𝗬', 'Y'],
['𝗭', 'Z'],
['𝗮', 'a'],
['𝗯', 'b'],
['𝗰', 'c'],
['𝗱', 'd'],
['𝗲', 'e'],
['𝗳', 'f'],
['𝗴', 'g'],
['𝗵', 'h'],
['𝗶', 'i'],
['𝗷', 'j'],
['𝗸', 'k'],
['𝗹', 'l'],
['𝗺', 'm'],
['𝗻', 'n'],
['𝗼', 'o'],
['𝗽', 'p'],
['𝗾', 'q'],
['𝗿', 'r'],
['𝘀', 's'],
['𝘁', 't'],
['𝘂', 'u'],
['𝘃', 'v'],
['𝘄', 'w'],
['𝘅', 'x'],
['𝘆', 'y'],
['𝘇', 'z'],
['𝘈', 'A'],
['𝘉', 'B'],
['𝘊', 'C'],
['𝘋', 'D'],
['𝘌', 'E'],
['𝘍', 'F'],
['𝘎', 'G'],
['𝘏', 'H'],
['𝘐', 'I'],
['𝘑', 'J'],
['𝘒', 'K'],
['𝘓', 'L'],
['𝘔', 'M'],
['𝘕', 'N'],
['𝘖', 'O'],
['𝘗', 'P'],
['𝘘', 'Q'],
['𝘙', 'R'],
['𝘚', 'S'],
['𝘛', 'T'],
['𝘜', 'U'],
['𝘝', 'V'],
['𝘞', 'W'],
['𝘟', 'X'],
['𝘠', 'Y'],
['𝘡', 'Z'],
['𝘢', 'a'],
['𝘣', 'b'],
['𝘤', 'c'],
['𝘥', 'd'],
['𝘦', 'e'],
['𝘧', 'f'],
['𝘨', 'g'],
['𝘩', 'h'],
['𝘪', 'i'],
['𝘫', 'j'],
['𝘬', 'k'],
['𝘭', 'l'],
['𝘮', 'm'],
['𝘯', 'n'],
['𝘰', 'o'],
['𝘱', 'p'],
['𝘲', 'q'],
['𝘳', 'r'],
['𝘴', 's'],
['𝘵', 't'],
['𝘶', 'u'],
['𝘷', 'v'],
['𝘸', 'w'],
['𝘹', 'x'],
['𝘺', 'y'],
['𝘻', 'z'],
['𝘼', 'A'],
['𝘽', 'B'],
['𝘾', 'C'],
['𝘿', 'D'],
['𝙀', 'E'],
['𝙁', 'F'],
['𝙂', 'G'],
['𝙃', 'H'],
['𝙄', 'I'],
['𝙅', 'J'],
['𝙆', 'K'],
['𝙇', 'L'],
['𝙈', 'M'],
['𝙉', 'N'],
['𝙊', 'O'],
['𝙋', 'P'],
['𝙌', 'Q'],
['𝙍', 'R'],
['𝙎', 'S'],
['𝙏', 'T'],
['𝙐', 'U'],
['𝙑', 'V'],
['𝙒', 'W'],
['𝙓', 'X'],
['𝙔', 'Y'],
['𝙕', 'Z'],
['𝙖', 'a'],
['𝙗', 'b'],
['𝙘', 'c'],
['𝙙', 'd'],
['𝙚', 'e'],
['𝙛', 'f'],
['𝙜', 'g'],
['𝙝', 'h'],
['𝙞', 'i'],
['𝙟', 'j'],
['𝙠', 'k'],
['𝙡', 'l'],
['𝙢', 'm'],
['𝙣', 'n'],
['𝙤', 'o'],
['𝙥', 'p'],
['𝙦', 'q'],
['𝙧', 'r'],
['𝙨', 's'],
['𝙩', 't'],
['𝙪', 'u'],
['𝙫', 'v'],
['𝙬', 'w'],
['𝙭', 'x'],
['𝙮', 'y'],
['𝙯', 'z'],
['𝙰', 'A'],
['𝙱', 'B'],
['𝙲', 'C'],
['𝙳', 'D'],
['𝙴', 'E'],
['𝙵', 'F'],
['𝙶', 'G'],
['𝙷', 'H'],
['𝙸', 'I'],
['𝙹', 'J'],
['𝙺', 'K'],
['𝙻', 'L'],
['𝙼', 'M'],
['𝙽', 'N'],
['𝙾', 'O'],
['𝙿', 'P'],
['𝚀', 'Q'],
['𝚁', 'R'],
['𝚂', 'S'],
['𝚃', 'T'],
['𝚄', 'U'],
['𝚅', 'V'],
['𝚆', 'W'],
['𝚇', 'X'],
['𝚈', 'Y'],
['𝚉', 'Z'],
['𝚊', 'a'],
['𝚋', 'b'],
['𝚌', 'c'],
['𝚍', 'd'],
['𝚎', 'e'],
['𝚏', 'f'],
['𝚐', 'g'],
['𝚑', 'h'],
['𝚒', 'i'],
['𝚓', 'j'],
['𝚔', 'k'],
['𝚕', 'l'],
['𝚖', 'm'],
['𝚗', 'n'],
['𝚘', 'o'],
['𝚙', 'p'],
['𝚚', 'q'],
['𝚛', 'r'],
['𝚜', 's'],
['𝚝', 't'],
['𝚞', 'u'],
['𝚟', 'v'],
['𝚠', 'w'],
['𝚡', 'x'],
['𝚢', 'y'],
['𝚣', 'z'],
# Dotless letters
    ['𝚤', 'i'],
['𝚥', 'j'],
# Greek
['𝛢', 'A'],
['𝛣', 'B'],
['𝛤', 'G'],
['𝛥', 'D'],
['𝛦', 'E'],
['𝛧', 'Z'],
['𝛨', 'I'],
['𝛩', 'TH'],
['𝛪', 'I'],
['𝛫', 'K'],
['𝛬', 'L'],
['𝛭', 'M'],
['𝛮', 'N'],
['𝛯', 'KS'],
['𝛰', 'O'],
['𝛱', 'P'],
['𝛲', 'R'],
['𝛳', 'TH'],
['𝛴', 'S'],
['𝛵', 'T'],
['𝛶', 'Y'],
['𝛷', 'F'],
['𝛸', 'x'],
['𝛹', 'PS'],
['𝛺', 'O'],
['𝛻', 'D'],
['𝛼', 'a'],
['𝛽', 'b'],
['𝛾', 'g'],
['𝛿', 'd'],
['𝜀', 'e'],
['𝜁', 'z'],
['𝜂', 'i'],
['𝜃', 'th'],
['𝜄', 'i'],
['𝜅', 'k'],
['𝜆', 'l'],
['𝜇', 'm'],
['𝜈', 'n'],
['𝜉', 'ks'],
['𝜊', 'o'],
['𝜋', 'p'],
['𝜌', 'r'],
['𝜍', 's'],
['𝜎', 's'],
['𝜏', 't'],
['𝜐', 'y'],
['𝜑', 'f'],
['𝜒', 'x'],
['𝜓', 'ps'],
['𝜔', 'o'],
['𝜕', 'd'],
['𝜖', 'E'],
['𝜗', 'TH'],
['𝜘', 'K'],
['𝜙', 'f'],
['𝜚', 'r'],
['𝜛', 'p'],
['𝜜', 'A'],
['𝜝', 'V'],
['𝜞', 'G'],
['𝜟', 'D'],
['𝜠', 'E'],
['𝜡', 'Z'],
['𝜢', 'I'],
['𝜣', 'TH'],
['𝜤', 'I'],
['𝜥', 'K'],
['𝜦', 'L'],
['𝜧', 'M'],
['𝜨', 'N'],
['𝜩', 'KS'],
['𝜪', 'O'],
['𝜫', 'P'],
['𝜬', 'S'],
['𝜭', 'TH'],
['𝜮', 'S'],
['𝜯', 'T'],
['𝜰', 'Y'],
['𝜱', 'F'],
['𝜲', 'X'],
['𝜳', 'PS'],
['𝜴', 'O'],
['𝜵', 'D'],
['𝜶', 'a'],
['𝜷', 'v'],
['𝜸', 'g'],
['𝜹', 'd'],
['𝜺', 'e'],
['𝜻', 'z'],
['𝜼', 'i'],
['𝜽', 'th'],
['𝜾', 'i'],
['𝜿', 'k'],
['𝝀', 'l'],
['𝝁', 'm'],
['𝝂', 'n'],
['𝝃', 'ks'],
['𝝄', 'o'],
['𝝅', 'p'],
['𝝆', 'r'],
['𝝇', 's'],
['𝝈', 's'],
['𝝉', 't'],
['𝝊', 'y'],
['𝝋', 'f'],
['𝝌', 'x'],
['𝝍', 'ps'],
['𝝎', 'o'],
['𝝏', 'a'],
['𝝐', 'e'],
['𝝑', 'i'],
['𝝒', 'k'],
['𝝓', 'f'],
['𝝔', 'r'],
['𝝕', 'p'],
['𝝖', 'A'],
['𝝗', 'B'],
['𝝘', 'G'],
['𝝙', 'D'],
['𝝚', 'E'],
['𝝛', 'Z'],
['𝝜', 'I'],
['𝝝', 'TH'],
['𝝞', 'I'],
['𝝟', 'K'],
['𝝠', 'L'],
['𝝡', 'M'],
['𝝢', 'N'],
['𝝣', 'KS'],
['𝝤', 'O'],
['𝝥', 'P'],
['𝝦', 'R'],
['𝝧', 'TH'],
['𝝨', 'S'],
['𝝩', 'T'],
['𝝪', 'Y'],
['𝝫', 'F'],
['𝝬', 'X'],
['𝝭', 'PS'],
['𝝮', 'O'],
['𝝯', 'D'],
['𝝰', 'a'],
['𝝱', 'v'],
['𝝲', 'g'],
['𝝳', 'd'],
['𝝴', 'e'],
['𝝵', 'z'],
['𝝶', 'i'],
['𝝷', 'th'],
['𝝸', 'i'],
['𝝹', 'k'],
['𝝺', 'l'],
['𝝻', 'm'],
['𝝼', 'n'],
['𝝽', 'ks'],
['𝝾', 'o'],
['𝝿', 'p'],
['𝞀', 'r'],
['𝞁', 's'],
['𝞂', 's'],
['𝞃', 't'],
['𝞄', 'y'],
['𝞅', 'f'],
['𝞆', 'x'],
['𝞇', 'ps'],
['𝞈', 'o'],
['𝞉', 'a'],
['𝞊', 'e'],
['𝞋', 'i'],
['𝞌', 'k'],
['𝞍', 'f'],
['𝞎', 'r'],
['𝞏', 'p'],
['𝞐', 'A'],
['𝞑', 'V'],
['𝞒', 'G'],
['𝞓', 'D'],
['𝞔', 'E'],
['𝞕', 'Z'],
['𝞖', 'I'],
['𝞗', 'TH'],
['𝞘', 'I'],
['𝞙', 'K'],
['𝞚', 'L'],
['𝞛', 'M'],
['𝞜', 'N'],
['𝞝', 'KS'],
['𝞞', 'O'],
['𝞟', 'P'],
['𝞠', 'S'],
['𝞡', 'TH'],
['𝞢', 'S'],
['𝞣', 'T'],
['𝞤', 'Y'],
['𝞥', 'F'],
['𝞦', 'X'],
['𝞧', 'PS'],
['𝞨', 'O'],
['𝞩', 'D'],
    ['𝞪', 'a'],
    ['𝞫', 'v'],
    ['𝞬', 'g'],
    ['𝞭', 'd'],
    ['𝞮', 'e'],
    ['𝞯', 'z'],
['𝞰', 'i'],
['𝞱', 'th'],
['𝞲', 'i'],
['𝞳', 'k'],
['𝞴', 'l'],
['𝞵', 'm'],
['𝞶', 'n'],
['𝞷', 'ks'],
['𝞸', 'o'],
['𝞹', 'p'],
['𝞺', 'r'],
['𝞻', 's'],
['𝞼', 's'],
['𝞽', 't'],
['𝞾', 'y'],
['𝞿', 'f'],
['𝟀', 'x'],
['𝟁', 'ps'],
['𝟂', 'o'],
['𝟃', 'a'],
['𝟄', 'e'],
['𝟅', 'i'],
['𝟆', 'k'],
['𝟇', 'f'],
['𝟈', 'r'],
['𝟉', 'p'],
['𝟊', 'F'],
['𝟋', 'f'],
['⒜', '(a)'],
['⒝', '(b)'],
['⒞', '(c)'],
['⒟', '(d)'],
['⒠', '(e)'],
['⒡', '(f)'],
['⒢', '(g)'],
['⒣', '(h)'],
['⒤', '(i)'],
['⒥', '(j)'],
['⒦', '(k)'],
['⒧', '(l)'],
['⒨', '(m)'],
['⒩', '(n)'],
['⒪', '(o)'],
['⒫', '(p)'],
['⒬', '(q)'],
['⒭', '(r)'],
['⒮', '(s)'],
['⒯', '(t)'],
['⒰', '(u)'],
['⒱', '(v)'],
['⒲', '(w)'],
['⒳', '(x)'],
['⒴', '(y)'],
['⒵', '(z)'],
['Ⓐ', '(A)'],
['Ⓑ', '(B)'],
['Ⓒ', '(C)'],
['Ⓓ', '(D)'],
['Ⓔ', '(E)'],
['Ⓕ', '(F)'],
['Ⓖ', '(G)'],
['Ⓗ', '(H)'],
['Ⓘ', '(I)'],
['Ⓙ', '(J)'],
['Ⓚ', '(K)'],
['Ⓛ', '(L)'],
['Ⓝ', '(N)'],
['Ⓞ', '(O)'],
['Ⓟ', '(P)'],
['Ⓠ', '(Q)'],
['Ⓡ', '(R)'],
['Ⓢ', '(S)'],
['Ⓣ', '(T)'],
['Ⓤ', '(U)'],
['Ⓥ', '(V)'],
['Ⓦ', '(W)'],
['Ⓧ', '(X)'],
['Ⓨ', '(Y)'],
['Ⓩ', '(Z)'],
['ⓐ', '(a)'],
['ⓑ', '(b)'],
    ['ⓒ', '(c)'],
    ['ⓓ', '(d)'],
['ⓔ', '(e)'],
['ⓕ', '(f)'],
['ⓖ', '(g)'],
['ⓗ', '(h)'],
['ⓘ', '(i)'],
['ⓙ', '(j)'],
['ⓚ', '(k)'],
['ⓛ', '(l)'],
['ⓜ', '(m)'],
['ⓝ', '(n)'],
['ⓞ', '(o)'],
['ⓟ', '(p)'],
['ⓠ', '(q)'],
['ⓡ', '(r)'],
['ⓢ', '(s)'],
['ⓣ', '(t)'],
['ⓤ', '(u)'],
['ⓥ', '(v)'],
['ⓦ', '(w)'],
['ⓧ', '(x)'],
['ⓨ', '(y)'],
['ⓩ', '(z)'],
# Numbers
['𝟎', '0'],
['𝟏', '1'],
['𝟐', '2'],
['𝟑', '3'],
['𝟒', '4'],
['𝟓', '5'],
['𝟔', '6'],
['𝟕', '7'],
['𝟖', '8'],
['𝟗', '9'],
['𝟘', '0'],
['𝟙', '1'],
['𝟚', '2'],
['𝟛', '3'],
['𝟜', '4'],
['𝟝', '5'],
['𝟞', '6'],
['𝟟', '7'],
['𝟠', '8'],
['𝟡', '9'],
['𝟢', '0'],
['𝟣', '1'],
['𝟤', '2'],
['𝟥', '3'],
['𝟦', '4'],
['𝟧', '5'],
['𝟨', '6'],
['𝟩', '7'],
['𝟪', '8'],
['𝟫', '9'],
['𝟬', '0'],
['𝟭', '1'],
['𝟮', '2'],
['𝟯', '3'],
['𝟰', '4'],
['𝟱', '5'],
['𝟲', '6'],
['𝟳', '7'],
['𝟴', '8'],
['𝟵', '9'],
['𝟶', '0'],
['𝟷', '1'],
['𝟸', '2'],
['𝟹', '3'],
['𝟺', '4'],
['𝟻', '5'],
['𝟼', '6'],
['𝟽', '7'],
['𝟾', '8'],
['𝟿', '9'],
['①', '1'],
['②', '2'],
['③', '3'],
['④', '4'],
['⑤', '5'],
['⑥', '6'],
['⑦', '7'],
['⑧', '8'],
['⑨', '9'],
['⑩', '10'],
['⑪', '11'],
['⑫', '12'],
['⑬', '13'],
['⑭', '14'],
['⑮', '15'],
['⑯', '16'],
['⑰', '17'],
['⑱', '18'],
['⑲', '19'],
['⑳', '20'],
['⑴', '1'],
['⑵', '2'],
['⑶', '3'],
['⑷', '4'],
['⑸', '5'],
['⑹', '6'],
['⑺', '7'],
['⑻', '8'],
['⑼', '9'],
['⑽', '10'],
['⑾', '11'],
['⑿', '12'],
['⒀', '13'],
['⒁', '14'],
['⒂', '15'],
['⒃', '16'],
['⒄', '17'],
['⒅', '18'],
['⒆', '19'],
['⒇', '20'],
['⒈', '1.'],
['⒉', '2.'],
['⒊', '3.'],
['⒋', '4.'],
['⒌', '5.'],
['⒍', '6.'],
['⒎', '7.'],
['⒏', '8.'],
['⒐', '9.'],
['⒑', '10.'],
['⒒', '11.'],
['⒓', '12.'],
['⒔', '13.'],
['⒕', '14.'],
['⒖', '15.'],
['⒗', '16.'],
['⒘', '17.'],
['⒙', '18.'],
['⒚', '19.'],
['⒛', '20.'],
['⓪', '0'],
['⓫', '11'],
['⓬', '12'],
['⓭', '13'],
['⓮', '14'],
['⓯', '15'],
['⓰', '16'],
['⓱', '17'],
['⓲', '18'],
['⓳', '19'],
['⓴', '20'],
['⓵', '1'],
['⓶', '2'],
['⓷', '3'],
['⓸', '4'],
['⓹', '5'],
['⓺', '6'],
['⓻', '7'],
['⓼', '8'],
['⓽', '9'],
['⓾', '10'],
['⓿', '0'],
# Punctuation
['🙰', '&'],
['🙱', '&'],
['🙲', '&'],
['🙳', '&'],
['🙴', '&'],
['🙵', '&'],
['🙶', '"'],
['🙷', '"'],
['🙸', '"'],
['‽', '?!'],
['🙹', '?!'],
['🙺', '?!'],
['🙻', '?!'],
['🙼', '/'],
['🙽', '\\'],
# Alchemy
['🜇', 'AR'],
['🜈', 'V'],
['🜉', 'V'],
['🜆', 'VR'],
['🜅', 'VF'],
['🜩', '2'],
['🜪', '5'],
['🝡', 'f'],
['🝢', 'W'],
['🝣', 'U'],
['🝧', 'V'],
['🝨', 'T'],
['🝪', 'V'],
['🝫', 'MB'],
['🝬', 'VB'],
['🝲', '3B'],
['🝳', '3B'],
# Emojis
['💯', '100'],
['🔙', 'BACK'],
['🔚', 'END'],
['🔛', 'ON!'],
['🔜', 'SOON'],
['🔝', 'TOP'],
['🔞', '18'],
['🔤', 'abc'],
['🔠', 'ABCD'],
['🔡', 'abcd'],
['🔢', '1234'],
['🔣', 'T&@%'],
['#️⃣', '#'],
['*️⃣', '*'],
['0️⃣', '0'],
['1️⃣', '1'],
['2️⃣', '2'],
['3️⃣', '3'],
['4️⃣', '4'],
['5️⃣', '5'],
['6️⃣', '6'],
['7️⃣', '7'],
['8️⃣', '8'],
['9️⃣', '9'],
['🔟', '10'],
['🅰️', 'A'],
['🅱️', 'B'],
['🆎', 'AB'],
['🆑', 'CL'],
['🅾️', 'O'],
['🅿', 'P'],
['🆘', 'SOS'],
['🅲', 'C'],
['🅳', 'D'],
['🅴', 'E'],
['🅵', 'F'],
['🅶', 'G'],
['🅷', 'H'],
['🅸', 'I'],
['🅹', 'J'],
['🅺', 'K'],
['🅻', 'L'],
['🅼', 'M'],
['🅽', 'N'],
['🆀', 'Q'],
['🆁', 'R'],
['🆂', 'S'],
['🆃', 'T'],
['🆄', 'U'],
['🆅', 'V'],
['🆆', 'W'],
['🆇', 'X'],
['🆈', 'Y'],
['🆉', 'Z'],
]
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/text/unidecoder/replacements.py (DeepLearningExamples-master) ---
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
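# Examples (illustrative notes, not in the original file): the Cyrillic
# capital 'С' transliterates to 'S' by default, but maps to its Latin
# homoglyph 'C' when homoglyph mappings are prioritized:
#   unidecoder('Düsseldorf')              ->  'Duesseldorf'
#   unidecoder('Сoffee')                  ->  'Soffee'
#   unidecoder('Сoffee', homoglyphs=True) ->  'Coffee'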
# --- end of PyTorch/SpeechRecognition/QuartzNet/common/text/unidecoder/__init__.py (DeepLearningExamples-master) ---
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/text/unidecoder/homoglyphs.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import math
import os
import torch
import torch.distributed as dist
from .iterator import DaliIterator, SyntheticDataIterator
from .pipeline import make_dali_asr_pipeline
from common.helpers import print_once
def _parse_json(json_path: str, start_label=0, predicate=lambda json: True):
"""
Parses json file to the format required by DALI.
Args:
json_path: path to json file
start_label: the label, starting from which DALI will assign
consecutive int numbers to every transcript
        predicate: function that accepts a sample descriptor
            (i.e. a json dictionary) as an argument. If the predicate for
            a given sample returns True, it will be included in the dataset.
Returns:
output_files: dict that maps file name to label assigned by DALI
transcripts: dict that maps label assigned by DALI to the transcript
"""
with open(json_path) as f:
librispeech_json = json.load(f)
output_files = {}
transcripts = {}
curr_label = start_label
for original_sample in librispeech_json:
if not predicate(original_sample):
continue
transcripts[curr_label] = original_sample['transcript']
output_files[original_sample['files'][-1]['fname']] = curr_label
curr_label += 1
return output_files, transcripts
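# A minimal usage sketch for _parse_json (the manifest path below is
# hypothetical; the predicate drops utterances longer than 15 seconds):
#
#   output_files, transcripts = _parse_json(
#       '/datasets/LibriSpeech/librispeech-train-clean-100-wav.json',
#       start_label=0,
#       predicate=lambda sample: sample['original_duration'] <= 15.0)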
def _dict_to_file(d: dict, filename: str):
    with open(filename, "w") as f:
        for key, value in d.items():
            f.write("{} {}\n".format(key, value))
class DaliDataLoader:
"""
DataLoader is the main entry point to the data preprocessing pipeline.
To use, create an object and then just iterate over `data_iterator`.
DataLoader will do the rest for you.
    Example:
        data_layer = DaliDataLoader(gpu_id, dataset_path, config_data,
                                    config_features, json_names, symbols,
                                    batch_size, pipeline_type="train")
        for data in data_layer.data_iterator():
            print(data)  # Here's your preprocessed data
Args:
device_type: Which device to use for preprocessing. Choose: "cpu", "gpu"
pipeline_type: Choose: "train", "val", "synth"
"""
def __init__(self, gpu_id, dataset_path: str, config_data: dict,
config_features: dict, json_names: list, symbols: list,
batch_size: int, pipeline_type: str,
grad_accumulation_steps: int = 1,
synth_iters_per_epoch: int = 544, device_type: str = "gpu"):
self.batch_size = batch_size
self.grad_accumulation_steps = grad_accumulation_steps
self.drop_last = (pipeline_type == 'train')
self.device_type = device_type
pipeline_type = self._parse_pipeline_type(pipeline_type)
if pipeline_type == "synth":
self._dali_data_iterator = self._init_synth_iterator(
self.batch_size,
                config_features['n_filt'],
                iters_per_epoch=synth_iters_per_epoch,
                ngpus=dist.get_world_size() if dist.is_initialized() else 1)
else:
self._dali_data_iterator = self._init_iterator(
gpu_id=gpu_id,
dataset_path=dataset_path,
config_data=config_data,
config_features=config_features,
json_names=json_names,
symbols=symbols,
train_pipeline=pipeline_type == "train")
def _init_iterator(self, gpu_id, dataset_path, config_data,
config_features, json_names: list, symbols: list,
train_pipeline: bool):
"""Returns an iterator over data preprocessed with Dali."""
def hash_list_of_strings(li):
return str(abs(hash(''.join(li))))
output_files, transcripts = {}, {}
max_duration = config_data['max_duration']
for jname in json_names:
of, tr = _parse_json(
jname if jname[0] == '/' else os.path.join(dataset_path, jname),
len(output_files),
predicate=lambda json: json['original_duration'] <= max_duration)
output_files.update(of)
transcripts.update(tr)
file_list_path = os.path.join(
"/tmp", "asr_dali.file_list." + hash_list_of_strings(json_names))
_dict_to_file(output_files, file_list_path)
self.dataset_size = len(output_files)
print_once('Dataset read by DALI. '
f'Number of samples: {self.dataset_size}')
pipeline = make_dali_asr_pipeline(
config_data=config_data,
config_features=config_features,
device_id=gpu_id,
file_root=dataset_path,
file_list=file_list_path,
device_type=self.device_type,
batch_size=self.batch_size,
train_pipeline=train_pipeline)
return DaliIterator([pipeline], transcripts=transcripts,
symbols=symbols, batch_size=self.batch_size,
reader_name="file_reader",
train_iterator=train_pipeline)
def _init_synth_iterator(self, batch_size, nfeatures, iters_per_epoch,
ngpus):
self.dataset_size = ngpus * iters_per_epoch * batch_size
return SyntheticDataIterator(batch_size, nfeatures, regenerate=True)
@staticmethod
def _parse_pipeline_type(pipeline_type):
pipe = pipeline_type.lower()
assert pipe in ("train", "val", "synth"), \
'Invalid pipeline type (choices: "train", "val", "synth").'
return pipe
def _shard_size(self):
"""
Total number of samples handled by a single GPU in a single epoch.
"""
world_size = dist.get_world_size() if dist.is_initialized() else 1
if self.drop_last:
divisor = world_size * self.batch_size * self.grad_accumulation_steps
return self.dataset_size // divisor * divisor // world_size
else:
return int(math.ceil(self.dataset_size / world_size))
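    # Worked example (a sketch, assuming drop_last=True): with
    # dataset_size=1000, world_size=2, batch_size=16 and
    # grad_accumulation_steps=1, the divisor is 2 * 16 * 1 = 32, so
    # _shard_size() = 1000 // 32 * 32 // 2 = 992 // 2 = 496 samples per GPU.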
def __len__(self):
"""
Number of batches handled by each GPU.
"""
if self.drop_last:
assert self._shard_size() % self.batch_size == 0, \
f'{self._shard_size()} {self.batch_size}'
return int(math.ceil(self._shard_size() / self.batch_size))
def data_iterator(self):
return self._dali_data_iterator
def __iter__(self):
return self._dali_data_iterator
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/dali/data_loader.py |
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/dali/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
from nvidia.dali.plugin.pytorch import DALIGenericIterator
from common.helpers import print_once
from common.text import _clean_text, punctuation_map
def normalize_string(s, symbols, punct_map):
"""
Normalizes string.
Example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
"""
labels = set(symbols)
try:
text = _clean_text(s, ["english_cleaners"], punct_map).strip()
return ''.join([tok for tok in text if all(t in labels for t in tok)])
    except Exception as e:
        print_once(f"WARNING: Normalizing failed: {s} {e}")
        # Fall back to the raw transcript instead of implicitly returning None
        return s
class DaliIterator(object):
"""Returns batches of data.
Batches are in the form:
(preprocessed_signal, preprocessed_signal_length, transcript,
transcript_length)
This iterator is not meant to be the entry point to a Dali pipeline.
Use DataLoader instead.
"""
def __init__(self, dali_pipelines, transcripts, symbols, batch_size,
reader_name, train_iterator: bool):
self.transcripts = transcripts
self.symbols = symbols
self.batch_size = batch_size
        # In the train pipeline shard_size is set to be divisible by
        # batch_size, so the DROP last-batch policy does not lose any samples
self.dali_it = DALIGenericIterator(
dali_pipelines,
["audio", "label", "audio_shape"],
reader_name=reader_name,
dynamic_shape=True,
auto_reset=True,
last_batch_policy=LastBatchPolicy.DROP)
@staticmethod
def _str2list(s: str):
"""
        Returns a list of floats that represents the given string.
        '0.' denotes separator
        '1.' denotes 'a'
        '27.' denotes "'"
        Assumes that the string is lower case.
        """
        out = []
        for c in s:
            if c == "'":
                out.append(27.)
            else:
                out.append(max(0., ord(c) - 96.))
        return out
@staticmethod
def _pad_lists(lists: list, pad_val=0):
"""
        Pads lists in place so that all have the same size.
        Returns a list with the actual sizes of the corresponding input lists.
"""
max_length = 0
sizes = []
for li in lists:
sizes.append(len(li))
            max_length = max(max_length, len(li))
for li in lists:
li += [pad_val] * (max_length - len(li))
return sizes
def _gen_transcripts(self, labels, normalize_transcripts: bool = True):
"""
Generate transcripts in format expected by NN
"""
if normalize_transcripts:
lists = [
self._str2list(normalize_string(self.transcripts[lab.item()],
self.symbols, punctuation_map(self.symbols)))
for lab in labels]
else:
lists = [self._str2list(self.transcripts[lab.item()])
for lab in labels]
sizes = self._pad_lists(lists)
return (torch.tensor(lists).cuda(),
torch.tensor(sizes, dtype=torch.int32).cuda())
def __next__(self):
data = self.dali_it.__next__()
transcripts, transcripts_lengths = self._gen_transcripts(
data[0]["label"])
return (data[0]["audio"], data[0]["audio_shape"][:, 1], transcripts,
transcripts_lengths)
def next(self):
return self.__next__()
def __iter__(self):
return self
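# Worked example for the static helpers above (a sketch):
# DaliIterator._str2list('cab') == [3., 1., 2.] and
# DaliIterator._str2list("a'b") == [1., 27., 2.];
# DaliIterator._pad_lists([[3., 1., 2.], [1.]]) pads the shorter list in
# place to [1., 0, 0] and returns the original sizes [3, 1].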
# TODO: refactor
class SyntheticDataIterator(object):
def __init__(self, batch_size, nfeatures, feat_min=-5., feat_max=0.,
txt_min=0., txt_max=23., feat_lens_max=1760, txt_lens_max=231,
regenerate=False):
"""
Args:
batch_size
nfeatures: number of features for melfbanks
feat_min: minimum value in `feat` tensor, used for randomization
feat_max: maximum value in `feat` tensor, used for randomization
txt_min: minimum value in `txt` tensor, used for randomization
            txt_max: maximum value in `txt` tensor, used for randomization
            feat_lens_max: maximum length of the time dimension of `feat`
            txt_lens_max: maximum length of the `txt` tensor
            regenerate: If True, regenerate random tensors for every iterator
                step. If False, generate them only at start.
"""
self.batch_size = batch_size
self.nfeatures = nfeatures
self.feat_min = feat_min
self.feat_max = feat_max
self.feat_lens_max = feat_lens_max
self.txt_min = txt_min
self.txt_max = txt_max
self.txt_lens_max = txt_lens_max
self.regenerate = regenerate
if not self.regenerate:
(self.feat, self.feat_lens, self.txt, self.txt_lens
) = self._generate_sample()
def _generate_sample(self):
feat = ((self.feat_max - self.feat_min)
* np.random.random_sample(
(self.batch_size, self.nfeatures, self.feat_lens_max))
+ self.feat_min)
feat_lens = np.random.randint(0, int(self.feat_lens_max) - 1,
size=self.batch_size)
txt = (self.txt_max - self.txt_min) * np.random.random_sample(
(self.batch_size, self.txt_lens_max)) + self.txt_min
txt_lens = np.random.randint(0, int(self.txt_lens_max) - 1,
size=self.batch_size)
return (torch.Tensor(feat).cuda(),
torch.Tensor(feat_lens).cuda(),
torch.Tensor(txt).cuda(),
torch.Tensor(txt_lens).cuda())
def __next__(self):
if self.regenerate:
return self._generate_sample()
return self.feat, self.feat_lens, self.txt, self.txt_lens
def next(self):
return self.__next__()
def __iter__(self):
return self
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/dali/iterator.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import math
import multiprocessing
import numpy as np
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import torch
import torch.distributed as dist
def _interleave_lists(*lists):
"""
[*, **, ***], [1, 2, 3], [a, b, c] -> [*, 1, a, **, 2, b, ***, 3, c]
Returns:
iterator over interleaved list
"""
assert all((len(lists[0]) == len(test_l) for test_l in lists)), \
"All lists have to have the same length"
return itertools.chain(*zip(*lists))
def _generate_cutouts(mask_params, nfeatures):
"""
    Generates anchors and shapes of the cutout regions.
    A single call generates one batch of data.
    The output shall be passed to DALI's Erase operator.
    Returns:
        anchors = [f0, t0, f1, t1, ...]
        shapes = [f0w, t0h, f1w, t1h, ...]
"""
MAX_TIME_DIMENSION = 20 * 16000
freq_anchors = np.random.random(mask_params['freq_num_regions'])
time_anchors = np.random.random(mask_params['time_num_regions'])
both_anchors_freq = np.random.random(mask_params['both_num_regions'])
both_anchors_time = np.random.random(mask_params['both_num_regions'])
anchors = []
for anch in freq_anchors:
anchors.extend([anch, 0])
for anch in time_anchors:
anchors.extend([0, anch])
for t, f in zip(both_anchors_time, both_anchors_freq):
anchors.extend([f, t])
shapes = []
shapes.extend(
_interleave_lists(
np.random.randint(mask_params['freq_min'],
mask_params['freq_max'] + 1,
mask_params['freq_num_regions']),
            # XXX: Here, the time dimension of the spectrogram shall be
            # passed. However, in DALI an ArgumentInput can't come from
            # the GPU, so we pass the maximum possible extent and rely on
            # Erase (the masking operator) to clip it to the actual shape.
[int(MAX_TIME_DIMENSION)] * mask_params['freq_num_regions']
)
)
shapes.extend(
_interleave_lists(
[nfeatures] * mask_params['time_num_regions'],
np.random.randint(mask_params['time_min'],
mask_params['time_max'] + 1,
mask_params['time_num_regions'])
)
)
shapes.extend(
_interleave_lists(
np.random.randint(mask_params['both_min_freq'],
mask_params['both_max_freq'] + 1,
mask_params['both_num_regions']),
np.random.randint(mask_params['both_min_time'],
mask_params['both_max_time'] + 1,
mask_params['both_num_regions'])
)
)
return anchors, shapes
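# A minimal usage sketch (hypothetical parameters): a single frequency mask
# of up to 20 mel bins on an 80-filter spectrogram, no time or combined masks:
#
#   anchors, shapes = _generate_cutouts(
#       {'freq_num_regions': 1, 'freq_min': 0, 'freq_max': 20,
#        'time_num_regions': 0, 'time_min': 0, 'time_max': 0,
#        'both_num_regions': 0, 'both_min_time': 0, 'both_max_time': 0,
#        'both_min_freq': 0, 'both_max_freq': 0},
#       nfeatures=80)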
def _tuples2list(tuples: list):
"""
[(a, b), (c, d)] -> [[a, c], [b, d]]
"""
return map(list, zip(*tuples))
def _dali_init_log(args: dict):
if not dist.is_initialized() or dist.get_rank() == 0:
max_len = max([len(ii) for ii in args.keys()])
fmt_string = '\t%' + str(max_len) + 's : %s'
print('Initializing DALI with parameters:')
for keyPair in sorted(args.items()):
print(fmt_string % keyPair)
@dali.pipeline_def
def dali_asr_pipeline(train_pipeline, # True if training, False if validation
file_root,
file_list,
sample_rate,
silence_threshold,
resample_range,
discrete_resample_range,
window_size,
window_stride,
nfeatures,
nfft,
frame_splicing_factor,
dither_coeff,
pad_align,
preemph_coeff,
do_spectrogram_masking=False,
cutouts_generator=None,
shard_id=0,
n_shards=1,
preprocessing_device="gpu"):
do_remove_silence = silence_threshold is not None
def _div_ceil(dividend, divisor):
return (dividend + (divisor - 1)) // divisor
encoded, label = fn.readers.file(
device="cpu", name="file_reader", file_root=file_root,
file_list=file_list, shard_id=shard_id, num_shards=n_shards,
shuffle_after_epoch=train_pipeline)
speed_perturbation_coeffs = None
if resample_range is not None:
if discrete_resample_range:
values = [resample_range[0], 1.0, resample_range[1]]
speed_perturbation_coeffs = fn.random.uniform(device="cpu",
values=values)
else:
speed_perturbation_coeffs = fn.random.uniform(device="cpu",
range=resample_range)
if train_pipeline and speed_perturbation_coeffs is not None:
dec_sample_rate_arg = speed_perturbation_coeffs * sample_rate
elif resample_range is None:
dec_sample_rate_arg = sample_rate
else:
dec_sample_rate_arg = None
audio, _ = fn.decoders.audio(encoded, sample_rate=dec_sample_rate_arg,
dtype=types.FLOAT, downmix=True)
if do_remove_silence:
begin, length = fn.nonsilent_region(audio, cutoff_db=silence_threshold)
audio = fn.slice(audio, begin, length, axes=[0])
# Max duration drop is performed at DataLayer stage
if preprocessing_device == "gpu":
audio = audio.gpu()
if dither_coeff != 0.:
audio = audio + fn.random.normal(audio) * dither_coeff
audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff)
spec = fn.spectrogram(audio, nfft=nfft,
window_length=window_size * sample_rate,
window_step=window_stride * sample_rate)
mel_spec = fn.mel_filter_bank(spec, sample_rate=sample_rate,
nfilter=nfeatures, normalize=True)
log_features = fn.to_decibels(mel_spec, multiplier=np.log(10),
reference=1.0, cutoff_db=math.log(1e-20))
log_features_len = fn.shapes(log_features)
if frame_splicing_factor != 1:
log_features_len = _div_ceil(log_features_len, frame_splicing_factor)
log_features = fn.normalize(log_features, axes=[1])
log_features = fn.pad(log_features, axes=[1], fill_value=0, align=pad_align)
if train_pipeline and do_spectrogram_masking:
anchors, shapes = fn.external_source(source=cutouts_generator,
num_outputs=2, cycle=True)
log_features = fn.erase(log_features, anchor=anchors, shape=shapes,
axes=[0, 1], fill_value=0,
normalized_anchor=True)
# When modifying DALI pipeline returns, make sure you update `output_map`
# in DALIGenericIterator invocation
return log_features.gpu(), label.gpu(), log_features_len.gpu()
def make_dali_asr_pipeline(train_pipeline: bool, device_id, batch_size,
file_root: str, file_list: str, config_data: dict,
config_features: dict, device_type: str = "gpu",
do_resampling: bool = True,
num_cpu_threads: int = multiprocessing.cpu_count()):
max_duration = config_data['max_duration']
sample_rate = config_data['sample_rate']
silence_threshold = -60 if config_data['trim_silence'] else None
    # TODO Take into account resampling probability
# TODO config_features['speed_perturbation']['p']
if do_resampling and config_data['speed_perturbation'] is not None:
resample_range = [config_data['speed_perturbation']['min_rate'],
config_data['speed_perturbation']['max_rate']]
discrete_resample_range = config_data['speed_perturbation']['discrete']
else:
resample_range = None
discrete_resample_range = False
window_size = config_features['window_size']
window_stride = config_features['window_stride']
nfeatures = config_features['n_filt']
nfft = config_features['n_fft']
frame_splicing_factor = config_features['frame_splicing']
dither_coeff = config_features['dither']
pad_align = config_features['pad_align']
pad_to_max_duration = config_features['pad_to_max_duration']
assert not pad_to_max_duration, \
"Padding to max duration currently not supported in DALI"
preemph_coeff = .97
config_spec = config_features['spec_augment']
if config_spec is not None:
mask_time_num_regions = config_spec['time_masks']
mask_time_min = config_spec['min_time']
mask_time_max = config_spec['max_time']
mask_freq_num_regions = config_spec['freq_masks']
mask_freq_min = config_spec['min_freq']
mask_freq_max = config_spec['max_freq']
else:
mask_time_num_regions = 0
mask_time_min = 0
mask_time_max = 0
mask_freq_num_regions = 0
mask_freq_min = 0
mask_freq_max = 0
config_cutout = config_features['cutout_augment']
if config_cutout is not None:
mask_both_num_regions = config_cutout['masks']
mask_both_min_time = config_cutout['min_time']
mask_both_max_time = config_cutout['max_time']
mask_both_min_freq = config_cutout['min_freq']
mask_both_max_freq = config_cutout['max_freq']
else:
mask_both_num_regions = 0
mask_both_min_time = 0
mask_both_max_time = 0
mask_both_min_freq = 0
mask_both_max_freq = 0
do_spectrogram_masking = \
mask_time_num_regions > 0 or mask_freq_num_regions > 0 or \
mask_both_num_regions > 0
do_remove_silence = silence_threshold is not None
    # Remove the bulky config dicts so they don't clutter the DALI
    # parameter log emitted from locals() below
    del config_spec
    del config_cutout
    del config_data
    del config_features
_dali_init_log(locals())
mask_params = {
'time_num_regions': mask_time_num_regions,
'time_min': mask_time_min,
'time_max': mask_time_max,
'freq_num_regions': mask_freq_num_regions,
'freq_min': mask_freq_min,
'freq_max': mask_freq_max,
'both_num_regions': mask_both_num_regions,
'both_min_time': mask_both_min_time,
'both_max_time': mask_both_max_time,
'both_min_freq': mask_both_min_freq,
'both_max_freq': mask_both_max_freq,
}
def _cutouts_generator():
"""
Generator, that wraps cutouts creation in order to randomize inputs
and allow passing them to DALI's ExternalSource operator
"""
[anchors, shapes] = _tuples2list(
[_generate_cutouts(mask_params, nfeatures)
for _ in range(batch_size)])
yield (np.array(anchors, dtype=np.float32),
np.array(shapes, dtype=np.float32))
cutouts_gen = _cutouts_generator if do_spectrogram_masking else None
if torch.distributed.is_initialized():
shard_id = torch.distributed.get_rank()
n_shards = torch.distributed.get_world_size()
else:
shard_id = 0
n_shards = 1
preprocessing_device = device_type.lower()
    assert preprocessing_device in ("cpu", "gpu"), \
        "Incorrect preprocessing device. Please choose either 'cpu' or 'gpu'"
pipe = dali_asr_pipeline(
train_pipeline=train_pipeline,
file_root=file_root,
file_list=file_list,
sample_rate=sample_rate,
silence_threshold=silence_threshold,
resample_range=resample_range,
discrete_resample_range=discrete_resample_range,
window_size=window_size,
window_stride=window_stride,
nfeatures=nfeatures,
nfft=nfft,
frame_splicing_factor=frame_splicing_factor,
dither_coeff=dither_coeff,
pad_align=pad_align,
preemph_coeff=preemph_coeff,
do_spectrogram_masking=do_spectrogram_masking,
cutouts_generator=cutouts_gen,
shard_id=shard_id,
n_shards=n_shards,
preprocessing_device=preprocessing_device,
batch_size=batch_size,
num_threads=num_cpu_threads,
device_id=device_id
)
return pipe
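# A minimal usage sketch (a sketch only; config_data and config_features are
# dicts normally produced by the config module, and the file list is the one
# written by the DataLayer):
#
#   pipe = make_dali_asr_pipeline(
#       train_pipeline=True, device_id=0, batch_size=16,
#       file_root='/datasets/LibriSpeech',
#       file_list='/tmp/asr_dali.file_list.<hash>',
#       config_data=config_data, config_features=config_features)
#   pipe.build()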
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/common/dali/pipeline.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
from ast import literal_eval
from contextlib import suppress
from numbers import Number
import yaml
from common.audio import GainPerturbation, ShiftPerturbation, SpeedPerturbation
from common.dataset import AudioDataset
from common.features import (CutoutAugment, FilterbankFeatures, SpecAugment)
from quartznet.model import JasperDecoderForCTC, JasperBlock, JasperEncoder
def default_args(klass):
sig = inspect.signature(klass.__init__)
return {k: v.default for k, v in sig.parameters.items() if k != 'self'}
def load(fpath):
    with open(fpath, 'r') as f:
        cfg = yaml.safe_load(f)
    # Reload to deep copy shallow copies, which were made with yaml anchors
    yaml.Dumper.ignore_aliases = lambda *args: True
    cfg = yaml.safe_load(yaml.dump(cfg))
    return cfg
def validate_and_fill(klass, user_conf, ignore_unk=[], optional=[]):
conf = default_args(klass)
for k, v in user_conf.items():
assert k in conf or k in ignore_unk, f'Unknown param {k} for {klass}'
conf[k] = v
# Keep only mandatory or optional-nonempty
conf = {k: v for k, v in conf.items()
if k not in optional or v is not inspect.Parameter.empty}
# Validate
for k, v in conf.items():
assert v is not inspect.Parameter.empty, \
f'Value for {k} not specified for {klass}'
return conf
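# A minimal usage sketch (hypothetical class): given
#
#   class Foo:
#       def __init__(self, a, b=1):
#           ...
#
# validate_and_fill(Foo, {'a': 2}) returns {'a': 2, 'b': 1}, while omitting
# the mandatory 'a' or passing an unknown key raises an AssertionError.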
def input(conf_yaml, split='train'):
conf = copy.deepcopy(conf_yaml[f'input_{split}'])
conf_dataset = conf.pop('audio_dataset')
conf_features = conf.pop('filterbank_features')
# Validate known inner classes
inner_classes = [
(conf_dataset, 'speed_perturbation', SpeedPerturbation),
(conf_dataset, 'gain_perturbation', GainPerturbation),
(conf_dataset, 'shift_perturbation', ShiftPerturbation),
(conf_features, 'spec_augment', SpecAugment),
(conf_features, 'cutout_augment', CutoutAugment),
]
for conf_tgt, key, klass in inner_classes:
if key in conf_tgt:
conf_tgt[key] = validate_and_fill(klass, conf_tgt[key])
for k in conf:
raise ValueError(f'Unknown key {k}')
# Validate outer classes
conf_dataset = validate_and_fill(
AudioDataset, conf_dataset,
optional=['data_dir', 'labels', 'manifest_fpaths'])
# klass = feature_class(conf_features['feature_type'])
# conf_features = validate_and_fill(
# klass, conf_features, ignore_unk=['feature_type'])
conf_features = validate_and_fill(
FilterbankFeatures, conf_features) # , ignore_unk=['feature_type'])
# Check params shared between classes
shared = ['sample_rate', 'max_duration', 'pad_to_max_duration']
for sh in shared:
assert conf_dataset[sh] == conf_features[sh], (
f'{sh} should match in Dataset and FeatureProcessor: '
f'{conf_dataset[sh]}, {conf_features[sh]}')
return conf_dataset, conf_features
def encoder(conf):
"""Validate config for JasperEncoder and subsequent JasperBlocks"""
# Validate, but don't overwrite with defaults
for blk in conf['quartznet']['encoder']['blocks']:
validate_and_fill(JasperBlock, blk, optional=['infilters'],
ignore_unk=['residual_dense'])
return validate_and_fill(JasperEncoder, conf['quartznet']['encoder'])
def decoder(conf, n_classes):
decoder_kw = {'n_classes': n_classes, **conf['quartznet']['decoder']}
return validate_and_fill(JasperDecoderForCTC, decoder_kw)
def apply_config_overrides(conf, args):
if args.override_config is None:
return
for override_key_val in args.override_config:
key, val = override_key_val.split('=')
with suppress(TypeError, ValueError):
val = literal_eval(val)
apply_nested_config_override(conf, key, val)
def apply_nested_config_override(conf, key_str, val):
fields = key_str.split('.')
for f in fields[:-1]:
conf = conf[f]
f = fields[-1]
assert (f not in conf
or type(val) is type(conf[f])
or (isinstance(val, Number) and isinstance(conf[f], Number)))
conf[f] = val
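# A minimal usage sketch: with
#   conf = {'quartznet': {'encoder': {'activation': 'relu'}}}
# apply_nested_config_override(conf, 'quartznet.encoder.activation', 'selu')
# replaces the leaf value in place; overriding it with a different type
# (e.g. a list) trips the assertion above.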
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/quartznet/config.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import filter_warnings
activations = {
"hardtanh": nn.Hardtanh,
"relu": nn.ReLU,
"selu": nn.SELU,
}
def init_weights(m, mode='xavier_uniform'):
if type(m) == nn.Conv1d or type(m) == MaskedConv1d:
if mode == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight, gain=1.0)
elif mode == 'xavier_normal':
nn.init.xavier_normal_(m.weight, gain=1.0)
elif mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
else:
raise ValueError("Unknown Initialization mode: {0}".format(mode))
elif type(m) == nn.BatchNorm1d:
if m.track_running_stats:
m.running_mean.zero_()
m.running_var.fill_(1)
m.num_batches_tracked.zero_()
if m.affine:
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def compute_new_kernel_size(kernel_size, kernel_width):
new_kernel_size = max(int(kernel_size * kernel_width), 1)
# If kernel is even shape, round up to make it odd
if new_kernel_size % 2 == 0:
new_kernel_size += 1
return new_kernel_size
def get_same_padding(kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
return (kernel_size // 2) * dilation
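# Worked example: get_same_padding(kernel_size=33, stride=1, dilation=1)
# returns (33 // 2) * 1 = 16, which preserves the input length for a
# stride-1 convolution.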
class GroupShuffle(nn.Module):
def __init__(self, groups, channels):
super(GroupShuffle, self).__init__()
self.groups = groups
self.channels_per_group = channels // groups
def forward(self, x):
sh = x.shape
x = x.view(-1, self.groups, self.channels_per_group, sh[-1])
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(-1, self.groups * self.channels_per_group, sh[-1])
return x
class MaskedConv1d(nn.Conv1d):
"""1D convolution with sequence masking
"""
__constants__ = ["masked"]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, use_mask=True,
heads=-1):
# Jasper refactor compat
assert heads == -1 # Unsupported
masked = use_mask
super(MaskedConv1d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.masked = masked
def get_seq_len(self, lens):
pad, ks = self.padding[0], self.kernel_size[0]
return torch.div(lens + 2 * pad - self.dilation[0] * (ks - 1) - 1,
self.stride[0], rounding_mode='trunc') + 1
def forward(self, x, x_lens=None):
if self.masked:
max_len = x.size(2)
idxs = torch.arange(max_len, dtype=x_lens.dtype, device=x.device)
mask = idxs.expand(x_lens.size(0), max_len) >= x_lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0)
x_lens = self.get_seq_len(x_lens)
return super(MaskedConv1d, self).forward(x), x_lens
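# Worked example for MaskedConv1d.get_seq_len: with kernel_size=33,
# padding=16, stride=2 and dilation=1, an input of length 100 maps to
# (100 + 2*16 - 1*(33 - 1) - 1) // 2 + 1 = 99 // 2 + 1 = 50 output frames.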
class JasperBlock(nn.Module):
__constants__ = ["conv_mask", "separable", "res", "mconv"]
def __init__(self, infilters, filters, repeat=3, kernel_size=11,
kernel_size_factor=1, stride=1, dilation=1, padding='same',
dropout=0.2, activation=None, residual=True, groups=1,
separable=False, heads=-1, normalization="batch",
norm_groups=1, residual_panes=[], use_conv_masks=False):
super(JasperBlock, self).__init__()
# Fix params being passed as list, but default to ints
wrap = lambda v: [v] if type(v) is int else v
kernel_size = wrap(kernel_size)
dilation = wrap(dilation)
padding = wrap(padding)
stride = wrap(stride)
if padding != "same":
raise ValueError("currently only 'same' padding is supported")
kernel_size_factor = float(kernel_size_factor)
if type(kernel_size) in (list, tuple):
kernel_size = [compute_new_kernel_size(k, kernel_size_factor)
for k in kernel_size]
else:
kernel_size = compute_new_kernel_size(kernel_size,
kernel_size_factor)
padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
self.conv_mask = use_conv_masks
self.separable = separable
infilters_loop = infilters
conv = nn.ModuleList()
for _ in range(repeat - 1):
conv.extend(
self._get_conv_bn_layer(
infilters_loop, filters, kernel_size=kernel_size,
stride=stride, dilation=dilation, padding=padding_val,
groups=groups, heads=heads, separable=separable,
normalization=normalization, norm_groups=norm_groups)
)
conv.extend(self._get_act_dropout_layer(drop_prob=dropout,
activation=activation))
infilters_loop = filters
conv.extend(
self._get_conv_bn_layer(
infilters_loop, filters, kernel_size=kernel_size, stride=stride,
dilation=dilation, padding=padding_val, groups=groups,
heads=heads, separable=separable, normalization=normalization,
norm_groups=norm_groups)
)
self.mconv = conv
res_panes = residual_panes.copy()
self.dense_residual = residual
if residual:
res_list = nn.ModuleList()
if len(residual_panes) == 0:
res_panes = [infilters]
self.dense_residual = False
for ip in res_panes:
res_list.append(nn.ModuleList(
self._get_conv_bn_layer(ip, filters, kernel_size=1,
normalization=normalization,
norm_groups=norm_groups, stride=[1])
))
self.res = res_list
else:
self.res = None
self.mout = nn.Sequential(*self._get_act_dropout_layer(
drop_prob=dropout, activation=activation))
def _get_conv(self, in_channels, out_channels, kernel_size=11, stride=1,
dilation=1, padding=0, bias=False, groups=1, heads=-1,
separable=False):
kw = {'in_channels': in_channels, 'out_channels': out_channels,
'kernel_size': kernel_size, 'stride': stride, 'dilation': dilation,
'padding': padding, 'bias': bias, 'groups': groups}
if self.conv_mask:
return MaskedConv1d(**kw, heads=heads, use_mask=self.conv_mask)
else:
return nn.Conv1d(**kw)
def _get_conv_bn_layer(self, in_channels, out_channels, kernel_size=11,
stride=1, dilation=1, padding=0, bias=False,
groups=1, heads=-1, separable=False,
normalization="batch", norm_groups=1):
if norm_groups == -1:
norm_groups = out_channels
if separable:
layers = [
self._get_conv(in_channels, in_channels, kernel_size,
stride=stride, dilation=dilation, padding=padding,
bias=bias, groups=in_channels, heads=heads),
self._get_conv(in_channels, out_channels, kernel_size=1,
stride=1, dilation=1, padding=0, bias=bias,
groups=groups),
]
else:
layers = [
self._get_conv(in_channels, out_channels, kernel_size,
stride=stride, dilation=dilation,
padding=padding, bias=bias, groups=groups)
]
if normalization == "group":
layers.append(nn.GroupNorm(num_groups=norm_groups,
num_channels=out_channels))
elif normalization == "instance":
layers.append(nn.GroupNorm(num_groups=out_channels,
num_channels=out_channels))
elif normalization == "layer":
layers.append(nn.GroupNorm(num_groups=1, num_channels=out_channels))
elif normalization == "batch":
layers.append(nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1))
else:
raise ValueError(
f"Normalization method ({normalization}) does not match"
f" one of [batch, layer, group, instance]."
)
if groups > 1:
layers.append(GroupShuffle(groups, out_channels))
return layers
def _get_act_dropout_layer(self, drop_prob=0.2, activation=None):
if activation is None:
activation = nn.Hardtanh(min_val=0.0, max_val=20.0)
layers = [activation, nn.Dropout(p=drop_prob)]
return layers
def forward(self, xs, xs_lens=None):
if not self.conv_mask:
xs_lens = 0
# compute forward convolutions
out = xs[-1]
lens = xs_lens
for i, l in enumerate(self.mconv):
# if we're doing masked convolutions, we need to pass in and
# possibly update the sequence lengths
# if (i % 4) == 0 and self.conv_mask:
if isinstance(l, MaskedConv1d):
out, lens = l(out, lens)
else:
out = l(out)
# compute the residuals
if self.res is not None:
for i, layer in enumerate(self.res):
res_out = xs[i]
for j, res_layer in enumerate(layer):
if isinstance(res_layer, MaskedConv1d):
res_out, _ = res_layer(res_out, xs_lens)
else:
res_out = res_layer(res_out)
out = out + res_out
# compute the output
out = self.mout(out)
if self.res is not None and self.dense_residual:
out = xs + [out]
else:
out = [out]
return (out, lens) if self.conv_mask else (out, None)
class JasperEncoder(nn.Module):
__constants__ = ["use_conv_masks"]
def __init__(self, in_feats, activation, frame_splicing=1,
init='xavier_uniform', use_conv_masks=False, blocks=[]):
super(JasperEncoder, self).__init__()
self.use_conv_masks = use_conv_masks
self.layers = nn.ModuleList()
in_feats *= frame_splicing
all_residual_panes = []
for i, blk in enumerate(blocks):
blk['activation'] = activations[activation]()
has_residual_dense = blk.pop('residual_dense', False)
if has_residual_dense:
all_residual_panes += [in_feats]
blk['residual_panes'] = all_residual_panes
else:
blk['residual_panes'] = []
self.layers.append(
JasperBlock(in_feats, use_conv_masks=use_conv_masks, **blk))
in_feats = blk['filters']
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, x, x_lens=None):
out, out_lens = [x], x_lens
for layer in self.layers:
out, out_lens = layer(out, out_lens)
return out, out_lens
class JasperDecoderForCTC(nn.Module):
def __init__(self, in_feats, n_classes, init='xavier_uniform'):
super(JasperDecoderForCTC, self).__init__()
self.layers = nn.Sequential(
nn.Conv1d(in_feats, n_classes, kernel_size=1, bias=True),)
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, enc_out):
out = self.layers(enc_out[-1]).transpose(1, 2)
return F.log_softmax(out, dim=2)
class GreedyCTCDecoder(nn.Module):
@torch.no_grad()
def forward(self, log_probs):
return log_probs.argmax(dim=-1, keepdim=False).int()
class QuartzNet(nn.Module):
def __init__(self, encoder_kw, decoder_kw, transpose_in=False):
super(QuartzNet, self).__init__()
self.transpose_in = transpose_in
self.encoder = JasperEncoder(**encoder_kw)
self.decoder = JasperDecoderForCTC(**decoder_kw)
def forward(self, x, x_lens=None):
if self.encoder.use_conv_masks:
assert x_lens is not None
enc, enc_lens = self.encoder(x, x_lens)
out = self.decoder(enc)
return out, enc_lens
else:
if self.transpose_in:
x = x.transpose(1, 2)
enc, _ = self.encoder(x)
out = self.decoder(enc)
return out # XXX torchscript refuses to output None
# TODO Explicitly add x_lens=None for inference (now x can be a Tensor or tuple)
def infer(self, x):
if self.encoder.use_conv_masks:
return self.forward(x)
else:
ret = self.forward(x[0])
return ret, len(ret)
class CTCLossNM:
def __init__(self, n_classes):
self._criterion = nn.CTCLoss(blank=n_classes-1, reduction='none')
def __call__(self, log_probs, targets, input_length, target_length):
input_length = input_length.long()
target_length = target_length.long()
targets = targets.long()
loss = self._criterion(log_probs.transpose(1, 0), targets,
input_length, target_length)
# note that this is different from reduction = 'mean'
# because we are not dividing by target lengths
return torch.mean(loss)
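# A minimal usage sketch (a sketch only; encoder_kw/decoder_kw normally come
# from quartznet.config, and the tensor shapes below are illustrative):
#
#   model = QuartzNet(encoder_kw, decoder_kw).cuda()
#   log_probs, enc_lens = model(feats, feat_lens)  # feats: (N, n_filt, T)
#   loss = CTCLossNM(n_classes)(log_probs, txt, enc_lens, txt_lens)
#   preds = GreedyCTCDecoder()(log_probs)          # (N, T') int label ids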
| DeepLearningExamples-master | PyTorch/SpeechRecognition/QuartzNet/quartznet/model.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import os
import random
import time
import torch
import numpy as np
import torch.distributed as dist
from contextlib import suppress as empty_context
from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import AudioDataset, get_data_loader
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import (Checkpointer, greedy_wer, num_weights, print_once,
process_evaluation_epoch)
from common.optimizers import AdamW, lr_policy, Novograd
from common.tb_dllogger import flush_log, init_log, log
from common.utils import BenchmarkStats
from jasper import config
from jasper.model import CTCLossNM, GreedyCTCDecoder, Jasper
def parse_args():
parser = argparse.ArgumentParser(description='Jasper')
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', default=400, type=int,
help='Number of epochs for the entire training; influences the lr schedule')
training.add_argument("--warmup_epochs", default=0, type=int,
help='Initial epochs of increasing learning rate')
training.add_argument("--hold_epochs", default=0, type=int,
help='Constant max learning rate epochs after warmup')
training.add_argument('--epochs_this_job', default=0, type=int,
                          help=('Run for a number of epochs with no effect on the lr schedule. '
                                'Useful for re-starting the training.'))
training.add_argument('--cudnn_benchmark', action='store_true', default=True,
help='Enable cudnn benchmark')
training.add_argument('--amp', '--fp16', action='store_true', default=False,
help='Use pytorch native mixed precision training')
training.add_argument('--seed', default=42, type=int, help='Random seed')
training.add_argument('--local_rank', '--local-rank', default=os.getenv('LOCAL_RANK', 0),
type=int, help='GPU id used for distributed training')
training.add_argument('--pre_allocate_range', default=None, type=int, nargs=2,
help='Warmup with batches of length [min, max] before training')
optim = parser.add_argument_group('optimization setup')
optim.add_argument('--batch_size', default=32, type=int,
help='Global batch size')
optim.add_argument('--lr', default=1e-3, type=float,
help='Peak learning rate')
optim.add_argument("--min_lr", default=1e-5, type=float,
help='minimum learning rate')
optim.add_argument("--lr_policy", default='exponential', type=str,
choices=['exponential', 'legacy'], help='lr scheduler')
optim.add_argument("--lr_exp_gamma", default=0.99, type=float,
help='gamma factor for exponential lr scheduler')
optim.add_argument('--weight_decay', default=1e-3, type=float,
help='Weight decay for the optimizer')
optim.add_argument('--grad_accumulation_steps', default=1, type=int,
help='Number of accumulation steps')
optim.add_argument('--optimizer', default='novograd', type=str,
choices=['novograd', 'adamw'], help='Optimization algorithm')
optim.add_argument('--ema', type=float, default=0.0,
help='Discount factor for exp averaging of model weights')
io = parser.add_argument_group('feature and checkpointing setup')
io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
default='gpu', help='Use DALI pipeline for fast data processing')
io.add_argument('--resume', action='store_true',
help='Try to resume from last saved checkpoint.')
io.add_argument('--ckpt', default=None, type=str,
help='Path to a checkpoint for resuming training')
io.add_argument('--save_frequency', default=10, type=int,
help='Checkpoint saving frequency in epochs')
io.add_argument('--keep_milestones', default=[100, 200, 300], type=int, nargs='+',
help='Milestone checkpoints to keep from removing')
io.add_argument('--save_best_from', default=380, type=int,
help='Epoch on which to begin tracking best checkpoint (dev WER)')
io.add_argument('--eval_frequency', default=200, type=int,
help='Number of steps between evaluations on dev set')
io.add_argument('--log_frequency', default=25, type=int,
help='Number of steps between printing training stats')
io.add_argument('--prediction_frequency', default=100, type=int,
help='Number of steps between printing sample decodings')
io.add_argument('--model_config', type=str, required=True,
help='Path of the model configuration file')
io.add_argument('--train_manifests', type=str, required=True, nargs='+',
help='Paths of the training dataset manifest file')
io.add_argument('--val_manifests', type=str, required=True, nargs='+',
help='Paths of the evaluation datasets manifest files')
io.add_argument('--dataset_dir', required=True, type=str,
help='Root dir of dataset')
io.add_argument('--output_dir', type=str, required=True,
help='Directory for logs and checkpoints')
io.add_argument('--log_file', type=str, default=None,
help='Path to save the training logfile.')
io.add_argument('--benchmark_epochs_num', type=int, default=1,
help='Number of epochs accounted in final average throughput.')
io.add_argument('--override_config', type=str, action='append',
help='Overrides a value from a config .yaml.'
' Syntax: `--override_config nested.config.key=val`.')
return parser.parse_args()
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt.true_divide(num_gpus)
def apply_ema(model, ema_model, decay):
if not decay:
return
sd = getattr(model, 'module', model).state_dict()
for k, v in ema_model.state_dict().items():
v.copy_(decay * v + (1 - decay) * sd[k])
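# Worked example for apply_ema: with decay=0.999 every EMA weight moves as
# v = 0.999 * v + 0.001 * w, i.e. roughly a running average of the model
# weights over the last ~1000 optimizer steps.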
@torch.no_grad()
def evaluate(epoch, step, val_loader, val_feat_proc, labels, model,
ema_model, ctc_loss, greedy_decoder, use_amp, use_dali=False):
for model, subset in [(model, 'dev'), (ema_model, 'dev_ema')]:
if model is None:
continue
model.eval()
torch.cuda.synchronize()
start_time = time.time()
agg = {'losses': [], 'preds': [], 'txts': []}
for batch in val_loader:
if use_dali:
# with DALI, the data is already on GPU
feat, feat_lens, txt, txt_lens = batch
if val_feat_proc is not None:
feat, feat_lens = val_feat_proc(feat, feat_lens)
else:
batch = [t.cuda(non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feat, feat_lens = val_feat_proc(audio, audio_lens)
with torch.cuda.amp.autocast(enabled=use_amp):
log_probs, enc_lens = model(feat, feat_lens)
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
pred = greedy_decoder(log_probs)
agg['losses'] += helpers.gather_losses([loss])
agg['preds'] += helpers.gather_predictions([pred], labels)
agg['txts'] += helpers.gather_transcripts([txt], [txt_lens], labels)
wer, loss = process_evaluation_epoch(agg)
torch.cuda.synchronize()
log(() if epoch is None else (epoch,),
step, subset, {'loss': loss, 'wer': 100.0 * wer,
'took': time.time() - start_time})
model.train()
return wer
def main():
args = parse_args()
    assert torch.cuda.is_available()
assert args.prediction_frequency % args.log_frequency == 0
torch.backends.cudnn.benchmark = args.cudnn_benchmark
# set up distributed training
multi_gpu = int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')
world_size = dist.get_world_size()
print_once(f'Distributed training with {world_size} GPUs\n')
else:
world_size = 1
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
init_log(args)
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
symbols = helpers.add_ctc_blank(cfg['labels'])
assert args.grad_accumulation_steps >= 1
assert args.batch_size % args.grad_accumulation_steps == 0
batch_size = args.batch_size // args.grad_accumulation_steps
print_once('Setting up datasets...')
train_dataset_kw, train_features_kw = config.input(cfg, 'train')
val_dataset_kw, val_features_kw = config.input(cfg, 'val')
use_dali = args.dali_device in ('cpu', 'gpu')
if use_dali:
assert train_dataset_kw['ignore_offline_speed_perturbation'], \
"DALI doesn't support offline speed perturbation"
# pad_to_max_duration is not supported by DALI - have simple padders
if train_features_kw['pad_to_max_duration']:
train_feat_proc = BaseFeatures(
pad_align=train_features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=train_features_kw['max_duration'],
sample_rate=train_features_kw['sample_rate'],
window_size=train_features_kw['window_size'],
window_stride=train_features_kw['window_stride'])
train_features_kw['pad_to_max_duration'] = False
else:
train_feat_proc = None
if val_features_kw['pad_to_max_duration']:
val_feat_proc = BaseFeatures(
pad_align=val_features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=val_features_kw['max_duration'],
sample_rate=val_features_kw['sample_rate'],
window_size=val_features_kw['window_size'],
window_stride=val_features_kw['window_stride'])
val_features_kw['pad_to_max_duration'] = False
else:
val_feat_proc = None
train_loader = DaliDataLoader(gpu_id=args.local_rank,
dataset_path=args.dataset_dir,
config_data=train_dataset_kw,
config_features=train_features_kw,
json_names=args.train_manifests,
batch_size=batch_size,
grad_accumulation_steps=args.grad_accumulation_steps,
pipeline_type="train",
device_type=args.dali_device,
symbols=symbols)
val_loader = DaliDataLoader(gpu_id=args.local_rank,
dataset_path=args.dataset_dir,
config_data=val_dataset_kw,
config_features=val_features_kw,
json_names=args.val_manifests,
batch_size=batch_size,
pipeline_type="val",
device_type=args.dali_device,
symbols=symbols)
else:
train_dataset_kw, train_features_kw = config.input(cfg, 'train')
train_dataset = AudioDataset(args.dataset_dir,
args.train_manifests,
symbols,
**train_dataset_kw)
train_loader = get_data_loader(train_dataset,
batch_size,
multi_gpu=multi_gpu,
shuffle=True,
num_workers=4)
train_feat_proc = FilterbankFeatures(**train_features_kw)
val_dataset_kw, val_features_kw = config.input(cfg, 'val')
val_dataset = AudioDataset(args.dataset_dir,
args.val_manifests,
symbols,
**val_dataset_kw)
val_loader = get_data_loader(val_dataset,
batch_size,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=4,
drop_last=False)
val_feat_proc = FilterbankFeatures(**val_features_kw)
dur = train_dataset.duration / 3600
dur_f = train_dataset.duration_filtered / 3600
nsampl = len(train_dataset)
print_once(f'Training samples: {nsampl} ({dur:.1f}h, '
f'filtered {dur_f:.1f}h)')
if train_feat_proc is not None:
train_feat_proc.cuda()
if val_feat_proc is not None:
val_feat_proc.cuda()
steps_per_epoch = len(train_loader) // args.grad_accumulation_steps
# set up the model
model = Jasper(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
model.cuda()
ctc_loss = CTCLossNM(n_classes=len(symbols))
greedy_decoder = GreedyCTCDecoder()
print_once(f'Model size: {num_weights(model) / 10**6:.1f}M params\n')
# optimization
kw = {'lr': args.lr, 'weight_decay': args.weight_decay}
if args.optimizer == "novograd":
optimizer = Novograd(model.parameters(), **kw)
elif args.optimizer == "adamw":
optimizer = AdamW(model.parameters(), **kw)
else:
raise ValueError(f'Invalid optimizer "{args.optimizer}"')
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
adjust_lr = lambda step, epoch, optimizer: lr_policy(
step, epoch, args.lr, optimizer, steps_per_epoch=steps_per_epoch,
warmup_epochs=args.warmup_epochs, hold_epochs=args.hold_epochs,
num_epochs=args.epochs, policy=args.lr_policy, min_lr=args.min_lr,
exp_gamma=args.lr_exp_gamma)
if args.ema > 0:
ema_model = copy.deepcopy(model)
else:
ema_model = None
if multi_gpu:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank)
# load checkpoint
meta = {'best_wer': 10**6, 'start_epoch': 0}
checkpointer = Checkpointer(args.output_dir, 'Jasper',
args.keep_milestones)
if args.resume:
args.ckpt = checkpointer.last_checkpoint() or args.ckpt
if args.ckpt is not None:
checkpointer.load(args.ckpt, model, ema_model, optimizer, scaler, meta)
start_epoch = meta['start_epoch']
best_wer = meta['best_wer']
epoch = 1
step = start_epoch * steps_per_epoch + 1
# training loop
model.train()
# pre-allocate
if args.pre_allocate_range is not None:
n_feats = train_features_kw['n_filt']
pad_align = train_features_kw['pad_align']
a, b = args.pre_allocate_range
for n_frames in range(a, b + pad_align, pad_align):
print_once(f'Pre-allocation ({batch_size}x{n_feats}x{n_frames})...')
feat = torch.randn(batch_size, n_feats, n_frames, device='cuda')
feat_lens = torch.ones(batch_size, device='cuda').fill_(n_frames)
txt = torch.randint(high=len(symbols)-1, size=(batch_size, 100),
device='cuda')
txt_lens = torch.ones(batch_size, device='cuda').fill_(100)
with torch.cuda.amp.autocast(enabled=args.amp):
log_probs, enc_lens = model(feat, feat_lens)
del feat
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
loss.backward()
model.zero_grad()
torch.cuda.empty_cache()
bmark_stats = BenchmarkStats()
for epoch in range(start_epoch + 1, args.epochs + 1):
if multi_gpu and not use_dali:
train_loader.sampler.set_epoch(epoch)
torch.cuda.synchronize()
epoch_start_time = time.time()
epoch_utts = 0
epoch_loss = 0
accumulated_batches = 0
for batch in train_loader:
if accumulated_batches == 0:
step_loss = 0
step_utts = 0
step_start_time = time.time()
if use_dali:
# with DALI, the data is already on GPU
feat, feat_lens, txt, txt_lens = batch
if train_feat_proc is not None:
feat, feat_lens = train_feat_proc(feat, feat_lens)
else:
batch = [t.cuda(non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feat, feat_lens = train_feat_proc(audio, audio_lens)
# Use context manager to prevent redundant accumulation of gradients
if (multi_gpu and accumulated_batches + 1 < args.grad_accumulation_steps):
ctx = model.no_sync()
else:
ctx = empty_context()
with ctx:
with torch.cuda.amp.autocast(enabled=args.amp):
log_probs, enc_lens = model(feat, feat_lens)
loss = ctc_loss(log_probs, txt, enc_lens, txt_lens)
loss /= args.grad_accumulation_steps
if multi_gpu:
reduced_loss = reduce_tensor(loss.data, world_size)
else:
reduced_loss = loss
if torch.isnan(reduced_loss).any():
                    print_once('WARNING: loss is NaN; skipping update')
continue
else:
step_loss += reduced_loss.item()
step_utts += batch[0].size(0) * world_size
epoch_utts += batch[0].size(0) * world_size
accumulated_batches += 1
scaler.scale(loss).backward()
if accumulated_batches % args.grad_accumulation_steps == 0:
epoch_loss += step_loss
scaler.step(optimizer)
scaler.update()
adjust_lr(step, epoch, optimizer)
optimizer.zero_grad()
apply_ema(model, ema_model, args.ema)
if step % args.log_frequency == 0:
preds = greedy_decoder(log_probs)
wer, pred_utt, ref = greedy_wer(preds, txt, txt_lens, symbols)
if step % args.prediction_frequency == 0:
print_once(f' Decoded: {pred_utt[:90]}')
print_once(f' Reference: {ref[:90]}')
step_time = time.time() - step_start_time
log((epoch, step % steps_per_epoch or steps_per_epoch, steps_per_epoch),
step, 'train',
{'loss': step_loss,
'wer': 100.0 * wer,
'throughput': step_utts / step_time,
'took': step_time,
'lrate': optimizer.param_groups[0]['lr']})
step_start_time = time.time()
if step % args.eval_frequency == 0:
wer = evaluate(epoch, step, val_loader, val_feat_proc,
symbols, model, ema_model, ctc_loss,
greedy_decoder, args.amp, use_dali)
if wer < best_wer and epoch >= args.save_best_from:
checkpointer.save(model, ema_model, optimizer, scaler,
epoch, step, best_wer, is_best=True)
best_wer = wer
step += 1
accumulated_batches = 0
# end of step
            # The DALI iterator needs to be exhausted;
# if not using DALI, simulate drop_last=True with grad accumulation
if not use_dali and step > steps_per_epoch * epoch:
break
torch.cuda.synchronize()
epoch_time = time.time() - epoch_start_time
epoch_loss /= steps_per_epoch
log((epoch,), None, 'train_avg', {'throughput': epoch_utts / epoch_time,
'took': epoch_time,
'loss': epoch_loss})
bmark_stats.update(epoch_utts, epoch_time, epoch_loss)
if epoch % args.save_frequency == 0 or epoch in args.keep_milestones:
checkpointer.save(model, ema_model, optimizer, scaler, epoch, step,
best_wer)
if 0 < args.epochs_this_job <= epoch - start_epoch:
print_once(f'Finished after {args.epochs_this_job} epochs.')
break
# end of epoch
log((), None, 'train_avg', bmark_stats.get(args.benchmark_epochs_num))
evaluate(None, step, val_loader, val_feat_proc, symbols, model,
ema_model, ctc_loss, greedy_decoder, args.amp, use_dali)
if epoch == args.epochs:
checkpointer.save(model, ema_model, optimizer, scaler, epoch, step,
best_wer)
flush_log()
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/train.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import os
import random
import time
from heapq import nlargest
from itertools import chain, repeat
from pathlib import Path
from tqdm import tqdm
import dllogger
import torch
import numpy as np
import torch.distributed as distrib
import torch.nn.functional as F
from apex import amp
from apex.parallel import DistributedDataParallel
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
from jasper import config
from common import helpers
from common.dali.data_loader import DaliDataLoader
from common.dataset import (AudioDataset, FilelistDataset, get_data_loader,
SingleAudioDataset)
from common.features import BaseFeatures, FilterbankFeatures
from common.helpers import print_once, process_evaluation_epoch
from jasper.model import GreedyCTCDecoder, Jasper
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
def get_parser():
parser = argparse.ArgumentParser(description='Jasper')
parser.add_argument('--batch_size', default=16, type=int,
help='Data batch size')
parser.add_argument('--steps', default=0, type=int,
help='Eval this many steps for every worker')
parser.add_argument('--warmup_steps', default=0, type=int,
help='Burn-in period before measuring latencies')
parser.add_argument('--model_config', type=str, required=True,
help='Relative model config path given dataset folder')
parser.add_argument('--dataset_dir', type=str,
help='Absolute path to dataset folder')
parser.add_argument('--val_manifests', type=str, nargs='+',
help='Relative path to evaluation dataset manifest files')
parser.add_argument('--ckpt', default=None, type=str,
help='Path to model checkpoint')
parser.add_argument('--pad_leading', type=int, default=16,
help='Pads every batch with leading zeros '
'to counteract conv shifts of the field of view')
parser.add_argument('--amp', '--fp16', action='store_true',
help='Use FP16 precision')
parser.add_argument('--cudnn_benchmark', action='store_true',
help='Enable cudnn benchmark')
parser.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument("--seed", default=None, type=int, help='Random seed')
parser.add_argument('--local_rank', default=os.getenv('LOCAL_RANK', 0),
type=int, help='GPU id used for distributed training')
io = parser.add_argument_group('feature and checkpointing setup')
io.add_argument('--dali_device', type=str, choices=['none', 'cpu', 'gpu'],
default='gpu', help='Use DALI pipeline for fast data processing')
io.add_argument('--save_predictions', type=str, default=None,
help='Save predictions in text form at this location')
io.add_argument('--save_logits', default=None, type=str,
help='Save output logits under specified path')
io.add_argument('--transcribe_wav', type=str,
help='Path to a single .wav file (16KHz)')
io.add_argument('--transcribe_filelist', type=str,
help='Path to a filelist with one .wav path per line')
io.add_argument('-o', '--output_dir', default='results/',
help='Output folder to save audio (file per phrase)')
io.add_argument('--log_file', type=str, default=None,
help='Path to a DLLogger log file')
io.add_argument('--ema', action='store_true',
help='Load averaged model weights')
io.add_argument('--torchscript', action='store_true',
help='Evaluate with a TorchScripted model')
io.add_argument('--torchscript_export', action='store_true',
help='Export the model with torch.jit to the output_dir')
io.add_argument('--override_config', type=str, action='append',
help='Overrides a value from a config .yaml.'
' Syntax: `--override_config nested.config.key=val`.')
return parser
def durs_to_percentiles(durations, ratios):
durations = np.asarray(durations) * 1000  # in ms
latency = durations[5:]  # drop the first few warm-up measurements
mean_latency = np.mean(latency)
latency_worst = nlargest(math.ceil((1 - min(ratios)) * len(latency)), latency)
latency_ranges = get_percentile(ratios, latency_worst, len(latency))
latency_ranges[0.5] = mean_latency
return latency_ranges
def get_percentile(ratios, arr, nsamples):
res = {}
for a in ratios:
idx = max(int(nsamples * (1 - a)), 0)
res[a] = arr[idx]
return res
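# Illustrative sketch (added for clarity; not called by this script): how the
# two helpers above condense raw per-step durations into the latency summary
# that gets logged below. The sample durations are synthetic.
def _latency_summary_sketch():
    durs = [0.010, 0.011, 0.012, 0.013, 0.020] * 20  # seconds, synthetic
    lat = durs_to_percentiles(durs, ratios=[0.9, 0.95, 0.99])
    # lat[0.5] holds the mean latency in ms; lat[0.99] approximates the p99
    return lat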
def torchscript_export(data_loader, audio_processor, model, greedy_decoder,
output_dir, use_amp, use_conv_masks, model_config, device,
save):
audio_processor.to(device)
for batch in data_loader:
batch = [t.to(device, non_blocking=True) for t in batch]
audio, audio_len, _, _ = batch
feats, feat_lens = audio_processor(audio, audio_len)
break
print("\nExporting featurizer...")
print("\nNOTE: Dithering causes warnings about non-determinism.\n")
ts_feat = torch.jit.trace(audio_processor, (audio, audio_len))
print("\nExporting acoustic model...")
model(feats, feat_lens)
ts_acoustic = torch.jit.trace(model, (feats, feat_lens))
print("\nExporting decoder...")
log_probs = model(feats, feat_lens)
ts_decoder = torch.jit.script(greedy_decoder, log_probs)
print("\nJIT export complete.")
if save:
precision = "fp16" if use_amp else "fp32"
module_name = f'{os.path.basename(model_config)}_{precision}'
ts_feat.save(os.path.join(output_dir, module_name + "_feat.pt"))
ts_acoustic.save(os.path.join(output_dir, module_name + "_acoustic.pt"))
ts_decoder.save(os.path.join(output_dir, module_name + "_decoder.pt"))
return ts_feat, ts_acoustic, ts_decoder
def main():
parser = get_parser()
args = parser.parse_args()
log_fpath = args.log_file or str(Path(args.output_dir, 'nvlog_infer.json'))
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
for k, v in vars(args).items():
    dllogger.log("PARAMETER", {k: v})
for step in ['DNN', 'data+DNN', 'data']:
for c in [0.99, 0.95, 0.9, 0.5]:
cs = 'avg' if c == 0.5 else f'{int(100*c)}%'
dllogger.metadata(f'{step.lower()}_latency_{c}',
{'name': f'{step} latency {cs}',
'format': ':>7.2f', 'unit': 'ms'})
dllogger.metadata(
'eval_wer', {'name': 'WER', 'format': ':>3.2f', 'unit': '%'})
if args.cpu:
device = torch.device('cpu')
else:
assert torch.cuda.is_available()
device = torch.device('cuda')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.seed is not None:
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
random.seed(args.seed + args.local_rank)
# set up distributed training
multi_gpu = not args.cpu and int(os.environ.get('WORLD_SIZE', 1)) > 1
if multi_gpu:
torch.cuda.set_device(args.local_rank)
distrib.init_process_group(backend='nccl', init_method='env://')
print_once(f'Inference with {distrib.get_world_size()} GPUs')
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
symbols = helpers.add_ctc_blank(cfg['labels'])
use_dali = args.dali_device in ('cpu', 'gpu')
dataset_kw, features_kw = config.input(cfg, 'val')
measure_perf = args.steps > 0
# dataset
if args.transcribe_wav or args.transcribe_filelist:
if use_dali:
print("DALI supported only with input .json files; disabling")
use_dali = False
assert not (args.transcribe_wav and args.transcribe_filelist)
if args.transcribe_wav:
dataset = SingleAudioDataset(args.transcribe_wav)
else:
dataset = FilelistDataset(args.transcribe_filelist)
data_loader = get_data_loader(dataset,
batch_size=1,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=0,
drop_last=(True if measure_perf else False))
_, features_kw = config.input(cfg, 'val')
assert not features_kw['pad_to_max_duration']
feat_proc = FilterbankFeatures(**features_kw)
elif use_dali:
# pad_to_max_duration is not supported by DALI; fall back to simple padders
if features_kw['pad_to_max_duration']:
feat_proc = BaseFeatures(
pad_align=features_kw['pad_align'],
pad_to_max_duration=True,
max_duration=features_kw['max_duration'],
sample_rate=features_kw['sample_rate'],
window_size=features_kw['window_size'],
window_stride=features_kw['window_stride'])
features_kw['pad_to_max_duration'] = False
else:
feat_proc = None
data_loader = DaliDataLoader(
gpu_id=args.local_rank or 0,
dataset_path=args.dataset_dir,
config_data=dataset_kw,
config_features=features_kw,
json_names=args.val_manifests,
batch_size=args.batch_size,
pipeline_type=("train" if measure_perf else "val"), # no drop_last
device_type=args.dali_device,
symbols=symbols)
else:
dataset = AudioDataset(args.dataset_dir,
args.val_manifests,
symbols,
**dataset_kw)
data_loader = get_data_loader(dataset,
args.batch_size,
multi_gpu=multi_gpu,
shuffle=False,
num_workers=4,
drop_last=False)
feat_proc = FilterbankFeatures(**features_kw)
model = Jasper(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
if args.ckpt is not None:
print(f'Loading the model from {args.ckpt} ...')
checkpoint = torch.load(args.ckpt, map_location="cpu")
key = 'ema_state_dict' if args.ema else 'state_dict'
state_dict = helpers.convert_v1_state_dict(checkpoint[key])
model.load_state_dict(state_dict, strict=True)
model.to(device)
model.eval()
if feat_proc is not None:
feat_proc.to(device)
feat_proc.eval()
if args.amp:
model = model.half()
if args.torchscript:
greedy_decoder = GreedyCTCDecoder()
feat_proc, model, greedy_decoder = torchscript_export(
data_loader, feat_proc, model, greedy_decoder, args.output_dir,
use_amp=args.amp, use_conv_masks=True, model_config=args.model_config,
device=device, save=args.torchscript_export)
if multi_gpu:
model = DistributedDataParallel(model)
agg = {'txts': [], 'preds': [], 'logits': []}
dur = {'data': [], 'dnn': [], 'data+dnn': []}
looped_loader = chain.from_iterable(repeat(data_loader))
greedy_decoder = GreedyCTCDecoder()
sync = lambda: torch.cuda.synchronize() if device.type == 'cuda' else None
steps = args.steps + args.warmup_steps or len(data_loader)
with torch.no_grad():
for it, batch in enumerate(tqdm(looped_loader, initial=1, total=steps)):
if use_dali:
feats, feat_lens, txt, txt_lens = batch
if feat_proc is not None:
feats, feat_lens = feat_proc(feats, feat_lens)
else:
batch = [t.to(device, non_blocking=True) for t in batch]
audio, audio_lens, txt, txt_lens = batch
feats, feat_lens = feat_proc(audio, audio_lens)
sync()
t1 = time.time()
if args.amp:
feats = feats.half()
feats = F.pad(feats, (args.pad_leading, 0))
feat_lens += args.pad_leading
if model.encoder.use_conv_masks:
log_probs, log_prob_lens = model(feats, feat_lens)
else:
log_probs = model(feats, feat_lens)
preds = greedy_decoder(log_probs)
sync()
t2 = time.time()
# burn-in period; wait for a new loader due to num_workers
if it >= 1 and (args.steps == 0 or it >= args.warmup_steps):
dur['data'].append(t1 - t0)
dur['dnn'].append(t2 - t1)
dur['data+dnn'].append(t2 - t0)
if txt is not None:
agg['txts'] += helpers.gather_transcripts([txt], [txt_lens],
symbols)
agg['preds'] += helpers.gather_predictions([preds], symbols)
agg['logits'].append(log_probs)
if it + 1 == steps:
break
sync()
t0 = time.time()
# communicate the results
if args.transcribe_wav:
for idx, p in enumerate(agg['preds']):
print_once(f'Prediction {idx+1: >3}: {p}')
elif args.transcribe_filelist:
pass
elif not multi_gpu or distrib.get_rank() == 0:
wer, _ = process_evaluation_epoch(agg)
dllogger.log(step=(), data={'eval_wer': 100 * wer})
if args.save_predictions:
with open(args.save_predictions, 'w') as f:
f.write('\n'.join(agg['preds']))
if args.save_logits:
logits = torch.cat(agg['logits'], dim=0).cpu()
torch.save(logits, args.save_logits)
# report timings
if len(dur['data']) >= 20:
ratios = [0.9, 0.95, 0.99]
for stage in dur:
lat = durs_to_percentiles(dur[stage], ratios)
for k in [0.99, 0.95, 0.9, 0.5]:
dllogger.log(step=(), data={f'{stage.lower()}_latency_{k}': lat[k]})
else:
print_once('Not enough samples to measure latencies.')
if __name__ == "__main__":
main()
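# Example invocation (hypothetical paths, shown for illustration only):
#
#   python inference.py \
#       --model_config configs/jasper10x5dr_speedp-online_speca.yaml \
#       --ckpt /checkpoints/jasper_fp16.pt \
#       --dataset_dir /datasets/LibriSpeech \
#       --val_manifests librispeech-dev-clean-wav.json \
#       --batch_size 16 --amp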
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/inference.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import sys
sys.path.append("./")
class FeatureCollate:
def __init__(self, feature_proc):
self.feature_proc = feature_proc
def __call__(self, batch):
bs = len(batch)
max_len = lambda l,idx: max(el[idx].size(0) for el in l)
audio = torch.zeros(bs, max_len(batch, 0))
audio_lens = torch.zeros(bs, dtype=torch.int32)
for i, sample in enumerate(batch):
audio[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])
audio_lens[i] = sample[1]
ret = (audio, audio_lens)
if self.feature_proc is not None:
feats, feat_lens = self.feature_proc(audio, audio_lens)
ret = (feats,)
return ret
def get_dataloader(model_args_list):
''' return dataloader for inference '''
from inference import get_parser
from common.helpers import add_ctc_blank
from jasper import config
from common.dataset import (AudioDataset, FilelistDataset, get_data_loader,
SingleAudioDataset)
from common.features import FilterbankFeatures
parser = get_parser()
parser.add_argument('--component', type=str, default="model",
choices=["feature-extractor", "model", "decoder"],
help='Component to convert')
args = parser.parse_args(model_args_list)
if args.component == "decoder":
return None
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
symbols = add_ctc_blank(cfg['labels'])
dataset_kw, features_kw = config.input(cfg, 'val')
dataset = AudioDataset(args.dataset_dir, args.val_manifests,
symbols, **dataset_kw)
data_loader = get_data_loader(dataset, args.batch_size, multi_gpu=False,
shuffle=False, num_workers=4, drop_last=False)
feature_proc = None
if args.component == "model":
feature_proc = FilterbankFeatures(**features_kw)
data_loader.collate_fn = FeatureCollate(feature_proc)
return data_loader
def init_feature_extractor(args):
from jasper import config
from common.features import FilterbankFeatures
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
_, features_kw = config.input(cfg, 'val')
feature_proc = FilterbankFeatures(**features_kw)
return feature_proc
def init_acoustic_model(args):
from common.helpers import add_ctc_blank
from jasper.model import Jasper
from jasper import config
cfg = config.load(args.model_config)
config.apply_config_overrides(cfg, args)
if cfg['jasper']['encoder']['use_conv_masks']:
    print("[Jasper module]: Warning: setting 'use_conv_masks' "
          "to False; masked convolutions are not supported.")
cfg['jasper']['encoder']['use_conv_masks'] = False
symbols = add_ctc_blank(cfg['labels'])
model = Jasper(encoder_kw=config.encoder(cfg),
decoder_kw=config.decoder(cfg, n_classes=len(symbols)))
if args.ckpt is not None:
checkpoint = torch.load(args.ckpt, map_location="cpu")
key = 'ema_state_dict' if args.ema else 'state_dict'
state_dict = checkpoint[key]
model.load_state_dict(state_dict, strict=True)
return model
def init_decoder(args):
class GreedyCTCDecoderSimple(torch.nn.Module):
@torch.no_grad()
def forward(self, log_probs):
return log_probs.argmax(dim=-1, keepdim=False).int()
return GreedyCTCDecoderSimple()
def init_model(model_args_list, precision, device):
''' Return one of the components: feature-extractor, model, or decoder.
The returned component is ready for conversion. '''
from inference import get_parser
parser = get_parser()
parser.add_argument('--component', type=str, default="model",
choices=["feature-extractor", "model", "decoder"],
help='Component to convert')
args = parser.parse_args(model_args_list)
init_comp = {"feature-extractor": init_feature_extractor,
"model": init_acoustic_model,
"decoder": init_decoder}
comp = init_comp[args.component](args)
torch_device = torch.device(device)
print(f"[Jasper module]: using device {torch_device}")
comp.to(torch_device)
comp.eval()
if precision == "fp16":
print("[Jasper module]: using mixed precision")
comp.half()
else:
print("[Jasper module]: using fp32 precision")
return comp
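# Hedged usage sketch (hypothetical paths; defined only, never executed):
# building the acoustic-model component for conversion with the helpers above.
def _init_model_sketch():
    model_args = ["--model_config", "configs/jasper10x5dr_speedp-online.yaml",
                  "--ckpt", "/checkpoints/jasper_fp16.pt",
                  "--dataset_dir", "/datasets/LibriSpeech",
                  "--val_manifests", "librispeech-dev-clean-wav.json",
                  "--component", "model"]
    return init_model(model_args, precision="fp16", device="cuda")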
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/triton/jasper_module.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import json
import torch
import argparse
import importlib
from pytorch.utils import extract_io_props, load_io_props
import logging
def get_parser():
parser = argparse.ArgumentParser()
# required args
parser.add_argument("--model-module", type=str, default="", required=True,
help="Module with model initializer and data loader")
parser.add_argument('--convert', choices=['ts-script', 'ts-trace',
'onnx', 'tensorrt'],
required=True, help='convert to '
'ts-script: TorchScript using torch.jit.script, '
'ts-trace: TorchScript using torch.jit.trace, '
'onnx: ONNX using torch.onnx.export, '
'tensorrt: TensorRT using OnnxParser, ')
parser.add_argument("--max_workspace_size", type=int,
default=512*1024*1024,
help="set the size of the workspace for TensorRT \
conversion")
parser.add_argument("--precision", choices=['fp16', 'fp32'],
default='fp32', help="convert TensorRT or \
TorchScript model in a given precision")
parser.add_argument('--convert-filename', type=str, default='',
help='Saved model name')
parser.add_argument('--save-dir', type=str, default='',
help='Saved model directory')
parser.add_argument("--max-batch-size", type=int, default=1,
help="Specifies the 'max_batch_size' in the Triton \
model config and in TensorRT builder. See the \
Triton and TensorRT documentation for more info.")
parser.add_argument('--device', type=str, default='cuda',
help='Select device for conversion.')
parser.add_argument('model_arguments', nargs=argparse.REMAINDER,
help='arguments that will be ignored by \
converter lib and will be forwarded to your convert \
script')
return parser
class Converter:
def __init__(self, model, dataloader):
self.model = model
self.dataloader = dataloader
self.convert_props = {
'ts-script': {
'convert_func': self.to_torchscript,
'convert_filename': 'model.pt'
},
'ts-trace': {
'convert_func' : self.to_torchscript,
'convert_filename': 'model.pt'
},
'onnx': {
'convert_func' : self.to_onnx,
'convert_filename': 'model.onnx'
},
'tensorrt': {
'convert_func' : self.to_tensorrt,
'convert_filename': 'model.plan'
}
}
def convert(self, convert_type, save_dir, convert_filename,
device, precision='fp32',
max_batch_size=1,
# args for TensorRT:
max_workspace_size=None):
''' convert the model '''
self.convert_type = convert_type
self.max_workspace_size = max_workspace_size
self.max_batch_size = max_batch_size
self.precision = precision
# override default name if user provided name
if convert_filename != '':
self.convert_props[convert_type]['convert_filename'] = convert_filename
# setup device
torch_device = torch.device(device)
# prepare model
self.model.to(torch_device)
self.model.eval()
assert (not self.model.training), \
"[Converter error]: could not set the model to eval() mode!"
io_props = None
if self.dataloader is not None:
io_props = extract_io_props(self.model, self.dataloader,
torch_device, precision, max_batch_size)
assert self.convert_type == "ts-script" or io_props is not None, \
"Input and output properties are empty. For conversion types \
other than \'ts-script\' input shapes are required to generate dummy input. \
Make sure that dataloader works correctly or that IO props file is provided."
# prepare save path
model_name = self.convert_props[convert_type]['convert_filename']
convert_model_path = os.path.join(save_dir, model_name)
# get convert method depending on the convert type
convert_func = self.convert_props[convert_type]['convert_func']
# convert the model - will be saved to disk
if self.convert_type == "tensorrt":
io_filepath = "triton/tensorrt_io_props_" + str(precision) + ".json"
io_props = load_io_props(io_filepath)
convert_func(self.model, torch_device, io_props, convert_model_path)
assert (os.path.isfile(convert_model_path)), \
f"[Converter error]: saving model to {convert_model_path} failed!"
def generate_dummy_input(self, io_props, device):
from pytorch.utils import triton_type_to_torch_type
dummy_input = []
for s,t in zip(io_props['opt_shapes'], io_props['input_types']):
t = triton_type_to_torch_type[t]
tensor = torch.empty(size=s, dtype=t, device=device).random_()
dummy_input.append(tensor)
dummy_input = tuple(dummy_input)
return dummy_input
def to_onnx(self, model, device, io_props, convert_model_path):
''' convert the model to onnx '''
dummy_input = self.generate_dummy_input(io_props, device)
opset_version = 11
# convert the model to onnx
with torch.no_grad():
torch.onnx.export(model, dummy_input,
convert_model_path,
do_constant_folding=True,
input_names=io_props['input_names'],
output_names=io_props['output_names'],
dynamic_axes=io_props['dynamic_axes'],
opset_version=opset_version,
enable_onnx_checker=True)
def to_tensorrt(self, model, device, io_props, convert_model_path):
''' convert the model to tensorrt '''
assert (self.max_workspace_size), "[Converter error]: for TensorRT conversion you must provide \'max_workspace_size\'."
import tensorrt as trt
from pytorch.utils import build_tensorrt_engine
# convert the model to onnx first
self.to_onnx(model, device, io_props, convert_model_path)
del model
torch.cuda.empty_cache()
zipped = zip(io_props['input_names'], io_props['min_shapes'],
io_props['opt_shapes'], io_props['max_shapes'])
shapes = []
for name,min_shape,opt_shape,max_shape in zipped:
d = {"name":name, "min": min_shape,
"opt": opt_shape, "max": max_shape}
shapes.append(d)
tensorrt_fp16 = True if self.precision == 'fp16' else False
# build tensorrt engine
engine = build_tensorrt_engine(convert_model_path, shapes,
self.max_workspace_size,
self.max_batch_size,
tensorrt_fp16)
assert engine is not None, "[Converter error]: TensorRT build failure"
# write tensorrt engine
with open(convert_model_path, 'wb') as f:
f.write(engine.serialize())
def to_torchscript(self, model, device, io_props, convert_model_path):
''' convert the model to torchscript '''
if self.convert_type == 'ts-script':
model_ts = torch.jit.script(model)
else: # self.convert_type == 'ts-trace'
dummy_input = self.generate_dummy_input(io_props, device)
with torch.no_grad():
model_ts = torch.jit.trace(model, dummy_input)
# save the model
torch.jit.save(model_ts, convert_model_path)
if __name__=='__main__':
parser = get_parser()
args = parser.parse_args()
model_args_list = args.model_arguments[1:]
logging.basicConfig(level=logging.INFO)
mm = importlib.import_module(args.model_module)
model = mm.init_model(model_args_list, args.precision, args.device)
dataloader = mm.get_dataloader(model_args_list)
converter = Converter(model, dataloader)
converter.convert(args.convert, args.save_dir, args.convert_filename,
args.device, args.precision,
args.max_batch_size,
args.max_workspace_size)
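# Example invocation (hypothetical paths and flags, shown for illustration;
# arguments after `--` are forwarded to the model module):
#
#   python triton/converter.py --model-module triton.jasper_module \
#       --convert ts-trace --precision fp16 \
#       --save-dir /results --max-batch-size 8 \
#       -- --model_config configs/jasper10x5dr_speedp-online.yaml \
#          --ckpt /checkpoints/jasper_fp16.pt \
#          --dataset_dir /datasets/LibriSpeech \
#          --val_manifests librispeech-dev-clean-wav.json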
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/triton/converter.py |
#!/usr/bin/python
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import soundfile as sf
import math
from os import system
import numpy as np
import tritonclient.grpc, tritonclient.http
import tritonclient.grpc.model_config_pb2 as model_config
from tritonclient.utils import triton_to_np_dtype, np_to_triton_dtype
import grpc
import sys
import os
if "./triton" not in sys.path:
sys.path.append(os.path.join(sys.path[0], "../"))
from common.text import _clean_text
WINDOWS_FNS = {"hanning": np.hanning, "hamming": np.hamming, "none": None}
triton_type_to_np_dtype = {
'TYPE_BOOL': bool,
'TYPE_INT8': np.int8,
'TYPE_INT16': np.int16,
'TYPE_INT32': np.int32,
'TYPE_INT64': np.int64,
'TYPE_UINT8': np.uint8,
'TYPE_FP16': np.float16,
'TYPE_FP32': np.float32,
'TYPE_FP64': np.float64
}
model_dtype_to_np_dtype = {
"BOOL": np.bool,
"INT8": np.int8,
"INT16": np.int16,
"INT32": np.int32,
"INT64": np.int64,
"UINT8": np.uint8,
"UINT16": np.uint16,
"FP16": np.float16,
"FP32": np.float32,
"FP64": np.float64,
"BYTES": np.dtype(object)
}
def load_transcript(transcript_path):
with open(transcript_path, 'r', encoding="utf-8") as transcript_file:
transcript = transcript_file.read().replace('\n', '')
return transcript
def parse_transcript(transcript, labels_map, blank_index):
chars = [labels_map.get(x, blank_index) for x in list(transcript)]
transcript = list(filter(lambda x: x != blank_index, chars))
return transcript
def normalize_string(s, labels, table, **unused_kwargs):
"""
Normalizes string. For example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
Args:
s: string to normalize
labels: labels used during model training.
Returns:
Normalized string
"""
def good_token(token, labels):
s = set(labels)
for t in token:
if not t in s:
return False
return True
try:
text = _clean_text(s, ["english_cleaners"], table).strip()
return ''.join([t for t in text if good_token(t, labels=labels)])
except:
print("WARNING: Normalizing {} failed".format(s))
return None
def ctc_decoder_predictions_tensor(prediction_cpu_tensor, batch_size, labels):
"""
Takes output of greedy ctc decoder and performs ctc decoding algorithm to
remove duplicates and special symbol. Returns prediction
Args:
tensor: model output tensor
label: A list of labels
Returns:
prediction
"""
blank_id = len(labels) - 1
hypotheses = []
labels_map = dict([(i, labels[i]) for i in range(len(labels))])
# iterate over batch
prediction_cpu_tensor = prediction_cpu_tensor.reshape((batch_size, int(prediction_cpu_tensor.size/batch_size)))
for ind in range(batch_size):
prediction = prediction_cpu_tensor[ind].tolist()
# CTC decoding procedure
decoded_prediction = []
previous = len(labels) - 1 # id of a blank symbol
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
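# Hedged illustration (synthetic data, added for clarity): CTC decoding
# collapses repeated symbols and drops blanks, so the fake argmax output
# below ([c, c, <blank>, a, a, t]) decodes to "cat".
def _ctc_collapse_sketch():
    labels = ["a", "c", "t", "<BLANK>"]
    blank = len(labels) - 1
    fake_argmax = np.asarray([1, 1, blank, 0, 0, 2])
    return ctc_decoder_predictions_tensor(fake_argmax, batch_size=1,
                                          labels=labels)  # -> ["cat"]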
class SpeechClient(object):
def __init__(self, url, protocol, model_name, model_version, batch_size,
model_platform=None, verbose=False,
mode="batch",
from_features=True):
self.model_name = model_name
self.model_version = model_version
self.verbose = verbose
self.batch_size = batch_size
self.transpose_audio_features = False
self.grpc_stub = None
self.ctx = None
self.correlation_id = 0
self.first_run = True
if mode == "streaming" or mode == "asynchronous":
self.correlation_id = 1
self.buffer = []
if protocol == "grpc":
# Create gRPC client for communicating with the server
self.prtcl_client = tritonclient.grpc
else:
# Create HTTP client for communicating with the server
self.prtcl_client = tritonclient.http
self.triton_client = self.prtcl_client.InferenceServerClient(
url=url, verbose=self.verbose)
(self.audio_signals_name, self.num_samples_name, self.transcripts_name,
 self.audio_signals_type, self.num_samples_type,
 self.transcripts_type) = self.parse_model(model_name, batch_size,
                                           model_platform, verbose)
self.labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "<BLANK>"]
def postprocess(self, transcript_values, labels):
res = []
for transcript, filename in zip(transcript_values,
labels):
print('---')
print('File: ', filename)
t=ctc_decoder_predictions_tensor(transcript, self.batch_size, self.labels)
print("Final transcript: ", t)
print('---')
res.append(t)
return res
def check_num_samples(self, num_samples, model_name):
if num_samples['data_type'] != 'TYPE_UINT32' and num_samples['data_type'] != 'TYPE_INT32':
raise Exception(
"expecting num_samples datatype to be TYPE_UINT32/TYPE_INT32, "
"model '" + model_name + "' output type is " +
model_config.DataType.Name(num_samples['data_type']))
if len(num_samples['dims']) != 1:
raise Exception("Expecting num_samples to have 1 dimension, "
"model '{}' num_samples has {}".format(
model_name,len(num_samples['dims'])))
def parse_model(self, model_name, batch_size,
                model_platform=None, verbose=False):
"""
Check the configuration of the ensemble model
"""
if self.prtcl_client is tritonclient.grpc:
config = self.triton_client.get_model_config(model_name, as_json=True)
else:
config = self.triton_client.get_model_config(model_name)
self.model_platform = model_platform
# Inputs are:
# 1) audio_signal: raw audio samples [num_samples]
# 2) sample_rate: sample rate of audio
# 3) num_samples: length of audio
if len(config['input']) < 2:
raise Exception(
"expecting 2-3 inputs, got {}".format(len(config['input'])))
# Outputs are:
# 1) transcripts: candidate transcripts
if len(config['output']) != 1:
raise Exception(
"expecting 1 output, got {}".format(len(config['output'])))
audio_signal = config['input'][0]
if len(config['input']) > 1:
num_samples = config['input'][1]
self.check_num_samples(num_samples, model_name)
transcripts = config['output'][0]
expected_audio_signal_dim = 1
# Model specifying maximum batch size of 0 indicates that batching
# is not supported and so the input tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# image instance is inferred at a time).
max_batch_size = config['max_batch_size']
if max_batch_size == 0:
if batch_size != 1:
raise Exception(
"batching not supported for model '" + model_name + "'")
else: # max_batch_size > 0
if batch_size > max_batch_size:
raise Exception(
"expecting batch size <= {} for model {}".format(
max_batch_size, model_name))
if len(audio_signal['dims']) != expected_audio_signal_dim:
raise Exception("Expecting audio signal to have {} dimensions, "
"model '{}' audio_signal has {}".format(
expected_audio_signal_dim,
model_name,
len(audio_signal['dims'])))
return (audio_signal['name'],
num_samples['name'],
transcripts['name'],
triton_type_to_np_dtype[audio_signal['data_type']],
triton_type_to_np_dtype[num_samples['data_type']],
triton_type_to_np_dtype[transcripts['data_type']])
def recognize(self, audio_signal, filenames):
# Send requests of FLAGS.batch_size audio signals. If the number of
# audios isn't an exact multiple of FLAGS.batch_size then just
# start over with the first audio until the batch is filled.
input_batch = []
input_filenames = []
max_num_samples_batch = 0
for idx in range(self.batch_size):
input_batch.append(audio_signal[idx].astype(
self.audio_signals_type))
input_filenames.append(filenames[idx])
num_samples = audio_signal[idx].shape[0]
if (num_samples > max_num_samples_batch):
max_num_samples_batch = num_samples
for idx in range(self.batch_size):
num_samples = input_batch[idx].shape[0]
mean = np.mean(input_batch[idx])
std_var = np.std(input_batch[idx])
gauss_noise = np.random.normal(
mean,std_var,
max_num_samples_batch-num_samples)
input_batch[idx]= np.concatenate(
(input_batch[idx], gauss_noise.astype(
self.audio_signals_type)))
max_num_samples_batch = np.asarray([max_num_samples_batch],
dtype=self.num_samples_type)
num_samples_batch = [max_num_samples_batch]*self.batch_size
# Send request
print("Sending request to transcribe file(s):", ",".join(
input_filenames))
inputs = []
input_batch = np.asarray(input_batch)
num_samples_batch = np.asarray(num_samples_batch)
inputs.append(self.prtcl_client.InferInput(self.audio_signals_name,
input_batch.shape,
np_to_triton_dtype(input_batch.dtype)))
inputs.append(self.prtcl_client.InferInput(self.num_samples_name,
num_samples_batch.shape,
np_to_triton_dtype(num_samples_batch.dtype)))
if self.prtcl_client is tritonclient.grpc:
inputs[0].set_data_from_numpy(input_batch)
inputs[1].set_data_from_numpy(num_samples_batch)
else: # http
inputs[0].set_data_from_numpy(input_batch, binary_data=True)
inputs[1].set_data_from_numpy(num_samples_batch, binary_data=True)
outputs = []
if self.prtcl_client is tritonclient.grpc:
outputs.append(self.prtcl_client.InferRequestedOutput(self.transcripts_name))
else:
outputs.append(self.prtcl_client.InferRequestedOutput(self.transcripts_name,
binary_data=True))
triton_result = self.triton_client.infer(self.model_name, inputs=inputs,
outputs=outputs)
transcripts = triton_result.as_numpy(self.transcripts_name)
result = self.postprocess(transcripts, input_filenames)
return result
def preemphasis(signal, coeff=0.97):
return np.append(signal[0], signal[1:] - coeff*signal[:-1])
def normalize_signal(signal, gain=None):
"""
Normalize float32 signal to [-1, 1] range
"""
if gain is None:
gain = 1.0/(np.max(np.abs(signal)) + 1e-5)
return signal*gain
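# Quick illustration (synthetic numbers): normalize_signal() rescales so the
# peak magnitude is ~1.0, e.g. normalize_signal(np.array([0.25, -0.5])) gives
# roughly [0.5, -1.0], while preemphasis() computes y[n] = x[n] - 0.97*x[n-1].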
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, samples, sample_rate, target_sr=16000, trim=False,
trim_db=60):
"""Create audio segment from samples.
Samples are convert float32 internally, with int scaled to [-1, 1].
"""
samples = self._convert_samples_to_float32(samples)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 1)
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype.kind == 'i':  # signed integer samples
    bits = np.iinfo(samples.dtype).bits
    float32_samples *= (1. / 2 ** (bits - 1))
elif samples.dtype.kind == 'f':  # already floating-point
    pass
else:
    raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
@classmethod
def from_file(cls, filename, target_sr=16000, int_values=False, offset=0,
duration=0, trim=False):
"""
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param int_values: if true, load samples as 32-bit integers
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:return: an AudioSegment instance
"""
with sf.SoundFile(filename, 'r') as f:
dtype = 'int32' if int_values else 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
samples = samples.transpose()
return cls(samples, sample_rate, target_sr=target_sr, trim=trim)
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
# define our clear function
def clear_screen():
_ = system('clear')
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/triton/speech_utils.py |
#!/usr/bin/python
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import argparse
import numpy as np
import os
from speech_utils import AudioSegment, SpeechClient
import soundfile
import pyaudio as pa
import threading
import math
import time
import glob
FLAGS = None
# read audio chunk from a file
def get_audio_chunk_from_soundfile(sf, chunk_size, int_values):
dtype = 'int32' if int_values else 'float32'
audio_signal = sf.read(chunk_size, dtype=dtype)
end = False
# pad to chunk size
if len(audio_signal) < chunk_size:
end = True
audio_signal = np.pad(audio_signal, (0, chunk_size-len(
audio_signal)), mode='constant')
return audio_signal, end
# generator that returns chunks of audio data from file
def audio_generator_from_file(input_filename, target_sr, int_values,
chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
while not end:
audio_signal, end = get_audio_chunk_from_soundfile(
sf, chunk_size, int_values)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
yield audio_segment.samples, target_sr, start, end
start = False
sf.close()
# generator that returns chunks of audio data from file
class AudioGeneratorFromMicrophone:
def __init__(self,input_device_id, target_sr, chunk_duration):
self.recording_state = "init"
self.target_sr = target_sr
self.chunk_duration = chunk_duration
self.p = pa.PyAudio()
device_info = self.p.get_host_api_info_by_index(0)
num_devices = device_info.get('deviceCount')
devices = {}
for i in range(0, num_devices):
if (self.p.get_device_info_by_host_api_device_index(0, i).get(
'maxInputChannels')) > 0:
devices[i] = self.p.get_device_info_by_host_api_device_index(
0, i)
if (len(devices) == 0):
raise RuntimeError("Cannot find any valid input devices")
if input_device_id is None or input_device_id not in \
devices.keys():
print("\nInput Devices:")
for id, info in devices.items():
print("{}: {}".format(id,info.get("name")))
input_device_id = int(input("Enter device id to use: "))
self.input_device_id = input_device_id
def generate_audio(self):
chunk_size = int(self.chunk_duration*self.target_sr)
self.recording_state = "init"
def keyboard_listener():
input("Press Enter to start and end recording...")
self.recording_state = "capture"
print("Recording...")
input("")
self.recording_state = "release"
listener = threading.Thread(target=keyboard_listener)
listener.start()
start = True
end = False
stream_initialized = False
step = 0
while self.recording_state != "release":
try:
if self.recording_state == "capture":
if not stream_initialized:
stream = self.p.open(
format=pa.paInt16,
channels=1,
rate=self.target_sr,
input=True,
input_device_index=self.input_device_id,
frames_per_buffer=chunk_size)
stream_initialized = True
# Read audio chunk from microphone
audio_signal = stream.read(chunk_size)
audio_signal = np.frombuffer(audio_signal,dtype=np.int16)
audio_segment = AudioSegment(audio_signal,
self.target_sr,
self.target_sr)
yield audio_segment.samples, self.target_sr, start, end
start = False
step += 1
except Exception as e:
print(e)
break
stream.close()
self.p.terminate()
def generate_audio_signal(self):
#chunk_size = int(self.chunk_duration*self.target_sr)
chunk_size = int(0.2*self.target_sr)
self.recording_state = "init"
def keyboard_listener():
input("Press Enter to start and end recording...")
self.recording_state = "capture"
print("Recording...")
input("")
self.recording_state = "release"
listener = threading.Thread(target=keyboard_listener)
listener.start()
audio_samples = []
stream_initialized = False
step = 0
while self.recording_state != "release":
try:
if self.recording_state == "capture":
if not stream_initialized:
stream = self.p.open(
format=pa.paInt16,
channels=1,
rate=self.target_sr,
input=True,
input_device_index=self.input_device_id,
frames_per_buffer=chunk_size)
stream_initialized = True
# Read audio chunk from microphone
audio_signal = stream.read(chunk_size)
audio_signal = np.frombuffer(audio_signal,dtype=np.int16)
audio_segment = AudioSegment(audio_signal,
self.target_sr,
self.target_sr)
if step == 0:
audio_samples = audio_segment.samples
else:
audio_samples = np.concatenate((audio_samples,
audio_segment.samples))
start = False
step += 1
except Exception as e:
print(e)
break
stream.close()
self.p.terminate()
return audio_samples
# generator that returns chunks of audio features from file (NOTE: relies on
# a get_speech_features() helper that is neither defined nor imported here)
def audio_features_generator(input_filename, speech_features_params,
target_sr, int_values, chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
while not end:
audio_signal, end = get_audio_chunk_from_soundfile(sf, chunk_size,
int_values)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
audio_features, features_length = get_speech_features(
audio_segment.samples, target_sr, speech_features_params)
yield audio_features, start, end
start = False
sf.close()
def audio_features_generator_with_buffer(input_filename,
speech_features_params, target_sr,
int_values, chunk_duration):
sf = soundfile.SoundFile(input_filename, 'rb')
chunk_size = int(chunk_duration*sf.samplerate)
start = True
end = False
audio_signal = np.zeros(shape=3*chunk_size, dtype=np.float32)
while not end:
audio_signal[-chunk_size:], end = get_audio_chunk_from_soundfile(sf, chunk_size, int_values)
audio_segment = AudioSegment(audio_signal, sf.samplerate, target_sr)
audio_features, features_length = get_speech_features(
audio_segment.samples, target_sr, speech_features_params)
yield audio_features, start, end
start = False
audio_signal[:-chunk_size] = audio_signal[chunk_size:]
sf.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action="store_true", required=False,
default=False, help='Enable verbose output')
parser.add_argument('--fixed_size', type=int, required=False,
default=0,
help="send fixed_size requests, pad or truncate")
parser.add_argument('--batch_size', type=int, required=False, default=1,
help='batch size')
parser.add_argument('--model_platform', required=False,
default='tensorrt',
help='Jasper model platform')
parser.add_argument('-u', '--url', type=str, required=False,
default='localhost:8000',
help='Inference server URL. Default is '
'localhost:8000.')
parser.add_argument('-i', '--protocol', type=str, required=False,
default='HTTP',
help='Protocol (HTTP/gRPC) used to communicate with '
'inference service. Default is HTTP.')
parser.add_argument('--audio_filename', type=str, required=False,
default=None,
help='Input audio filename')
parser.add_argument('--data_dir', type=str, required=False,
default=None,
help='data directory')
parser.add_argument('--manifest_filename', type=str, required=False,
default=None,
help='relative manifest paths to --data_dir directory.')
FLAGS = parser.parse_args()
protocol = FLAGS.protocol.lower()
valid_model_platforms = {"ts-trace","onnx", "tensorrt"}
if FLAGS.model_platform not in valid_model_platforms:
raise ValueError("Invalid model_platform {}. Valid choices are {"
"}".format(FLAGS.model_platform,
valid_model_platforms))
model_name = "jasper-" + FLAGS.model_platform + "-ensemble"
speech_client = SpeechClient(
FLAGS.url, protocol, model_name, 1,
FLAGS.batch_size, model_platform=FLAGS.model_platform,
verbose=FLAGS.verbose, mode="synchronous",
from_features=False
)
filenames = []
transcripts = []
if FLAGS.audio_filename is not None:
audio_file = os.path.join(FLAGS.data_dir, FLAGS.audio_filename)
if os.path.isdir(audio_file):
filenames = glob.glob(os.path.join(os.path.abspath(audio_file), "**", "*.wav"),
recursive=True)
else:
filenames = [audio_file]
elif FLAGS.manifest_filename is not None:
filter_speed=1.0
data_dir=FLAGS.data_dir
labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "<BLANK>"]
labels_map = dict([(labels[i], i) for i in range(len(labels))])
blank_index = len(labels)-1
table = None
import string
punctuation = string.punctuation
punctuation = punctuation.replace("+", "")
punctuation = punctuation.replace("&", "")
table = str.maketrans(punctuation, " " * len(punctuation))
import json
if "./triton" not in sys.path:
sys.path.append("./")
sys.path.append("./triton")
from speech_utils import normalize_string, parse_transcript
FLAGS.manifest_filename = FLAGS.manifest_filename.split(',')
for manifest in FLAGS.manifest_filename:
manifest=os.path.join(data_dir, manifest)
print(manifest)
with open(manifest, "r", encoding="utf-8") as fh:
a=json.load(fh)
for data in a:
files_and_speeds = data['files']
audio_path = [x['fname'] for x in files_and_speeds if x['speed'] == filter_speed][0]
filenames.append(os.path.join(data_dir, audio_path))
transcript_text = data['transcript']
transcript_text = normalize_string(transcript_text, labels=labels, table=table)
transcripts.append(transcript_text)  # keep raw text; parse_transcript() would convert it to vocab indices
# Read the audio files
# Group requests in batches
audio_idx = 0
last_request = False
predictions = []
while not last_request:
batch_audio_samples = []
batch_filenames = []
for idx in range(FLAGS.batch_size):
filename = filenames[audio_idx]
print("Reading audio file: ", filename)
audio = AudioSegment.from_file(
filename,
offset=0, duration=FLAGS.fixed_size).samples
if FLAGS.fixed_size:
audio = np.resize(audio, FLAGS.fixed_size)
audio_idx = (audio_idx + 1) % len(filenames)
if audio_idx == 0:
last_request = True
batch_audio_samples.append(audio)
batch_filenames.append(filename)
predictions += speech_client.recognize(
batch_audio_samples,
batch_filenames)
if transcripts:
predictions = [x for l in predictions for x in l]
from metrics import word_error_rate
wer, scores, num_words = word_error_rate(predictions, transcripts)
print(wer)
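# Example invocation (hypothetical paths, shown for illustration only):
#
#   python triton/jasper-client.py --url localhost:8000 --protocol HTTP \
#       --model_platform onnx --batch_size 4 \
#       --data_dir /datasets/LibriSpeech \
#       --manifest_filename librispeech-dev-clean-wav.json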
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/triton/jasper-client.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/triton/pytorch/__init__.py |
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import tensorrt as trt
import torch
from collections import Counter
import json
import logging
triton_type_to_torch_type = {
'TYPE_BOOL': torch.bool,
'TYPE_INT8': torch.int8,
'TYPE_INT16': torch.int16,
'TYPE_INT32': torch.int32,
'TYPE_INT64': torch.int64,
'TYPE_UINT8': torch.uint8,
'TYPE_FP16': torch.float16,
'TYPE_FP32': torch.float32,
'TYPE_FP64': torch.float64
}
torch_type_to_triton_type = {
torch.bool: 'TYPE_BOOL',
torch.int8: 'TYPE_INT8',
torch.int16: 'TYPE_INT16',
torch.int32: 'TYPE_INT32',
torch.int64: 'TYPE_INT64',
torch.uint8: 'TYPE_UINT8',
torch.float16: 'TYPE_FP16',
torch.float32: 'TYPE_FP32',
torch.float64: 'TYPE_FP64'
}
def build_tensorrt_engine(model_file, shapes, max_workspace_size,
max_batch_size, fp16_mode):
''' takes a path to an onnx file and shape information; returns a tensorrt engine
:: model_file :: path to an onnx model
:: shapes :: list of dicts, one per input, each with min/opt/max shapes
'''
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16_mode
builder.max_batch_size = max_batch_size
#
config = builder.create_builder_config()
config.max_workspace_size = max_workspace_size
if fp16_mode:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
for s in shapes:
profile.set_shape(s['name'], min=s['min'], opt=s['opt'], max=s['max'])
config.add_optimization_profile(profile)
explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(explicit_batch)
#
with trt.OnnxParser(network, TRT_LOGGER) as parser:
with open(model_file, 'rb') as model:
parser.parse(model.read())
for i in range(parser.num_errors):
print("[Converter error]: OnnxParser:", parser.get_error(i))
engine = builder.build_engine(network, config=config)
return engine
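# Hedged usage sketch (hypothetical file and shapes; relies on the
# TensorRT 7.x-era builder API used above):
#
#   shapes = [{"name": "input__0", "min": (1, 64, 100),
#              "opt": (4, 64, 500), "max": (8, 64, 1000)}]
#   engine = build_tensorrt_engine("model.onnx", shapes,
#                                  max_workspace_size=512 * 1024 * 1024,
#                                  max_batch_size=8, fp16_mode=False)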
def get_inputs(dataloader, device, precision):
''' load sample inputs to device '''
inputs = []
logging.info("Loading sample inputs to device.")
for idx, batch in enumerate(dataloader):
if idx % max(1, len(dataloader) // 100) == 0:
logging.info(f"{idx}/{len(dataloader)}")
if type(batch) is torch.Tensor:
batch_d = batch.to(device)
if batch_d.is_floating_point() and precision == 'fp16':
batch_d = batch_d.to(torch.float16)
batch_d = (batch_d,)
inputs.append(batch_d)
else:
batch_d = []
for x in batch:
assert type(x) is torch.Tensor, "input is not a tensor"
x = x.to(device)
if x.is_floating_point() and precision == 'fp16':
x = x.to(torch.float16)
batch_d.append(x)
batch_d = tuple(batch_d)
inputs.append(batch_d)
logging.info("Finished loading sample inputs to device.")
return inputs
def get_list_of_shapes(l, fun):
''' returns the list of min/max shapes, depending on fun
:: l :: list of tuples of tensors
:: fun :: min or max
'''
tensor_tuple = l[0]
shapes = [list(x.shape) for x in tensor_tuple]
for tensor_tuple in l:
assert len(tensor_tuple) == len(shapes), "tensors with varying shape lengths are not supported"
for i,x in enumerate(tensor_tuple):
for j in range(len(x.shape)):
shapes[i][j] = fun(shapes[i][j], x.shape[j])
return shapes # a list of shapes
def get_min_shapes(l):
''' returns the tuple of min shapes
:: l :: list of tuples of tensors '''
shapes = get_list_of_shapes(l, min)
min_batch = 1
shapes = [[min_batch,*shape[1:]] for shape in shapes]
shapes = tuple(shapes)
return shapes # tuple of min shapes
def get_max_shapes(l):
''' returns the tuple of max shapes
:: l :: list of tuples of tensors '''
shapes = get_list_of_shapes(l, max)
max_batch = max(1,shapes[0][0])
shapes = [[max_batch,*shape[1:]] for shape in shapes]
shapes = tuple(shapes)
return shapes # tuple of max shapes
def get_opt_shapes(l):
''' returns the tuple of opt shapes
:: l :: list of tuples of tensors '''
counter = Counter()
for tensor_tuple in l:
shapes = [tuple(x.shape) for x in tensor_tuple]
shapes = tuple(shapes)
counter[shapes] += 1
shapes = counter.most_common(1)[0][0]
return shapes # tuple of the most commonly occurring shapes
def get_shapes(l, max_batch_size):
''' returns a tuple of dynamic shapes: variable tensor dimensions
(for ex. batch size) occur as -1 in the tuple
:: l :: list of tuples of tensors '''
tensor_tuple = l[0]
shapes = [list(x.shape) for x in tensor_tuple]
for tensor_tuple in l:
err_msg = "tensors with varying shape lengths are not supported"
assert len(tensor_tuple) == len(shapes), err_msg
for i,x in enumerate(tensor_tuple):
for j in range(len(x.shape)):
if shapes[i][j] != x.shape[j] or (j == 0 and max_batch_size > 1):
shapes[i][j] = -1
shapes = tuple(shapes)
return shapes # tuple of dynamic shapes
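# Hedged illustration (synthetic tensors; defined only, never executed):
# dimensions that vary across batches, and the batch dimension whenever
# max_batch_size > 1, are reported as -1 (i.e. dynamic).
def _dynamic_shapes_sketch():
    batches = [(torch.zeros(4, 80, 120),), (torch.zeros(4, 80, 90),)]
    return get_shapes(batches, max_batch_size=8)  # -> ([-1, 80, -1],)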
def get_io_properties(inputs, outputs, max_batch_size):
# generate input shapes - dynamic tensor shape support
input_shapes = get_shapes(inputs, max_batch_size)
# generate output shapes - dynamic tensor shape support
output_shapes = get_shapes(outputs, max_batch_size)
# generate input types
input_types = [torch_type_to_triton_type[x.dtype] for x in inputs[0]]
# generate output types
output_types = [torch_type_to_triton_type[x.dtype] for x in outputs[0]]
# get input names
rng = range(len(input_types))
input_names = ["input__" + str(num) for num in rng]
# get output names
rng = range(len(output_types))
output_names = ["output__" + str(num) for num in rng]
# get indices of dynamic input and output shapes
dynamic_axes = {}
for input_name,input_shape in zip(input_names,input_shapes):
dynamic_axes[input_name] = [i for i,x in enumerate(input_shape) if x == -1]
for output_name,output_shape in zip(output_names,output_shapes):
dynamic_axes[output_name] = [i for i,x in enumerate(output_shape) if x == -1]
# min, opt, max shapes for TensorRT
min_shapes = get_min_shapes(inputs)
opt_shapes = get_opt_shapes(inputs)
max_shapes = get_max_shapes(inputs)
res = {"input_shapes": input_shapes,
"output_shapes": output_shapes,
"input_types": input_types,
"output_types": output_types,
"input_names": input_names,
"output_names": output_names,
"dynamic_axes": dynamic_axes,
"min_shapes": min_shapes,
"opt_shapes": opt_shapes,
"max_shapes": max_shapes}
return res
def extract_io_props(model, dataloader, device, precision, max_batch_size):
# prepare inputs
inputs = get_inputs(dataloader, device, precision)
# generate outputs
outputs = []
for input in inputs:
with torch.no_grad():
output = model(*input)
if type(output) is torch.Tensor:
output = [output]
outputs.append(output)
# prepare input/output properties
io_props = get_io_properties(inputs, outputs, max_batch_size)
return io_props
def save_io_props(io_props, io_props_path):
with open(io_props_path, "w") as f:
f.write(json.dumps(io_props))
def load_io_props(io_props_path):
with open(io_props_path, "r") as f:
data = json.loads(f.read())
if "dynamic_axes" not in data.keys():
return data
return data
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/triton/pytorch/utils.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import argparse
import os
import glob
import multiprocessing
import json
import pandas as pd
from preprocessing_utils import parallel_preprocess
parser = argparse.ArgumentParser(description='Preprocess LibriSpeech.')
parser.add_argument('--input_dir', type=str, required=True,
help='LibriSpeech collection input dir')
parser.add_argument('--dest_dir', type=str, required=True,
help='Output dir')
parser.add_argument('--output_json', type=str, default='./',
help='name of the output json file.')
parser.add_argument('-s','--speed', type=float, nargs='*',
help='Speed perturbation ratio')
parser.add_argument('--target_sr', type=int, default=None,
help='Target sample rate. '
'defaults to the input sample rate')
parser.add_argument('--overwrite', action='store_true',
help='Overwrite file if exists')
parser.add_argument('--parallel', type=int, default=multiprocessing.cpu_count(),
help='Number of threads to use when processing audio files')
args = parser.parse_args()
args.input_dir = args.input_dir.rstrip('/')
args.dest_dir = args.dest_dir.rstrip('/')
def build_input_arr(input_dir):
txt_files = glob.glob(os.path.join(input_dir, '**', '*.trans.txt'),
recursive=True)
input_data = []
for txt_file in txt_files:
rel_path = os.path.relpath(txt_file, input_dir)
with open(txt_file) as fp:
for line in fp:
fname, _, transcript = line.partition(' ')
input_data.append(dict(input_relpath=os.path.dirname(rel_path),
input_fname=fname+'.flac',
transcript=transcript))
return input_data
print("[%s] Scaning input dir..." % args.output_json)
dataset = build_input_arr(input_dir=args.input_dir)
print("[%s] Converting audio files..." % args.output_json)
dataset = parallel_preprocess(dataset=dataset,
input_dir=args.input_dir,
dest_dir=args.dest_dir,
target_sr=args.target_sr,
speed=args.speed,
overwrite=args.overwrite,
parallel=args.parallel)
print("[%s] Generating json..." % args.output_json)
df = pd.DataFrame(dataset, dtype=object)
# Save json with python. df.to_json() produces backslashes in file paths
dataset = df.to_dict(orient='records')
with open(args.output_json, 'w') as fp:
json.dump(dataset, fp, indent=2)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/utils/convert_librispeech.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import argparse
import pandas as pd
from download_utils import download_file, md5_checksum, extract
parser = argparse.ArgumentParser(description='Download, verify and extract dataset files')
parser.add_argument('csv', type=str,
help='CSV file with urls and checksums to download.')
parser.add_argument('dest', type=str,
                    help='Download destination folder.')
parser.add_argument('-e', type=str, default=None,
                    help='Extraction destination folder. Defaults to download folder if not provided')
parser.add_argument('--skip_download', action='store_true',
help='Skip downloading the files')
parser.add_argument('--skip_checksum', action='store_true',
help='Skip checksum')
parser.add_argument('--skip_extract', action='store_true',
help='Skip extracting files')
args = parser.parse_args()
args.e = args.e or args.dest
df = pd.read_csv(args.csv, delimiter=',')
if not args.skip_download:
for url in df.url:
fname = url.split('/')[-1]
print("Downloading %s:" % fname)
download_file(url=url, dest_folder=args.dest, fname=fname)
else:
print("Skipping file download")
if not args.skip_checksum:
for index, row in df.iterrows():
url = row['url']
md5 = row['md5']
fname = url.split('/')[-1]
fpath = os.path.join(args.dest, fname)
print("Verifing %s: " % fname, end='')
ret = md5_checksum(fpath=fpath, target_hash=md5)
print("Passed" if ret else "Failed")
else:
print("Skipping checksum")
if not args.skip_extract:
for url in df.url:
fname = url.split('/')[-1]
fpath = os.path.join(args.dest, fname)
print("Decompressing %s:" % fpath)
extract(fpath=fpath, dest_folder=args.e)
else:
print("Skipping file extraction")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/utils/download_librispeech.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/utils/__init__.py |
|
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import os
import multiprocessing
import functools
import sox
from tqdm import tqdm
def preprocess(data, input_dir, dest_dir, target_sr=None, speed=None,
overwrite=True):
speed = speed or []
speed.append(1)
    speed = list(set(speed))  # Make unique
input_fname = os.path.join(input_dir,
data['input_relpath'],
data['input_fname'])
input_sr = sox.file_info.sample_rate(input_fname)
target_sr = target_sr or input_sr
os.makedirs(os.path.join(dest_dir, data['input_relpath']), exist_ok=True)
output_dict = {}
output_dict['transcript'] = data['transcript'].lower().strip()
output_dict['files'] = []
fname = os.path.splitext(data['input_fname'])[0]
for s in speed:
output_fname = fname + '{}.wav'.format('' if s==1 else '-{}'.format(s))
output_fpath = os.path.join(dest_dir,
data['input_relpath'],
output_fname)
if not os.path.exists(output_fpath) or overwrite:
cbn = sox.Transformer().speed(factor=s).convert(target_sr)
cbn.build(input_fname, output_fpath)
file_info = sox.file_info.info(output_fpath)
file_info['fname'] = os.path.join(os.path.basename(dest_dir),
data['input_relpath'],
output_fname)
file_info['speed'] = s
output_dict['files'].append(file_info)
if s == 1:
file_info = sox.file_info.info(output_fpath)
output_dict['original_duration'] = file_info['duration']
output_dict['original_num_samples'] = file_info['num_samples']
return output_dict
def parallel_preprocess(dataset, input_dir, dest_dir, target_sr, speed, overwrite, parallel):
with multiprocessing.Pool(parallel) as p:
func = functools.partial(preprocess,
input_dir=input_dir, dest_dir=dest_dir,
target_sr=target_sr, speed=speed, overwrite=overwrite)
dataset = list(tqdm(p.imap(func, dataset), total=len(dataset)))
return dataset
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/utils/preprocessing_utils.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env python
import hashlib
import requests
import os
import tarfile
import tqdm
def download_file(url, dest_folder, fname, overwrite=False):
fpath = os.path.join(dest_folder, fname)
if os.path.isfile(fpath):
if overwrite:
print("Overwriting existing file")
else:
print("File exists, skipping download.")
return
tmp_fpath = fpath + '.tmp'
if not os.path.exists(os.path.dirname(tmp_fpath)):
os.makedirs(os.path.dirname(tmp_fpath))
r = requests.get(url, stream=True)
file_size = int(r.headers['Content-Length'])
chunk_size = 1024 * 1024 # 1MB
total_chunks = int(file_size / chunk_size)
with open(tmp_fpath, 'wb') as fp:
content_iterator = r.iter_content(chunk_size=chunk_size)
chunks = tqdm.tqdm(content_iterator, total=total_chunks,
unit='MB', desc=fpath, leave=True)
for chunk in chunks:
fp.write(chunk)
os.rename(tmp_fpath, fpath)
def md5_checksum(fpath, target_hash):
file_hash = hashlib.md5()
with open(fpath, "rb") as fp:
for chunk in iter(lambda: fp.read(1024*1024), b""):
file_hash.update(chunk)
return file_hash.hexdigest() == target_hash
def extract(fpath, dest_folder):
if fpath.endswith('.tar.gz'):
mode = 'r:gz'
elif fpath.endswith('.tar'):
mode = 'r:'
else:
        raise IOError('fpath has unknown extension: %s' % fpath)
with tarfile.open(fpath, mode) as tar:
members = tar.getmembers()
for member in tqdm.tqdm(iterable=members, total=len(members), leave=True):
tar.extract(path=dest_folder, member=member)
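# Illustrative usage sketch (added, not part of the original file); the file
# name is hypothetical and the hash shown is the MD5 of the empty string.
def _example_md5(path='data.tar.gz'):
    ok = md5_checksum(path, target_hash='d41d8cd98f00b204e9800998ecf8427e')
    print('Passed' if ok else 'Failed')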
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/utils/download_utils.py |
import atexit
import glob
import os
import re
from pathlib import Path
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import dllogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
tb_loggers = {}
class TBLogger:
"""
    dummies: add empty 'aaa'/'zzz' plots to stretch the screen so that
    the legend always fits for the other plots
"""
def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
self.enabled = enabled
self.interval = interval
self.cache = {}
if self.enabled:
self.summary_writer = SummaryWriter(
log_dir=os.path.join(log_dir, name),
flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
if dummies:
for key in ('aaa', 'zzz'):
self.summary_writer.add_scalar(key, 0.0, 1)
def log(self, step, data):
for k, v in data.items():
self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)
def log_value(self, step, key, val, stat='mean'):
if self.enabled:
if key not in self.cache:
self.cache[key] = []
self.cache[key].append(val)
if len(self.cache[key]) == self.interval:
agg_val = getattr(np, stat)(self.cache[key])
self.summary_writer.add_scalar(key, agg_val, step)
del self.cache[key]
def log_grads(self, step, model):
if self.enabled:
norms = [p.grad.norm().item() for p in model.parameters()
if p.grad is not None]
for stat in ('max', 'min', 'mean'):
self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
stat=stat)
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
    log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def stdout_step_format(step):
if isinstance(step, str):
return step
fields = []
if len(step) > 0:
fields.append("epoch {:>4}".format(step[0]))
if len(step) > 1:
fields.append("iter {:>4}".format(step[1]))
if len(step) > 2:
fields[-1] += "/{}".format(step[2])
return " | ".join(fields)
def stdout_metric_format(metric, metadata, value):
name = metadata.get("name", metric + " : ")
unit = metadata.get("unit", None)
format = f'{{{metadata.get("format", "")}}}'
fields = [name, format.format(value) if value is not None else value, unit]
fields = [f for f in fields if f is not None]
return "| " + " ".join(fields)
def init_log(args):
enabled = (args.local_rank == 0)
if enabled:
fpath = args.log_file or os.path.join(args.output_dir, 'nvlog.json')
backends = [
JSONStreamBackend(Verbosity.DEFAULT, fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)
]
else:
backends = []
dllogger.init(backends=backends)
dllogger.metadata("train_lrate", {"name": "lrate", "unit": None, "format": ":>3.2e"})
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('dev', ' avg dev '), ('dev_ema', ' EMA dev ')]:
dllogger.metadata(f"{id_}_loss",
{"name": f"{pref}loss", "unit": None, "format": ":>7.2f"})
dllogger.metadata(f"{id_}_wer",
{"name": f"{pref}wer", "unit": "%", "format": ":>6.2f"})
dllogger.metadata(f"{id_}_throughput",
{"name": f"{pref}utts/s", "unit": "samples/s", "format": ":>5.0f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>5.2f"})
tb_subsets = ['train', 'dev', 'dev_ema'] if args.ema else ['train', 'dev']
global tb_loggers
tb_loggers = {s: TBLogger(enabled, args.output_dir, name=s)
for s in tb_subsets}
log_parameters(vars(args), tb_subset='train')
def log(step, tb_total_steps=None, subset='train', data={}):
if tb_total_steps is not None:
tb_loggers[subset].log(tb_total_steps, data)
if subset != '':
data = {f'{subset}_{key}': v for key,v in data.items()}
dllogger.log(step, data=data)
def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
tb_loggers[tb_subset].log_grads(tb_total_steps, grads)
def log_parameters(data, verbosity=0, tb_subset=None):
for k,v in data.items():
dllogger.log(step="PARAMETER", data={k:v}, verbosity=verbosity)
if tb_subset is not None and tb_loggers[tb_subset].enabled:
tb_data = {k:v for k,v in data.items()
if type(v) in (str, bool, int, float)}
tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})
def flush_log():
dllogger.flush()
for tbl in tb_loggers.values():
if tbl.enabled:
tbl.summary_writer.flush()
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/tb_dllogger.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def __levenshtein(a, b):
"""Calculates the Levenshtein distance between two sequences."""
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a, b = b, a
n, m = m, n
current = list(range(n + 1))
for i in range(1, m + 1):
previous, current = current, [i] + [0] * n
for j in range(1, n + 1):
add, delete = previous[j] + 1, current[j - 1] + 1
change = previous[j - 1]
if a[j - 1] != b[i - 1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
def word_error_rate(hypotheses, references):
"""Computes average Word Error Rate (WER) between two text lists."""
scores = 0
words = 0
len_diff = len(references) - len(hypotheses)
if len_diff > 0:
raise ValueError("Uneqal number of hypthoses and references: "
"{0} and {1}".format(len(hypotheses), len(references)))
elif len_diff < 0:
hypotheses = hypotheses[:len_diff]
for h, r in zip(hypotheses, references):
h_list = h.split()
r_list = r.split()
words += len(r_list)
scores += __levenshtein(h_list, r_list)
if words!=0:
wer = 1.0*scores/words
else:
wer = float('inf')
return wer, scores, words
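# Illustrative usage sketch (added, not part of the original file): WER counts
# word-level edit operations against the total reference word count.
def _example_wer():
    wer, scores, words = word_error_rate(hypotheses=["the cat sat"],
                                         references=["the cat sat down"])
    print(wer, scores, words)  # 0.25 1 4 -- one deletion over four words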
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/metrics.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mutes known and unrelated PyTorch warnings.
The warnings module keeps a list of filters. Importing it as late as possible
prevents its filters from being overridden.
"""
import warnings
# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
"ignore",
message='positional arguments and argument "destination" are deprecated.'
' nn.Module.state_dict will not accept them in the future.')
# 22.08-py3 container
warnings.filterwarnings(
"ignore",
message="is_namedtuple is deprecated, please use the python checks")
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/filter_warnings.py |
DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/__init__.py |
|
import math
import random
import librosa
import torch
import torch.nn as nn
class BaseFeatures(nn.Module):
"""Base class for GPU accelerated audio preprocessing."""
__constants__ = ["pad_align", "pad_to_max_duration", "max_len"]
def __init__(self, pad_align, pad_to_max_duration, max_duration,
sample_rate, window_size, window_stride, spec_augment=None,
cutout_augment=None):
super(BaseFeatures, self).__init__()
self.pad_align = pad_align
self.pad_to_max_duration = pad_to_max_duration
self.win_length = int(sample_rate * window_size) # frame size
self.hop_length = int(sample_rate * window_stride)
# Calculate maximum sequence length (# frames)
if pad_to_max_duration:
self.max_len = 1 + math.ceil(
(max_duration * sample_rate - self.win_length) / self.hop_length
)
if spec_augment is not None:
self.spec_augment = SpecAugment(**spec_augment)
else:
self.spec_augment = None
if cutout_augment is not None:
self.cutout_augment = CutoutAugment(**cutout_augment)
else:
self.cutout_augment = None
@torch.no_grad()
def calculate_features(self, audio, audio_lens):
return audio, audio_lens
def __call__(self, audio, audio_lens):
dtype = audio.dtype
audio = audio.float()
feat, feat_lens = self.calculate_features(audio, audio_lens)
feat = self.apply_padding(feat)
if self.cutout_augment is not None:
feat = self.cutout_augment(feat)
if self.spec_augment is not None:
feat = self.spec_augment(feat)
feat = feat.to(dtype)
return feat, feat_lens
def apply_padding(self, x):
if self.pad_to_max_duration:
x_size = max(x.size(-1), self.max_len)
else:
x_size = x.size(-1)
if self.pad_align > 0:
pad_amt = x_size % self.pad_align
else:
pad_amt = 0
padded_len = x_size + (self.pad_align - pad_amt if pad_amt > 0 else 0)
return nn.functional.pad(x, (0, padded_len - x.size(-1)))
class SpecAugment(nn.Module):
"""Spec augment. refer to https://arxiv.org/abs/1904.08779
"""
def __init__(self, freq_masks=0, min_freq=0, max_freq=10, time_masks=0,
min_time=0, max_time=10):
super(SpecAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.freq_masks = freq_masks
self.min_freq = min_freq
self.max_freq = max_freq
self.time_masks = time_masks
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for _ in range(self.freq_masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
f0 = torch.randint(0, max(1, sh[1] - w), size=(1,))
mask[idx, f0:f0+w] = 1
for _ in range(self.time_masks):
w = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
t0 = torch.randint(0, max(1, sh[2] - w), size=(1,))
mask[idx, :, t0:t0+w] = 1
return x.masked_fill(mask, 0)
class CutoutAugment(nn.Module):
"""Cutout. refer to https://arxiv.org/pdf/1708.04552.pdf
"""
def __init__(self, masks=0, min_freq=20, max_freq=20, min_time=5, max_time=5):
super(CutoutAugment, self).__init__()
assert 0 <= min_freq <= max_freq
assert 0 <= min_time <= max_time
self.masks = masks
self.min_freq = min_freq
self.max_freq = max_freq
self.min_time = min_time
self.max_time = max_time
@torch.no_grad()
def forward(self, x):
sh = x.shape
mask = torch.zeros(x.shape, dtype=torch.bool, device=x.device)
for idx in range(sh[0]):
for i in range(self.masks):
w = torch.randint(self.min_freq, self.max_freq + 1, size=(1,)).item()
h = torch.randint(self.min_time, self.max_time + 1, size=(1,)).item()
f0 = int(random.uniform(0, sh[1] - w))
t0 = int(random.uniform(0, sh[2] - h))
mask[idx, f0:f0+w, t0:t0+h] = 1
return x.masked_fill(mask, 0)
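# Illustrative usage sketch (added, not part of the original file): both
# augmenters take (batch, freq, time) spectrograms and zero out random
# rectangles; the shapes and parameters below are example assumptions.
def _example_augmentations():
    spec = torch.randn(1, 64, 200)
    aug = SpecAugment(freq_masks=2, max_freq=10, time_masks=2, max_time=20)
    cut = CutoutAugment(masks=2)
    out = cut(aug(spec))
    print(out.shape)  # torch.Size([1, 64, 200]), with some entries zeroed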
@torch.jit.script
def normalize_batch(x, seq_len, normalize_type: str):
# print ("normalize_batch: x, seq_len, shapes: ", x.shape, seq_len, seq_len.shape)
if normalize_type == "per_feature":
x_mean = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
x_std = torch.zeros((seq_len.shape[0], x.shape[1]), dtype=x.dtype,
device=x.device)
for i in range(x.shape[0]):
x_mean[i, :] = x[i, :, :seq_len[i]].mean(dim=1)
x_std[i, :] = x[i, :, :seq_len[i]].std(dim=1)
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.unsqueeze(2)) / x_std.unsqueeze(2)
elif normalize_type == "all_features":
x_mean = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
x_std = torch.zeros(seq_len.shape, dtype=x.dtype, device=x.device)
for i in range(x.shape[0]):
x_mean[i] = x[i, :, :int(seq_len[i])].mean()
x_std[i] = x[i, :, :int(seq_len[i])].std()
# make sure x_std is not zero
x_std += 1e-5
return (x - x_mean.view(-1, 1, 1)) / x_std.view(-1, 1, 1)
else:
return x
@torch.jit.script
def stack_subsample_frames(x, x_lens, stacking: int = 1, subsampling: int = 1):
""" Stacks frames together across feature dim, and then subsamples
input is batch_size, feature_dim, num_frames
output is batch_size, feature_dim * stacking, num_frames / subsampling
"""
seq = [x]
for n in range(1, stacking):
tmp = torch.zeros_like(x)
tmp[:, :, :-n] = x[:, :, n:]
seq.append(tmp)
x = torch.cat(seq, dim=1)[:, :, ::subsampling]
if subsampling > 1:
x_lens = torch.ceil(x_lens.float() / subsampling).int()
if x.size(2) > x_lens.max().item():
assert abs(x.size(2) - x_lens.max().item()) <= 1
x = x[:,:,:x_lens.max().item()]
return x, x_lens
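# Illustrative sketch (added, not part of the original file): stacking=3
# triples the feature dim, subsampling=3 shortens the time axis; lengths
# are rounded up.
def _example_stack_subsample():
    x = torch.randn(2, 64, 100)
    x_lens = torch.tensor([100, 80], dtype=torch.int32)
    y, y_lens = stack_subsample_frames(x, x_lens, stacking=3, subsampling=3)
    print(y.shape, y_lens)  # torch.Size([2, 192, 34]) tensor([34, 27], ...)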
class FilterbankFeatures(BaseFeatures):
# For JIT, https://pytorch.org/docs/stable/jit.html#python-defined-constants
__constants__ = ["dither", "preemph", "n_fft", "hop_length", "win_length",
"log", "frame_splicing", "normalize"]
# torchscript: "center" removed due to a bug
def __init__(self, spec_augment=None, cutout_augment=None,
sample_rate=8000, window_size=0.02, window_stride=0.01,
window="hamming", normalize="per_feature", n_fft=None,
preemph=0.97, n_filt=64, lowfreq=0, highfreq=None, log=True,
dither=1e-5, pad_align=8, pad_to_max_duration=False,
max_duration=float('inf'), frame_splicing=1):
super(FilterbankFeatures, self).__init__(
pad_align=pad_align, pad_to_max_duration=pad_to_max_duration,
max_duration=max_duration, sample_rate=sample_rate,
window_size=window_size, window_stride=window_stride,
spec_augment=spec_augment, cutout_augment=cutout_augment)
torch_windows = {
'hann': torch.hann_window,
'hamming': torch.hamming_window,
'blackman': torch.blackman_window,
'bartlett': torch.bartlett_window,
'none': None,
}
self.n_fft = n_fft or 2 ** math.ceil(math.log2(self.win_length))
self.normalize = normalize
self.log = log
#TORCHSCRIPT: Check whether or not we need this
self.dither = dither
self.frame_splicing = frame_splicing
self.n_filt = n_filt
self.preemph = preemph
highfreq = highfreq or sample_rate / 2
window_fn = torch_windows.get(window, None)
window_tensor = window_fn(self.win_length,
periodic=False) if window_fn else None
filterbanks = torch.tensor(
librosa.filters.mel(sr=sample_rate, n_fft=self.n_fft, n_mels=n_filt,
fmin=lowfreq, fmax=highfreq),
dtype=torch.float).unsqueeze(0)
# torchscript
self.register_buffer("fb", filterbanks)
self.register_buffer("window", window_tensor)
def get_seq_len(self, seq_len):
return torch.ceil(seq_len.to(dtype=torch.float) / self.hop_length).to(
dtype=torch.int)
# TORCHSCRIPT: center removed due to bug
def stft(self, x):
spec = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length,
win_length=self.win_length,
window=self.window.to(dtype=torch.float),
return_complex=True)
return torch.view_as_real(spec)
@torch.no_grad()
def calculate_features(self, x, seq_len):
dtype = x.dtype
seq_len = self.get_seq_len(seq_len)
# dither
if self.dither > 0:
x += self.dither * torch.randn_like(x)
# do preemphasis
if self.preemph is not None:
x = torch.cat(
(x[:, 0].unsqueeze(1), x[:, 1:] - self.preemph * x[:, :-1]), dim=1)
x = self.stft(x)
# get power spectrum
x = x.pow(2).sum(-1)
# dot with filterbank energies
x = torch.matmul(self.fb.to(x.dtype), x)
# log features if required
if self.log:
x = torch.log(x + 1e-20)
# frame splicing if required
if self.frame_splicing > 1:
raise ValueError('Frame splicing not supported')
# normalize if required
x = normalize_batch(x, seq_len, normalize_type=self.normalize)
# mask to zero any values beyond seq_len in batch,
# pad to multiple of `pad_align` (for efficiency)
max_len = x.size(-1)
mask = torch.arange(max_len, dtype=seq_len.dtype, device=x.device)
mask = mask.expand(x.size(0), max_len) >= seq_len.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1), 0)
# TORCHSCRIPT: Is this del important? It breaks scripting
# del mask
return x.to(dtype), seq_len
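# Illustrative usage sketch (added, not part of the original file): converts
# a batch of raw waveforms to padded log-mel features; the sample rate,
# filter count, and lengths below are example assumptions.
def _example_filterbank_features():
    featurizer = FilterbankFeatures(sample_rate=16000, n_filt=64)
    audio = torch.randn(2, 16000)                 # ~1 s of dummy audio
    audio_lens = torch.tensor([16000, 12000])
    feats, feat_lens = featurizer(audio, audio_lens)
    print(feats.shape, feat_lens)  # (2, 64, T), with T padded to pad_align=8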
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/features.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from torch.optim import Optimizer
import math
def lr_policy(step, epoch, initial_lr, optimizer, steps_per_epoch, warmup_epochs,
hold_epochs, num_epochs=None, policy='linear', min_lr=1e-5,
exp_gamma=None):
"""
    Learning rate schedule with warmup, hold, and decay phases.
    Args:
        step: current iteration number
        epoch: current epoch number
        initial_lr: base learning rate
        optimizer: optimizer whose param groups get the new learning rate
        steps_per_epoch: number of iterations per epoch
        warmup_epochs: number of epochs of linear warmup
        hold_epochs: number of epochs to hold initial_lr after warmup
        num_epochs: total number of epochs (required for the 'legacy' policy)
        policy: 'legacy' (quadratic decay) or 'exponential'
        min_lr: lower bound on the learning rate
        exp_gamma: per-epoch decay factor (required for 'exponential')
"""
warmup_steps = warmup_epochs * steps_per_epoch
hold_steps = hold_epochs * steps_per_epoch
if policy == 'legacy':
assert num_epochs is not None
tot_steps = num_epochs * steps_per_epoch
if step < warmup_steps:
a = (step + 1) / (warmup_steps + 1)
elif step < warmup_steps + hold_steps:
a = 1.0
else:
a = (((tot_steps - step)
/ (tot_steps - warmup_steps - hold_steps)) ** 2)
elif policy == 'exponential':
assert exp_gamma is not None
if step < warmup_steps:
a = (step + 1) / (warmup_steps + 1)
elif step < warmup_steps + hold_steps:
a = 1.0
else:
a = exp_gamma ** (epoch - warmup_epochs - hold_epochs)
else:
raise ValueError
new_lr = max(a * initial_lr, min_lr)
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
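# Illustrative sketch (added, not part of the original file): with the
# 'exponential' policy the rate warms up linearly, holds at initial_lr, then
# decays by exp_gamma per epoch (the exponent is 0 in the first post-hold
# epoch). All hyperparameters below are example assumptions.
def _example_lr_policy():
    opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.01)
    for step in range(40):
        lr_policy(step, epoch=step // 10, initial_lr=0.01, optimizer=opt,
                  steps_per_epoch=10, warmup_epochs=1, hold_epochs=1,
                  policy='exponential', exp_gamma=0.9)
    print(opt.param_groups[0]['lr'])  # 0.009 in epoch 3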
class AdamW(Optimizer):
"""Implements AdamW algorithm.
    Adam has been proposed in `Adam: A Method for Stochastic Optimization`_;
    the decoupled weight decay (AdamW) variant in `Decoupled Weight Decay
    Regularization` (https://arxiv.org/abs/1711.05101).
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay, amsgrad=amsgrad)
super(AdamW, self).__init__(params, defaults)
def __setstate__(self, state):
super(AdamW, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
bias_correction1 = 1 - beta1 ** state['step']
bias_correction2 = 1 - beta2 ** state['step']
step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1
                p.data.add_(torch.mul(p.data, group['weight_decay']).addcdiv_(exp_avg, denom, value=1), alpha=-step_size)
return loss
class Novograd(Optimizer):
"""
Implements Novograd algorithm.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.95, 0))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
grad_averaging: gradient averaging
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False)
"""
def __init__(self, params, lr=1e-3, betas=(0.95, 0), eps=1e-8,
weight_decay=0, grad_averaging=False, amsgrad=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay,
grad_averaging=grad_averaging,
amsgrad=amsgrad)
super(Novograd, self).__init__(params, defaults)
def __setstate__(self, state):
super(Novograd, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('amsgrad', False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Sparse gradients are not supported.')
amsgrad = group['amsgrad']
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
if amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state['max_exp_avg_sq'] = torch.zeros([]).to(state['exp_avg'].device)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
if amsgrad:
max_exp_avg_sq = state['max_exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
norm = torch.sum(torch.pow(grad, 2))
if exp_avg_sq == 0:
exp_avg_sq.copy_(norm)
else:
exp_avg_sq.mul_(beta2).add_(norm, alpha=1 - beta2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = max_exp_avg_sq.sqrt().add_(group['eps'])
else:
denom = exp_avg_sq.sqrt().add_(group['eps'])
grad.div_(denom)
if group['weight_decay'] != 0:
grad.add_(p.data, alpha=group['weight_decay'])
if group['grad_averaging']:
grad.mul_(1 - beta1)
exp_avg.mul_(beta1).add_(grad)
p.data.add_(exp_avg, alpha=-group['lr'])
return loss
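# Illustrative usage sketch (added, not part of the original file): Novograd
# is a drop-in replacement for other torch optimizers.
def _example_novograd():
    model = torch.nn.Linear(10, 2)
    opt = Novograd(model.parameters(), lr=1e-3, weight_decay=1e-3)
    opt.zero_grad()
    model(torch.randn(4, 10)).sum().backward()
    opt.step()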
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/optimizers.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
from .audio import (audio_from_file, AudioSegment, GainPerturbation,
ShiftPerturbation, SpeedPerturbation)
from .text import _clean_text, punctuation_map
def normalize_string(s, labels, punct_map):
"""Normalizes string.
Example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
"""
labels = set(labels)
try:
text = _clean_text(s, ["english_cleaners"], punct_map).strip()
return ''.join([tok for tok in text if all(t in labels for t in tok)])
    except Exception:
print(f"WARNING: Normalizing failed: {s}")
return None
class FilelistDataset(Dataset):
def __init__(self, filelist_fpath):
self.samples = [line.strip() for line in open(filelist_fpath, 'r')]
def __len__(self):
return len(self.samples)
def __getitem__(self, index):
audio, audio_len = audio_from_file(self.samples[index])
return (audio.squeeze(0), audio_len, torch.LongTensor([0]),
torch.LongTensor([0]))
class SingleAudioDataset(FilelistDataset):
def __init__(self, audio_fpath):
self.samples = [audio_fpath]
class AudioDataset(Dataset):
def __init__(self, data_dir, manifest_fpaths, labels,
sample_rate=16000, min_duration=0.1, max_duration=float("inf"),
pad_to_max_duration=False, max_utts=0, normalize_transcripts=True,
sort_by_duration=False, trim_silence=False,
speed_perturbation=None, gain_perturbation=None,
shift_perturbation=None, ignore_offline_speed_perturbation=False):
"""Loads audio, transcript and durations listed in a .json file.
Args:
data_dir: absolute path to dataset folder
            manifest_fpaths: list of relative paths from the dataset folder
                to manifest json files as described above
labels (str): all possible output symbols
min_duration (int): skip audio shorter than threshold
max_duration (int): skip audio longer than threshold
pad_to_max_duration (bool): pad all sequences to max_duration
max_utts (int): limit number of utterances
normalize_transcripts (bool): normalize transcript text
sort_by_duration (bool): sort sequences by increasing duration
trim_silence (bool): trim leading and trailing silence from audio
            ignore_offline_speed_perturbation (bool): discard precomputed
                speed-perturbed copies and keep only original-speed audio
Returns:
tuple of Tensors
"""
self.data_dir = data_dir
self.labels = labels
self.labels_map = dict([(labels[i], i) for i in range(len(labels))])
self.punctuation_map = punctuation_map(labels)
self.blank_index = len(labels)
self.pad_to_max_duration = pad_to_max_duration
self.sort_by_duration = sort_by_duration
self.max_utts = max_utts
self.normalize_transcripts = normalize_transcripts
self.ignore_offline_speed_perturbation = ignore_offline_speed_perturbation
self.min_duration = min_duration
self.max_duration = max_duration
self.trim_silence = trim_silence
self.sample_rate = sample_rate
perturbations = []
if speed_perturbation is not None:
perturbations.append(SpeedPerturbation(**speed_perturbation))
if gain_perturbation is not None:
perturbations.append(GainPerturbation(**gain_perturbation))
if shift_perturbation is not None:
perturbations.append(ShiftPerturbation(**shift_perturbation))
self.perturbations = perturbations
self.max_duration = max_duration
self.samples = []
self.duration = 0.0
self.duration_filtered = 0.0
for fpath in manifest_fpaths:
self._load_json_manifest(fpath)
if sort_by_duration:
self.samples = sorted(self.samples, key=lambda s: s['duration'])
def __getitem__(self, index):
s = self.samples[index]
rn_indx = np.random.randint(len(s['audio_filepath']))
duration = s['audio_duration'][rn_indx] if 'audio_duration' in s else 0
offset = s.get('offset', 0)
segment = AudioSegment(
s['audio_filepath'][rn_indx], target_sr=self.sample_rate,
offset=offset, duration=duration, trim=self.trim_silence)
for p in self.perturbations:
p.maybe_apply(segment, self.sample_rate)
segment = torch.FloatTensor(segment.samples)
return (segment,
torch.tensor(segment.shape[0]).int(),
torch.tensor(s["transcript"]),
torch.tensor(len(s["transcript"])).int())
def __len__(self):
return len(self.samples)
def _load_json_manifest(self, fpath):
for s in json.load(open(fpath, "r", encoding="utf-8")):
if self.pad_to_max_duration and not self.ignore_offline_speed_perturbation:
# require all perturbed samples to be < self.max_duration
s_max_duration = max(f['duration'] for f in s['files'])
else:
# otherwise we allow perturbances to be > self.max_duration
s_max_duration = s['original_duration']
s['duration'] = s.pop('original_duration')
if not (self.min_duration <= s_max_duration <= self.max_duration):
self.duration_filtered += s['duration']
continue
# Prune and normalize according to transcript
tr = (s.get('transcript', None) or
self.load_transcript(s['text_filepath']))
if not isinstance(tr, str):
print(f'WARNING: Skipped sample (transcript not a str): {tr}.')
self.duration_filtered += s['duration']
continue
if self.normalize_transcripts:
tr = normalize_string(tr, self.labels, self.punctuation_map)
s["transcript"] = self.to_vocab_inds(tr)
files = s.pop('files')
if self.ignore_offline_speed_perturbation:
files = [f for f in files if f['speed'] == 1.0]
s['audio_duration'] = [f['duration'] for f in files]
s['audio_filepath'] = [str(Path(self.data_dir, f['fname']))
for f in files]
self.samples.append(s)
self.duration += s['duration']
if self.max_utts > 0 and len(self.samples) >= self.max_utts:
print(f'Reached max_utts={self.max_utts}. Finished parsing {fpath}.')
break
def load_transcript(self, transcript_path):
with open(transcript_path, 'r', encoding="utf-8") as transcript_file:
transcript = transcript_file.read().replace('\n', '')
return transcript
def to_vocab_inds(self, transcript):
chars = [self.labels_map.get(x, self.blank_index) for x in list(transcript)]
transcript = list(filter(lambda x: x != self.blank_index, chars))
return transcript
def collate_fn(batch):
bs = len(batch)
max_len = lambda l, idx: max(el[idx].size(0) for el in l)
audio = torch.zeros(bs, max_len(batch, 0))
audio_lens = torch.zeros(bs, dtype=torch.int32)
transcript = torch.zeros(bs, max_len(batch, 2))
transcript_lens = torch.zeros(bs, dtype=torch.int32)
for i, sample in enumerate(batch):
audio[i].narrow(0, 0, sample[0].size(0)).copy_(sample[0])
audio_lens[i] = sample[1]
transcript[i].narrow(0, 0, sample[2].size(0)).copy_(sample[2])
transcript_lens[i] = sample[3]
return audio, audio_lens, transcript, transcript_lens
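# Illustrative sketch (added, not part of the original file): collate_fn
# zero-pads audio and transcripts to the longest sample in the batch.
def _example_collate():
    a = (torch.randn(100), torch.tensor(100).int(),
         torch.tensor([1, 2, 3]), torch.tensor(3).int())
    b = (torch.randn(60), torch.tensor(60).int(),
         torch.tensor([4, 5]), torch.tensor(2).int())
    audio, audio_lens, txt, txt_lens = collate_fn([a, b])
    print(audio.shape, txt.shape)  # torch.Size([2, 100]) torch.Size([2, 3])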
def get_data_loader(dataset, batch_size, multi_gpu=True, shuffle=True,
drop_last=True, num_workers=4):
kw = {'dataset': dataset, 'collate_fn': collate_fn,
'num_workers': num_workers, 'pin_memory': True}
if multi_gpu:
loader_shuffle = False
sampler = DistributedSampler(dataset, shuffle=shuffle)
else:
loader_shuffle = shuffle
sampler = None
return DataLoader(batch_size=batch_size, drop_last=drop_last,
sampler=sampler, shuffle=loader_shuffle, **kw)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/dataset.py |
import numpy as np
class BenchmarkStats:
""" Tracks statistics used for benchmarking. """
def __init__(self):
self.utts = []
self.times = []
self.losses = []
def update(self, utts, times, losses):
self.utts.append(utts)
self.times.append(times)
self.losses.append(losses)
def get(self, n_epochs):
throughput = sum(self.utts[-n_epochs:]) / sum(self.times[-n_epochs:])
return {'throughput': throughput, 'benchmark_epochs_num': n_epochs,
'loss': np.mean(self.losses[-n_epochs:])}
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/utils.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import soundfile as sf
import librosa
import torch
import numpy as np
import sox
def audio_from_file(file_path, offset=0, duration=0, trim=False, target_sr=16000):
audio = AudioSegment(file_path, target_sr=target_sr, int_values=False,
offset=offset, duration=duration, trim=trim)
samples = torch.tensor(audio.samples, dtype=torch.float).cuda()
num_samples = torch.tensor(samples.shape[0]).int().cuda()
return (samples.unsqueeze(0), num_samples.unsqueeze(0))
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, filename, target_sr=None, int_values=False, offset=0,
duration=0, trim=False, trim_db=60):
"""Create audio segment from samples.
Samples are converted to float32 internally, with int scaled to [-1, 1].
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param int_values: if true, load samples as 32-bit integers
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:return: numpy array of samples
"""
with sf.SoundFile(filename, 'r') as f:
dtype = 'int32' if int_values else 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
samples = samples.transpose()
samples = self._convert_samples_to_float32(samples)
if target_sr is not None and target_sr != sample_rate:
samples = librosa.resample(samples, orig_sr=sample_rate,
target_sr=target_sr)
sample_rate = target_sr
if trim:
samples, _ = librosa.effects.trim(samples, top_db=trim_db)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 1)
def __eq__(self, other):
"""Return whether two objects are equal."""
if type(other) is not type(self):
return False
if self._sample_rate != other._sample_rate:
return False
if self._samples.shape != other._samples.shape:
return False
if np.any(self.samples != other._samples):
return False
return True
def __ne__(self, other):
"""Return whether two objects are unequal."""
return not self.__eq__(other)
def __str__(self):
"""Return human-readable representation of segment."""
return ("%s: num_samples=%d, sample_rate=%d, duration=%.2fsec, "
"rms=%.2fdB" % (type(self), self.num_samples, self.sample_rate,
self.duration, self.rms_db))
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / 2 ** (bits - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
@property
def num_samples(self):
return self._samples.shape[0]
@property
def duration(self):
return self._samples.shape[0] / float(self._sample_rate)
@property
def rms_db(self):
mean_square = np.mean(self._samples ** 2)
return 10 * np.log10(mean_square)
def gain_db(self, gain):
self._samples *= 10. ** (gain / 20.)
def pad(self, pad_size, symmetric=False):
"""Add zero padding to the sample.
The pad size is given in number of samples. If symmetric=True,
`pad_size` will be added to both sides. If false, `pad_size` zeros
will be added only to the end.
"""
self._samples = np.pad(self._samples,
(pad_size if symmetric else 0, pad_size),
mode='constant')
def subsegment(self, start_time=None, end_time=None):
"""Cut the AudioSegment between given boundaries.
Note that this is an in-place transformation.
:param start_time: Beginning of subsegment in seconds.
:type start_time: float
:param end_time: End of subsegment in seconds.
:type end_time: float
:raise ValueError: If start_time or end_time is incorrectly set, e.g. out
of bounds in time.
"""
start_time = 0.0 if start_time is None else start_time
end_time = self.duration if end_time is None else end_time
if start_time < 0.0:
start_time = self.duration + start_time
if end_time < 0.0:
end_time = self.duration + end_time
if start_time < 0.0:
raise ValueError("The slice start position (%f s) is out of "
"bounds." % start_time)
if end_time < 0.0:
raise ValueError("The slice end position (%f s) is out of bounds." %
end_time)
if start_time > end_time:
raise ValueError("The slice start position (%f s) is later than "
"the end position (%f s)." % (start_time, end_time))
if end_time > self.duration:
raise ValueError("The slice end position (%f s) is out of bounds "
"(> %f s)" % (end_time, self.duration))
start_sample = int(round(start_time * self._sample_rate))
end_sample = int(round(end_time * self._sample_rate))
self._samples = self._samples[start_sample:end_sample]
class Perturbation:
def __init__(self, p=0.1, rng=None):
self.p = p
self._rng = random.Random() if rng is None else rng
def maybe_apply(self, segment, sample_rate=None):
if self._rng.random() < self.p:
self(segment, sample_rate)
class SpeedPerturbation(Perturbation):
def __init__(self, min_rate=0.85, max_rate=1.15, discrete=False, p=0.1, rng=None):
super(SpeedPerturbation, self).__init__(p, rng)
assert 0 < min_rate < max_rate
self.min_rate = min_rate
self.max_rate = max_rate
self.discrete = discrete
def __call__(self, data, sample_rate):
if self.discrete:
rate = np.random.choice([self.min_rate, None, self.max_rate])
else:
rate = self._rng.uniform(self.min_rate, self.max_rate)
if rate is not None:
data._samples = sox.Transformer().speed(factor=rate).build_array(
input_array=data._samples, sample_rate_in=sample_rate)
class GainPerturbation(Perturbation):
def __init__(self, min_gain_dbfs=-10, max_gain_dbfs=10, p=0.1, rng=None):
super(GainPerturbation, self).__init__(p, rng)
self._rng = random.Random() if rng is None else rng
self._min_gain_dbfs = min_gain_dbfs
self._max_gain_dbfs = max_gain_dbfs
def __call__(self, data, sample_rate=None):
del sample_rate
gain = self._rng.uniform(self._min_gain_dbfs, self._max_gain_dbfs)
data._samples = data._samples * (10. ** (gain / 20.))
class ShiftPerturbation(Perturbation):
def __init__(self, min_shift_ms=-5.0, max_shift_ms=5.0, p=0.1, rng=None):
super(ShiftPerturbation, self).__init__(p, rng)
self._min_shift_ms = min_shift_ms
self._max_shift_ms = max_shift_ms
def __call__(self, data, sample_rate):
shift_ms = self._rng.uniform(self._min_shift_ms, self._max_shift_ms)
if abs(shift_ms) / 1000 > data.duration:
# TODO: do something smarter than just ignore this condition
return
shift_samples = int(shift_ms * data.sample_rate // 1000)
# print("DEBUG: shift:", shift_samples)
if shift_samples < 0:
data._samples[-shift_samples:] = data._samples[:shift_samples]
data._samples[:-shift_samples] = 0
elif shift_samples > 0:
data._samples[:-shift_samples] = data._samples[shift_samples:]
data._samples[-shift_samples:] = 0
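# Illustrative usage sketch (added, not part of the original file): each
# perturbation mutates the segment in place with probability p. The input
# file name 'sample.wav' is hypothetical.
def _example_perturbations(path='sample.wav'):
    seg = AudioSegment(path, target_sr=16000)
    GainPerturbation(min_gain_dbfs=-6, max_gain_dbfs=6, p=1.0).maybe_apply(seg)
    ShiftPerturbation(p=1.0).maybe_apply(seg, sample_rate=16000)
    print(seg.duration, seg.rms_db)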
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/audio.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
from collections import OrderedDict
import torch
import torch.distributed as dist
from .metrics import word_error_rate
def print_once(msg):
if not dist.is_initialized() or dist.get_rank() == 0:
print(msg)
def add_ctc_blank(symbols):
return symbols + ['<BLANK>']
def ctc_decoder_predictions_tensor(tensor, labels):
"""
    Takes the output of a greedy CTC decoder and performs CTC decoding to
    remove duplicates and the blank symbol. Returns predictions.
Args:
tensor: model output tensor
        labels: A list of labels
Returns:
prediction
"""
blank_id = len(labels) - 1
hypotheses = []
labels_map = {i: labels[i] for i in range(len(labels))}
prediction_cpu_tensor = tensor.long().cpu()
# iterate over batch
for ind in range(prediction_cpu_tensor.shape[0]):
prediction = prediction_cpu_tensor[ind].numpy().tolist()
# CTC decoding procedure
decoded_prediction = []
previous = len(labels) - 1 # id of a blank symbol
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
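# Illustrative sketch (added, not part of the original file): repeated ids
# and blanks collapse; '<BLANK>' must be the last label (cf. add_ctc_blank).
def _example_ctc_decode():
    labels = add_ctc_blank(['a', 'b', 'c'])             # blank id == 3
    ids = torch.tensor([[0, 0, 3, 1, 1, 3, 3, 2]])
    print(ctc_decoder_predictions_tensor(ids, labels))  # ['abc']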
def greedy_wer(preds, tgt, tgt_lens, labels):
"""
    Takes the output of a greedy CTC decoder, performs CTC decoding to
    remove duplicates and the blank symbol, and computes the WER.
    Args:
        preds: predictions tensor
        tgt: targets tensor
        tgt_lens: target lengths tensor
        labels: A list of labels
    Returns:
        word error rate, first hypothesis, first reference
"""
with torch.no_grad():
references = gather_transcripts([tgt], [tgt_lens], labels)
hypotheses = ctc_decoder_predictions_tensor(preds, labels)
wer, _, _ = word_error_rate(hypotheses, references)
return wer, hypotheses[0], references[0]
def gather_losses(losses_list):
return [torch.mean(torch.stack(losses_list))]
def gather_predictions(predictions_list, labels):
results = []
for prediction in predictions_list:
results += ctc_decoder_predictions_tensor(prediction, labels=labels)
return results
def gather_transcripts(transcript_list, transcript_len_list, labels):
results = []
labels_map = {i: labels[i] for i in range(len(labels))}
# iterate over workers
for txt, lens in zip(transcript_list, transcript_len_list):
for t, l in zip(txt.long().cpu(), lens.long().cpu()):
t = list(t.numpy())
results.append(''.join([labels_map[c] for c in t[:l]]))
return results
def process_evaluation_batch(tensors, global_vars, labels):
"""
    Processes results of an iteration and saves them in global_vars
Args:
tensors: dictionary with results of an evaluation iteration, e.g. loss, predictions, transcript, and output
        global_vars: dictionary where processed results of the iteration are saved
labels: A list of labels
"""
for kv, v in tensors.items():
if kv.startswith('loss'):
global_vars['EvalLoss'] += gather_losses(v)
elif kv.startswith('predictions'):
global_vars['preds'] += gather_predictions(v, labels)
elif kv.startswith('transcript_length'):
transcript_len_list = v
elif kv.startswith('transcript'):
transcript_list = v
elif kv.startswith('output'):
global_vars['logits'] += v
global_vars['txts'] += gather_transcripts(
transcript_list, transcript_len_list, labels)
def process_evaluation_epoch(aggregates, tag=None):
"""
    Processes results from each worker at the end of evaluation and combines them into the final result
Args:
aggregates: dictionary containing information of entire evaluation
Return:
wer: final word error rate
loss: final loss
"""
if 'losses' in aggregates:
eloss = torch.mean(torch.stack(aggregates['losses'])).item()
else:
eloss = None
hypotheses = aggregates['preds']
references = aggregates['txts']
wer, scores, num_words = word_error_rate(hypotheses, references)
multi_gpu = dist.is_initialized()
if multi_gpu:
if eloss is not None:
eloss /= dist.get_world_size()
eloss_tensor = torch.tensor(eloss).cuda()
dist.all_reduce(eloss_tensor)
eloss = eloss_tensor.item()
scores_tensor = torch.tensor(scores).cuda()
dist.all_reduce(scores_tensor)
scores = scores_tensor.item()
num_words_tensor = torch.tensor(num_words).cuda()
dist.all_reduce(num_words_tensor)
num_words = num_words_tensor.item()
wer = scores * 1.0 / num_words
return wer, eloss
def num_weights(module):
return sum(p.numel() for p in module.parameters() if p.requires_grad)
def convert_v1_state_dict(state_dict):
rules = [
('^jasper_encoder.encoder.', 'encoder.layers.'),
('^jasper_decoder.decoder_layers.', 'decoder.layers.'),
]
ret = {}
for k, v in state_dict.items():
if k.startswith('acoustic_model.'):
continue
if k.startswith('audio_preprocessor.'):
continue
for pattern, to in rules:
k = re.sub(pattern, to, k)
ret[k] = v
return ret
class Checkpointer(object):
def __init__(self, save_dir, model_name, keep_milestones=[100, 200, 300]):
self.save_dir = save_dir
self.keep_milestones = keep_milestones
self.model_name = model_name
tracked = [
            (int(re.search(r'epoch(\d+)_', f).group(1)), f)
for f in glob.glob(f'{save_dir}/{self.model_name}_epoch*_checkpoint.pt')]
tracked = sorted(tracked, key=lambda t: t[0])
self.tracked = OrderedDict(tracked)
def save(self, model, ema_model, optimizer, scaler, epoch, step, best_wer,
is_best=False):
"""Saves model checkpoint for inference/resuming training.
Args:
model: the model, optionally wrapped by DistributedDataParallel
ema_model: model with averaged weights, can be None
optimizer: optimizer
epoch (int): epoch during which the model is saved
step (int): number of steps since beginning of training
best_wer (float): lowest recorded WER on the dev set
is_best (bool, optional): set name of checkpoint to 'best'
and overwrite the previous one
"""
rank = 0
if dist.is_initialized():
dist.barrier()
rank = dist.get_rank()
if rank != 0:
return
# Checkpoint already saved
if not is_best and epoch in self.tracked:
return
unwrap_ddp = lambda model: getattr(model, 'module', model)
state = {
'epoch': epoch,
'step': step,
'best_wer': best_wer,
'state_dict': unwrap_ddp(model).state_dict(),
'ema_state_dict': unwrap_ddp(ema_model).state_dict() if ema_model is not None else None,
'optimizer': optimizer.state_dict(),
'scaler': scaler.state_dict(),
}
if is_best:
fpath = os.path.join(
self.save_dir, f"{self.model_name}_best_checkpoint.pt")
else:
fpath = os.path.join(
self.save_dir, f"{self.model_name}_epoch{epoch}_checkpoint.pt")
print_once(f"Saving {fpath}...")
torch.save(state, fpath)
        if not is_best:
            # Remove old checkpoints; keep milestones and the last two
            self.tracked[epoch] = fpath
            for e in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
                try:
                    os.remove(self.tracked[e])
                except OSError:
                    pass
                del self.tracked[e]
    def last_checkpoint(self):
        tracked = list(self.tracked.values())
        if len(tracked) >= 1:
            try:
                torch.load(tracked[-1], map_location='cpu')
                return tracked[-1]
            except Exception:
                print_once(f'Last checkpoint {tracked[-1]} appears corrupted.')
        # Fall back to the previous checkpoint if the last one is unreadable
        if len(tracked) >= 2:
            return tracked[-2]
        return None
def load(self, fpath, model, ema_model, optimizer, scaler, meta):
print_once(f'Loading model from {fpath}')
checkpoint = torch.load(fpath, map_location="cpu")
unwrap_ddp = lambda model: getattr(model, 'module', model)
state_dict = convert_v1_state_dict(checkpoint['state_dict'])
unwrap_ddp(model).load_state_dict(state_dict, strict=True)
if ema_model is not None:
if checkpoint.get('ema_state_dict') is not None:
key = 'ema_state_dict'
else:
key = 'state_dict'
print_once('WARNING: EMA weights not found in the checkpoint.')
print_once('WARNING: Initializing EMA model with regular params.')
state_dict = convert_v1_state_dict(checkpoint[key])
unwrap_ddp(ema_model).load_state_dict(state_dict, strict=True)
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
meta['start_epoch'] = checkpoint.get('epoch')
meta['best_wer'] = checkpoint.get('best_wer', meta['best_wer'])
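# Typical usage sketch (paths and argument values illustrative):
#   ckpt = Checkpointer('./results', 'jasper')
#   ckpt.save(model, ema_model, optimizer, scaler, epoch, step, best_wer)
#   resume_path = ckpt.last_checkpoint()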
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/helpers.py |
# Copyright (c) 2017 Keith Ito
""" from https://github.com/keithito/tacotron """
import re
import string
from . import cleaners
def _clean_text(text, cleaner_names, *args):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text, *args)
return text
def punctuation_map(labels):
# Punctuation to remove
punctuation = string.punctuation
punctuation = punctuation.replace("+", "")
punctuation = punctuation.replace("&", "")
# TODO We might also want to consider:
# @ -> at
# # -> number, pound, hashtag
# ~ -> tilde
# _ -> underscore
# % -> percent
    # If a punctuation symbol is inside our vocab, we do not remove it from the text
for l in labels:
punctuation = punctuation.replace(l, "")
# Turn all punctuation to whitespace
table = str.maketrans(punctuation, " " * len(punctuation))
return table
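# Illustrative usage (label set abridged): characters present in the labels
# survive, all remaining punctuation turns into whitespace.
#   table = punctuation_map(list("abcdefghijklmnopqrstuvwxyz' "))
#   'hello, world!'.translate(table)  # -> 'hello  world '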
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/__init__.py |
# Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" from https://github.com/keithito/tacotron
Modified to add support for time and slight tweaks to _expand_number
"""
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
_time_re = re.compile(r'([0-9]{1,2}):([0-9]{2})')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
if int(m.group(0)[0]) == 0:
return _inflect.number_to_words(m.group(0), andword='', group=1)
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
    # Read phone numbers and other very large numbers in digit groups
elif num > 1000000000 and num % 10000 != 0:
return _inflect.number_to_words(num, andword='', group=1)
else:
return _inflect.number_to_words(num, andword='')
def _expand_time(m):
mins = int(m.group(2))
if mins == 0:
return _inflect.number_to_words(m.group(1))
return " ".join([_inflect.number_to_words(m.group(1)), _inflect.number_to_words(m.group(2))])
def normalize_numbers(text):
    text = re.sub(_comma_number_re, _remove_commas, text)
    text = re.sub(_pounds_re, r'\1 pounds', text)
    text = re.sub(_dollars_re, _expand_dollars, text)
    text = re.sub(_decimal_number_re, _expand_decimal_point, text)
    # Times must be expanded before plain numbers, otherwise _number_re
    # consumes the digits first and _time_re never matches.
    text = re.sub(_time_re, _expand_time, text)
    text = re.sub(_ordinal_re, _expand_ordinal, text)
    text = re.sub(_number_re, _expand_number, text)
    return text
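# A few illustrative conversions (exact wording comes from the inflect
# library, so outputs are approximate):
#   '1,000' -> 'one thousand'
#   '$3.50' -> 'three dollars, fifty cents'
#   '2nd'   -> 'second'
#   '2007'  -> 'two thousand seven'
#   '8:15'  -> 'eight fifteen'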
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/numbers.py |
# Copyright (c) 2017 Keith Ito
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify _letters.
See TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/symbols.py |
# Copyright (c) 2017 Keith Ito
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" from https://github.com/keithito/tacotron
Modified to add puncturation removal
"""
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .numbers import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
    return unidecoder(text)
def remove_punctuation(text, table):
text = text.translate(table)
text = re.sub(r'&', " and ", text)
text = re.sub(r'\+', " plus ", text)
return text
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text, table=None):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
if table is not None:
text = remove_punctuation(text, table)
text = collapse_whitespace(text)
return text
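# Illustrative end-to-end example (output approximate, untested):
#   english_cleaners('Mr. Smith paid $3.50 on the 2nd!')
#     -> 'mister smith paid three dollars, fifty cents on the second!'
# Passing a table from punctuation_map() would additionally strip the
# remaining punctuation.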
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/cleaners.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Sindre Sorhus <[email protected]> (https://sindresorhus.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/sindresorhus/transliterate/blob/main/replacements.js
#
replacements = [
# German umlauts
['ß', 'ss'],
['ẞ', 'Ss'],
['ä', 'ae'],
['Ä', 'Ae'],
['ö', 'oe'],
['Ö', 'Oe'],
['ü', 'ue'],
['Ü', 'Ue'],
# Latin
['À', 'A'],
['Á', 'A'],
['Â', 'A'],
['Ã', 'A'],
['Ä', 'Ae'],
['Å', 'A'],
['Æ', 'AE'],
['Ç', 'C'],
['È', 'E'],
['É', 'E'],
['Ê', 'E'],
['Ë', 'E'],
['Ì', 'I'],
['Í', 'I'],
['Î', 'I'],
['Ï', 'I'],
['Ð', 'D'],
['Ñ', 'N'],
['Ò', 'O'],
['Ó', 'O'],
['Ô', 'O'],
['Õ', 'O'],
['Ö', 'Oe'],
['Ő', 'O'],
['Ø', 'O'],
['Ù', 'U'],
['Ú', 'U'],
['Û', 'U'],
['Ü', 'Ue'],
['Ű', 'U'],
['Ý', 'Y'],
['Þ', 'TH'],
['ß', 'ss'],
['à', 'a'],
['á', 'a'],
['â', 'a'],
['ã', 'a'],
['ä', 'ae'],
['å', 'a'],
['æ', 'ae'],
['ç', 'c'],
['è', 'e'],
['é', 'e'],
['ê', 'e'],
['ë', 'e'],
['ì', 'i'],
['í', 'i'],
['î', 'i'],
['ï', 'i'],
['ð', 'd'],
['ñ', 'n'],
['ò', 'o'],
['ó', 'o'],
['ô', 'o'],
['õ', 'o'],
['ö', 'oe'],
['ő', 'o'],
['ø', 'o'],
['ù', 'u'],
['ú', 'u'],
['û', 'u'],
['ü', 'ue'],
['ű', 'u'],
['ý', 'y'],
['þ', 'th'],
['ÿ', 'y'],
['ẞ', 'SS'],
# Vietnamese
['à', 'a'],
['À', 'A'],
['á', 'a'],
['Á', 'A'],
['â', 'a'],
['Â', 'A'],
['ã', 'a'],
['Ã', 'A'],
['è', 'e'],
['È', 'E'],
['é', 'e'],
['É', 'E'],
['ê', 'e'],
['Ê', 'E'],
['ì', 'i'],
['Ì', 'I'],
['í', 'i'],
['Í', 'I'],
['ò', 'o'],
['Ò', 'O'],
['ó', 'o'],
['Ó', 'O'],
['ô', 'o'],
['Ô', 'O'],
['õ', 'o'],
['Õ', 'O'],
['ù', 'u'],
['Ù', 'U'],
['ú', 'u'],
['Ú', 'U'],
['ý', 'y'],
['Ý', 'Y'],
['ă', 'a'],
['Ă', 'A'],
['Đ', 'D'],
['đ', 'd'],
['ĩ', 'i'],
['Ĩ', 'I'],
['ũ', 'u'],
['Ũ', 'U'],
['ơ', 'o'],
['Ơ', 'O'],
['ư', 'u'],
['Ư', 'U'],
['ạ', 'a'],
['Ạ', 'A'],
['ả', 'a'],
['Ả', 'A'],
['ấ', 'a'],
['Ấ', 'A'],
['ầ', 'a'],
['Ầ', 'A'],
['ẩ', 'a'],
['Ẩ', 'A'],
['ẫ', 'a'],
['Ẫ', 'A'],
['ậ', 'a'],
['Ậ', 'A'],
['ắ', 'a'],
['Ắ', 'A'],
['ằ', 'a'],
['Ằ', 'A'],
['ẳ', 'a'],
['Ẳ', 'A'],
['ẵ', 'a'],
['Ẵ', 'A'],
['ặ', 'a'],
['Ặ', 'A'],
['ẹ', 'e'],
['Ẹ', 'E'],
['ẻ', 'e'],
['Ẻ', 'E'],
['ẽ', 'e'],
['Ẽ', 'E'],
['ế', 'e'],
['Ế', 'E'],
['ề', 'e'],
['Ề', 'E'],
['ể', 'e'],
['Ể', 'E'],
['ễ', 'e'],
['Ễ', 'E'],
['ệ', 'e'],
['Ệ', 'E'],
['ỉ', 'i'],
['Ỉ', 'I'],
['ị', 'i'],
['Ị', 'I'],
['ọ', 'o'],
['Ọ', 'O'],
['ỏ', 'o'],
['Ỏ', 'O'],
['ố', 'o'],
['Ố', 'O'],
['ồ', 'o'],
['Ồ', 'O'],
['ổ', 'o'],
['Ổ', 'O'],
['ỗ', 'o'],
['Ỗ', 'O'],
['ộ', 'o'],
['Ộ', 'O'],
['ớ', 'o'],
['Ớ', 'O'],
['ờ', 'o'],
['Ờ', 'O'],
['ở', 'o'],
['Ở', 'O'],
['ỡ', 'o'],
['Ỡ', 'O'],
['ợ', 'o'],
['Ợ', 'O'],
['ụ', 'u'],
['Ụ', 'U'],
['ủ', 'u'],
['Ủ', 'U'],
['ứ', 'u'],
['Ứ', 'U'],
['ừ', 'u'],
['Ừ', 'U'],
['ử', 'u'],
['Ử', 'U'],
['ữ', 'u'],
['Ữ', 'U'],
['ự', 'u'],
['Ự', 'U'],
['ỳ', 'y'],
['Ỳ', 'Y'],
['ỵ', 'y'],
['Ỵ', 'Y'],
['ỷ', 'y'],
['Ỷ', 'Y'],
['ỹ', 'y'],
['Ỹ', 'Y'],
# Arabic
['ء', 'e'],
['آ', 'a'],
['أ', 'a'],
['ؤ', 'w'],
['إ', 'i'],
['ئ', 'y'],
['ا', 'a'],
['ب', 'b'],
['ة', 't'],
['ت', 't'],
['ث', 'th'],
['ج', 'j'],
['ح', 'h'],
['خ', 'kh'],
['د', 'd'],
['ذ', 'dh'],
['ر', 'r'],
['ز', 'z'],
['س', 's'],
['ش', 'sh'],
['ص', 's'],
['ض', 'd'],
['ط', 't'],
['ظ', 'z'],
['ع', 'e'],
['غ', 'gh'],
['ـ', '_'],
['ف', 'f'],
['ق', 'q'],
['ك', 'k'],
['ل', 'l'],
['م', 'm'],
['ن', 'n'],
['ه', 'h'],
['و', 'w'],
['ى', 'a'],
['ي', 'y'],
['َ', 'a'],
['ُ', 'u'],
['ِ', 'i'],
['٠', '0'],
['١', '1'],
['٢', '2'],
['٣', '3'],
['٤', '4'],
['٥', '5'],
['٦', '6'],
['٧', '7'],
['٨', '8'],
['٩', '9'],
# Persian / Farsi
['چ', 'ch'],
['ک', 'k'],
['گ', 'g'],
['پ', 'p'],
['ژ', 'zh'],
['ی', 'y'],
['۰', '0'],
['۱', '1'],
['۲', '2'],
['۳', '3'],
['۴', '4'],
['۵', '5'],
['۶', '6'],
['۷', '7'],
['۸', '8'],
['۹', '9'],
# Pashto
['ټ', 'p'],
['ځ', 'z'],
['څ', 'c'],
['ډ', 'd'],
['ﺫ', 'd'],
['ﺭ', 'r'],
['ړ', 'r'],
['ﺯ', 'z'],
['ږ', 'g'],
['ښ', 'x'],
['ګ', 'g'],
['ڼ', 'n'],
['ۀ', 'e'],
['ې', 'e'],
['ۍ', 'ai'],
# Urdu
['ٹ', 't'],
['ڈ', 'd'],
['ڑ', 'r'],
['ں', 'n'],
['ہ', 'h'],
['ھ', 'h'],
['ے', 'e'],
# Russian
['А', 'A'],
['а', 'a'],
['Б', 'B'],
['б', 'b'],
['В', 'V'],
['в', 'v'],
['Г', 'G'],
['г', 'g'],
['Д', 'D'],
['д', 'd'],
['ъе', 'ye'],
['Ъе', 'Ye'],
['ъЕ', 'yE'],
['ЪЕ', 'YE'],
['Е', 'E'],
['е', 'e'],
['Ё', 'Yo'],
['ё', 'yo'],
['Ж', 'Zh'],
['ж', 'zh'],
['З', 'Z'],
['з', 'z'],
['И', 'I'],
['и', 'i'],
['ый', 'iy'],
['Ый', 'Iy'],
['ЫЙ', 'IY'],
['ыЙ', 'iY'],
['Й', 'Y'],
['й', 'y'],
['К', 'K'],
['к', 'k'],
['Л', 'L'],
['л', 'l'],
['М', 'M'],
['м', 'm'],
['Н', 'N'],
['н', 'n'],
['О', 'O'],
['о', 'o'],
['П', 'P'],
['п', 'p'],
['Р', 'R'],
['р', 'r'],
['С', 'S'],
['с', 's'],
['Т', 'T'],
['т', 't'],
['У', 'U'],
['у', 'u'],
['Ф', 'F'],
['ф', 'f'],
['Х', 'Kh'],
['х', 'kh'],
['Ц', 'Ts'],
['ц', 'ts'],
['Ч', 'Ch'],
['ч', 'ch'],
['Ш', 'Sh'],
['ш', 'sh'],
['Щ', 'Sch'],
['щ', 'sch'],
['Ъ', ''],
['ъ', ''],
['Ы', 'Y'],
['ы', 'y'],
['Ь', ''],
['ь', ''],
['Э', 'E'],
['э', 'e'],
['Ю', 'Yu'],
['ю', 'yu'],
['Я', 'Ya'],
['я', 'ya'],
# Romanian
['ă', 'a'],
['Ă', 'A'],
['ș', 's'],
['Ș', 'S'],
['ț', 't'],
['Ț', 'T'],
['ţ', 't'],
['Ţ', 'T'],
# Turkish
['ş', 's'],
['Ş', 'S'],
['ç', 'c'],
['Ç', 'C'],
['ğ', 'g'],
['Ğ', 'G'],
['ı', 'i'],
['İ', 'I'],
# Armenian
['ա', 'a'],
['Ա', 'A'],
['բ', 'b'],
['Բ', 'B'],
['գ', 'g'],
['Գ', 'G'],
['դ', 'd'],
['Դ', 'D'],
['ե', 'ye'],
['Ե', 'Ye'],
['զ', 'z'],
['Զ', 'Z'],
['է', 'e'],
['Է', 'E'],
['ը', 'y'],
['Ը', 'Y'],
['թ', 't'],
['Թ', 'T'],
['ժ', 'zh'],
['Ժ', 'Zh'],
['ի', 'i'],
['Ի', 'I'],
['լ', 'l'],
['Լ', 'L'],
['խ', 'kh'],
['Խ', 'Kh'],
['ծ', 'ts'],
['Ծ', 'Ts'],
['կ', 'k'],
['Կ', 'K'],
['հ', 'h'],
['Հ', 'H'],
['ձ', 'dz'],
['Ձ', 'Dz'],
['ղ', 'gh'],
['Ղ', 'Gh'],
['ճ', 'tch'],
['Ճ', 'Tch'],
['մ', 'm'],
['Մ', 'M'],
['յ', 'y'],
['Յ', 'Y'],
['ն', 'n'],
['Ն', 'N'],
['շ', 'sh'],
['Շ', 'Sh'],
['ո', 'vo'],
['Ո', 'Vo'],
['չ', 'ch'],
['Չ', 'Ch'],
['պ', 'p'],
['Պ', 'P'],
['ջ', 'j'],
['Ջ', 'J'],
['ռ', 'r'],
['Ռ', 'R'],
['ս', 's'],
['Ս', 'S'],
['վ', 'v'],
['Վ', 'V'],
['տ', 't'],
['Տ', 'T'],
['ր', 'r'],
['Ր', 'R'],
['ց', 'c'],
['Ց', 'C'],
['ու', 'u'],
['ՈՒ', 'U'],
['Ու', 'U'],
['փ', 'p'],
['Փ', 'P'],
['ք', 'q'],
['Ք', 'Q'],
['օ', 'o'],
['Օ', 'O'],
['ֆ', 'f'],
['Ֆ', 'F'],
['և', 'yev'],
# Georgian
['ა', 'a'],
['ბ', 'b'],
['გ', 'g'],
['დ', 'd'],
['ე', 'e'],
['ვ', 'v'],
['ზ', 'z'],
['თ', 't'],
['ი', 'i'],
['კ', 'k'],
['ლ', 'l'],
['მ', 'm'],
['ნ', 'n'],
['ო', 'o'],
['პ', 'p'],
['ჟ', 'zh'],
['რ', 'r'],
['ს', 's'],
['ტ', 't'],
['უ', 'u'],
['ფ', 'ph'],
['ქ', 'q'],
['ღ', 'gh'],
['ყ', 'k'],
['შ', 'sh'],
['ჩ', 'ch'],
['ც', 'ts'],
['ძ', 'dz'],
['წ', 'ts'],
['ჭ', 'tch'],
['ხ', 'kh'],
['ჯ', 'j'],
['ჰ', 'h'],
# Czech
['č', 'c'],
['ď', 'd'],
['ě', 'e'],
['ň', 'n'],
['ř', 'r'],
['š', 's'],
['ť', 't'],
['ů', 'u'],
['ž', 'z'],
['Č', 'C'],
['Ď', 'D'],
['Ě', 'E'],
['Ň', 'N'],
['Ř', 'R'],
['Š', 'S'],
['Ť', 'T'],
['Ů', 'U'],
['Ž', 'Z'],
# Dhivehi
['ހ', 'h'],
['ށ', 'sh'],
['ނ', 'n'],
['ރ', 'r'],
['ބ', 'b'],
['ޅ', 'lh'],
['ކ', 'k'],
['އ', 'a'],
['ވ', 'v'],
['މ', 'm'],
['ފ', 'f'],
['ދ', 'dh'],
['ތ', 'th'],
['ލ', 'l'],
['ގ', 'g'],
['ޏ', 'gn'],
['ސ', 's'],
['ޑ', 'd'],
['ޒ', 'z'],
['ޓ', 't'],
['ޔ', 'y'],
['ޕ', 'p'],
['ޖ', 'j'],
['ޗ', 'ch'],
['ޘ', 'tt'],
['ޙ', 'hh'],
['ޚ', 'kh'],
['ޛ', 'th'],
['ޜ', 'z'],
['ޝ', 'sh'],
['ޞ', 's'],
['ޟ', 'd'],
['ޠ', 't'],
['ޡ', 'z'],
['ޢ', 'a'],
['ޣ', 'gh'],
['ޤ', 'q'],
['ޥ', 'w'],
['ަ', 'a'],
['ާ', 'aa'],
['ި', 'i'],
['ީ', 'ee'],
['ު', 'u'],
['ޫ', 'oo'],
['ެ', 'e'],
['ޭ', 'ey'],
['ޮ', 'o'],
['ޯ', 'oa'],
['ް', ''],
# Greek
['α', 'a'],
['β', 'v'],
['γ', 'g'],
['δ', 'd'],
['ε', 'e'],
['ζ', 'z'],
['η', 'i'],
['θ', 'th'],
['ι', 'i'],
['κ', 'k'],
['λ', 'l'],
['μ', 'm'],
['ν', 'n'],
['ξ', 'ks'],
['ο', 'o'],
['π', 'p'],
['ρ', 'r'],
['σ', 's'],
['τ', 't'],
['υ', 'y'],
['φ', 'f'],
['χ', 'x'],
['ψ', 'ps'],
['ω', 'o'],
['ά', 'a'],
['έ', 'e'],
['ί', 'i'],
['ό', 'o'],
['ύ', 'y'],
['ή', 'i'],
['ώ', 'o'],
['ς', 's'],
['ϊ', 'i'],
['ΰ', 'y'],
['ϋ', 'y'],
['ΐ', 'i'],
['Α', 'A'],
['Β', 'B'],
['Γ', 'G'],
['Δ', 'D'],
['Ε', 'E'],
['Ζ', 'Z'],
['Η', 'I'],
['Θ', 'TH'],
['Ι', 'I'],
['Κ', 'K'],
['Λ', 'L'],
['Μ', 'M'],
['Ν', 'N'],
['Ξ', 'KS'],
['Ο', 'O'],
['Π', 'P'],
['Ρ', 'R'],
['Σ', 'S'],
['Τ', 'T'],
['Υ', 'Y'],
['Φ', 'F'],
['Χ', 'X'],
['Ψ', 'PS'],
['Ω', 'O'],
['Ά', 'A'],
['Έ', 'E'],
['Ί', 'I'],
['Ό', 'O'],
['Ύ', 'Y'],
['Ή', 'I'],
['Ώ', 'O'],
['Ϊ', 'I'],
['Ϋ', 'Y'],
# Disabled as it conflicts with German and Latin.
# Hungarian
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ö', 'o'],
# ['Ö', 'O'],
# ['ü', 'u'],
# ['Ü', 'U'],
# ['ű', 'u'],
# ['Ű', 'U'],
# Latvian
['ā', 'a'],
['ē', 'e'],
['ģ', 'g'],
['ī', 'i'],
['ķ', 'k'],
['ļ', 'l'],
['ņ', 'n'],
['ū', 'u'],
['Ā', 'A'],
['Ē', 'E'],
['Ģ', 'G'],
['Ī', 'I'],
['Ķ', 'K'],
['Ļ', 'L'],
['Ņ', 'N'],
['Ū', 'U'],
['č', 'c'],
['š', 's'],
['ž', 'z'],
['Č', 'C'],
['Š', 'S'],
['Ž', 'Z'],
# Lithuanian
['ą', 'a'],
['č', 'c'],
['ę', 'e'],
['ė', 'e'],
['į', 'i'],
['š', 's'],
['ų', 'u'],
['ū', 'u'],
['ž', 'z'],
['Ą', 'A'],
['Č', 'C'],
['Ę', 'E'],
['Ė', 'E'],
['Į', 'I'],
['Š', 'S'],
['Ų', 'U'],
['Ū', 'U'],
# Macedonian
['Ќ', 'Kj'],
['ќ', 'kj'],
['Љ', 'Lj'],
['љ', 'lj'],
['Њ', 'Nj'],
['њ', 'nj'],
['Тс', 'Ts'],
['тс', 'ts'],
# Polish
['ą', 'a'],
['ć', 'c'],
['ę', 'e'],
['ł', 'l'],
['ń', 'n'],
['ś', 's'],
['ź', 'z'],
['ż', 'z'],
['Ą', 'A'],
['Ć', 'C'],
['Ę', 'E'],
['Ł', 'L'],
['Ń', 'N'],
['Ś', 'S'],
['Ź', 'Z'],
['Ż', 'Z'],
# Disabled as it conflicts with Vietnamese.
# Serbian
# ['љ', 'lj'],
# ['њ', 'nj'],
# ['Љ', 'Lj'],
# ['Њ', 'Nj'],
# ['đ', 'dj'],
# ['Đ', 'Dj'],
# ['ђ', 'dj'],
# ['ј', 'j'],
# ['ћ', 'c'],
# ['џ', 'dz'],
# ['Ђ', 'Dj'],
# ['Ј', 'j'],
# ['Ћ', 'C'],
# ['Џ', 'Dz'],
# Disabled as it conflicts with German and Latin.
# Slovak
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ľ', 'l'],
# ['ĺ', 'l'],
# ['ŕ', 'r'],
# ['Ľ', 'L'],
# ['Ĺ', 'L'],
# ['Ŕ', 'R'],
# Disabled as it conflicts with German and Latin.
# Swedish
# ['å', 'o'],
# ['Å', 'o'],
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ë', 'e'],
# ['Ë', 'E'],
# ['ö', 'o'],
# ['Ö', 'O'],
# Ukrainian
['Є', 'Ye'],
['І', 'I'],
['Ї', 'Yi'],
['Ґ', 'G'],
['є', 'ye'],
['і', 'i'],
['ї', 'yi'],
['ґ', 'g'],
# Dutch
['IJ', 'IJ'],
['ij', 'ij'],
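    # Disabled as it conflicts with the Latin mappings above.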
# Danish
# ['Æ', 'Ae'],
# ['Ø', 'Oe'],
# ['Å', 'Aa'],
# ['æ', 'ae'],
# ['ø', 'oe'],
# ['å', 'aa']
# Currencies
['¢', 'c'],
['¥', 'Y'],
['߿', 'b'],
['৳', 't'],
['૱', 'Bo'],
['฿', 'B'],
['₠', 'CE'],
['₡', 'C'],
['₢', 'Cr'],
['₣', 'F'],
['₥', 'm'],
['₦', 'N'],
['₧', 'Pt'],
['₨', 'Rs'],
['₩', 'W'],
['₫', 's'],
['€', 'E'],
['₭', 'K'],
['₮', 'T'],
['₯', 'Dp'],
['₰', 'S'],
['₱', 'P'],
['₲', 'G'],
['₳', 'A'],
['₴', 'S'],
['₵', 'C'],
['₶', 'tt'],
['₷', 'S'],
['₸', 'T'],
['₹', 'R'],
['₺', 'L'],
['₽', 'P'],
['₿', 'B'],
['﹩', '$'],
['¢', 'c'],
['¥', 'Y'],
['₩', 'W'],
# Latin
['𝐀', 'A'],
['𝐁', 'B'],
['𝐂', 'C'],
['𝐃', 'D'],
['𝐄', 'E'],
['𝐅', 'F'],
['𝐆', 'G'],
['𝐇', 'H'],
['𝐈', 'I'],
['𝐉', 'J'],
['𝐊', 'K'],
['𝐋', 'L'],
['𝐌', 'M'],
['𝐍', 'N'],
['𝐎', 'O'],
['𝐏', 'P'],
['𝐐', 'Q'],
['𝐑', 'R'],
['𝐒', 'S'],
['𝐓', 'T'],
['𝐔', 'U'],
['𝐕', 'V'],
['𝐖', 'W'],
['𝐗', 'X'],
['𝐘', 'Y'],
['𝐙', 'Z'],
['𝐚', 'a'],
['𝐛', 'b'],
['𝐜', 'c'],
['𝐝', 'd'],
['𝐞', 'e'],
['𝐟', 'f'],
['𝐠', 'g'],
['𝐡', 'h'],
['𝐢', 'i'],
['𝐣', 'j'],
['𝐤', 'k'],
['𝐥', 'l'],
['𝐦', 'm'],
['𝐧', 'n'],
['𝐨', 'o'],
['𝐩', 'p'],
['𝐪', 'q'],
['𝐫', 'r'],
['𝐬', 's'],
['𝐭', 't'],
['𝐮', 'u'],
['𝐯', 'v'],
['𝐰', 'w'],
['𝐱', 'x'],
['𝐲', 'y'],
['𝐳', 'z'],
['𝐴', 'A'],
['𝐵', 'B'],
['𝐶', 'C'],
['𝐷', 'D'],
['𝐸', 'E'],
['𝐹', 'F'],
['𝐺', 'G'],
['𝐻', 'H'],
['𝐼', 'I'],
['𝐽', 'J'],
['𝐾', 'K'],
['𝐿', 'L'],
['𝑀', 'M'],
['𝑁', 'N'],
['𝑂', 'O'],
['𝑃', 'P'],
['𝑄', 'Q'],
['𝑅', 'R'],
['𝑆', 'S'],
['𝑇', 'T'],
['𝑈', 'U'],
['𝑉', 'V'],
['𝑊', 'W'],
['𝑋', 'X'],
['𝑌', 'Y'],
['𝑍', 'Z'],
['𝑎', 'a'],
['𝑏', 'b'],
['𝑐', 'c'],
['𝑑', 'd'],
['𝑒', 'e'],
['𝑓', 'f'],
['𝑔', 'g'],
['𝑖', 'i'],
['𝑗', 'j'],
['𝑘', 'k'],
['𝑙', 'l'],
['𝑚', 'm'],
['𝑛', 'n'],
['𝑜', 'o'],
['𝑝', 'p'],
['𝑞', 'q'],
['𝑟', 'r'],
['𝑠', 's'],
['𝑡', 't'],
['𝑢', 'u'],
['𝑣', 'v'],
['𝑤', 'w'],
['𝑥', 'x'],
['𝑦', 'y'],
['𝑧', 'z'],
['𝑨', 'A'],
['𝑩', 'B'],
['𝑪', 'C'],
['𝑫', 'D'],
['𝑬', 'E'],
['𝑭', 'F'],
['𝑮', 'G'],
['𝑯', 'H'],
['𝑰', 'I'],
['𝑱', 'J'],
['𝑲', 'K'],
['𝑳', 'L'],
['𝑴', 'M'],
['𝑵', 'N'],
['𝑶', 'O'],
['𝑷', 'P'],
['𝑸', 'Q'],
['𝑹', 'R'],
['𝑺', 'S'],
['𝑻', 'T'],
['𝑼', 'U'],
['𝑽', 'V'],
['𝑾', 'W'],
['𝑿', 'X'],
['𝒀', 'Y'],
['𝒁', 'Z'],
['𝒂', 'a'],
['𝒃', 'b'],
['𝒄', 'c'],
['𝒅', 'd'],
['𝒆', 'e'],
['𝒇', 'f'],
['𝒈', 'g'],
['𝒉', 'h'],
['𝒊', 'i'],
['𝒋', 'j'],
['𝒌', 'k'],
['𝒍', 'l'],
['𝒎', 'm'],
['𝒏', 'n'],
['𝒐', 'o'],
['𝒑', 'p'],
['𝒒', 'q'],
['𝒓', 'r'],
['𝒔', 's'],
['𝒕', 't'],
['𝒖', 'u'],
['𝒗', 'v'],
['𝒘', 'w'],
['𝒙', 'x'],
['𝒚', 'y'],
['𝒛', 'z'],
['𝒜', 'A'],
['𝒞', 'C'],
['𝒟', 'D'],
    ['𝒢', 'G'],
['𝒥', 'J'],
['𝒦', 'K'],
['𝒩', 'N'],
['𝒪', 'O'],
['𝒫', 'P'],
['𝒬', 'Q'],
['𝒮', 'S'],
['𝒯', 'T'],
['𝒰', 'U'],
['𝒱', 'V'],
['𝒲', 'W'],
['𝒳', 'X'],
['𝒴', 'Y'],
['𝒵', 'Z'],
['𝒶', 'a'],
['𝒷', 'b'],
['𝒸', 'c'],
['𝒹', 'd'],
['𝒻', 'f'],
['𝒽', 'h'],
['𝒾', 'i'],
['𝒿', 'j'],
    ['𝓀', 'k'],
['𝓁', 'l'],
['𝓂', 'm'],
['𝓃', 'n'],
['𝓅', 'p'],
['𝓆', 'q'],
['𝓇', 'r'],
['𝓈', 's'],
['𝓉', 't'],
['𝓊', 'u'],
['𝓋', 'v'],
['𝓌', 'w'],
['𝓍', 'x'],
['𝓎', 'y'],
['𝓏', 'z'],
['𝓐', 'A'],
['𝓑', 'B'],
['𝓒', 'C'],
['𝓓', 'D'],
['𝓔', 'E'],
['𝓕', 'F'],
['𝓖', 'G'],
['𝓗', 'H'],
['𝓘', 'I'],
['𝓙', 'J'],
['𝓚', 'K'],
['𝓛', 'L'],
['𝓜', 'M'],
['𝓝', 'N'],
['𝓞', 'O'],
['𝓟', 'P'],
['𝓠', 'Q'],
['𝓡', 'R'],
['𝓢', 'S'],
['𝓣', 'T'],
['𝓤', 'U'],
['𝓥', 'V'],
['𝓦', 'W'],
['𝓧', 'X'],
['𝓨', 'Y'],
['𝓩', 'Z'],
['𝓪', 'a'],
['𝓫', 'b'],
['𝓬', 'c'],
['𝓭', 'd'],
['𝓮', 'e'],
['𝓯', 'f'],
['𝓰', 'g'],
['𝓱', 'h'],
['𝓲', 'i'],
['𝓳', 'j'],
['𝓴', 'k'],
['𝓵', 'l'],
['𝓶', 'm'],
['𝓷', 'n'],
['𝓸', 'o'],
['𝓹', 'p'],
['𝓺', 'q'],
['𝓻', 'r'],
['𝓼', 's'],
['𝓽', 't'],
['𝓾', 'u'],
['𝓿', 'v'],
['𝔀', 'w'],
['𝔁', 'x'],
['𝔂', 'y'],
['𝔃', 'z'],
['𝔄', 'A'],
['𝔅', 'B'],
['𝔇', 'D'],
['𝔈', 'E'],
['𝔉', 'F'],
['𝔊', 'G'],
['𝔍', 'J'],
['𝔎', 'K'],
['𝔏', 'L'],
['𝔐', 'M'],
['𝔑', 'N'],
['𝔒', 'O'],
['𝔓', 'P'],
['𝔔', 'Q'],
['𝔖', 'S'],
['𝔗', 'T'],
['𝔘', 'U'],
['𝔙', 'V'],
['𝔚', 'W'],
['𝔛', 'X'],
['𝔜', 'Y'],
['𝔞', 'a'],
['𝔟', 'b'],
['𝔠', 'c'],
['𝔡', 'd'],
['𝔢', 'e'],
['𝔣', 'f'],
['𝔤', 'g'],
['𝔥', 'h'],
['𝔦', 'i'],
['𝔧', 'j'],
['𝔨', 'k'],
['𝔩', 'l'],
['𝔪', 'm'],
['𝔫', 'n'],
['𝔬', 'o'],
['𝔭', 'p'],
['𝔮', 'q'],
['𝔯', 'r'],
['𝔰', 's'],
['𝔱', 't'],
['𝔲', 'u'],
['𝔳', 'v'],
['𝔴', 'w'],
['𝔵', 'x'],
['𝔶', 'y'],
['𝔷', 'z'],
['𝔸', 'A'],
['𝔹', 'B'],
['𝔻', 'D'],
['𝔼', 'E'],
['𝔽', 'F'],
['𝔾', 'G'],
['𝕀', 'I'],
['𝕁', 'J'],
['𝕂', 'K'],
['𝕃', 'L'],
['𝕄', 'M'],
['𝕆', 'N'],
['𝕊', 'S'],
['𝕋', 'T'],
['𝕌', 'U'],
['𝕍', 'V'],
['𝕎', 'W'],
['𝕏', 'X'],
['𝕐', 'Y'],
['𝕒', 'a'],
['𝕓', 'b'],
['𝕔', 'c'],
['𝕕', 'd'],
['𝕖', 'e'],
['𝕗', 'f'],
['𝕘', 'g'],
['𝕙', 'h'],
['𝕚', 'i'],
['𝕛', 'j'],
['𝕜', 'k'],
['𝕝', 'l'],
['𝕞', 'm'],
['𝕟', 'n'],
['𝕠', 'o'],
['𝕡', 'p'],
['𝕢', 'q'],
['𝕣', 'r'],
['𝕤', 's'],
['𝕥', 't'],
['𝕦', 'u'],
['𝕧', 'v'],
['𝕨', 'w'],
['𝕩', 'x'],
['𝕪', 'y'],
['𝕫', 'z'],
['𝕬', 'A'],
['𝕭', 'B'],
['𝕮', 'C'],
['𝕯', 'D'],
['𝕰', 'E'],
['𝕱', 'F'],
['𝕲', 'G'],
['𝕳', 'H'],
['𝕴', 'I'],
['𝕵', 'J'],
['𝕶', 'K'],
['𝕷', 'L'],
['𝕸', 'M'],
['𝕹', 'N'],
['𝕺', 'O'],
['𝕻', 'P'],
['𝕼', 'Q'],
['𝕽', 'R'],
['𝕾', 'S'],
['𝕿', 'T'],
['𝖀', 'U'],
['𝖁', 'V'],
['𝖂', 'W'],
['𝖃', 'X'],
['𝖄', 'Y'],
['𝖅', 'Z'],
['𝖆', 'a'],
['𝖇', 'b'],
['𝖈', 'c'],
['𝖉', 'd'],
['𝖊', 'e'],
['𝖋', 'f'],
['𝖌', 'g'],
['𝖍', 'h'],
['𝖎', 'i'],
['𝖏', 'j'],
['𝖐', 'k'],
['𝖑', 'l'],
['𝖒', 'm'],
['𝖓', 'n'],
['𝖔', 'o'],
['𝖕', 'p'],
['𝖖', 'q'],
['𝖗', 'r'],
['𝖘', 's'],
['𝖙', 't'],
['𝖚', 'u'],
['𝖛', 'v'],
['𝖜', 'w'],
['𝖝', 'x'],
['𝖞', 'y'],
['𝖟', 'z'],
['𝖠', 'A'],
['𝖡', 'B'],
['𝖢', 'C'],
['𝖣', 'D'],
['𝖤', 'E'],
['𝖥', 'F'],
['𝖦', 'G'],
['𝖧', 'H'],
['𝖨', 'I'],
['𝖩', 'J'],
['𝖪', 'K'],
['𝖫', 'L'],
['𝖬', 'M'],
['𝖭', 'N'],
['𝖮', 'O'],
['𝖯', 'P'],
['𝖰', 'Q'],
['𝖱', 'R'],
['𝖲', 'S'],
['𝖳', 'T'],
['𝖴', 'U'],
['𝖵', 'V'],
['𝖶', 'W'],
['𝖷', 'X'],
['𝖸', 'Y'],
['𝖹', 'Z'],
['𝖺', 'a'],
['𝖻', 'b'],
['𝖼', 'c'],
['𝖽', 'd'],
['𝖾', 'e'],
['𝖿', 'f'],
['𝗀', 'g'],
['𝗁', 'h'],
['𝗂', 'i'],
['𝗃', 'j'],
['𝗄', 'k'],
['𝗅', 'l'],
['𝗆', 'm'],
['𝗇', 'n'],
['𝗈', 'o'],
['𝗉', 'p'],
['𝗊', 'q'],
['𝗋', 'r'],
['𝗌', 's'],
['𝗍', 't'],
['𝗎', 'u'],
['𝗏', 'v'],
['𝗐', 'w'],
['𝗑', 'x'],
['𝗒', 'y'],
['𝗓', 'z'],
['𝗔', 'A'],
['𝗕', 'B'],
['𝗖', 'C'],
['𝗗', 'D'],
['𝗘', 'E'],
['𝗙', 'F'],
['𝗚', 'G'],
['𝗛', 'H'],
['𝗜', 'I'],
['𝗝', 'J'],
['𝗞', 'K'],
['𝗟', 'L'],
['𝗠', 'M'],
['𝗡', 'N'],
['𝗢', 'O'],
['𝗣', 'P'],
['𝗤', 'Q'],
['𝗥', 'R'],
['𝗦', 'S'],
['𝗧', 'T'],
['𝗨', 'U'],
['𝗩', 'V'],
['𝗪', 'W'],
['𝗫', 'X'],
['𝗬', 'Y'],
['𝗭', 'Z'],
['𝗮', 'a'],
['𝗯', 'b'],
['𝗰', 'c'],
['𝗱', 'd'],
['𝗲', 'e'],
['𝗳', 'f'],
['𝗴', 'g'],
['𝗵', 'h'],
['𝗶', 'i'],
['𝗷', 'j'],
['𝗸', 'k'],
['𝗹', 'l'],
['𝗺', 'm'],
['𝗻', 'n'],
['𝗼', 'o'],
['𝗽', 'p'],
['𝗾', 'q'],
['𝗿', 'r'],
['𝘀', 's'],
['𝘁', 't'],
['𝘂', 'u'],
['𝘃', 'v'],
['𝘄', 'w'],
['𝘅', 'x'],
['𝘆', 'y'],
['𝘇', 'z'],
['𝘈', 'A'],
['𝘉', 'B'],
['𝘊', 'C'],
['𝘋', 'D'],
['𝘌', 'E'],
['𝘍', 'F'],
['𝘎', 'G'],
['𝘏', 'H'],
['𝘐', 'I'],
['𝘑', 'J'],
['𝘒', 'K'],
['𝘓', 'L'],
['𝘔', 'M'],
['𝘕', 'N'],
['𝘖', 'O'],
['𝘗', 'P'],
['𝘘', 'Q'],
['𝘙', 'R'],
['𝘚', 'S'],
['𝘛', 'T'],
['𝘜', 'U'],
['𝘝', 'V'],
['𝘞', 'W'],
['𝘟', 'X'],
['𝘠', 'Y'],
['𝘡', 'Z'],
['𝘢', 'a'],
['𝘣', 'b'],
['𝘤', 'c'],
['𝘥', 'd'],
['𝘦', 'e'],
['𝘧', 'f'],
['𝘨', 'g'],
['𝘩', 'h'],
['𝘪', 'i'],
['𝘫', 'j'],
['𝘬', 'k'],
['𝘭', 'l'],
['𝘮', 'm'],
['𝘯', 'n'],
['𝘰', 'o'],
['𝘱', 'p'],
['𝘲', 'q'],
['𝘳', 'r'],
['𝘴', 's'],
['𝘵', 't'],
['𝘶', 'u'],
['𝘷', 'v'],
['𝘸', 'w'],
['𝘹', 'x'],
['𝘺', 'y'],
['𝘻', 'z'],
['𝘼', 'A'],
['𝘽', 'B'],
['𝘾', 'C'],
['𝘿', 'D'],
['𝙀', 'E'],
['𝙁', 'F'],
['𝙂', 'G'],
['𝙃', 'H'],
['𝙄', 'I'],
['𝙅', 'J'],
['𝙆', 'K'],
['𝙇', 'L'],
['𝙈', 'M'],
['𝙉', 'N'],
['𝙊', 'O'],
['𝙋', 'P'],
['𝙌', 'Q'],
['𝙍', 'R'],
['𝙎', 'S'],
['𝙏', 'T'],
['𝙐', 'U'],
['𝙑', 'V'],
['𝙒', 'W'],
['𝙓', 'X'],
['𝙔', 'Y'],
['𝙕', 'Z'],
['𝙖', 'a'],
['𝙗', 'b'],
['𝙘', 'c'],
['𝙙', 'd'],
['𝙚', 'e'],
['𝙛', 'f'],
['𝙜', 'g'],
['𝙝', 'h'],
['𝙞', 'i'],
['𝙟', 'j'],
['𝙠', 'k'],
['𝙡', 'l'],
['𝙢', 'm'],
['𝙣', 'n'],
['𝙤', 'o'],
['𝙥', 'p'],
['𝙦', 'q'],
['𝙧', 'r'],
['𝙨', 's'],
['𝙩', 't'],
['𝙪', 'u'],
['𝙫', 'v'],
['𝙬', 'w'],
['𝙭', 'x'],
['𝙮', 'y'],
['𝙯', 'z'],
['𝙰', 'A'],
['𝙱', 'B'],
['𝙲', 'C'],
['𝙳', 'D'],
['𝙴', 'E'],
['𝙵', 'F'],
['𝙶', 'G'],
['𝙷', 'H'],
['𝙸', 'I'],
['𝙹', 'J'],
['𝙺', 'K'],
['𝙻', 'L'],
['𝙼', 'M'],
['𝙽', 'N'],
['𝙾', 'O'],
['𝙿', 'P'],
['𝚀', 'Q'],
['𝚁', 'R'],
['𝚂', 'S'],
['𝚃', 'T'],
['𝚄', 'U'],
['𝚅', 'V'],
['𝚆', 'W'],
['𝚇', 'X'],
['𝚈', 'Y'],
['𝚉', 'Z'],
['𝚊', 'a'],
['𝚋', 'b'],
['𝚌', 'c'],
['𝚍', 'd'],
['𝚎', 'e'],
['𝚏', 'f'],
['𝚐', 'g'],
['𝚑', 'h'],
['𝚒', 'i'],
['𝚓', 'j'],
['𝚔', 'k'],
['𝚕', 'l'],
['𝚖', 'm'],
['𝚗', 'n'],
['𝚘', 'o'],
['𝚙', 'p'],
['𝚚', 'q'],
['𝚛', 'r'],
['𝚜', 's'],
['𝚝', 't'],
['𝚞', 'u'],
['𝚟', 'v'],
['𝚠', 'w'],
['𝚡', 'x'],
['𝚢', 'y'],
['𝚣', 'z'],
# Dotless letters
['𝚤', 'l'],
['𝚥', 'j'],
# Greek
['𝛢', 'A'],
['𝛣', 'B'],
['𝛤', 'G'],
['𝛥', 'D'],
['𝛦', 'E'],
['𝛧', 'Z'],
['𝛨', 'I'],
['𝛩', 'TH'],
['𝛪', 'I'],
['𝛫', 'K'],
['𝛬', 'L'],
['𝛭', 'M'],
['𝛮', 'N'],
['𝛯', 'KS'],
['𝛰', 'O'],
['𝛱', 'P'],
['𝛲', 'R'],
['𝛳', 'TH'],
['𝛴', 'S'],
['𝛵', 'T'],
['𝛶', 'Y'],
['𝛷', 'F'],
    ['𝛸', 'X'],
['𝛹', 'PS'],
['𝛺', 'O'],
['𝛻', 'D'],
['𝛼', 'a'],
['𝛽', 'b'],
['𝛾', 'g'],
['𝛿', 'd'],
['𝜀', 'e'],
['𝜁', 'z'],
['𝜂', 'i'],
['𝜃', 'th'],
['𝜄', 'i'],
['𝜅', 'k'],
['𝜆', 'l'],
['𝜇', 'm'],
['𝜈', 'n'],
['𝜉', 'ks'],
['𝜊', 'o'],
['𝜋', 'p'],
['𝜌', 'r'],
['𝜍', 's'],
['𝜎', 's'],
['𝜏', 't'],
['𝜐', 'y'],
['𝜑', 'f'],
['𝜒', 'x'],
['𝜓', 'ps'],
['𝜔', 'o'],
['𝜕', 'd'],
['𝜖', 'E'],
['𝜗', 'TH'],
['𝜘', 'K'],
['𝜙', 'f'],
['𝜚', 'r'],
['𝜛', 'p'],
['𝜜', 'A'],
['𝜝', 'V'],
['𝜞', 'G'],
['𝜟', 'D'],
['𝜠', 'E'],
['𝜡', 'Z'],
['𝜢', 'I'],
['𝜣', 'TH'],
['𝜤', 'I'],
['𝜥', 'K'],
['𝜦', 'L'],
['𝜧', 'M'],
['𝜨', 'N'],
['𝜩', 'KS'],
['𝜪', 'O'],
['𝜫', 'P'],
['𝜬', 'S'],
['𝜭', 'TH'],
['𝜮', 'S'],
['𝜯', 'T'],
['𝜰', 'Y'],
['𝜱', 'F'],
['𝜲', 'X'],
['𝜳', 'PS'],
['𝜴', 'O'],
['𝜵', 'D'],
['𝜶', 'a'],
['𝜷', 'v'],
['𝜸', 'g'],
['𝜹', 'd'],
['𝜺', 'e'],
['𝜻', 'z'],
['𝜼', 'i'],
['𝜽', 'th'],
['𝜾', 'i'],
['𝜿', 'k'],
['𝝀', 'l'],
['𝝁', 'm'],
['𝝂', 'n'],
['𝝃', 'ks'],
['𝝄', 'o'],
['𝝅', 'p'],
['𝝆', 'r'],
['𝝇', 's'],
['𝝈', 's'],
['𝝉', 't'],
['𝝊', 'y'],
['𝝋', 'f'],
['𝝌', 'x'],
['𝝍', 'ps'],
['𝝎', 'o'],
['𝝏', 'a'],
['𝝐', 'e'],
['𝝑', 'i'],
['𝝒', 'k'],
['𝝓', 'f'],
['𝝔', 'r'],
['𝝕', 'p'],
['𝝖', 'A'],
['𝝗', 'B'],
['𝝘', 'G'],
['𝝙', 'D'],
['𝝚', 'E'],
['𝝛', 'Z'],
['𝝜', 'I'],
['𝝝', 'TH'],
['𝝞', 'I'],
['𝝟', 'K'],
['𝝠', 'L'],
['𝝡', 'M'],
['𝝢', 'N'],
['𝝣', 'KS'],
['𝝤', 'O'],
['𝝥', 'P'],
['𝝦', 'R'],
['𝝧', 'TH'],
['𝝨', 'S'],
['𝝩', 'T'],
['𝝪', 'Y'],
['𝝫', 'F'],
['𝝬', 'X'],
['𝝭', 'PS'],
['𝝮', 'O'],
['𝝯', 'D'],
['𝝰', 'a'],
['𝝱', 'v'],
['𝝲', 'g'],
['𝝳', 'd'],
['𝝴', 'e'],
['𝝵', 'z'],
['𝝶', 'i'],
['𝝷', 'th'],
['𝝸', 'i'],
['𝝹', 'k'],
['𝝺', 'l'],
['𝝻', 'm'],
['𝝼', 'n'],
['𝝽', 'ks'],
['𝝾', 'o'],
['𝝿', 'p'],
['𝞀', 'r'],
['𝞁', 's'],
['𝞂', 's'],
['𝞃', 't'],
['𝞄', 'y'],
['𝞅', 'f'],
['𝞆', 'x'],
['𝞇', 'ps'],
['𝞈', 'o'],
['𝞉', 'a'],
['𝞊', 'e'],
['𝞋', 'i'],
['𝞌', 'k'],
['𝞍', 'f'],
['𝞎', 'r'],
['𝞏', 'p'],
['𝞐', 'A'],
['𝞑', 'V'],
['𝞒', 'G'],
['𝞓', 'D'],
['𝞔', 'E'],
['𝞕', 'Z'],
['𝞖', 'I'],
['𝞗', 'TH'],
['𝞘', 'I'],
['𝞙', 'K'],
['𝞚', 'L'],
['𝞛', 'M'],
['𝞜', 'N'],
['𝞝', 'KS'],
['𝞞', 'O'],
['𝞟', 'P'],
['𝞠', 'S'],
['𝞡', 'TH'],
['𝞢', 'S'],
['𝞣', 'T'],
['𝞤', 'Y'],
['𝞥', 'F'],
['𝞦', 'X'],
['𝞧', 'PS'],
['𝞨', 'O'],
['𝞩', 'D'],
    ['𝞪', 'a'],
    ['𝞫', 'v'],
    ['𝞬', 'g'],
    ['𝞭', 'd'],
    ['𝞮', 'e'],
    ['𝞯', 'z'],
['𝞰', 'i'],
['𝞱', 'th'],
['𝞲', 'i'],
['𝞳', 'k'],
['𝞴', 'l'],
['𝞵', 'm'],
['𝞶', 'n'],
['𝞷', 'ks'],
['𝞸', 'o'],
['𝞹', 'p'],
['𝞺', 'r'],
['𝞻', 's'],
['𝞼', 's'],
['𝞽', 't'],
['𝞾', 'y'],
['𝞿', 'f'],
['𝟀', 'x'],
['𝟁', 'ps'],
['𝟂', 'o'],
['𝟃', 'a'],
['𝟄', 'e'],
['𝟅', 'i'],
['𝟆', 'k'],
['𝟇', 'f'],
['𝟈', 'r'],
['𝟉', 'p'],
['𝟊', 'F'],
['𝟋', 'f'],
['⒜', '(a)'],
['⒝', '(b)'],
['⒞', '(c)'],
['⒟', '(d)'],
['⒠', '(e)'],
['⒡', '(f)'],
['⒢', '(g)'],
['⒣', '(h)'],
['⒤', '(i)'],
['⒥', '(j)'],
['⒦', '(k)'],
['⒧', '(l)'],
['⒨', '(m)'],
['⒩', '(n)'],
['⒪', '(o)'],
['⒫', '(p)'],
['⒬', '(q)'],
['⒭', '(r)'],
['⒮', '(s)'],
['⒯', '(t)'],
['⒰', '(u)'],
['⒱', '(v)'],
['⒲', '(w)'],
['⒳', '(x)'],
['⒴', '(y)'],
['⒵', '(z)'],
['Ⓐ', '(A)'],
['Ⓑ', '(B)'],
['Ⓒ', '(C)'],
['Ⓓ', '(D)'],
['Ⓔ', '(E)'],
['Ⓕ', '(F)'],
['Ⓖ', '(G)'],
['Ⓗ', '(H)'],
['Ⓘ', '(I)'],
['Ⓙ', '(J)'],
['Ⓚ', '(K)'],
['Ⓛ', '(L)'],
['Ⓝ', '(N)'],
['Ⓞ', '(O)'],
['Ⓟ', '(P)'],
['Ⓠ', '(Q)'],
['Ⓡ', '(R)'],
['Ⓢ', '(S)'],
['Ⓣ', '(T)'],
['Ⓤ', '(U)'],
['Ⓥ', '(V)'],
['Ⓦ', '(W)'],
['Ⓧ', '(X)'],
['Ⓨ', '(Y)'],
['Ⓩ', '(Z)'],
['ⓐ', '(a)'],
['ⓑ', '(b)'],
    ['ⓒ', '(c)'],
    ['ⓓ', '(d)'],
['ⓔ', '(e)'],
['ⓕ', '(f)'],
['ⓖ', '(g)'],
['ⓗ', '(h)'],
['ⓘ', '(i)'],
['ⓙ', '(j)'],
['ⓚ', '(k)'],
['ⓛ', '(l)'],
['ⓜ', '(m)'],
['ⓝ', '(n)'],
['ⓞ', '(o)'],
['ⓟ', '(p)'],
['ⓠ', '(q)'],
['ⓡ', '(r)'],
['ⓢ', '(s)'],
['ⓣ', '(t)'],
['ⓤ', '(u)'],
['ⓥ', '(v)'],
['ⓦ', '(w)'],
['ⓧ', '(x)'],
['ⓨ', '(y)'],
['ⓩ', '(z)'],
# Numbers
['𝟎', '0'],
['𝟏', '1'],
['𝟐', '2'],
['𝟑', '3'],
['𝟒', '4'],
['𝟓', '5'],
['𝟔', '6'],
['𝟕', '7'],
['𝟖', '8'],
['𝟗', '9'],
['𝟘', '0'],
['𝟙', '1'],
['𝟚', '2'],
['𝟛', '3'],
['𝟜', '4'],
['𝟝', '5'],
['𝟞', '6'],
['𝟟', '7'],
['𝟠', '8'],
['𝟡', '9'],
['𝟢', '0'],
['𝟣', '1'],
['𝟤', '2'],
['𝟥', '3'],
['𝟦', '4'],
['𝟧', '5'],
['𝟨', '6'],
['𝟩', '7'],
['𝟪', '8'],
['𝟫', '9'],
['𝟬', '0'],
['𝟭', '1'],
['𝟮', '2'],
['𝟯', '3'],
['𝟰', '4'],
['𝟱', '5'],
['𝟲', '6'],
['𝟳', '7'],
['𝟴', '8'],
['𝟵', '9'],
['𝟶', '0'],
['𝟷', '1'],
['𝟸', '2'],
['𝟹', '3'],
['𝟺', '4'],
['𝟻', '5'],
['𝟼', '6'],
['𝟽', '7'],
['𝟾', '8'],
['𝟿', '9'],
['①', '1'],
['②', '2'],
['③', '3'],
['④', '4'],
['⑤', '5'],
['⑥', '6'],
['⑦', '7'],
['⑧', '8'],
['⑨', '9'],
['⑩', '10'],
['⑪', '11'],
['⑫', '12'],
['⑬', '13'],
['⑭', '14'],
['⑮', '15'],
['⑯', '16'],
['⑰', '17'],
['⑱', '18'],
['⑲', '19'],
['⑳', '20'],
['⑴', '1'],
['⑵', '2'],
['⑶', '3'],
['⑷', '4'],
['⑸', '5'],
['⑹', '6'],
['⑺', '7'],
['⑻', '8'],
['⑼', '9'],
['⑽', '10'],
['⑾', '11'],
['⑿', '12'],
['⒀', '13'],
['⒁', '14'],
['⒂', '15'],
['⒃', '16'],
['⒄', '17'],
['⒅', '18'],
['⒆', '19'],
['⒇', '20'],
['⒈', '1.'],
['⒉', '2.'],
['⒊', '3.'],
['⒋', '4.'],
['⒌', '5.'],
['⒍', '6.'],
['⒎', '7.'],
['⒏', '8.'],
['⒐', '9.'],
['⒑', '10.'],
['⒒', '11.'],
['⒓', '12.'],
['⒔', '13.'],
['⒕', '14.'],
['⒖', '15.'],
['⒗', '16.'],
['⒘', '17.'],
['⒙', '18.'],
['⒚', '19.'],
['⒛', '20.'],
['⓪', '0'],
['⓫', '11'],
['⓬', '12'],
['⓭', '13'],
['⓮', '14'],
['⓯', '15'],
['⓰', '16'],
['⓱', '17'],
['⓲', '18'],
['⓳', '19'],
['⓴', '20'],
['⓵', '1'],
['⓶', '2'],
['⓷', '3'],
['⓸', '4'],
['⓹', '5'],
['⓺', '6'],
['⓻', '7'],
['⓼', '8'],
['⓽', '9'],
['⓾', '10'],
['⓿', '0'],
# Punctuation
['🙰', '&'],
['🙱', '&'],
['🙲', '&'],
['🙳', '&'],
['🙴', '&'],
['🙵', '&'],
['🙶', '"'],
['🙷', '"'],
['🙸', '"'],
['‽', '?!'],
['🙹', '?!'],
['🙺', '?!'],
['🙻', '?!'],
['🙼', '/'],
['🙽', '\\'],
# Alchemy
['🜇', 'AR'],
['🜈', 'V'],
['🜉', 'V'],
['🜆', 'VR'],
['🜅', 'VF'],
['🜩', '2'],
['🜪', '5'],
['🝡', 'f'],
['🝢', 'W'],
['🝣', 'U'],
['🝧', 'V'],
['🝨', 'T'],
['🝪', 'V'],
['🝫', 'MB'],
['🝬', 'VB'],
['🝲', '3B'],
['🝳', '3B'],
# Emojis
['💯', '100'],
['🔙', 'BACK'],
['🔚', 'END'],
['🔛', 'ON!'],
['🔜', 'SOON'],
['🔝', 'TOP'],
['🔞', '18'],
['🔤', 'abc'],
['🔠', 'ABCD'],
['🔡', 'abcd'],
['🔢', '1234'],
['🔣', 'T&@%'],
['#️⃣', '#'],
['*️⃣', '*'],
['0️⃣', '0'],
['1️⃣', '1'],
['2️⃣', '2'],
['3️⃣', '3'],
['4️⃣', '4'],
['5️⃣', '5'],
['6️⃣', '6'],
['7️⃣', '7'],
['8️⃣', '8'],
['9️⃣', '9'],
['🔟', '10'],
['🅰️', 'A'],
['🅱️', 'B'],
['🆎', 'AB'],
['🆑', 'CL'],
['🅾️', 'O'],
['🅿', 'P'],
['🆘', 'SOS'],
['🅲', 'C'],
['🅳', 'D'],
['🅴', 'E'],
['🅵', 'F'],
['🅶', 'G'],
['🅷', 'H'],
['🅸', 'I'],
['🅹', 'J'],
['🅺', 'K'],
['🅻', 'L'],
['🅼', 'M'],
['🅽', 'N'],
['🆀', 'Q'],
['🆁', 'R'],
['🆂', 'S'],
['🆃', 'T'],
['🆄', 'U'],
['🆅', 'V'],
['🆆', 'W'],
['🆇', 'X'],
['🆈', 'Y'],
['🆉', 'Z'],
]
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/unidecoder/replacements.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
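# Illustrative examples (outputs follow from the mapping tables):
#   unidecoder('Zürich straße')          -> 'Zuerich strasse'
#   unidecoder('Неllо')                  -> 'Nello'   (Cyrillic 'Н' -> 'N')
#   unidecoder('Неllо', homoglyphs=True) -> 'Hello'   ('Н' read as a homoglyph of 'H')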
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/unidecoder/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/text/unidecoder/homoglyphs.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import numpy as np
import torch.distributed as dist
from .iterator import DaliJasperIterator, SyntheticDataIterator
from .pipeline import DaliPipeline
from common.helpers import print_once
def _parse_json(json_path: str, start_label=0, predicate=lambda json: True):
"""
Parses json file to the format required by DALI
Args:
json_path: path to json file
start_label: the label, starting from which DALI will assign consecutive int numbers to every transcript
predicate: function, that accepts a sample descriptor (i.e. json dictionary) as an argument.
If the predicate for a given sample returns True, it will be included in the dataset.
Returns:
output_files: dictionary, that maps file name to label assigned by DALI
transcripts: dictionary, that maps label assigned by DALI to the transcript
"""
    import json
with open(json_path) as f:
librispeech_json = json.load(f)
output_files = {}
transcripts = {}
curr_label = start_label
for original_sample in librispeech_json:
if not predicate(original_sample):
continue
transcripts[curr_label] = original_sample['transcript']
output_files[original_sample['files'][-1]['fname']] = curr_label
curr_label += 1
return output_files, transcripts
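# Abridged sample descriptor, inferred from the fields accessed above
# (field values illustrative):
#   {"transcript": "a sentence", "original_duration": 12.3,
#    "files": [..., {"fname": "wav/relative/path.wav"}]}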
def _dict_to_file(d: dict, filename: str):
    with open(filename, "w") as f:
        for key, value in d.items():
            f.write("{} {}\n".format(key, value))
class DaliDataLoader:
"""
DataLoader is the main entry point to the data preprocessing pipeline.
    To use, create an object and then just iterate over `data_iterator()`.
    DataLoader will do the rest for you.
    Example:
        data_loader = DaliDataLoader(gpu_id, dataset_path, config_data,
                                     config_features, json_names, symbols,
                                     batch_size, pipeline_type="train")
        for data in data_loader.data_iterator():
            print(data)  # Here's your preprocessed data
Args:
device_type: Which device to use for preprocessing. Choose: "cpu", "gpu"
pipeline_type: Choose: "train", "val", "synth"
"""
def __init__(self, gpu_id, dataset_path: str, config_data: dict, config_features: dict, json_names: list,
symbols: list, batch_size: int, pipeline_type: str, grad_accumulation_steps: int = 1,
synth_iters_per_epoch: int = 544, device_type: str = "gpu"):
import torch
self.batch_size = batch_size
self.grad_accumulation_steps = grad_accumulation_steps
self.drop_last = (pipeline_type == 'train')
self.device_type = device_type
pipeline_type = self._parse_pipeline_type(pipeline_type)
if pipeline_type == "synth":
self._dali_data_iterator = self._init_synth_iterator(self.batch_size, config_features['nfilt'],
iters_per_epoch=synth_iters_per_epoch,
ngpus=torch.distributed.get_world_size())
else:
self._dali_data_iterator = self._init_iterator(gpu_id=gpu_id, dataset_path=dataset_path,
config_data=config_data,
config_features=config_features,
json_names=json_names, symbols=symbols,
train_pipeline=pipeline_type == "train")
def _init_iterator(self, gpu_id, dataset_path, config_data, config_features, json_names: list, symbols: list,
train_pipeline: bool):
"""
Returns data iterator. Data underneath this operator is preprocessed within Dali
"""
def hash_list_of_strings(li):
return str(abs(hash(''.join(li))))
output_files, transcripts = {}, {}
max_duration = config_data['max_duration']
for jname in json_names:
of, tr = _parse_json(jname if jname[0] == '/' else os.path.join(dataset_path, jname), len(output_files),
predicate=lambda json: json['original_duration'] <= max_duration)
output_files.update(of)
transcripts.update(tr)
file_list_path = os.path.join("/tmp", "jasper_dali.file_list." + hash_list_of_strings(json_names))
_dict_to_file(output_files, file_list_path)
self.dataset_size = len(output_files)
print_once(f"Dataset read by DALI. Number of samples: {self.dataset_size}")
pipeline = DaliPipeline.from_config(config_data=config_data, config_features=config_features, device_id=gpu_id,
file_root=dataset_path, file_list=file_list_path,
device_type=self.device_type, batch_size=self.batch_size,
train_pipeline=train_pipeline)
return DaliJasperIterator([pipeline], transcripts=transcripts, symbols=symbols, batch_size=self.batch_size,
reader_name="file_reader", train_iterator=train_pipeline)
def _init_synth_iterator(self, batch_size, nfeatures, iters_per_epoch, ngpus):
self.dataset_size = ngpus * iters_per_epoch * batch_size
return SyntheticDataIterator(batch_size, nfeatures, regenerate=True)
@staticmethod
def _parse_pipeline_type(pipeline_type):
pipe = pipeline_type.lower()
assert pipe in ("train", "val", "synth"), 'Invalid pipeline type (choices: "train", "val", "synth").'
return pipe
def _shard_size(self):
"""
Total number of samples handled by a single GPU in a single epoch.
"""
world_size = dist.get_world_size() if dist.is_initialized() else 1
if self.drop_last:
divisor = world_size * self.batch_size * self.grad_accumulation_steps
return self.dataset_size // divisor * divisor // world_size
else:
return int(math.ceil(self.dataset_size / world_size))
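    # Worked example: dataset_size=1000, world_size=4, batch_size=8,
    # grad_accumulation_steps=2 -> divisor=64, so with drop_last each GPU
    # handles 1000 // 64 * 64 // 4 = 240 samples per epoch.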
def __len__(self):
"""
Number of batches handled by each GPU.
"""
if self.drop_last:
assert self._shard_size() % self.batch_size == 0, f'{self._shard_size()} {self.batch_size}'
return int(math.ceil(self._shard_size() / self.batch_size))
def data_iterator(self):
return self._dali_data_iterator
def __iter__(self):
return self._dali_data_iterator
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/dali/data_loader.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/dali/__init__.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.distributed as dist
import numpy as np
from common.helpers import print_once
from common.text import _clean_text, punctuation_map
def normalize_string(s, symbols, punct_map):
"""
Normalizes string.
Example:
'call me at 8:00 pm!' -> 'call me at eight zero pm'
"""
labels = set(symbols)
    try:
        text = _clean_text(s, ["english_cleaners"], punct_map).strip()
        return ''.join([tok for tok in text if all(t in labels for t in tok)])
    except Exception as e:
        print_once(f"WARNING: Normalizing failed: {s} {e}")
        return None
class DaliJasperIterator(object):
"""
Returns batches of data for Jasper training:
preprocessed_signal, preprocessed_signal_length, transcript, transcript_length
    This iterator is not meant to be the entry point to the DALI processing pipeline.
Use DataLoader instead.
"""
def __init__(self, dali_pipelines, transcripts, symbols, batch_size, reader_name, train_iterator: bool):
self.transcripts = transcripts
self.symbols = symbols
self.batch_size = batch_size
from nvidia.dali.plugin.pytorch import DALIGenericIterator
from nvidia.dali.plugin.base_iterator import LastBatchPolicy
self.dali_it = DALIGenericIterator(
dali_pipelines, ["audio", "label", "audio_shape"], reader_name=reader_name,
dynamic_shape=True, auto_reset=True,
last_batch_policy=(LastBatchPolicy.DROP if train_iterator else LastBatchPolicy.PARTIAL))
@staticmethod
def _str2list(s: str):
"""
        Returns a list of floats that represents the given string.
        '0.' denotes separator
        '1.' denotes 'a'
        '27.' denotes "'"
        Assumes that the string is lower case.
        """
        out = []
        for c in s:
            if c == "'":
                out.append(27.)
            else:
                out.append(max(0., ord(c) - 96.))
        return out
@staticmethod
def _pad_lists(lists: list, pad_val=0):
"""
        Pads lists in place so that all have the same size.
        Returns a list with the original sizes of the corresponding input lists.
        """
        max_length = 0
        sizes = []
        for li in lists:
            sizes.append(len(li))
            max_length = max(max_length, len(li))
for li in lists:
li += [pad_val] * (max_length - len(li))
return sizes
def _gen_transcripts(self, labels, normalize_transcripts: bool = True):
"""
        Generate transcripts in the format expected by the NN.
"""
lists = [
self._str2list(normalize_string(self.transcripts[lab.item()], self.symbols, punctuation_map(self.symbols)))
for lab in labels
] if normalize_transcripts else [self._str2list(self.transcripts[lab.item()]) for lab in labels]
sizes = self._pad_lists(lists)
return torch.tensor(lists).cuda(), torch.tensor(sizes, dtype=torch.int32).cuda()
def __next__(self):
data = self.dali_it.__next__()
transcripts, transcripts_lengths = self._gen_transcripts(data[0]["label"])
return data[0]["audio"], data[0]["audio_shape"][:, 1], transcripts, transcripts_lengths
def next(self):
return self.__next__()
def __iter__(self):
return self
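# A minimal sketch of the label encoding helpers above (hypothetical inputs):
# `_str2list` maps 'a'..'z' to 1..26 and "'" to 27, and `_pad_lists` pads in
# place while returning the original lengths.
if __name__ == '__main__':
    encoded = [DaliJasperIterator._str2list(s) for s in ("abc", "a'b")]
    sizes = DaliJasperIterator._pad_lists(encoded)
    assert encoded == [[1., 2., 3.], [1., 27., 2.]]
    assert sizes == [3, 3]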
# TODO: refactor
class SyntheticDataIterator(object):
def __init__(self, batch_size, nfeatures, feat_min=-5., feat_max=0., txt_min=0., txt_max=23., feat_lens_max=1760,
txt_lens_max=231, regenerate=False):
"""
Args:
batch_size
nfeatures: number of features for melfbanks
feat_min: minimum value in `feat` tensor, used for randomization
feat_max: maximum value in `feat` tensor, used for randomization
txt_min: minimum value in `txt` tensor, used for randomization
txt_max: maximum value in `txt` tensor, used for randomization
regenerate: If True, regenerate random tensors for every iterator step.
If False, generate them only at start.
"""
self.batch_size = batch_size
self.nfeatures = nfeatures
self.feat_min = feat_min
self.feat_max = feat_max
self.feat_lens_max = feat_lens_max
self.txt_min = txt_min
self.txt_max = txt_max
self.txt_lens_max = txt_lens_max
self.regenerate = regenerate
if not self.regenerate:
self.feat, self.feat_lens, self.txt, self.txt_lens = self._generate_sample()
def _generate_sample(self):
feat = (self.feat_max - self.feat_min) * np.random.random_sample(
(self.batch_size, self.nfeatures, self.feat_lens_max)) + self.feat_min
feat_lens = np.random.randint(0, int(self.feat_lens_max) - 1, size=self.batch_size)
txt = (self.txt_max - self.txt_min) * np.random.random_sample(
(self.batch_size, self.txt_lens_max)) + self.txt_min
txt_lens = np.random.randint(0, int(self.txt_lens_max) - 1, size=self.batch_size)
return torch.Tensor(feat).cuda(), \
torch.Tensor(feat_lens).cuda(), \
torch.Tensor(txt).cuda(), \
torch.Tensor(txt_lens).cuda()
def __next__(self):
if self.regenerate:
return self._generate_sample()
return self.feat, self.feat_lens, self.txt, self.txt_lens
def next(self):
return self.__next__()
def __iter__(self):
return self
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/dali/iterator.py |
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import nvidia.dali.types as types
import multiprocessing
import numpy as np
import torch
import math
import itertools
class DaliPipeline():
def __init__(self, *,
train_pipeline: bool, # True if train pipeline, False if validation pipeline
device_id,
num_threads,
batch_size,
file_root: str,
file_list: str,
sample_rate,
discrete_resample_range: bool,
resample_range: list,
window_size,
window_stride,
nfeatures,
nfft,
frame_splicing_factor,
dither_coeff,
silence_threshold,
preemph_coeff,
pad_align,
max_duration,
mask_time_num_regions,
mask_time_min,
mask_time_max,
mask_freq_num_regions,
mask_freq_min,
mask_freq_max,
mask_both_num_regions,
mask_both_min_time,
mask_both_max_time,
mask_both_min_freq,
mask_both_max_freq,
preprocessing_device="gpu",
is_triton_pipeline=False):
self._dali_init_log(locals())
if torch.distributed.is_initialized():
shard_id = torch.distributed.get_rank()
n_shards = torch.distributed.get_world_size()
else:
shard_id = 0
n_shards = 1
        self.preprocessing_device = preprocessing_device.lower()
        assert self.preprocessing_device in ("cpu", "gpu"), \
            "Incorrect preprocessing device. Please choose either 'cpu' or 'gpu'"
self.frame_splicing_factor = frame_splicing_factor
# TODO(janton): Implement this
assert frame_splicing_factor == 1, "Frame splicing is not yet implemented"
self.resample_range = resample_range
self.discrete_resample_range = discrete_resample_range
self.train = train_pipeline
self.sample_rate = sample_rate
self.dither_coeff = dither_coeff
self.nfeatures = nfeatures
self.max_duration = max_duration
self.mask_params = {
'time_num_regions': mask_time_num_regions,
'time_min': mask_time_min,
'time_max': mask_time_max,
'freq_num_regions': mask_freq_num_regions,
'freq_min': mask_freq_min,
'freq_max': mask_freq_max,
'both_num_regions': mask_both_num_regions,
'both_min_time': mask_both_min_time,
'both_max_time': mask_both_max_time,
'both_min_freq': mask_both_min_freq,
'both_max_freq': mask_both_max_freq,
}
        self.do_remove_silence = silence_threshold is not None
@dali.pipeline_def
def dali_jasper_pipe():
if is_triton_pipeline:
assert not self.train, "Pipeline for Triton shall be a validation pipeline"
if torch.distributed.is_initialized():
raise RuntimeError(
"You're creating Triton pipeline, using multi-process mode. Please use single-process mode.")
encoded, label = fn.external_source(device="cpu", name="DALI_INPUT_0", no_copy=True)
else:
encoded, label = fn.readers.file(device="cpu", name="file_reader",
file_root=file_root, file_list=file_list, shard_id=shard_id,
num_shards=n_shards, shuffle_after_epoch=train_pipeline)
speed_perturbation_coeffs = None
if resample_range is not None:
if discrete_resample_range:
values = [self.resample_range[0], 1.0, self.resample_range[1]]
speed_perturbation_coeffs = fn.random.uniform(device="cpu", values=values)
else:
speed_perturbation_coeffs = fn.random.uniform(device="cpu", range=resample_range)
if self.train and speed_perturbation_coeffs is not None:
dec_sample_rate_arg = speed_perturbation_coeffs * self.sample_rate
elif resample_range is None:
dec_sample_rate_arg = self.sample_rate
else:
dec_sample_rate_arg = None
audio, _ = fn.decoders.audio(encoded, sample_rate=dec_sample_rate_arg, dtype=types.FLOAT, downmix=True)
if self.do_remove_silence:
begin, length = fn.nonsilent_region(audio, cutoff_db=silence_threshold)
audio = fn.slice(audio, begin, length, axes=[0])
# Max duration drop is performed at DataLayer stage
if self.preprocessing_device == "gpu":
audio = audio.gpu()
if self.dither_coeff != 0.:
audio = audio + fn.random.normal(audio) * self.dither_coeff
audio = fn.preemphasis_filter(audio, preemph_coeff=preemph_coeff)
spec = fn.spectrogram(audio, nfft=nfft,
window_length=window_size * sample_rate, window_step=window_stride * sample_rate)
mel_spec = fn.mel_filter_bank(spec, sample_rate=sample_rate, nfilter=self.nfeatures, normalize=True)
log_features = fn.to_decibels(mel_spec, multiplier=np.log(10), reference=1.0, cutoff_db=math.log(1e-20))
log_features_len = fn.shapes(log_features)
if self.frame_splicing_factor != 1:
log_features_len = self._div_ceil(log_features_len, self.frame_splicing_factor)
log_features = fn.normalize(log_features, axes=[1])
log_features = fn.pad(log_features, axes=[1], fill_value=0, align=pad_align)
if self.train and self._do_spectrogram_masking():
anchors, shapes = fn.external_source(source=self._cutouts_generator, num_outputs=2, cycle=True)
log_features = fn.erase(log_features, anchor=anchors, shape=shapes, axes=[0, 1], fill_value=0,
normalized_anchor=True)
# When modifying DALI pipeline returns, make sure you update `output_map` in DALIGenericIterator invocation
return log_features.gpu(), label.gpu(), log_features_len.gpu()
self.pipe_handle = dali_jasper_pipe(batch_size=batch_size, num_threads=num_threads, device_id=device_id)
def get_pipeline(self):
return self.pipe_handle
@classmethod
def from_config(cls, train_pipeline: bool, device_id, batch_size, file_root: str, file_list: str, config_data: dict,
config_features: dict, device_type: str = "gpu", do_resampling: bool = True,
num_cpu_threads=multiprocessing.cpu_count()):
max_duration = config_data['max_duration']
sample_rate = config_data['sample_rate']
silence_threshold = -60 if config_data['trim_silence'] else None
        # TODO Take into account resampling probability
# TODO config_features['speed_perturbation']['p']
if do_resampling and config_data['speed_perturbation'] is not None:
resample_range = [config_data['speed_perturbation']['min_rate'],
config_data['speed_perturbation']['max_rate']]
discrete_resample_range = config_data['speed_perturbation']['discrete']
else:
resample_range = None
discrete_resample_range = False
window_size = config_features['window_size']
window_stride = config_features['window_stride']
nfeatures = config_features['n_filt']
nfft = config_features['n_fft']
frame_splicing_factor = config_features['frame_splicing']
dither_coeff = config_features['dither']
pad_align = config_features['pad_align']
pad_to_max_duration = config_features['pad_to_max_duration']
assert not pad_to_max_duration, "Padding to max duration currently not supported in DALI"
preemph_coeff = .97
config_spec = config_features['spec_augment']
if config_spec is not None:
mask_time_num_regions = config_spec['time_masks']
mask_time_min = config_spec['min_time']
mask_time_max = config_spec['max_time']
mask_freq_num_regions = config_spec['freq_masks']
mask_freq_min = config_spec['min_freq']
mask_freq_max = config_spec['max_freq']
else:
mask_time_num_regions = 0
mask_time_min = 0
mask_time_max = 0
mask_freq_num_regions = 0
mask_freq_min = 0
mask_freq_max = 0
config_cutout = config_features['cutout_augment']
if config_cutout is not None:
mask_both_num_regions = config_cutout['masks']
mask_both_min_time = config_cutout['min_time']
mask_both_max_time = config_cutout['max_time']
mask_both_min_freq = config_cutout['min_freq']
mask_both_max_freq = config_cutout['max_freq']
else:
mask_both_num_regions = 0
mask_both_min_time = 0
mask_both_max_time = 0
mask_both_min_freq = 0
mask_both_max_freq = 0
inst = cls(train_pipeline=train_pipeline,
device_id=device_id,
preprocessing_device=device_type,
num_threads=num_cpu_threads,
batch_size=batch_size,
file_root=file_root,
file_list=file_list,
sample_rate=sample_rate,
discrete_resample_range=discrete_resample_range,
resample_range=resample_range,
window_size=window_size,
window_stride=window_stride,
nfeatures=nfeatures,
nfft=nfft,
frame_splicing_factor=frame_splicing_factor,
dither_coeff=dither_coeff,
silence_threshold=silence_threshold,
preemph_coeff=preemph_coeff,
pad_align=pad_align,
max_duration=max_duration,
mask_time_num_regions=mask_time_num_regions,
mask_time_min=mask_time_min,
mask_time_max=mask_time_max,
mask_freq_num_regions=mask_freq_num_regions,
mask_freq_min=mask_freq_min,
mask_freq_max=mask_freq_max,
mask_both_num_regions=mask_both_num_regions,
mask_both_min_time=mask_both_min_time,
mask_both_max_time=mask_both_max_time,
mask_both_min_freq=mask_both_min_freq,
mask_both_max_freq=mask_both_max_freq)
return inst.get_pipeline()
@staticmethod
def _dali_init_log(args: dict):
        if not torch.distributed.is_initialized() \
                or torch.distributed.get_rank() == 0:  # print once
max_len = max([len(ii) for ii in args.keys()])
fmt_string = '\t%' + str(max_len) + 's : %s'
print('Initializing DALI with parameters:')
for keyPair in sorted(args.items()):
print(fmt_string % keyPair)
@staticmethod
def _div_ceil(dividend, divisor):
return (dividend + (divisor - 1)) // divisor
def _do_spectrogram_masking(self):
return self.mask_params['time_num_regions'] > 0 or self.mask_params['freq_num_regions'] > 0 or \
self.mask_params['both_num_regions'] > 0
@staticmethod
def _interleave_lists(*lists):
"""
[*, **, ***], [1, 2, 3], [a, b, c] -> [*, 1, a, **, 2, b, ***, 3, c]
Returns:
iterator over interleaved list
"""
assert all((len(lists[0]) == len(test_l) for test_l in lists)), "All lists have to have the same length"
return itertools.chain(*zip(*lists))
def _generate_cutouts(self):
"""
        Generates anchors and shapes of the cutout regions.
        A single call generates one batch of data.

        Returns:
            (anchors, shapes) to be passed to DALI's Erase operator, where
            anchors = [f0 t0 f1 t1 ...]
            shapes = [f0w t0h f1w t1h ...]
"""
MAX_TIME_DIMENSION = 20 * 16000
freq_anchors = np.random.random(self.mask_params['freq_num_regions'])
time_anchors = np.random.random(self.mask_params['time_num_regions'])
both_anchors_freq = np.random.random(self.mask_params['both_num_regions'])
both_anchors_time = np.random.random(self.mask_params['both_num_regions'])
anchors = []
for anch in freq_anchors:
anchors.extend([anch, 0])
for anch in time_anchors:
anchors.extend([0, anch])
for t, f in zip(both_anchors_time, both_anchors_freq):
anchors.extend([f, t])
shapes = []
shapes.extend(
self._interleave_lists(
np.random.randint(self.mask_params['freq_min'], self.mask_params['freq_max'] + 1,
self.mask_params['freq_num_regions']),
# XXX: Here, a time dimension of the spectrogram shall be passed.
# However, in DALI ArgumentInput can't come from GPU.
# So we leave the job for Erase (masking operator) to get it together.
[int(MAX_TIME_DIMENSION)] * self.mask_params['freq_num_regions']
)
)
shapes.extend(
self._interleave_lists(
[self.nfeatures] * self.mask_params['time_num_regions'],
np.random.randint(self.mask_params['time_min'], self.mask_params['time_max'] + 1,
self.mask_params['time_num_regions'])
)
)
shapes.extend(
self._interleave_lists(
np.random.randint(self.mask_params['both_min_freq'], self.mask_params['both_max_freq'] + 1,
self.mask_params['both_num_regions']),
np.random.randint(self.mask_params['both_min_time'], self.mask_params['both_max_time'] + 1,
self.mask_params['both_num_regions'])
)
)
return anchors, shapes
def _cutouts_generator(self):
"""
        Generator that wraps cutout creation in order to randomize inputs
        and allow passing them to DALI's ExternalSource operator.
"""
def tuples2list(tuples: list):
"""
[(a, b), (c, d)] -> [[a, c], [b, d]]
"""
return map(list, zip(*tuples))
[anchors, shapes] = tuples2list([self._generate_cutouts() for _ in range(self.pipe_handle.max_batch_size)])
yield np.array(anchors, dtype=np.float32), np.array(shapes, dtype=np.float32)
class DaliTritonPipeline(DaliPipeline):
def __init__(self, **kwargs):
kwargs['is_triton_pipeline'] = True
super().__init__(**kwargs)
def serialize_dali_triton_pipeline(output_path: str, config_data: dict, config_features: dict):
pipe = DaliTritonPipeline.from_config(train_pipeline=False, device_id=-1, batch_size=-1, file_root=None,
file_list=None, config_data=config_data, config_features=config_features,
do_resampling=False, num_cpu_threads=-1)
pipe.serialize(filename=output_path)
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/common/dali/pipeline.py |
import copy
import inspect
import typing
from ast import literal_eval
from contextlib import suppress
from numbers import Number
import yaml
from .model import JasperDecoderForCTC, JasperBlock, JasperEncoder
from common.audio import GainPerturbation, ShiftPerturbation, SpeedPerturbation
from common.dataset import AudioDataset
from common.features import CutoutAugment, FilterbankFeatures, SpecAugment
from common.helpers import print_once
def default_args(klass):
sig = inspect.signature(klass.__init__)
return {k: v.default for k,v in sig.parameters.items() if k != 'self'}
def load(fpath):
if fpath.endswith('.toml'):
raise ValueError('.toml config format has been changed to .yaml')
    with open(fpath, 'r') as f:
        cfg = yaml.safe_load(f)
# Reload to deep copy shallow copies, which were made with yaml anchors
yaml.Dumper.ignore_aliases = lambda *args: True
cfg = yaml.dump(cfg)
cfg = yaml.safe_load(cfg)
return cfg
def validate_and_fill(klass, user_conf, ignore_unk=[], optional=[]):
conf = default_args(klass)
for k,v in user_conf.items():
assert k in conf or k in ignore_unk, f'Unknown parameter {k} for {klass}'
conf[k] = v
# Keep only mandatory or optional-nonempty
conf = {k:v for k,v in conf.items()
if k not in optional or v is not inspect.Parameter.empty}
# Validate
for k,v in conf.items():
assert v is not inspect.Parameter.empty, \
f'Value for {k} not specified for {klass}'
return conf
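# A minimal sketch of validate_and_fill (hypothetical class and config):
if __name__ == '__main__':
    class _Dummy:
        def __init__(self, a, b=1):
            pass
    # Defaults fill in 'b'; a missing mandatory 'a' or an unknown key
    # would raise an AssertionError instead.
    assert validate_and_fill(_Dummy, {'a': 0}) == {'a': 0, 'b': 1}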
def input(conf_yaml, split='train'):
conf = copy.deepcopy(conf_yaml[f'input_{split}'])
conf_dataset = conf.pop('audio_dataset')
conf_features = conf.pop('filterbank_features')
# Validate known inner classes
inner_classes = [
(conf_dataset, 'speed_perturbation', SpeedPerturbation),
(conf_dataset, 'gain_perturbation', GainPerturbation),
(conf_dataset, 'shift_perturbation', ShiftPerturbation),
(conf_features, 'spec_augment', SpecAugment),
(conf_features, 'cutout_augment', CutoutAugment),
]
for conf_tgt, key, klass in inner_classes:
if key in conf_tgt:
conf_tgt[key] = validate_and_fill(klass, conf_tgt[key])
for k in conf:
raise ValueError(f'Unknown key {k}')
# Validate outer classes
conf_dataset = validate_and_fill(
AudioDataset, conf_dataset,
optional=['data_dir', 'labels', 'manifest_fpaths'])
conf_features = validate_and_fill(
FilterbankFeatures, conf_features)
# Check params shared between classes
shared = ['sample_rate', 'max_duration', 'pad_to_max_duration']
for sh in shared:
assert conf_dataset[sh] == conf_features[sh], (
f'{sh} should match in Dataset and FeatureProcessor: '
f'{conf_dataset[sh]}, {conf_features[sh]}')
return conf_dataset, conf_features
def encoder(conf):
"""Validate config for JasperEncoder and subsequent JasperBlocks"""
# Validate, but don't overwrite with defaults
for blk in conf['jasper']['encoder']['blocks']:
validate_and_fill(JasperBlock, blk, optional=['infilters'],
ignore_unk=['residual_dense'])
return validate_and_fill(JasperEncoder, conf['jasper']['encoder'])
def decoder(conf, n_classes):
decoder_kw = {'n_classes': n_classes, **conf['jasper']['decoder']}
return validate_and_fill(JasperDecoderForCTC, decoder_kw)
def apply_config_overrides(conf, args):
if args.override_config is None:
return
for override_key_val in args.override_config:
key, val = override_key_val.split('=')
with suppress(TypeError, ValueError):
val = literal_eval(val)
apply_nested_config_override(conf, key, val)
def apply_nested_config_override(conf, key_str, val):
fields = key_str.split('.')
for f in fields[:-1]:
conf = conf[f]
f = fields[-1]
assert (f not in conf
or type(val) is type(conf[f])
or (isinstance(val, Number) and isinstance(conf[f], Number)))
conf[f] = val
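# For example, a hypothetical override string
# 'input_train.audio_dataset.sample_rate=8000' descends into
# conf['input_train']['audio_dataset'] and replaces 'sample_rate';
# literal_eval has already turned the string '8000' into an int.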
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/jasper/config.py |
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import filter_warnings
activations = {
"hardtanh": nn.Hardtanh,
"relu": nn.ReLU,
"selu": nn.SELU,
}
def init_weights(m, mode='xavier_uniform'):
if type(m) == nn.Conv1d or type(m) == MaskedConv1d:
if mode == 'xavier_uniform':
nn.init.xavier_uniform_(m.weight, gain=1.0)
elif mode == 'xavier_normal':
nn.init.xavier_normal_(m.weight, gain=1.0)
elif mode == 'kaiming_uniform':
nn.init.kaiming_uniform_(m.weight, nonlinearity="relu")
elif mode == 'kaiming_normal':
nn.init.kaiming_normal_(m.weight, nonlinearity="relu")
else:
raise ValueError("Unknown Initialization mode: {0}".format(mode))
elif type(m) == nn.BatchNorm1d:
if m.track_running_stats:
m.running_mean.zero_()
m.running_var.fill_(1)
m.num_batches_tracked.zero_()
if m.affine:
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
def get_same_padding(kernel_size, stride, dilation):
if stride > 1 and dilation > 1:
raise ValueError("Only stride OR dilation may be greater than 1")
return (kernel_size // 2) * dilation
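# For example, kernel_size=11, stride=1, dilation=1 gives padding=5, so an
# odd-sized kernel preserves the input length ('same' padding).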
class MaskedConv1d(nn.Conv1d):
"""1D convolution with sequence masking
"""
__constants__ = ["masked"]
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=False, masked=True):
super(MaskedConv1d, self).__init__(
in_channels, out_channels, kernel_size, stride=stride,
padding=padding, dilation=dilation, groups=groups, bias=bias)
self.masked = masked
def get_seq_len(self, lens):
# rounding_mode not available in 20.10 container
# return torch.div((lens + 2 * self.padding[0] - self.dilation[0]
# * (self.kernel_size[0] - 1) - 1), self.stride[0], rounding_mode="floor") + 1
return torch.floor((lens + 2 * self.padding[0] - self.dilation[0]
* (self.kernel_size[0] - 1) - 1) / self.stride[0]).long() + 1
def forward(self, x, x_lens=None):
if self.masked:
max_len = x.size(2)
idxs = torch.arange(max_len, dtype=x_lens.dtype, device=x_lens.device)
mask = idxs.expand(x_lens.size(0), max_len) >= x_lens.unsqueeze(1)
x = x.masked_fill(mask.unsqueeze(1).to(device=x.device), 0)
x_lens = self.get_seq_len(x_lens)
return super(MaskedConv1d, self).forward(x), x_lens
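# A minimal CPU-only sketch of MaskedConv1d (hypothetical shapes):
if __name__ == '__main__':
    conv = MaskedConv1d(1, 1, kernel_size=3, padding=1)
    x = torch.ones(2, 1, 5)              # (batch, channels, time)
    lens = torch.tensor([5, 3])          # valid lengths per sample
    out, out_lens = conv(x, lens)        # frames past `lens` are zeroed
    assert out.shape == (2, 1, 5)
    assert out_lens.tolist() == [5, 3]   # stride=1 keeps lengths unchanged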
class JasperBlock(nn.Module):
__constants__ = ["use_conv_masks"]
"""Jasper Block. See https://arxiv.org/pdf/1904.03288.pdf
"""
def __init__(self, infilters, filters, repeat=3, kernel_size=11, stride=1,
dilation=1, padding='same', dropout=0.2, activation=None,
residual=True, residual_panes=[], use_conv_masks=False):
super(JasperBlock, self).__init__()
assert padding == "same", "Only 'same' padding is supported."
padding_val = get_same_padding(kernel_size[0], stride[0], dilation[0])
self.use_conv_masks = use_conv_masks
self.conv = nn.ModuleList()
for i in range(repeat):
self.conv.extend(self._conv_bn(infilters if i == 0 else filters,
filters,
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding_val))
if i < repeat - 1:
self.conv.extend(self._act_dropout(dropout, activation))
self.res = nn.ModuleList() if residual else None
res_panes = residual_panes.copy()
self.dense_residual = residual
if residual:
if len(residual_panes) == 0:
res_panes = [infilters]
self.dense_residual = False
for ip in res_panes:
self.res.append(nn.ModuleList(
self._conv_bn(ip, filters, kernel_size=1)))
self.out = nn.Sequential(*self._act_dropout(dropout, activation))
def _conv_bn(self, in_channels, out_channels, **kw):
return [MaskedConv1d(in_channels, out_channels,
masked=self.use_conv_masks, **kw),
nn.BatchNorm1d(out_channels, eps=1e-3, momentum=0.1)]
def _act_dropout(self, dropout=0.2, activation=None):
return [activation or nn.Hardtanh(min_val=0.0, max_val=20.0),
nn.Dropout(p=dropout)]
def forward(self, xs, xs_lens=None):
if not self.use_conv_masks:
xs_lens = 0
# forward convolutions
out = xs[-1]
lens = xs_lens
for i, l in enumerate(self.conv):
if isinstance(l, MaskedConv1d):
out, lens = l(out, lens)
else:
out = l(out)
# residuals
if self.res is not None:
for i, layer in enumerate(self.res):
res_out = xs[i]
for j, res_layer in enumerate(layer):
if j == 0: # and self.use_conv_mask:
res_out, _ = res_layer(res_out, xs_lens)
else:
res_out = res_layer(res_out)
out += res_out
# output
out = self.out(out)
if self.res is not None and self.dense_residual:
out = xs + [out]
else:
out = [out]
if self.use_conv_masks:
return out, lens
else:
return out, None
class JasperEncoder(nn.Module):
__constants__ = ["use_conv_masks"]
def __init__(self, in_feats, activation, frame_splicing=1,
init='xavier_uniform', use_conv_masks=False, blocks=[]):
super(JasperEncoder, self).__init__()
self.use_conv_masks = use_conv_masks
self.layers = nn.ModuleList()
in_feats *= frame_splicing
all_residual_panes = []
for i,blk in enumerate(blocks):
blk['activation'] = activations[activation]()
has_residual_dense = blk.pop('residual_dense', False)
if has_residual_dense:
all_residual_panes += [in_feats]
blk['residual_panes'] = all_residual_panes
else:
blk['residual_panes'] = []
self.layers.append(
JasperBlock(in_feats, use_conv_masks=use_conv_masks, **blk))
in_feats = blk['filters']
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, x, x_lens=None):
out, out_lens = [x], x_lens
for l in self.layers:
out, out_lens = l(out, out_lens)
return out, out_lens
class JasperDecoderForCTC(nn.Module):
def __init__(self, in_feats, n_classes, init='xavier_uniform'):
super(JasperDecoderForCTC, self).__init__()
self.layers = nn.Sequential(
nn.Conv1d(in_feats, n_classes, kernel_size=1, bias=True),)
self.apply(lambda x: init_weights(x, mode=init))
def forward(self, enc_out):
out = self.layers(enc_out[-1]).transpose(1, 2)
return F.log_softmax(out, dim=2)
class GreedyCTCDecoder(nn.Module):
@torch.no_grad()
def forward(self, log_probs, log_prob_lens=None):
if log_prob_lens is not None:
max_len = log_probs.size(1)
idxs = torch.arange(max_len, dtype=log_prob_lens.dtype,
device=log_prob_lens.device)
mask = idxs.unsqueeze(0) >= log_prob_lens.unsqueeze(1)
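            # Force padded time steps to predict the blank (last) class by
            # setting its log-probability to +Inf before the argmax.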
log_probs[:,:,-1] = log_probs[:,:,-1].masked_fill(mask, float("Inf"))
return log_probs.argmax(dim=-1, keepdim=False).int()
class Jasper(nn.Module):
def __init__(self, encoder_kw, decoder_kw, transpose_in=False):
super(Jasper, self).__init__()
self.transpose_in = transpose_in
self.encoder = JasperEncoder(**encoder_kw)
self.decoder = JasperDecoderForCTC(**decoder_kw)
def forward(self, x, x_lens=None):
if self.encoder.use_conv_masks:
assert x_lens is not None
enc, enc_lens = self.encoder(x, x_lens)
out = self.decoder(enc)
return out, enc_lens
else:
if self.transpose_in:
x = x.transpose(1, 2)
enc, _ = self.encoder(x)
out = self.decoder(enc)
return out # torchscript refuses to output None
# TODO Explicitly add x_lens=None for inference (now x can be a Tensor or tuple)
def infer(self, x, x_lens=None):
if self.encoder.use_conv_masks:
return self.forward(x, x_lens)
else:
ret = self.forward(x)
return ret, len(ret)
class CTCLossNM:
def __init__(self, n_classes):
self._criterion = nn.CTCLoss(blank=n_classes-1, reduction='none')
def __call__(self, log_probs, targets, input_length, target_length):
input_length = input_length.long()
target_length = target_length.long()
targets = targets.long()
loss = self._criterion(log_probs.transpose(1, 0), targets, input_length,
target_length)
# note that this is different from reduction = 'mean'
# because we are not dividing by target lengths
return torch.mean(loss)
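# A minimal CPU-only sketch of the loss and greedy decoder (hypothetical sizes):
if __name__ == '__main__':
    n_classes = 29  # e.g. 28 symbols + CTC blank
    log_probs = torch.randn(2, 50, n_classes).log_softmax(dim=-1)
    loss = CTCLossNM(n_classes)(log_probs,
                                torch.randint(0, n_classes - 1, (2, 20)),
                                torch.tensor([50, 50]),   # input lengths
                                torch.tensor([20, 15]))   # target lengths
    decoded = GreedyCTCDecoder()(log_probs, torch.tensor([50, 40]))
    assert loss.ndim == 0 and decoded.shape == (2, 50)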
| DeepLearningExamples-master | PyTorch/SpeechRecognition/Jasper/jasper/model.py |
#!/usr/bin/env python
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
os.environ['KMP_AFFINITY'] = 'disabled'
import argparse
import itertools
import logging
import sys
import warnings
from itertools import product
import dllogger
import numpy as np
import torch
import seq2seq.gpu_affinity as gpu_affinity
import seq2seq.utils as utils
from seq2seq.data.dataset import RawTextDataset
from seq2seq.data.dataset import SyntheticDataset
from seq2seq.data.tokenizer import Tokenizer
from seq2seq.inference import tables
from seq2seq.inference.translator import Translator
from seq2seq.models.gnmt import GNMT
def parse_args():
"""
Parse commandline arguments.
"""
def exclusive_group(group, name, default, help):
destname = name.replace('-', '_')
subgroup = group.add_mutually_exclusive_group(required=False)
subgroup.add_argument(f'--{name}', dest=f'{destname}',
action='store_true',
help=f'{help} (use \'--no-{name}\' to disable)')
subgroup.add_argument(f'--no-{name}', dest=f'{destname}',
action='store_false', help=argparse.SUPPRESS)
subgroup.set_defaults(**{destname: default})
parser = argparse.ArgumentParser(
description='GNMT Translate',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# dataset
dataset = parser.add_argument_group('data setup')
dataset.add_argument('-o', '--output', required=False,
help='full path to the output file \
if not specified, then the output will be printed')
dataset.add_argument('-r', '--reference', default=None,
help='full path to the file with reference \
translations (for sacrebleu, raw text)')
dataset.add_argument('-m', '--model', type=str, default=None,
help='full path to the model checkpoint file')
dataset.add_argument('--synthetic', action='store_true',
help='use synthetic dataset')
dataset.add_argument('--synthetic-batches', type=int, default=64,
help='number of synthetic batches to generate')
dataset.add_argument('--synthetic-vocab', type=int, default=32320,
help='size of synthetic vocabulary')
dataset.add_argument('--synthetic-len', type=int, default=50,
help='sequence length of synthetic samples')
source = dataset.add_mutually_exclusive_group(required=False)
source.add_argument('-i', '--input', required=False,
help='full path to the input file (raw text)')
source.add_argument('-t', '--input-text', nargs='+', required=False,
help='raw input text')
exclusive_group(group=dataset, name='sort', default=False,
help='sorts dataset by sequence length')
# parameters
params = parser.add_argument_group('inference setup')
params.add_argument('--batch-size', nargs='+', default=[128], type=int,
help='batch size per GPU')
params.add_argument('--beam-size', nargs='+', default=[5], type=int,
help='beam size')
params.add_argument('--max-seq-len', default=80, type=int,
help='maximum generated sequence length')
params.add_argument('--len-norm-factor', default=0.6, type=float,
help='length normalization factor')
params.add_argument('--cov-penalty-factor', default=0.1, type=float,
help='coverage penalty factor')
params.add_argument('--len-norm-const', default=5.0, type=float,
help='length normalization constant')
# general setup
general = parser.add_argument_group('general setup')
general.add_argument('--math', nargs='+', default=['fp16'],
choices=['fp16', 'fp32', 'tf32'], help='precision')
exclusive_group(group=general, name='env', default=False,
help='print info about execution env')
exclusive_group(group=general, name='bleu', default=True,
help='compares with reference translation and computes \
BLEU')
exclusive_group(group=general, name='cuda', default=True,
help='enables cuda')
exclusive_group(group=general, name='cudnn', default=True,
help='enables cudnn')
batch_first_parser = general.add_mutually_exclusive_group(required=False)
batch_first_parser.add_argument('--batch-first', dest='batch_first',
action='store_true',
help='uses (batch, seq, feature) data \
format for RNNs')
batch_first_parser.add_argument('--seq-first', dest='batch_first',
action='store_false',
help='uses (seq, batch, feature) data \
format for RNNs')
batch_first_parser.set_defaults(batch_first=True)
general.add_argument('--save-dir', default='gnmt',
help='path to directory with results, it will be \
automatically created if it does not exist')
general.add_argument('--dllog-file', type=str, default='eval_log.json',
help='Name of the DLLogger output file')
general.add_argument('--print-freq', '-p', default=1, type=int,
help='print log every PRINT_FREQ batches')
general.add_argument('--affinity', type=str,
default='single_unique',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
# benchmarking
benchmark = parser.add_argument_group('benchmark setup')
benchmark.add_argument('--target-perf', default=None, type=float,
help='target inference performance (in tokens \
per second)')
benchmark.add_argument('--target-bleu', default=None, type=float,
help='target accuracy')
benchmark.add_argument('--repeat', nargs='+', default=[1], type=float,
help='loops over the dataset REPEAT times, flag \
accepts multiple arguments, one for each specified \
batch size')
benchmark.add_argument('--warmup', default=0, type=int,
help='warmup iterations for performance counters')
benchmark.add_argument('--percentiles', nargs='+', type=int,
default=(90, 95, 99),
help='Percentiles for confidence intervals for \
throughput/latency benchmarks')
exclusive_group(group=benchmark, name='tables', default=False,
help='print accuracy, throughput and latency results in \
tables')
# distributed
distributed = parser.add_argument_group('distributed setup')
distributed.add_argument('--local_rank', type=int,
default=os.getenv('LOCAL_RANK', 0),
help='Used for multi-process training.')
args = parser.parse_args()
if args.input_text:
args.bleu = False
if args.bleu and args.reference is None:
parser.error('--bleu requires --reference')
if ('fp16' in args.math or 'tf32' in args.math) and not args.cuda:
parser.error(f'--math {args.math} requires --cuda')
if len(list(product(args.math, args.batch_size, args.beam_size))) > 1:
args.target_bleu = None
args.target_perf = None
args.repeat = dict(itertools.zip_longest(args.batch_size,
args.repeat,
fillvalue=1))
return args
def main():
"""
Launches translation (inference).
Inference is executed on a single GPU, implementation supports beam search
with length normalization and coverage penalty.
"""
args = parse_args()
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
device = utils.set_device(args.cuda, args.local_rank)
utils.init_distributed(args.cuda)
args.rank = utils.get_rank()
os.makedirs(args.save_dir, exist_ok=True)
utils.setup_logging()
dllog_file = os.path.join(args.save_dir, args.dllog_file)
utils.setup_dllogger(enabled=True, filename=dllog_file)
if args.env:
utils.log_env_info()
logging.info(f'Run arguments: {args}')
dllogger.log(step='PARAMETER', data=vars(args))
if not args.cuda and torch.cuda.is_available():
warnings.warn('cuda is available but not enabled')
if not args.cudnn:
torch.backends.cudnn.enabled = False
# load checkpoint and deserialize to CPU (to save GPU memory)
if args.model:
checkpoint = torch.load(args.model, map_location={'cuda:0': 'cpu'})
# build GNMT model
tokenizer = Tokenizer()
tokenizer.set_state(checkpoint['tokenizer'])
model_config = checkpoint['model_config']
model_config['batch_first'] = args.batch_first
model_config['vocab_size'] = tokenizer.vocab_size
model = GNMT(**model_config)
model.load_state_dict(checkpoint['state_dict'])
elif args.synthetic:
model = GNMT(args.synthetic_vocab, batch_first=args.batch_first)
tokenizer = None
else:
raise RuntimeError('Specify model either with --synthetic or with --model flag')
# construct the dataset
if args.input:
data = RawTextDataset(raw_datafile=args.input,
tokenizer=tokenizer,
sort=args.sort,
)
elif args.input_text:
data = RawTextDataset(raw_data=args.input_text,
tokenizer=tokenizer,
sort=args.sort,
)
elif args.synthetic:
data = SyntheticDataset(args.synthetic_vocab, args.synthetic_len, args.batch_size[0] * args.synthetic_batches)
latency_table = tables.LatencyTable(args.percentiles)
throughput_table = tables.ThroughputTable(args.percentiles)
accuracy_table = tables.AccuracyTable('BLEU')
dtype = {
'fp32': torch.FloatTensor,
'tf32': torch.FloatTensor,
'fp16': torch.HalfTensor
}
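    # TF32 keeps FP32 storage; it only changes how Ampere tensor cores execute
    # matmuls/convolutions, so it maps to the same tensor type as fp32.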
for (math, batch_size, beam_size) in product(args.math, args.batch_size,
args.beam_size):
logging.info(f'math: {math}, batch size: {batch_size}, '
f'beam size: {beam_size}')
model.type(dtype[math])
model = model.to(device)
model.eval()
# build the data loader
loader = data.get_loader(
batch_size=batch_size,
batch_first=args.batch_first,
pad=True,
repeat=args.repeat[batch_size],
num_workers=0,
)
# build the translator object
translator = Translator(
model=model,
tokenizer=tokenizer,
loader=loader,
beam_size=beam_size,
max_seq_len=args.max_seq_len,
len_norm_factor=args.len_norm_factor,
len_norm_const=args.len_norm_const,
cov_penalty_factor=args.cov_penalty_factor,
print_freq=args.print_freq,
)
# execute the inference
output, stats = translator.run(
calc_bleu=args.bleu,
eval_path=args.output,
summary=True,
warmup=args.warmup,
reference_path=args.reference,
)
# print translated outputs
if not args.synthetic and (not args.output and args.rank == 0):
            logging.info('Translated output:')
for out in output:
print(out)
key = (batch_size, beam_size)
latency_table.add(key, {math: stats['runtimes']})
throughput_table.add(key, {math: stats['throughputs']})
accuracy_table.add(key, {math: stats['bleu']})
if args.tables:
accuracy_table.write('Inference accuracy', args.math)
if 'fp16' in args.math and 'fp32' in args.math:
relative = 'fp32'
elif 'fp16' in args.math and 'tf32' in args.math:
relative = 'tf32'
else:
relative = None
if 'fp32' in args.math:
throughput_table.write('Inference throughput', 'fp32')
if 'tf32' in args.math:
throughput_table.write('Inference throughput', 'tf32')
if 'fp16' in args.math:
throughput_table.write('Inference throughput', 'fp16',
relative=relative)
if 'fp32' in args.math:
latency_table.write('Inference latency', 'fp32')
if 'tf32' in args.math:
latency_table.write('Inference latency', 'tf32')
if 'fp16' in args.math:
latency_table.write('Inference latency', 'fp16',
relative=relative, reverse_speedup=True)
summary = {
'eval_throughput': stats['tokens_per_sec'],
'eval_bleu': stats['bleu'],
'eval_avg_latency': np.array(stats['runtimes']).mean(),
}
for p in args.percentiles:
summary[f'eval_{p}%_latency'] = np.percentile(stats['runtimes'], p)
dllogger.log(step=tuple(), data=summary)
passed = utils.benchmark(stats['bleu'], args.target_bleu,
stats['tokens_per_sec'], args.target_perf)
return passed
if __name__ == '__main__':
passed = main()
if not passed:
sys.exit(1)
| DeepLearningExamples-master | PyTorch/Translation/GNMT/translate.py |
#!/usr/bin/env python
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
os.environ['KMP_AFFINITY'] = 'disabled'
import argparse
import logging
import sys
import time
from ast import literal_eval
import dllogger
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data.distributed
import seq2seq.data.config as config
import seq2seq.gpu_affinity as gpu_affinity
import seq2seq.train.trainer as trainers
import seq2seq.utils as utils
from seq2seq.data.dataset import LazyParallelDataset
from seq2seq.data.dataset import ParallelDataset
from seq2seq.data.dataset import TextDataset
from seq2seq.data.tokenizer import Tokenizer
from seq2seq.inference.translator import Translator
from seq2seq.models.gnmt import GNMT
from seq2seq.train.smoothing import LabelSmoothing
from seq2seq.train.table import TrainingTable
def parse_args():
"""
Parse commandline arguments.
"""
def exclusive_group(group, name, default, help):
destname = name.replace('-', '_')
subgroup = group.add_mutually_exclusive_group(required=False)
subgroup.add_argument(f'--{name}', dest=f'{destname}',
action='store_true',
help=f'{help} (use \'--no-{name}\' to disable)')
subgroup.add_argument(f'--no-{name}', dest=f'{destname}',
action='store_false', help=argparse.SUPPRESS)
subgroup.set_defaults(**{destname: default})
parser = argparse.ArgumentParser(
description='GNMT training',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# dataset
dataset = parser.add_argument_group('dataset setup')
dataset.add_argument('--dataset-dir', default='data/wmt16_de_en',
help='path to the directory with training/test data')
dataset.add_argument('--src-lang',
default='en',
help='source language')
dataset.add_argument('--tgt-lang',
default='de',
help='target language')
dataset.add_argument('--vocab',
default='vocab.bpe.32000',
help='path to the vocabulary file \
(relative to DATASET_DIR directory)')
dataset.add_argument('-bpe', '--bpe-codes', default='bpe.32000',
help='path to the file with bpe codes \
(relative to DATASET_DIR directory)')
dataset.add_argument('--train-src',
default='train.tok.clean.bpe.32000.en',
help='path to the training source data file \
(relative to DATASET_DIR directory)')
dataset.add_argument('--train-tgt',
default='train.tok.clean.bpe.32000.de',
help='path to the training target data file \
(relative to DATASET_DIR directory)')
dataset.add_argument('--val-src',
default='newstest_dev.tok.clean.bpe.32000.en',
help='path to the validation source data file \
(relative to DATASET_DIR directory)')
dataset.add_argument('--val-tgt',
default='newstest_dev.tok.clean.bpe.32000.de',
help='path to the validation target data file \
(relative to DATASET_DIR directory)')
dataset.add_argument('--test-src',
default='newstest2014.tok.bpe.32000.en',
help='path to the test source data file \
(relative to DATASET_DIR directory)')
dataset.add_argument('--test-tgt',
default='newstest2014.de',
help='path to the test target data file \
(relative to DATASET_DIR directory)')
# results
results = parser.add_argument_group('results setup')
results.add_argument('--save-dir', default='gnmt',
help='path to directory with results, it will be \
automatically created if it does not exist')
results.add_argument('--print-freq', default=10, type=int,
help='print log every PRINT_FREQ batches')
results.add_argument('--warmup', default=1, type=int,
help='number of warmup iterations for performance \
counters')
# model
model = parser.add_argument_group('model setup')
model.add_argument('--hidden-size', default=1024, type=int,
help='hidden size of the model')
model.add_argument('--num-layers', default=4, type=int,
help='number of RNN layers in encoder and in decoder')
model.add_argument('--dropout', default=0.2, type=float,
help='dropout applied to input of RNN cells')
exclusive_group(group=model, name='share-embedding', default=True,
help='use shared embeddings for encoder and decoder')
model.add_argument('--smoothing', default=0.1, type=float,
help='label smoothing, if equal to zero model will use \
CrossEntropyLoss, if not zero model will be trained \
with label smoothing loss')
# setup
general = parser.add_argument_group('general setup')
general.add_argument('--math', default='fp16',
choices=['fp16', 'fp32', 'tf32', 'manual_fp16'],
help='precision')
general.add_argument('--seed', default=None, type=int,
help='master seed for random number generators, if \
"seed" is undefined then the master seed will be \
sampled from random.SystemRandom()')
general.add_argument('--prealloc-mode', default='always', type=str,
choices=['off', 'once', 'always'],
help='controls preallocation')
general.add_argument('--dllog-file', type=str, default='train_log.json',
help='Name of the DLLogger output file')
general.add_argument('--affinity', type=str,
default='socket_unique_interleaved',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
exclusive_group(group=general, name='eval', default=True,
help='run validation and test after every epoch')
exclusive_group(group=general, name='env', default=True,
help='print info about execution env')
exclusive_group(group=general, name='cuda', default=True,
help='enables cuda')
exclusive_group(group=general, name='cudnn', default=True,
help='enables cudnn')
exclusive_group(group=general, name='log-all-ranks', default=True,
help='enables logging from all distributed ranks, if \
disabled then only logs from rank 0 are reported')
# training
training = parser.add_argument_group('training setup')
dataset.add_argument('--train-max-size', default=None, type=int,
help='use at most TRAIN_MAX_SIZE elements from \
training dataset (useful for benchmarking), by \
default uses entire dataset')
training.add_argument('--train-batch-size', default=128, type=int,
help='training batch size per worker')
training.add_argument('--train-global-batch-size', default=None, type=int,
help='global training batch size, this argument \
does not have to be defined, if it is defined it \
will be used to automatically \
compute train_iter_size \
using the equation: train_iter_size = \
train_global_batch_size // (train_batch_size * \
world_size)')
training.add_argument('--train-iter-size', metavar='N', default=1,
type=int,
help='training iter size, training loop will \
accumulate gradients over N iterations and execute \
optimizer every N steps')
training.add_argument('--epochs', default=6, type=int,
help='max number of training epochs')
training.add_argument('--grad-clip', default=5.0, type=float,
help='enables gradient clipping and sets maximum \
norm of gradients')
training.add_argument('--train-max-length', default=50, type=int,
help='maximum sequence length for training \
(including special BOS and EOS tokens)')
training.add_argument('--train-min-length', default=0, type=int,
help='minimum sequence length for training \
(including special BOS and EOS tokens)')
training.add_argument('--train-loader-workers', default=2, type=int,
help='number of workers for training data loading')
training.add_argument('--batching', default='bucketing', type=str,
choices=['random', 'sharding', 'bucketing'],
help='select batching algorithm')
training.add_argument('--shard-size', default=80, type=int,
help='shard size for "sharding" batching algorithm, \
in multiples of global batch size')
training.add_argument('--num-buckets', default=5, type=int,
help='number of buckets for "bucketing" batching \
algorithm')
# optimizer
optimizer = parser.add_argument_group('optimizer setup')
optimizer.add_argument('--optimizer', type=str, default='Adam',
help='training optimizer')
optimizer.add_argument('--lr', type=float, default=2.00e-3,
help='learning rate')
optimizer.add_argument('--optimizer-extra', type=str,
default="{}",
help='extra options for the optimizer')
# mixed precision loss scaling
loss_scaling = parser.add_argument_group(
'mixed precision loss scaling setup'
)
loss_scaling.add_argument('--init-scale', type=float, default=8192,
help='initial loss scale')
loss_scaling.add_argument('--upscale-interval', type=float, default=128,
help='loss upscaling interval')
# scheduler
scheduler = parser.add_argument_group('learning rate scheduler setup')
scheduler.add_argument('--warmup-steps', type=str, default='200',
help='number of learning rate warmup iterations')
scheduler.add_argument('--remain-steps', type=str, default='0.666',
help='starting iteration for learning rate decay')
scheduler.add_argument('--decay-interval', type=str, default='None',
help='interval between learning rate decay steps')
scheduler.add_argument('--decay-steps', type=int, default=4,
help='max number of learning rate decay steps')
scheduler.add_argument('--decay-factor', type=float, default=0.5,
help='learning rate decay factor')
# validation
val = parser.add_argument_group('validation setup')
val.add_argument('--val-batch-size', default=64, type=int,
help='batch size for validation')
val.add_argument('--val-max-length', default=125, type=int,
help='maximum sequence length for validation \
(including special BOS and EOS tokens)')
val.add_argument('--val-min-length', default=0, type=int,
help='minimum sequence length for validation \
(including special BOS and EOS tokens)')
val.add_argument('--val-loader-workers', default=0, type=int,
help='number of workers for validation data loading')
# test
test = parser.add_argument_group('test setup')
test.add_argument('--test-batch-size', default=128, type=int,
help='batch size for test')
test.add_argument('--test-max-length', default=150, type=int,
help='maximum sequence length for test \
(including special BOS and EOS tokens)')
test.add_argument('--test-min-length', default=0, type=int,
help='minimum sequence length for test \
(including special BOS and EOS tokens)')
test.add_argument('--beam-size', default=5, type=int,
help='beam size')
test.add_argument('--len-norm-factor', default=0.6, type=float,
help='length normalization factor')
test.add_argument('--cov-penalty-factor', default=0.1, type=float,
help='coverage penalty factor')
test.add_argument('--len-norm-const', default=5.0, type=float,
help='length normalization constant')
test.add_argument('--intra-epoch-eval', metavar='N', default=0, type=int,
help='evaluate within training epoch, this option will \
enable extra N equally spaced evaluations executed \
during each training epoch')
test.add_argument('--test-loader-workers', default=0, type=int,
help='number of workers for test data loading')
# checkpointing
chkpt = parser.add_argument_group('checkpointing setup')
chkpt.add_argument('--start-epoch', default=0, type=int,
help='manually set initial epoch counter')
chkpt.add_argument('--resume', default=None, type=str, metavar='PATH',
help='resumes training from checkpoint from PATH')
chkpt.add_argument('--save-all', action='store_true', default=False,
help='saves checkpoint after every epoch')
chkpt.add_argument('--save-freq', default=5000, type=int,
help='save checkpoint every SAVE_FREQ batches')
chkpt.add_argument('--keep-checkpoints', default=0, type=int,
help='keep only last KEEP_CHECKPOINTS checkpoints, \
affects only checkpoints controlled by --save-freq \
option')
# benchmarking
benchmark = parser.add_argument_group('benchmark setup')
benchmark.add_argument('--target-perf', default=None, type=float,
help='target training performance (in tokens \
per second)')
benchmark.add_argument('--target-bleu', default=None, type=float,
help='target accuracy')
# distributed
distributed = parser.add_argument_group('distributed setup')
distributed.add_argument('--local_rank', type=int,
default=os.getenv('LOCAL_RANK', 0),
help='Used for multi-process training.')
args = parser.parse_args()
args.lang = {'src': args.src_lang, 'tgt': args.tgt_lang}
args.vocab = os.path.join(args.dataset_dir, args.vocab)
args.bpe_codes = os.path.join(args.dataset_dir, args.bpe_codes)
args.train_src = os.path.join(args.dataset_dir, args.train_src)
args.train_tgt = os.path.join(args.dataset_dir, args.train_tgt)
args.val_src = os.path.join(args.dataset_dir, args.val_src)
args.val_tgt = os.path.join(args.dataset_dir, args.val_tgt)
args.test_src = os.path.join(args.dataset_dir, args.test_src)
args.test_tgt = os.path.join(args.dataset_dir, args.test_tgt)
args.warmup_steps = literal_eval(args.warmup_steps)
args.remain_steps = literal_eval(args.remain_steps)
args.decay_interval = literal_eval(args.decay_interval)
return args
def set_iter_size(train_iter_size, train_global_batch_size, train_batch_size):
"""
Automatically set train_iter_size based on train_global_batch_size,
    world_size and per-worker train_batch_size.
    :param train_iter_size: fallback value, returned unchanged when
        train_global_batch_size is not set
    :param train_global_batch_size: global training batch size
    :param train_batch_size: local training batch size
"""
if train_global_batch_size is not None:
global_bs = train_global_batch_size
bs = train_batch_size
world_size = utils.get_world_size()
assert global_bs % (bs * world_size) == 0
train_iter_size = global_bs // (bs * world_size)
        logging.info(f'Global batch size was set, '
                     f'setting train_iter_size to {train_iter_size}')
return train_iter_size
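# For example, train_global_batch_size=1024 with train_batch_size=128 on
# 4 workers yields train_iter_size = 1024 // (128 * 4) = 2 gradient
# accumulation steps.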
def build_criterion(vocab_size, padding_idx, smoothing):
if smoothing == 0.:
        logging.info('Building CrossEntropyLoss')
        # reduction='sum' replaces the deprecated size_average=False
        criterion = nn.CrossEntropyLoss(ignore_index=padding_idx,
                                        reduction='sum')
else:
logging.info(f'Building LabelSmoothingLoss (smoothing: {smoothing})')
criterion = LabelSmoothing(padding_idx, smoothing)
return criterion
def main():
"""
Launches data-parallel multi-gpu training.
"""
training_start = time.time()
args = parse_args()
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
device = utils.set_device(args.cuda, args.local_rank)
utils.init_distributed(args.cuda)
args.rank = utils.get_rank()
if not args.cudnn:
torch.backends.cudnn.enabled = False
# create directory for results
os.makedirs(args.save_dir, exist_ok=True)
# setup logging
log_filename = f'log_rank_{utils.get_rank()}.log'
utils.setup_logging(args.log_all_ranks,
os.path.join(args.save_dir, log_filename))
dllog_file = os.path.join(args.save_dir, args.dllog_file)
utils.setup_dllogger(enabled=True, filename=dllog_file)
if args.env:
utils.log_env_info()
logging.info(f'Saving results to: {args.save_dir}')
logging.info(f'Run arguments: {args}')
dllogger.log(step='PARAMETER', data=vars(args))
args.train_iter_size = set_iter_size(args.train_iter_size,
args.train_global_batch_size,
args.train_batch_size)
worker_seeds, shuffling_seeds = utils.setup_seeds(args.seed,
args.epochs,
device)
worker_seed = worker_seeds[args.rank]
logging.info(f'Worker {args.rank} is using worker seed: {worker_seed}')
torch.manual_seed(worker_seed)
# build tokenizer
pad_vocab = utils.pad_vocabulary(args.math)
tokenizer = Tokenizer(args.vocab, args.bpe_codes, args.lang, pad_vocab)
# build datasets
train_data = LazyParallelDataset(
src_fname=args.train_src,
tgt_fname=args.train_tgt,
tokenizer=tokenizer,
min_len=args.train_min_length,
max_len=args.train_max_length,
sort=False,
max_size=args.train_max_size,
)
val_data = ParallelDataset(
src_fname=args.val_src,
tgt_fname=args.val_tgt,
tokenizer=tokenizer,
min_len=args.val_min_length,
max_len=args.val_max_length,
sort=True,
)
test_data = TextDataset(
src_fname=args.test_src,
tokenizer=tokenizer,
min_len=args.test_min_length,
max_len=args.test_max_length,
sort=True,
)
vocab_size = tokenizer.vocab_size
# build GNMT model
model_config = {'hidden_size': args.hidden_size,
'vocab_size': vocab_size,
'num_layers': args.num_layers,
'dropout': args.dropout,
'batch_first': False,
'share_embedding': args.share_embedding,
}
model = GNMT(**model_config).to(device)
logging.info(model)
batch_first = model.batch_first
# define loss function (criterion) and optimizer
criterion = build_criterion(vocab_size, config.PAD,
args.smoothing).to(device)
opt_config = {'optimizer': args.optimizer, 'lr': args.lr}
opt_config.update(literal_eval(args.optimizer_extra))
logging.info(f'Training optimizer config: {opt_config}')
scheduler_config = {'warmup_steps': args.warmup_steps,
'remain_steps': args.remain_steps,
'decay_interval': args.decay_interval,
'decay_steps': args.decay_steps,
'decay_factor': args.decay_factor}
logging.info(f'Training LR schedule config: {scheduler_config}')
num_parameters = sum([l.nelement() for l in model.parameters()])
logging.info(f'Number of parameters: {num_parameters}')
batching_opt = {'shard_size': args.shard_size,
'num_buckets': args.num_buckets}
# get data loaders
train_loader = train_data.get_loader(batch_size=args.train_batch_size,
seeds=shuffling_seeds,
batch_first=batch_first,
shuffle=True,
batching=args.batching,
batching_opt=batching_opt,
num_workers=args.train_loader_workers)
val_loader = val_data.get_loader(batch_size=args.val_batch_size,
batch_first=batch_first,
shuffle=False,
num_workers=args.val_loader_workers)
test_loader = test_data.get_loader(batch_size=args.test_batch_size,
batch_first=batch_first,
shuffle=False,
pad=True,
num_workers=args.test_loader_workers)
translator = Translator(model=model,
tokenizer=tokenizer,
loader=test_loader,
beam_size=args.beam_size,
max_seq_len=args.test_max_length,
len_norm_factor=args.len_norm_factor,
len_norm_const=args.len_norm_const,
cov_penalty_factor=args.cov_penalty_factor,
print_freq=args.print_freq,
reference=args.test_tgt,
)
# create trainer
total_train_iters = len(train_loader) // args.train_iter_size * args.epochs
save_info = {
'model_config': model_config,
'config': args,
'tokenizer': tokenizer.get_state()
}
loss_scaling = {
'init_scale': args.init_scale,
'upscale_interval': args.upscale_interval
}
trainer_options = dict(
model=model,
criterion=criterion,
grad_clip=args.grad_clip,
iter_size=args.train_iter_size,
save_dir=args.save_dir,
save_freq=args.save_freq,
save_info=save_info,
opt_config=opt_config,
scheduler_config=scheduler_config,
train_iterations=total_train_iters,
keep_checkpoints=args.keep_checkpoints,
math=args.math,
loss_scaling=loss_scaling,
print_freq=args.print_freq,
intra_epoch_eval=args.intra_epoch_eval,
translator=translator,
prealloc_mode=args.prealloc_mode,
warmup=args.warmup,
)
trainer = trainers.Seq2SeqTrainer(**trainer_options)
# optionally resume from a checkpoint
if args.resume:
checkpoint_file = args.resume
if os.path.isdir(checkpoint_file):
checkpoint_file = os.path.join(checkpoint_file, 'model_best.pth')
if os.path.isfile(checkpoint_file):
trainer.load(checkpoint_file)
else:
logging.error(f'No checkpoint found at {args.resume}')
# training loop
train_loss = float('inf')
val_loss = float('inf')
best_loss = float('inf')
training_perf = []
break_training = False
test_bleu = None
for epoch in range(args.start_epoch, args.epochs):
logging.info(f'Starting epoch {epoch}')
train_loader.sampler.set_epoch(epoch)
trainer.epoch = epoch
train_loss, train_perf = trainer.optimize(train_loader)
training_perf.append(train_perf)
# evaluate on validation set
if args.eval:
logging.info(f'Running validation on dev set')
val_loss, val_perf = trainer.evaluate(val_loader)
# remember best prec@1 and save checkpoint
if args.rank == 0:
is_best = val_loss < best_loss
best_loss = min(val_loss, best_loss)
trainer.save(save_all=args.save_all, is_best=is_best)
if args.eval:
utils.barrier()
eval_fname = f'eval_epoch_{epoch}'
eval_path = os.path.join(args.save_dir, eval_fname)
_, eval_stats = translator.run(
calc_bleu=True,
epoch=epoch,
eval_path=eval_path,
)
test_bleu = eval_stats['bleu']
if args.target_bleu and test_bleu >= args.target_bleu:
logging.info(f'Target accuracy reached')
break_training = True
acc_log = []
acc_log += [f'Summary: Epoch: {epoch}']
acc_log += [f'Training Loss: {train_loss:.4f}']
if args.eval:
acc_log += [f'Validation Loss: {val_loss:.4f}']
acc_log += [f'Test BLEU: {test_bleu:.2f}']
perf_log = []
perf_log += [f'Performance: Epoch: {epoch}']
perf_log += [f'Training: {train_perf:.0f} Tok/s']
if args.eval:
perf_log += [f'Validation: {val_perf:.0f} Tok/s']
if args.rank == 0:
logging.info('\t'.join(acc_log))
logging.info('\t'.join(perf_log))
logging.info(f'Finished epoch {epoch}')
if break_training:
break
utils.barrier()
training_stop = time.time()
training_time = training_stop - training_start
logging.info(f'Total training time {training_time:.0f} s')
table = TrainingTable()
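    # average throughput is the harmonic mean of the per-epoch throughputs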
avg_training_perf = len(training_perf) / sum(1 / v for v in training_perf)
table.add(utils.get_world_size(), args.train_batch_size, test_bleu,
avg_training_perf, training_time)
if utils.get_rank() == 0:
table.write('Training Summary', args.math)
summary = {
'val_loss': val_loss,
'train_loss': train_loss,
'train_throughput': avg_training_perf,
'train_elapsed': training_time,
'test_bleu': test_bleu,
}
dllogger.log(step=tuple(), data=summary)
passed = utils.benchmark(test_bleu, args.target_bleu,
train_perf, args.target_perf)
if not passed:
sys.exit(1)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Translation/GNMT/train.py |
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
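# Illustrative sketch (editor's addition, hypothetical helper): decoding a
# made-up affinity mask the same way getCpuAffinity() does; a mask of 0b1011
# marks cores 0, 1 and 3 as eligible.
def _demo_decode_affinity_mask():
    masks = [0b1011]  # hypothetical single 64-bit word returned by NVML
    affinity_string = ''
    for j in masks:
        affinity_string = '{:064b}'.format(j) + affinity_string
    affinity_list = [int(x) for x in affinity_string]
    affinity_list.reverse()  # so core 0 is in the 0th element
    return [i for i, e in enumerate(affinity_list) if e != 0]  # [0, 1, 3]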
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
    devices = [device(i) for i in range(nproc_per_node)]
    socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
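# Illustrative sketch (editor's addition, hypothetical helper): how the
# 'interleaved' and 'continuous' modes split one socket's cores between two
# GPUs that share it.
def _demo_unique_affinity_modes():
    socket_affinity = list(range(8))  # hypothetical core list for one socket
    devices_per_group = 2
    cores_per_device = len(socket_affinity) // devices_per_group
    interleaved = [socket_affinity[g::devices_per_group]
                   for g in range(devices_per_group)]  # [[0,2,4,6], [1,3,5,7]]
    continuous = [socket_affinity[g * cores_per_device:(g + 1) * cores_per_device]
                  for g in range(devices_per_group)]   # [[0,1,2,3], [4,5,6,7]]
    return interleaved, continuous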
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/gpu_affinity.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging.config
import os
import random
import sys
import time
from contextlib import contextmanager
import dllogger
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.init as init
import torch.utils.collect_env
def init_lstm_(lstm, init_weight=0.1):
"""
Initializes weights of LSTM layer.
Weights and biases are initialized with uniform(-init_weight, init_weight)
distribution.
:param lstm: instance of torch.nn.LSTM
:param init_weight: range for the uniform initializer
"""
# Initialize hidden-hidden weights
init.uniform_(lstm.weight_hh_l0.data, -init_weight, init_weight)
# Initialize input-hidden weights:
init.uniform_(lstm.weight_ih_l0.data, -init_weight, init_weight)
# Initialize bias. PyTorch LSTM has two biases, one for input-hidden GEMM
# and the other for hidden-hidden GEMM. Here input-hidden bias is
# initialized with uniform distribution and hidden-hidden bias is
# initialized with zeros.
init.uniform_(lstm.bias_ih_l0.data, -init_weight, init_weight)
init.zeros_(lstm.bias_hh_l0.data)
if lstm.bidirectional:
init.uniform_(lstm.weight_hh_l0_reverse.data, -init_weight, init_weight)
init.uniform_(lstm.weight_ih_l0_reverse.data, -init_weight, init_weight)
init.uniform_(lstm.bias_ih_l0_reverse.data, -init_weight, init_weight)
init.zeros_(lstm.bias_hh_l0_reverse.data)
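# Illustrative sketch (editor's addition, hypothetical helper): after
# init_lstm_() the weights lie in [-init_weight, init_weight] and the
# hidden-hidden biases are zeroed.
def _demo_init_lstm():
    lstm = torch.nn.LSTM(4, 4, bidirectional=True)
    init_lstm_(lstm, init_weight=0.1)
    assert float(lstm.weight_ih_l0.abs().max()) <= 0.1
    assert float(lstm.bias_hh_l0.abs().sum()) == 0.0
    return lstm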
def generate_seeds(rng, size):
"""
Generate list of random seeds
:param rng: random number generator
:param size: length of the returned list
"""
seeds = [rng.randint(0, 2**32 - 1) for _ in range(size)]
return seeds
def broadcast_seeds(seeds, device):
"""
Broadcasts random seeds to all distributed workers.
Returns list of random seeds (broadcasted from workers with rank 0).
:param seeds: list of seeds (integers)
:param device: torch.device
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
seeds_tensor = torch.tensor(seeds, dtype=torch.int64, device=device)
torch.distributed.broadcast(seeds_tensor, 0)
seeds = seeds_tensor.tolist()
return seeds
def setup_seeds(master_seed, epochs, device):
"""
Generates seeds from one master_seed.
    Returns (worker_seeds, shuffling_seeds): worker_seeds are later used to
    initialize per-worker random number generators (mostly for dropouts),
    shuffling_seeds seed the RNGs responsible for reshuffling the dataset
    before each epoch.
Seeds are generated on worker with rank 0 and broadcasted to all other
workers.
:param master_seed: master RNG seed used to initialize other generators
:param epochs: number of epochs
:param device: torch.device (used for distributed.broadcast)
"""
if master_seed is None:
# random master seed, random.SystemRandom() uses /dev/urandom on Unix
master_seed = random.SystemRandom().randint(0, 2**32 - 1)
if get_rank() == 0:
# master seed is reported only from rank=0 worker, it's to avoid
# confusion, seeds from rank=0 are later broadcasted to other
# workers
logging.info(f'Using random master seed: {master_seed}')
else:
# master seed was specified from command line
logging.info(f'Using master seed from command line: {master_seed}')
# initialize seeding RNG
seeding_rng = random.Random(master_seed)
# generate worker seeds, one seed for every distributed worker
worker_seeds = generate_seeds(seeding_rng, get_world_size())
# generate seeds for data shuffling, one seed for every epoch
shuffling_seeds = generate_seeds(seeding_rng, epochs)
# broadcast seeds from rank=0 to other workers
worker_seeds = broadcast_seeds(worker_seeds, device)
shuffling_seeds = broadcast_seeds(shuffling_seeds, device)
return worker_seeds, shuffling_seeds
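# Illustrative sketch (editor's addition, hypothetical helper): with a fixed
# master seed and no distributed backend initialized, setup_seeds() is fully
# deterministic, so repeated runs reproduce the same seeds.
def _demo_setup_seeds():
    device = torch.device('cpu')  # broadcast is a no-op without distributed
    worker_seeds, shuffling_seeds = setup_seeds(master_seed=42, epochs=3,
                                                device=device)
    assert len(shuffling_seeds) == 3  # one shuffling seed per epoch
    return worker_seeds, shuffling_seeds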
def barrier():
"""
    Calls torch.distributed.barrier() if distributed is in use, else calls
    torch.cuda.synchronize() if CUDA is initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
elif torch.cuda.is_available() and torch.cuda.is_initialized():
torch.cuda.synchronize()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
@contextmanager
def timer(name, ndigits=2, sync_gpu=True):
if sync_gpu:
torch.cuda.synchronize()
start = time.time()
yield
if sync_gpu:
torch.cuda.synchronize()
stop = time.time()
elapsed = round(stop - start, ndigits)
logging.info(f'TIMER {name} {elapsed}')
def setup_logging(log_all_ranks=True, log_file=os.devnull):
"""
Configures logging.
By default logs from all workers are printed to the console, entries are
prefixed with "N: " where N is the rank of the worker. Logs printed to the
    console don't include timestamps.
Full logs with timestamps are saved to the log_file file.
"""
class RankFilter(logging.Filter):
def __init__(self, rank, log_all_ranks):
self.rank = rank
self.log_all_ranks = log_all_ranks
def filter(self, record):
record.rank = self.rank
if self.log_all_ranks:
return True
else:
return (self.rank == 0)
rank = get_rank()
rank_filter = RankFilter(rank, log_all_ranks)
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
handler.close()
logging_format = "%(asctime)s - %(levelname)s - %(rank)s - %(message)s"
logging.basicConfig(level=logging.DEBUG,
format=logging_format,
datefmt="%Y-%m-%d %H:%M:%S",
filename=log_file,
filemode='w')
console = logging.StreamHandler(sys.stdout)
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(rank)s: %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logging.getLogger('').addFilter(rank_filter)
def setup_dllogger(enabled=True, filename=os.devnull):
rank = get_rank()
if enabled and rank == 0:
backends = [
dllogger.JSONStreamBackend(
dllogger.Verbosity.VERBOSE,
filename,
),
]
dllogger.init(backends)
else:
dllogger.init([])
dllogger.metadata("test_bleu", {"unit": None})
dllogger.metadata("eval_90%_latency", {"unit": "ms"})
dllogger.metadata("eval_avg_latency", {"unit": "ms"})
dllogger.metadata("train_elapsed", {"unit": "s"})
dllogger.metadata("eval_throughput", {"unit": "tokens/s"})
dllogger.metadata("train_throughput", {"unit": "tokens/s"})
def set_device(cuda, local_rank):
"""
Sets device based on local_rank and returns instance of torch.device.
:param cuda: if True: use cuda
:param local_rank: local rank of the worker
"""
if cuda:
torch.cuda.set_device(local_rank)
device = torch.device('cuda')
else:
device = torch.device('cpu')
return device
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
dist.init_process_group(backend=backend,
init_method='env://')
assert dist.is_initialized()
return distributed
def log_env_info():
"""
Prints information about execution environment.
"""
logging.info('Collecting environment information...')
env_info = torch.utils.collect_env.get_pretty_env_info()
logging.info(f'{env_info}')
def pad_vocabulary(math):
    if math == 'tf32' or math == 'fp16' or math == 'manual_fp16':
        pad_vocab = 8
    elif math == 'fp32':
        pad_vocab = 1
    else:
        raise ValueError(f'Unknown math mode: {math}')
    return pad_vocab
def benchmark(test_acc, target_acc, test_perf, target_perf):
def test(achieved, target, name):
passed = True
if target is not None and achieved is not None:
logging.info(f'{name} achieved: {achieved:.2f} '
f'target: {target:.2f}')
if achieved >= target:
logging.info(f'{name} test passed')
else:
logging.info(f'{name} test failed')
passed = False
return passed
passed = True
passed &= test(test_acc, target_acc, 'Accuracy')
passed &= test(test_perf, target_perf, 'Performance')
return passed
def debug_tensor(tensor, name):
"""
Simple utility which helps with debugging.
Takes a tensor and outputs: min, max, avg, std, number of NaNs, number of
INFs.
:param tensor: torch tensor
:param name: name of the tensor (only for logging)
"""
logging.info(name)
tensor = tensor.detach().float().cpu().numpy()
logging.info(f'MIN: {tensor.min()} MAX: {tensor.max()} '
f'AVG: {tensor.mean()} STD: {tensor.std()} '
f'NAN: {np.isnan(tensor).sum()} INF: {np.isinf(tensor).sum()}')
class AverageMeter:
"""
Computes and stores the average and current value
"""
def __init__(self, warmup=0, keep=False):
self.reset()
self.warmup = warmup
self.keep = keep
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
self.iters = 0
self.vals = []
def update(self, val, n=1):
self.iters += 1
self.val = val
if self.iters > self.warmup:
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if self.keep:
self.vals.append(val)
def reduce(self, op):
"""
Reduces average value over all workers.
:param op: 'sum' or 'mean', reduction operator
"""
if op not in ('sum', 'mean'):
raise NotImplementedError
distributed = (get_world_size() > 1)
if distributed:
backend = dist.get_backend()
cuda = (backend == dist.Backend.NCCL)
if cuda:
avg = torch.cuda.FloatTensor([self.avg])
_sum = torch.cuda.FloatTensor([self.sum])
else:
avg = torch.FloatTensor([self.avg])
_sum = torch.FloatTensor([self.sum])
dist.all_reduce(avg)
dist.all_reduce(_sum)
self.avg = avg.item()
self.sum = _sum.item()
if op == 'mean':
self.avg /= get_world_size()
self.sum /= get_world_size()
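# Illustrative sketch (editor's addition, hypothetical helper): the first
# `warmup` updates set `val` but are excluded from the running average.
def _demo_average_meter():
    meter = AverageMeter(warmup=1)
    meter.update(100)  # warmup iteration, ignored by the average
    meter.update(2)
    meter.update(4)
    assert meter.avg == 3.0  # (2 + 4) / 2; the first value was skipped
    return meter.avg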
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/utils.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import itertools
import torch
import torch.nn as nn
import seq2seq.data.config as config
from seq2seq.models.attention import BahdanauAttention
from seq2seq.utils import init_lstm_
class RecurrentAttention(nn.Module):
"""
LSTM wrapped with an attention module.
"""
def __init__(self, input_size=1024, context_size=1024, hidden_size=1024,
num_layers=1, batch_first=False, dropout=0.2,
init_weight=0.1):
"""
Constructor for the RecurrentAttention.
:param input_size: number of features in input tensor
:param context_size: number of features in output from encoder
:param hidden_size: internal hidden size
:param num_layers: number of layers in LSTM
:param batch_first: if True the model uses (batch,seq,feature) tensors,
if false the model uses (seq, batch, feature)
:param dropout: probability of dropout (on input to LSTM layer)
:param init_weight: range for the uniform initializer
"""
super(RecurrentAttention, self).__init__()
self.rnn = nn.LSTM(input_size, hidden_size, num_layers, bias=True,
batch_first=batch_first)
init_lstm_(self.rnn, init_weight)
self.attn = BahdanauAttention(hidden_size, context_size, context_size,
normalize=True, batch_first=batch_first)
self.dropout = nn.Dropout(dropout)
def forward(self, inputs, hidden, context, context_len):
"""
Execute RecurrentAttention.
:param inputs: tensor with inputs
:param hidden: hidden state for LSTM layer
:param context: context tensor from encoder
:param context_len: vector of encoder sequence lengths
:returns (rnn_outputs, hidden, attn_output, attn_scores)
"""
        # set attention mask; sequences have different lengths, and this
        # mask lets the attention softmax include only valid elements of
        # the context
self.attn.set_mask(context_len, context)
inputs = self.dropout(inputs)
rnn_outputs, hidden = self.rnn(inputs, hidden)
attn_outputs, scores = self.attn(rnn_outputs, context)
return rnn_outputs, hidden, attn_outputs, scores
class Classifier(nn.Module):
"""
Fully-connected classifier
"""
def __init__(self, in_features, out_features, init_weight=0.1):
"""
Constructor for the Classifier.
:param in_features: number of input features
:param out_features: number of output features (size of vocabulary)
:param init_weight: range for the uniform initializer
"""
super(Classifier, self).__init__()
self.classifier = nn.Linear(in_features, out_features)
nn.init.uniform_(self.classifier.weight.data, -init_weight, init_weight)
nn.init.uniform_(self.classifier.bias.data, -init_weight, init_weight)
def forward(self, x):
"""
Execute the classifier.
:param x: output from decoder
"""
out = self.classifier(x)
return out
class ResidualRecurrentDecoder(nn.Module):
"""
Decoder with Embedding, LSTM layers, attention, residual connections and
    optional dropout.
    The attention implemented in this module differs from the attention
discussed in the GNMT arxiv paper. In this model the output from the first
LSTM layer of the decoder goes into the attention module, then the
re-weighted context is concatenated with inputs to all subsequent LSTM
layers in the decoder at the current timestep.
Residual connections are enabled after 3rd LSTM layer, dropout is applied
on inputs to LSTM layers.
"""
def __init__(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2,
batch_first=False, embedder=None, init_weight=0.1):
"""
Constructor of the ResidualRecurrentDecoder.
:param vocab_size: size of vocabulary
        :param hidden_size: hidden size for LSTM layers
:param num_layers: number of LSTM layers
:param dropout: probability of dropout (on input to LSTM layers)
:param batch_first: if True the model uses (batch,seq,feature) tensors,
if false the model uses (seq, batch, feature)
:param embedder: instance of nn.Embedding, if None constructor will
create new embedding layer
:param init_weight: range for the uniform initializer
"""
super(ResidualRecurrentDecoder, self).__init__()
self.num_layers = num_layers
self.att_rnn = RecurrentAttention(hidden_size, hidden_size,
hidden_size, num_layers=1,
batch_first=batch_first,
dropout=dropout)
self.rnn_layers = nn.ModuleList()
for _ in range(num_layers - 1):
self.rnn_layers.append(
nn.LSTM(2 * hidden_size, hidden_size, num_layers=1, bias=True,
batch_first=batch_first))
for lstm in self.rnn_layers:
init_lstm_(lstm, init_weight)
if embedder is not None:
self.embedder = embedder
else:
self.embedder = nn.Embedding(vocab_size, hidden_size,
padding_idx=config.PAD)
nn.init.uniform_(self.embedder.weight.data, -init_weight,
init_weight)
self.classifier = Classifier(hidden_size, vocab_size)
self.dropout = nn.Dropout(p=dropout)
def init_hidden(self, hidden):
"""
Converts flattened hidden state (from sequence generator) into a tuple
of hidden states.
:param hidden: None or flattened hidden state for decoder RNN layers
"""
if hidden is not None:
# per-layer chunks
hidden = hidden.chunk(self.num_layers)
# (h, c) chunks for LSTM layer
hidden = tuple(i.chunk(2) for i in hidden)
else:
hidden = [None] * self.num_layers
self.next_hidden = []
return hidden
def append_hidden(self, h):
"""
Appends the hidden vector h to the list of internal hidden states.
:param h: hidden vector
"""
if self.inference:
self.next_hidden.append(h)
def package_hidden(self):
"""
Flattens the hidden state from all LSTM layers into one tensor (for
the sequence generator).
"""
if self.inference:
hidden = torch.cat(tuple(itertools.chain(*self.next_hidden)))
else:
hidden = None
return hidden
def forward(self, inputs, context, inference=False):
"""
Execute the decoder.
:param inputs: tensor with inputs to the decoder
:param context: state of encoder, encoder sequence lengths and hidden
state of decoder's LSTM layers
:param inference: if True stores and repackages hidden state
"""
self.inference = inference
enc_context, enc_len, hidden = context
hidden = self.init_hidden(hidden)
x = self.embedder(inputs)
x, h, attn, scores = self.att_rnn(x, hidden[0], enc_context, enc_len)
self.append_hidden(h)
x = torch.cat((x, attn), dim=2)
x = self.dropout(x)
x, h = self.rnn_layers[0](x, hidden[1])
self.append_hidden(h)
for i in range(1, len(self.rnn_layers)):
residual = x
x = torch.cat((x, attn), dim=2)
x = self.dropout(x)
x, h = self.rnn_layers[i](x, hidden[i + 1])
self.append_hidden(h)
x = x + residual
x = self.classifier(x)
hidden = self.package_hidden()
return x, scores, [enc_context, enc_len, hidden]
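# Illustrative sketch (editor's addition, hypothetical helper): how
# init_hidden() unpacks the flattened hidden state that package_hidden()
# builds for the sequence generator.
def _demo_hidden_packing(num_layers=4, batch=2, hidden_size=8):
    # per layer, an LSTM hidden state is a pair of (1, batch, hidden) tensors;
    # the flattened form concatenates all of them along dim 0
    flat = torch.zeros(2 * num_layers, batch, hidden_size)
    per_layer = flat.chunk(num_layers)                # one chunk per layer
    per_layer = tuple(i.chunk(2) for i in per_layer)  # (h, c) per layer
    h0, c0 = per_layer[0]
    assert h0.shape == (1, batch, hidden_size)
    return per_layer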
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/models/decoder.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
import seq2seq.data.config as config
from seq2seq.models.decoder import ResidualRecurrentDecoder
from seq2seq.models.encoder import ResidualRecurrentEncoder
from seq2seq.models.seq2seq_base import Seq2Seq
class GNMT(Seq2Seq):
"""
GNMT v2 model
"""
def __init__(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2,
batch_first=False, share_embedding=True):
"""
Constructor for the GNMT v2 model.
:param vocab_size: size of vocabulary (number of tokens)
:param hidden_size: internal hidden size of the model
:param num_layers: number of layers, applies to both encoder and
decoder
:param dropout: probability of dropout (in encoder and decoder)
:param batch_first: if True the model uses (batch,seq,feature) tensors,
if false the model uses (seq, batch, feature)
:param share_embedding: if True embeddings are shared between encoder
and decoder
"""
super(GNMT, self).__init__(batch_first=batch_first)
if share_embedding:
embedder = nn.Embedding(vocab_size, hidden_size,
padding_idx=config.PAD)
nn.init.uniform_(embedder.weight.data, -0.1, 0.1)
else:
embedder = None
self.encoder = ResidualRecurrentEncoder(vocab_size, hidden_size,
num_layers, dropout,
batch_first, embedder)
self.decoder = ResidualRecurrentDecoder(vocab_size, hidden_size,
num_layers, dropout,
batch_first, embedder)
def forward(self, input_encoder, input_enc_len, input_decoder):
context = self.encode(input_encoder, input_enc_len)
context = (context, input_enc_len, None)
output, _, _ = self.decode(input_decoder, context)
return output
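# Illustrative sketch (editor's addition, hypothetical helper): a tiny
# forward pass with made-up shapes; tensors are time-major because
# batch_first=False by default.
def _demo_gnmt_forward():
    import torch
    model = GNMT(vocab_size=32, hidden_size=16, num_layers=4)
    src = torch.randint(1, 32, (7, 2))  # (seq_len, batch)
    src_len = torch.tensor([7, 5])      # decreasing, as required for packing
    tgt = torch.randint(1, 32, (6, 2))
    logits = model(src, src_len, tgt)
    assert logits.shape == (6, 2, 32)   # (seq_len, batch, vocab)
    return logits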
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/models/gnmt.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class BahdanauAttention(nn.Module):
"""
Bahdanau Attention (https://arxiv.org/abs/1409.0473)
Implementation is very similar to tf.contrib.seq2seq.BahdanauAttention
"""
def __init__(self, query_size, key_size, num_units, normalize=False,
batch_first=False, init_weight=0.1):
"""
Constructor for the BahdanauAttention.
:param query_size: feature dimension for query
:param key_size: feature dimension for keys
:param num_units: internal feature dimension
:param normalize: whether to normalize energy term
:param batch_first: if True batch size is the 1st dimension, if False
the sequence is first and batch size is second
:param init_weight: range for uniform initializer used to initialize
Linear key and query transform layers and linear_att vector
"""
super(BahdanauAttention, self).__init__()
self.normalize = normalize
self.batch_first = batch_first
self.num_units = num_units
self.linear_q = nn.Linear(query_size, num_units, bias=False)
self.linear_k = nn.Linear(key_size, num_units, bias=False)
nn.init.uniform_(self.linear_q.weight.data, -init_weight, init_weight)
nn.init.uniform_(self.linear_k.weight.data, -init_weight, init_weight)
self.linear_att = Parameter(torch.Tensor(num_units))
self.mask = None
if self.normalize:
self.normalize_scalar = Parameter(torch.Tensor(1))
self.normalize_bias = Parameter(torch.Tensor(num_units))
else:
self.register_parameter('normalize_scalar', None)
self.register_parameter('normalize_bias', None)
self.reset_parameters(init_weight)
def reset_parameters(self, init_weight):
"""
Sets initial random values for trainable parameters.
"""
stdv = 1. / math.sqrt(self.num_units)
self.linear_att.data.uniform_(-init_weight, init_weight)
if self.normalize:
self.normalize_scalar.data.fill_(stdv)
self.normalize_bias.data.zero_()
def set_mask(self, context_len, context):
"""
        Sets self.mask, which is applied before the softmax:
        ones for inactive (padded) context fields, zeros for active fields.
        :param context_len: lengths of the context sequences, shape (b,)
:param context: if batch_first: (b x t_k x n) else: (t_k x b x n)
self.mask: (b x t_k)
"""
if self.batch_first:
max_len = context.size(1)
else:
max_len = context.size(0)
indices = torch.arange(0, max_len, dtype=torch.int64,
device=context.device)
self.mask = indices >= (context_len.unsqueeze(1))
def calc_score(self, att_query, att_keys):
"""
Calculate Bahdanau score
:param att_query: b x t_q x n
:param att_keys: b x t_k x n
returns: b x t_q x t_k scores
"""
b, t_k, n = att_keys.size()
t_q = att_query.size(1)
att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n)
att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n)
sum_qk = att_query + att_keys
if self.normalize:
sum_qk = sum_qk + self.normalize_bias
linear_att = self.linear_att / self.linear_att.norm()
linear_att = linear_att * self.normalize_scalar
else:
linear_att = self.linear_att
out = torch.tanh(sum_qk).matmul(linear_att)
return out
def forward(self, query, keys):
"""
:param query: if batch_first: (b x t_q x n) else: (t_q x b x n)
:param keys: if batch_first: (b x t_k x n) else (t_k x b x n)
:returns: (context, scores_normalized)
context: if batch_first: (b x t_q x n) else (t_q x b x n)
scores_normalized: if batch_first (b x t_q x t_k) else (t_q x b x t_k)
"""
# first dim of keys and query has to be 'batch', it's needed for bmm
if not self.batch_first:
keys = keys.transpose(0, 1)
if query.dim() == 3:
query = query.transpose(0, 1)
if query.dim() == 2:
single_query = True
query = query.unsqueeze(1)
else:
single_query = False
b = query.size(0)
t_k = keys.size(1)
t_q = query.size(1)
# FC layers to transform query and key
processed_query = self.linear_q(query)
processed_key = self.linear_k(keys)
# scores: (b x t_q x t_k)
scores = self.calc_score(processed_query, processed_key)
if self.mask is not None:
mask = self.mask.unsqueeze(1).expand(b, t_q, t_k)
            # -inf would trip PyTorch's overflow check in fp16, so use the
            # most negative value representable in fp16 instead
            scores.masked_fill_(mask, -65504.0)
# Normalize the scores, softmax over t_k
scores_normalized = F.softmax(scores, dim=-1)
# Calculate the weighted average of the attention inputs according to
# the scores
# context: (b x t_q x n)
context = torch.bmm(scores_normalized, keys)
if single_query:
context = context.squeeze(1)
scores_normalized = scores_normalized.squeeze(1)
elif not self.batch_first:
context = context.transpose(0, 1)
scores_normalized = scores_normalized.transpose(0, 1)
return context, scores_normalized
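# Illustrative sketch (editor's addition, hypothetical helper): masked
# attention over a batch of two sequences of lengths 5 and 3; padded key
# positions receive (numerically) zero weight after the softmax.
def _demo_bahdanau_attention():
    attn = BahdanauAttention(query_size=8, key_size=8, num_units=8,
                             normalize=True, batch_first=True)
    keys = torch.randn(2, 5, 8)    # (batch, t_k, n)
    query = torch.randn(2, 1, 8)   # a single decoding step
    attn.set_mask(torch.tensor([5, 3]), keys)
    context, scores = attn(query, keys)
    assert context.shape == (2, 1, 8) and scores.shape == (2, 1, 5)
    assert scores[1, 0, 3:].sum().item() < 1e-6  # masked positions
    return context, scores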
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/models/attention.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.nn.utils.rnn import pad_packed_sequence
import seq2seq.data.config as config
from seq2seq.utils import init_lstm_
class ResidualRecurrentEncoder(nn.Module):
"""
Encoder with Embedding, LSTM layers, residual connections and optional
dropout.
The first LSTM layer is bidirectional and uses variable sequence length
API, the remaining (num_layers-1) layers are unidirectional. Residual
connections are enabled after third LSTM layer, dropout is applied on
inputs to LSTM layers.
"""
def __init__(self, vocab_size, hidden_size=1024, num_layers=4, dropout=0.2,
batch_first=False, embedder=None, init_weight=0.1):
"""
Constructor for the ResidualRecurrentEncoder.
:param vocab_size: size of vocabulary
:param hidden_size: hidden size for LSTM layers
:param num_layers: number of LSTM layers, 1st layer is bidirectional
:param dropout: probability of dropout (on input to LSTM layers)
:param batch_first: if True the model uses (batch,seq,feature) tensors,
if false the model uses (seq, batch, feature)
:param embedder: instance of nn.Embedding, if None constructor will
create new embedding layer
:param init_weight: range for the uniform initializer
"""
super(ResidualRecurrentEncoder, self).__init__()
self.batch_first = batch_first
self.rnn_layers = nn.ModuleList()
# 1st LSTM layer, bidirectional
self.rnn_layers.append(
nn.LSTM(hidden_size, hidden_size, num_layers=1, bias=True,
batch_first=batch_first, bidirectional=True))
# 2nd LSTM layer, with 2x larger input_size
self.rnn_layers.append(
nn.LSTM((2 * hidden_size), hidden_size, num_layers=1, bias=True,
batch_first=batch_first))
# Remaining LSTM layers
for _ in range(num_layers - 2):
self.rnn_layers.append(
nn.LSTM(hidden_size, hidden_size, num_layers=1, bias=True,
batch_first=batch_first))
for lstm in self.rnn_layers:
init_lstm_(lstm, init_weight)
self.dropout = nn.Dropout(p=dropout)
if embedder is not None:
self.embedder = embedder
else:
self.embedder = nn.Embedding(vocab_size, hidden_size,
padding_idx=config.PAD)
nn.init.uniform_(self.embedder.weight.data, -init_weight,
init_weight)
def forward(self, inputs, lengths):
"""
Execute the encoder.
:param inputs: tensor with indices from the vocabulary
:param lengths: vector with sequence lengths (excluding padding)
returns: tensor with encoded sequences
"""
x = self.embedder(inputs)
# bidirectional layer
x = self.dropout(x)
x = pack_padded_sequence(x, lengths.cpu().numpy(),
batch_first=self.batch_first)
x, _ = self.rnn_layers[0](x)
x, _ = pad_packed_sequence(x, batch_first=self.batch_first)
# 1st unidirectional layer
x = self.dropout(x)
x, _ = self.rnn_layers[1](x)
# the rest of unidirectional layers,
# with residual connections starting from 3rd layer
for i in range(2, len(self.rnn_layers)):
residual = x
x = self.dropout(x)
x, _ = self.rnn_layers[i](x)
x = x + residual
return x
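# Illustrative sketch (editor's addition, hypothetical helper): encoding a
# made-up time-major batch; lengths must be sorted in decreasing order for
# pack_padded_sequence.
def _demo_encoder_forward():
    import torch
    enc = ResidualRecurrentEncoder(vocab_size=32, hidden_size=16)
    inputs = torch.randint(1, 32, (7, 2))  # (seq_len, batch)
    lengths = torch.tensor([7, 4])
    out = enc(inputs, lengths)
    assert out.shape == (7, 2, 16)         # (seq_len, batch, hidden)
    return out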
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/models/encoder.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
from torch.nn.functional import log_softmax
class Seq2Seq(nn.Module):
"""
Generic Seq2Seq module, with an encoder and a decoder.
"""
def __init__(self, encoder=None, decoder=None, batch_first=False):
"""
Constructor for the Seq2Seq module.
:param encoder: encoder module
:param decoder: decoder module
:param batch_first: if True the model uses (batch, seq, feature)
tensors, if false the model uses (seq, batch, feature) tensors
"""
super(Seq2Seq, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.batch_first = batch_first
def encode(self, inputs, lengths):
"""
Applies the encoder to inputs with a given input sequence lengths.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param lengths: vector with sequence lengths (excluding padding)
"""
return self.encoder(inputs, lengths)
def decode(self, inputs, context, inference=False):
"""
Applies the decoder to inputs, given the context from the encoder.
:param inputs: tensor with inputs (batch, seq_len) if 'batch_first'
else (seq_len, batch)
:param context: context from the encoder
:param inference: if True inference mode, if False training mode
"""
return self.decoder(inputs, context, inference)
def generate(self, inputs, context, beam_size):
"""
Autoregressive generator, works with SequenceGenerator class.
Executes decoder (in inference mode), applies log_softmax and topK for
inference with beam search decoding.
:param inputs: tensor with inputs to the decoder
:param context: context from the encoder
:param beam_size: beam size for the generator
returns: (words, logprobs, scores, new_context)
words: indices of topK tokens
logprobs: log probabilities of topK tokens
scores: scores from the attention module (for coverage penalty)
new_context: new decoder context, includes new hidden states for
decoder RNN cells
"""
logits, scores, new_context = self.decode(inputs, context, True)
logprobs = log_softmax(logits, dim=-1)
logprobs, words = logprobs.topk(beam_size, dim=-1)
return words, logprobs, scores, new_context
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/models/seq2seq_base.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import math
import torch
def perhaps_convert_float(param, total):
if isinstance(param, float):
param = int(param * total)
return param
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
"""
Learning rate scheduler with exponential warmup and step decay.
"""
def __init__(self, optimizer, iterations, warmup_steps=0,
remain_steps=1.0, decay_interval=None, decay_steps=4,
decay_factor=0.5, last_epoch=-1):
"""
Constructor of WarmupMultiStepLR.
        Parameters warmup_steps, remain_steps and decay_interval accept both
        integers and floats as input. An integer is interpreted as an
        absolute iteration index, a float as a fraction of total training
        iterations (epochs * steps_per_epoch).
        If decay_interval is None, the decay happens at regularly spaced
        intervals ('decay_steps' decays between iteration indices
        'remain_steps' and 'iterations').
:param optimizer: instance of optimizer
:param iterations: total number of training iterations
:param warmup_steps: number of warmup iterations
:param remain_steps: start decay at 'remain_steps' iteration
:param decay_interval: interval between LR decay steps
:param decay_steps: max number of decay steps
:param decay_factor: decay factor
:param last_epoch: the index of last iteration
"""
# iterations before learning rate reaches base LR
self.warmup_steps = perhaps_convert_float(warmup_steps, iterations)
logging.info(f'Scheduler warmup steps: {self.warmup_steps}')
# iteration at which decay starts
self.remain_steps = perhaps_convert_float(remain_steps, iterations)
logging.info(f'Scheduler remain steps: {self.remain_steps}')
# number of steps between each decay
if decay_interval is None:
            # decay at regularly spaced intervals
            decay_iterations = iterations - self.remain_steps
            self.decay_interval = decay_iterations // decay_steps
self.decay_interval = max(self.decay_interval, 1)
else:
self.decay_interval = perhaps_convert_float(decay_interval,
iterations)
logging.info(f'Scheduler decay interval: {self.decay_interval}')
# multiplicative decay factor
self.decay_factor = decay_factor
logging.info(f'Scheduler decay factor: {self.decay_factor}')
# max number of decay steps
self.decay_steps = decay_steps
logging.info(f'Scheduler max decay steps: {self.decay_steps}')
if self.warmup_steps > self.remain_steps:
            logging.warning(f'warmup_steps should not be larger than '
f'remain_steps, setting warmup_steps=remain_steps')
self.warmup_steps = self.remain_steps
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
if self.last_epoch <= self.warmup_steps:
# exponential lr warmup
if self.warmup_steps != 0:
warmup_factor = math.exp(math.log(0.01) / self.warmup_steps)
else:
warmup_factor = 1.0
inv_decay = warmup_factor ** (self.warmup_steps - self.last_epoch)
lr = [base_lr * inv_decay for base_lr in self.base_lrs]
elif self.last_epoch >= self.remain_steps:
# step decay
decay_iter = self.last_epoch - self.remain_steps
num_decay_steps = decay_iter // self.decay_interval + 1
num_decay_steps = min(num_decay_steps, self.decay_steps)
lr = [
base_lr * (self.decay_factor ** num_decay_steps)
for base_lr in self.base_lrs
]
else:
# base lr
lr = [base_lr for base_lr in self.base_lrs]
return lr
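# Illustrative sketch (editor's addition, hypothetical helper): the schedule
# warms up exponentially to the base LR, holds it until `remain_steps`, then
# decays step-wise by `decay_factor` at most `decay_steps` times.
def _demo_lr_schedule():
    model = torch.nn.Linear(2, 2)  # hypothetical tiny model
    opt = torch.optim.SGD(model.parameters(), lr=1.0)
    sched = WarmupMultiStepLR(opt, iterations=100, warmup_steps=10,
                              remain_steps=0.5, decay_steps=4)
    lrs = []
    for _ in range(100):
        lrs.append(opt.param_groups[0]['lr'])
        opt.step()
        sched.step()
    return lrs  # rises to 1.0, flat until step 50, then halves four times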
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/train/lr_scheduler.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
import torch.nn as nn
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, padding_idx, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param padding_idx: index of the PAD token
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1,
dtype=torch.float32)
non_pad_mask = (target != self.padding_idx)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)[non_pad_mask]
smooth_loss = -logprobs.mean(dim=-1)[non_pad_mask]
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.sum()
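# Illustrative sketch (editor's addition, hypothetical helper): the loss is a
# scalar summed over non-padding tokens, so logits at padded positions
# (target == padding_idx) cannot affect it.
def _demo_label_smoothing():
    torch.manual_seed(0)
    criterion = LabelSmoothing(padding_idx=0, smoothing=0.1)
    logits = torch.randn(5, 8)              # (tokens, vocab)
    target = torch.tensor([3, 1, 0, 2, 5])  # the 0 marks a padding token
    loss = criterion(logits, target)
    logits[2] += 100.0                      # perturb only the padded row
    assert torch.isclose(loss, criterion(logits, target))
    return loss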
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/train/smoothing.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pytablewriter import MarkdownTableWriter
class TrainingTable:
def __init__(self, acc_unit='BLEU', time_unit='min', perf_unit='tok/s'):
self.data = []
self.acc_unit = acc_unit
self.time_unit = time_unit
self.perf_unit = perf_unit
self.time_unit_convert = {'s': 1, 'min': 1/60, 'h': 1/3600}
def add(self, gpus, batch_size, accuracy, perf, time_to_train):
time_to_train *= self.time_unit_convert[self.time_unit]
if not accuracy:
accuracy = 0.0
accuracy = round(accuracy, 2)
self.data.append([gpus, batch_size, accuracy, perf, time_to_train])
def write(self, title, math):
writer = MarkdownTableWriter()
        writer.table_name = title
        header = ['**GPUs**',
                  '**Batch Size / GPU**',
                  f'**Accuracy - {math.upper()} ({self.acc_unit})**',
                  f'**Throughput - {math.upper()} ({self.perf_unit})**',
                  f'**Time to Train - {math.upper()} ({self.time_unit})**',
                  ]
writer.headers = header
writer.value_matrix = self.data
writer.write_table()
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/train/table.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import os
import time
from itertools import cycle
import numpy as np
import torch
import torch.optim
import torch.utils.data
from apex.parallel import DistributedDataParallel
from apex import amp
from seq2seq.train.fp_optimizers import FP16Optimizer
from seq2seq.train.fp_optimizers import FP32Optimizer
from seq2seq.train.fp_optimizers import AMPOptimizer
from seq2seq.train.lr_scheduler import WarmupMultiStepLR
from seq2seq.utils import AverageMeter
from seq2seq.utils import sync_workers
class Seq2SeqTrainer:
"""
Seq2SeqTrainer
"""
def __init__(self,
model,
criterion,
opt_config,
scheduler_config,
print_freq=10,
save_freq=1000,
grad_clip=float('inf'),
save_info={},
save_dir='.',
train_iterations=0,
checkpoint_filename='checkpoint%s.pth',
keep_checkpoints=5,
math='fp32',
loss_scaling={},
intra_epoch_eval=0,
prealloc_mode='always',
warmup=0,
iter_size=1,
translator=None,
verbose=False):
"""
Constructor for the Seq2SeqTrainer.
:param model: model to train
:param criterion: criterion (loss function)
:param opt_config: dictionary with options for the optimizer
:param scheduler_config: dictionary with options for the learning rate
scheduler
:param print_freq: prints short summary every 'print_freq' iterations
:param save_freq: saves checkpoint every 'save_freq' iterations
:param grad_clip: coefficient for gradient clipping
:param save_info: dict with additional state stored in each checkpoint
        :param save_dir: path to the directory for checkpoints
:param train_iterations: total number of training iterations to execute
:param checkpoint_filename: name of files with checkpoints
:param keep_checkpoints: max number of checkpoints to keep
:param math: arithmetic type
:param loss_scaling: options for dynamic loss scaling
:param intra_epoch_eval: number of additional eval runs within each
training epoch
:param prealloc_mode: controls preallocation,
choices=['off', 'once', 'always']
:param warmup: number of warmup iterations for performance counters
:param iter_size: number of iterations between weight updates
:param translator: instance of Translator, runs inference on test set
:param verbose: enables verbose logging
"""
super(Seq2SeqTrainer, self).__init__()
self.model = model
self.criterion = criterion
self.epoch = 0
self.save_info = save_info
self.save_dir = save_dir
self.save_freq = save_freq
self.save_counter = 0
self.checkpoint_filename = checkpoint_filename
self.checkpoint_counter = cycle(range(keep_checkpoints))
self.opt_config = opt_config
self.device = next(model.parameters()).device
self.print_freq = print_freq
self.verbose = verbose
self.loss = None
self.translator = translator
self.intra_epoch_eval = intra_epoch_eval
self.warmup = warmup
self.iter_size = iter_size
self.prealloc_mode = prealloc_mode
self.preallocated = False
self.distributed = torch.distributed.is_initialized()
self.batch_first = model.batch_first
params = self.model.parameters()
if math == 'manual_fp16':
self.fp_optimizer = FP16Optimizer(
self.model, grad_clip,
loss_scale=loss_scaling['init_scale'],
dls_upscale_interval=loss_scaling['upscale_interval']
)
params = self.fp_optimizer.fp32_params
elif math == 'fp32' or math == 'tf32':
self.fp_optimizer = FP32Optimizer(self.model, grad_clip)
opt_name = opt_config.pop('optimizer')
self.optimizer = torch.optim.__dict__[opt_name](params, **opt_config)
logging.info(f'Using optimizer: {self.optimizer}')
self.scheduler = WarmupMultiStepLR(self.optimizer, train_iterations,
**scheduler_config)
if math == 'fp16':
self.model, self.optimizer = amp.initialize(
self.model,
self.optimizer,
cast_model_outputs=torch.float16,
keep_batchnorm_fp32=False,
opt_level='O2')
self.fp_optimizer = AMPOptimizer(
self.model,
grad_clip,
loss_scale=loss_scaling['init_scale'],
dls_upscale_interval=loss_scaling['upscale_interval']
)
if self.distributed:
self.model = DistributedDataParallel(self.model)
def iterate(self, src, tgt, update=True, training=True):
"""
Performs one iteration of the training/validation.
:param src: batch of examples from the source language
:param tgt: batch of examples from the target language
        :param update: if True, the optimizer applies a weight update
        :param training: if True, runs the backward pass and optimizer step
"""
src, src_length = src
tgt, tgt_length = tgt
src = src.to(self.device)
tgt = tgt.to(self.device)
src_length = src_length.to(self.device)
num_toks = {}
num_toks['tgt'] = int(sum(tgt_length - 1))
num_toks['src'] = int(sum(src_length))
if self.batch_first:
output = self.model(src, src_length, tgt[:, :-1])
tgt_labels = tgt[:, 1:]
T, B = output.size(1), output.size(0)
else:
output = self.model(src, src_length, tgt[:-1])
tgt_labels = tgt[1:]
T, B = output.size(0), output.size(1)
loss = self.criterion(output.view(T * B, -1),
tgt_labels.contiguous().view(-1))
loss_per_batch = loss.item()
loss /= (B * self.iter_size)
if training:
self.fp_optimizer.step(loss, self.optimizer, self.scheduler,
update)
loss_per_token = loss_per_batch / num_toks['tgt']
loss_per_sentence = loss_per_batch / B
return loss_per_token, loss_per_sentence, num_toks
def feed_data(self, data_loader, training=True):
"""
Runs training or validation on batches from data_loader.
:param data_loader: data loader
:param training: if True runs training else runs validation
"""
if training:
assert self.optimizer is not None
eval_fractions = np.linspace(0, 1, self.intra_epoch_eval+2)[1:-1]
iters_with_update = len(data_loader) // self.iter_size
eval_iters = (eval_fractions * iters_with_update).astype(int)
eval_iters = eval_iters * self.iter_size
eval_iters = set(eval_iters)
batch_time = AverageMeter(self.warmup)
data_time = AverageMeter(self.warmup)
losses_per_token = AverageMeter()
losses_per_sentence = AverageMeter()
tot_tok_time = AverageMeter(self.warmup)
src_tok_time = AverageMeter(self.warmup)
tgt_tok_time = AverageMeter(self.warmup)
batch_size = data_loader.batch_size
if self.device.type == 'cuda':
torch.cuda.synchronize()
end = time.time()
for i, (src, tgt) in enumerate(data_loader):
self.save_counter += 1
# measure data loading time
data_time.update(time.time() - end)
update = False
if i % self.iter_size == self.iter_size - 1:
update = True
# do a train/evaluate iteration
stats = self.iterate(src, tgt, update, training=training)
loss_per_token, loss_per_sentence, num_toks = stats
# measure accuracy and record loss
losses_per_token.update(loss_per_token, num_toks['tgt'])
losses_per_sentence.update(loss_per_sentence, batch_size)
# measure elapsed time
if self.device.type == 'cuda':
torch.cuda.synchronize()
elapsed = time.time() - end
batch_time.update(elapsed)
src_tok_time.update(num_toks['src'] / elapsed, elapsed)
tgt_tok_time.update(num_toks['tgt'] / elapsed, elapsed)
tot_num_toks = num_toks['tgt'] + num_toks['src']
tot_tok_time.update(tot_num_toks / elapsed, elapsed)
self.loss = losses_per_token.avg
if training and i in eval_iters:
eval_fname = f'eval_epoch_{self.epoch}_iter_{i}'
eval_path = os.path.join(self.save_dir, eval_fname)
_, eval_stats = self.translator.run(
calc_bleu=True,
epoch=self.epoch,
iteration=i,
eval_path=eval_path,
)
test_bleu = eval_stats['bleu']
log = []
log += [f'TRAIN [{self.epoch}][{i}/{len(data_loader)}]']
log += [f'BLEU: {test_bleu:.2f}']
log = '\t'.join(log)
logging.info(log)
self.model.train()
self.preallocate(data_loader.batch_size,
data_loader.dataset.max_len, training=True)
if i % self.print_freq == 0:
phase = 'TRAIN' if training else 'VALIDATION'
log = []
log += [f'{phase} [{self.epoch}][{i}/{len(data_loader)}]']
log += [f'Time {batch_time.val:.3f} ({batch_time.avg:.3f})']
log += [f'Data {data_time.val:.2e} ({data_time.avg:.2e})']
log += [f'Tok/s {tot_tok_time.val:.0f} ({tot_tok_time.avg:.0f})']
if self.verbose:
log += [f'Src tok/s {src_tok_time.val:.0f} ({src_tok_time.avg:.0f})']
log += [f'Tgt tok/s {tgt_tok_time.val:.0f} ({tgt_tok_time.avg:.0f})']
log += [f'Loss/sentence {losses_per_sentence.val:.1f} ({losses_per_sentence.avg:.1f})']
log += [f'Loss/tok {losses_per_token.val:.4f} ({losses_per_token.avg:.4f})']
if training:
lr = self.optimizer.param_groups[0]['lr']
log += [f'LR {lr:.3e}']
log = '\t'.join(log)
logging.info(log)
save_chkpt = (self.save_counter % self.save_freq) == (self.save_freq - 1)
if training and save_chkpt:
self.save_counter = 0
self.save_info['iteration'] = i
identifier = next(self.checkpoint_counter, -1)
if identifier != -1:
with sync_workers() as rank:
if rank == 0:
self.save(identifier=identifier)
if self.device.type == 'cuda':
torch.cuda.synchronize()
end = time.time()
tot_tok_time.reduce('sum')
losses_per_token.reduce('mean')
return losses_per_token.avg, tot_tok_time.avg
def preallocate(self, batch_size, max_length, training):
"""
Generates maximum sequence length batch and runs forward and backward
pass without updating model parameters.
:param batch_size: batch size for preallocation
:param max_length: max sequence length for preallocation
:param training: if True preallocates memory for backward pass
"""
if self.prealloc_mode == 'always' or (self.prealloc_mode == 'once' and
not self.preallocated):
logging.info('Executing preallocation')
torch.cuda.empty_cache()
src_length = torch.full((batch_size,), max_length,
dtype=torch.int64)
tgt_length = torch.full((batch_size,), max_length,
dtype=torch.int64)
if self.batch_first:
shape = (batch_size, max_length)
else:
shape = (max_length, batch_size)
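            # token id 4 is an arbitrary in-vocabulary id (the first id after
            # the special PAD/UNK/BOS/EOS tokens); any valid id works for
            # preallocation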
src = torch.full(shape, 4, dtype=torch.int64)
tgt = torch.full(shape, 4, dtype=torch.int64)
src = src, src_length
tgt = tgt, tgt_length
self.iterate(src, tgt, update=False, training=training)
self.model.zero_grad()
self.preallocated = True
def optimize(self, data_loader):
"""
Sets model in training mode, preallocates memory and runs training on
data provided by data_loader.
:param data_loader: data loader
"""
torch.set_grad_enabled(True)
self.model.train()
self.preallocate(data_loader.batch_size, data_loader.dataset.max_len,
training=True)
output = self.feed_data(data_loader, training=True)
self.model.zero_grad()
return output
def evaluate(self, data_loader):
"""
Sets model in eval mode, disables gradients, preallocates memory and
runs validation on data provided by data_loader.
:param data_loader: data loader
"""
torch.set_grad_enabled(False)
self.model.eval()
self.preallocate(data_loader.batch_size, data_loader.dataset.max_len,
training=False)
output = self.feed_data(data_loader, training=False)
self.model.zero_grad()
return output
def load(self, filename):
"""
Loads checkpoint from filename.
:param filename: path to the checkpoint file
"""
if os.path.isfile(filename):
checkpoint = torch.load(filename, map_location={'cuda:0': 'cpu'})
if self.distributed:
self.model.module.load_state_dict(checkpoint['state_dict'])
else:
self.model.load_state_dict(checkpoint['state_dict'])
self.fp_optimizer.initialize_model(self.model)
self.optimizer.load_state_dict(checkpoint['optimizer'])
self.scheduler.load_state_dict(checkpoint['scheduler'])
self.epoch = checkpoint['epoch']
self.loss = checkpoint['loss']
logging.info(f'Loaded checkpoint {filename} (epoch {self.epoch})')
else:
logging.error(f'Invalid checkpoint: {filename}')
def save(self, identifier=None, is_best=False, save_all=False):
"""
Stores checkpoint to a file.
:param identifier: identifier for periodic checkpoint
:param is_best: if True stores checkpoint to 'model_best.pth'
:param save_all: if True stores checkpoint after completed training
epoch
"""
def write_checkpoint(state, filename):
filename = os.path.join(self.save_dir, filename)
logging.info(f'Saving model to {filename}')
torch.save(state, filename)
if self.distributed:
model_state = self.model.module.state_dict()
else:
model_state = self.model.state_dict()
state = {
'epoch': self.epoch,
'state_dict': model_state,
'optimizer': self.optimizer.state_dict(),
'scheduler': self.scheduler.state_dict(),
'loss': getattr(self, 'loss', None),
}
state = dict(list(state.items()) + list(self.save_info.items()))
if identifier is not None:
filename = self.checkpoint_filename % identifier
write_checkpoint(state, filename)
if is_best:
filename = 'model_best.pth'
write_checkpoint(state, filename)
if save_all:
filename = f'checkpoint_epoch_{self.epoch:03d}.pth'
write_checkpoint(state, filename)
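
# Minimal sketch (illustrative, not part of the original module) of the
# gradient-accumulation schedule implemented by feed_data()/iterate() above:
# the loss is pre-divided by iter_size, gradients from iter_size consecutive
# micro-batches are accumulated, and a single optimizer update is applied on
# the last micro-batch of each group.
def _sketch_update_iterations(num_batches, iter_size):
    """Returns the iteration indices on which the weights are updated."""
    return [i for i in range(num_batches) if i % iter_size == iter_size - 1]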
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/train/trainer.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import math
import torch
from torch.nn.utils import clip_grad_norm_
import apex.amp._amp_state
from apex import amp
class FP16Optimizer:
"""
Mixed precision optimizer with dynamic loss scaling and backoff.
https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor
"""
@staticmethod
def set_grads(params, params_with_grad):
"""
        Copies gradients from params_with_grad to params
:param params: dst parameters
:param params_with_grad: src parameters
"""
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(torch.empty_like(param))
param.grad.data.copy_(param_w_grad.grad.data)
@staticmethod
def set_weights(params, new_params):
"""
Copies parameters from new_params to params
:param params: dst parameters
:param new_params: src parameters
"""
for param, new_param in zip(params, new_params):
param.data.copy_(new_param.data)
def __init__(self, model, grad_clip=float('inf'), loss_scale=8192,
dls_downscale=2, dls_upscale=2, dls_upscale_interval=128):
"""
Constructor for the Fp16Optimizer.
:param model: model
:param grad_clip: coefficient for gradient clipping, max L2 norm of the
gradients
:param loss_scale: initial loss scale
:param dls_downscale: loss downscale factor, loss scale is divided by
this factor when NaN/INF occurs in the gradients
:param dls_upscale: loss upscale factor, loss scale is multiplied by
this factor if previous dls_upscale_interval batches finished
successfully
:param dls_upscale_interval: interval for loss scale upscaling
"""
logging.info('Initializing fp16 optimizer')
self.initialize_model(model)
self.since_last_invalid = 0
self.loss_scale = loss_scale
self.dls_downscale = dls_downscale
self.dls_upscale = dls_upscale
self.dls_upscale_interval = dls_upscale_interval
self.grad_clip = grad_clip
def initialize_model(self, model):
"""
        Initializes internal state and builds an fp32 master copy of the weights.
:param model: fp16 model
"""
logging.info('Converting model to half precision')
model.half()
logging.info('Initializing fp32 clone weights')
self.model = model
self.model.zero_grad()
self.fp32_params = [param.to(torch.float32).detach()
for param in model.parameters()]
for param in self.fp32_params:
param.requires_grad = True
def step(self, loss, optimizer, scheduler, update=True):
"""
Performs one step of the optimizer.
Applies loss scaling, computes gradients in fp16, converts gradients to
fp32, inverts scaling and applies optional gradient norm clipping.
If gradients are finite, it applies update to fp32 master weights and
copies updated parameters to fp16 model for the next iteration. If
gradients are not finite, it skips the batch and adjusts scaling factor
for the next iteration.
:param loss: value of loss function
        :param optimizer: optimizer
        :param scheduler: learning rate scheduler
        :param update: if True executes weight update
"""
loss *= self.loss_scale
loss.backward()
if update:
self.set_grads(self.fp32_params, self.model.parameters())
if self.loss_scale != 1.0:
for param in self.fp32_params:
param.grad.data /= self.loss_scale
norm = clip_grad_norm_(self.fp32_params, self.grad_clip)
if math.isfinite(norm):
scheduler.step()
optimizer.step()
self.set_weights(self.model.parameters(),
self.fp32_params)
self.since_last_invalid += 1
else:
self.loss_scale /= self.dls_downscale
self.since_last_invalid = 0
logging.info(f'Gradient norm: {norm}')
logging.info(f'Skipped batch, new scale: {self.loss_scale}')
if self.since_last_invalid >= self.dls_upscale_interval:
self.loss_scale *= self.dls_upscale
self.loss_scale = min(self.loss_scale, 8192.0)
logging.info(f'Upscaling, new scale: {self.loss_scale}')
self.since_last_invalid = 0
self.model.zero_grad()
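
# Minimal standalone sketch (illustrative, not part of the original module) of
# the dynamic loss-scaling policy implemented by FP16Optimizer.step() above:
# the scale is divided by dls_downscale whenever a non-finite gradient norm is
# observed, and multiplied by dls_upscale (capped at 8192) after
# dls_upscale_interval consecutive finite steps.
def _sketch_dynamic_loss_scale(grad_norms, loss_scale=8192.0, dls_downscale=2,
                               dls_upscale=2, dls_upscale_interval=128):
    since_last_invalid = 0
    for norm in grad_norms:
        if math.isfinite(norm):
            since_last_invalid += 1
        else:
            loss_scale /= dls_downscale
            since_last_invalid = 0
        if since_last_invalid >= dls_upscale_interval:
            loss_scale = min(loss_scale * dls_upscale, 8192.0)
            since_last_invalid = 0
    return loss_scale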
class FP32Optimizer:
"""
Standard optimizer, computes backward and applies weight update.
"""
def __init__(self, model, grad_clip=None):
"""
Constructor for the Fp32Optimizer
:param model: model
:param grad_clip: coefficient for gradient clipping, max L2 norm of the
gradients
"""
logging.info('Initializing fp32 optimizer')
self.initialize_model(model)
self.grad_clip = grad_clip
def initialize_model(self, model):
"""
Initializes state of the model.
:param model: model
"""
self.model = model
self.model.zero_grad()
def step(self, loss, optimizer, scheduler, update=True):
"""
Performs one step of the optimizer.
:param loss: value of loss function
        :param optimizer: optimizer
        :param scheduler: learning rate scheduler
        :param update: if True executes weight update
"""
loss.backward()
if update:
if self.grad_clip != float('inf'):
clip_grad_norm_(self.model.parameters(), self.grad_clip)
scheduler.step()
optimizer.step()
self.model.zero_grad()
class AMPOptimizer:
"""
Optimizer compatible with AMP.
Uses AMP to apply loss scaling, computes backward and applies weight
update.
"""
def __init__(self, model, grad_clip=None, loss_scale=8192,
dls_upscale_interval=128):
"""
Constructor for the AMPOptimizer
:param model: model
        :param grad_clip: coefficient for gradient clipping, max L2 norm of the
            gradients
        :param loss_scale: initial loss scale
        :param dls_upscale_interval: interval for loss scale upscaling
"""
logging.info('Initializing amp optimizer')
self.initialize_model(model)
self.grad_clip = grad_clip
loss_scaler = apex.amp._amp_state.loss_scalers[0]
loss_scaler._loss_scale = loss_scale
loss_scaler._scale_seq_len = dls_upscale_interval
def initialize_model(self, model):
"""
Initializes state of the model.
:param model: model
"""
self.model = model
self.model.zero_grad()
def step(self, loss, optimizer, scheduler, update=True):
"""
Performs one step of the optimizer.
:param loss: value of loss function
        :param optimizer: optimizer
        :param scheduler: learning rate scheduler
        :param update: if True executes weight update
"""
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if update:
if self.grad_clip != float('inf'):
clip_grad_norm_(amp.master_params(optimizer), self.grad_clip)
scheduler.step()
optimizer.step()
self.model.zero_grad()
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/train/fp_optimizers.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch
from seq2seq.data.config import BOS
from seq2seq.data.config import EOS
class SequenceGenerator:
"""
Generator for the autoregressive inference with beam search decoding.
"""
def __init__(self, model, beam_size=5, max_seq_len=100,
len_norm_factor=0.6, len_norm_const=5,
cov_penalty_factor=0.1):
"""
Constructor for the SequenceGenerator.
Beam search decoding supports coverage penalty and length
normalization. For details, refer to Section 7 of the GNMT paper
(https://arxiv.org/pdf/1609.08144.pdf).
:param model: model which implements generate method
:param beam_size: decoder beam size
:param max_seq_len: maximum decoder sequence length
:param len_norm_factor: length normalization factor
:param len_norm_const: length normalization constant
:param cov_penalty_factor: coverage penalty factor
"""
self.model = model
self.beam_size = beam_size
self.max_seq_len = max_seq_len
self.len_norm_factor = len_norm_factor
self.len_norm_const = len_norm_const
self.cov_penalty_factor = cov_penalty_factor
self.batch_first = self.model.batch_first
def greedy_search(self, batch_size, initial_input, initial_context=None):
"""
Greedy decoder.
:param batch_size: decoder batch size
:param initial_input: initial input, usually tensor of BOS tokens
:param initial_context: initial context, usually [encoder_context,
src_seq_lengths, None]
returns: (translation, lengths, counter)
translation: (batch_size, max_seq_len) - indices of target tokens
lengths: (batch_size) - lengths of generated translations
counter: number of iterations of the decoding loop
"""
device = initial_input.device
max_seq_len = self.max_seq_len
translation = torch.zeros(batch_size, max_seq_len, dtype=torch.int64,
device=device)
lengths = torch.ones(batch_size, dtype=torch.int64,
device=device)
active = torch.arange(0, batch_size, dtype=torch.int64,
device=device)
base_mask = torch.arange(0, batch_size, dtype=torch.int64,
device=device)
translation[:, 0] = BOS
words, context = initial_input, initial_context
if self.batch_first:
word_view = (-1, 1)
ctx_batch_dim = 0
else:
word_view = (1, -1)
ctx_batch_dim = 1
counter = 0
for idx in range(1, max_seq_len):
if not len(active):
break
counter += 1
words = words.view(word_view)
output = self.model.generate(words, context, 1)
words, logprobs, attn, context = output
words = words.view(-1)
translation[active, idx] = words
lengths[active] += 1
terminating = (words == EOS)
if terminating.any():
not_terminating = ~terminating
mask = base_mask[:len(active)]
mask = mask.masked_select(not_terminating)
active = active.masked_select(not_terminating)
words = words[mask]
context[0] = context[0].index_select(ctx_batch_dim, mask)
context[1] = context[1].index_select(0, mask)
context[2] = context[2].index_select(1, mask)
return translation, lengths, counter
def beam_search(self, batch_size, initial_input, initial_context=None):
"""
Beam search decoder.
:param batch_size: decoder batch size
:param initial_input: initial input, usually tensor of BOS tokens
:param initial_context: initial context, usually [encoder_context,
src_seq_lengths, None]
returns: (translation, lengths, counter)
translation: (batch_size, max_seq_len) - indices of target tokens
lengths: (batch_size) - lengths of generated translations
counter: number of iterations of the decoding loop
"""
device = initial_input.device
beam_size = self.beam_size
norm_const = self.len_norm_const
norm_factor = self.len_norm_factor
max_seq_len = self.max_seq_len
cov_penalty_factor = self.cov_penalty_factor
translation = torch.zeros(batch_size * beam_size, max_seq_len,
dtype=torch.int64, device=device)
lengths = torch.ones(batch_size * beam_size,
dtype=torch.int64, device=device)
scores = torch.zeros(batch_size * beam_size,
dtype=torch.float32, device=device)
active = torch.arange(0, batch_size * beam_size,
dtype=torch.int64, device=device)
base_mask = torch.arange(0, batch_size * beam_size,
dtype=torch.int64, device=device)
global_offset = torch.arange(0, batch_size * beam_size, beam_size,
device=device, dtype=torch.int64)
eos_beam_fill = torch.tensor([0] + (beam_size - 1) * [float('-inf')],
dtype=torch.float32, device=device)
translation[:, 0] = BOS
words, context = initial_input, initial_context
if self.batch_first:
word_view = (-1, 1)
ctx_batch_dim = 0
attn_query_dim = 1
else:
word_view = (1, -1)
ctx_batch_dim = 1
attn_query_dim = 0
# replicate context
if self.batch_first:
# context[0] (encoder state): (batch, seq, feature)
_, seq, feature = context[0].shape
context[0].unsqueeze_(1)
context[0] = context[0].expand(-1, beam_size, -1, -1)
context[0] = context[0].contiguous().view(batch_size * beam_size,
seq, feature)
# context[0]: (batch * beam, seq, feature)
else:
# context[0] (encoder state): (seq, batch, feature)
seq, _, feature = context[0].shape
context[0].unsqueeze_(2)
context[0] = context[0].expand(-1, -1, beam_size, -1)
context[0] = context[0].contiguous().view(seq, batch_size *
beam_size, feature)
# context[0]: (seq, batch * beam, feature)
# context[1] (encoder seq length): (batch)
context[1].unsqueeze_(1)
context[1] = context[1].expand(-1, beam_size)
context[1] = context[1].contiguous().view(batch_size * beam_size)
# context[1]: (batch * beam)
accu_attn_scores = torch.zeros(batch_size * beam_size, seq,
dtype=torch.float32, device=device)
counter = 0
for idx in range(1, self.max_seq_len):
if not len(active):
break
counter += 1
eos_mask = (words == EOS)
eos_mask = eos_mask.view(-1, beam_size)
terminating, _ = eos_mask.min(dim=1)
lengths[active[~eos_mask.view(-1)]] += 1
output = self.model.generate(words, context, beam_size)
words, logprobs, attn, context = output
attn = attn.float().squeeze(attn_query_dim)
attn = attn.masked_fill(eos_mask.view(-1).unsqueeze(1), 0)
accu_attn_scores[active] += attn
# words: (batch, beam, k)
words = words.view(-1, beam_size, beam_size)
words = words.masked_fill(eos_mask.unsqueeze(2), EOS)
# logprobs: (batch, beam, k)
logprobs = logprobs.float().view(-1, beam_size, beam_size)
if eos_mask.any():
logprobs[eos_mask] = eos_beam_fill
active_scores = scores[active].view(-1, beam_size)
# new_scores: (batch, beam, k)
new_scores = active_scores.unsqueeze(2) + logprobs
if idx == 1:
new_scores[:, 1:, :].fill_(float('-inf'))
new_scores = new_scores.view(-1, beam_size * beam_size)
# index: (batch, beam)
_, index = new_scores.topk(beam_size, dim=1)
source_beam = index // beam_size
best_scores = torch.gather(new_scores, 1, index)
scores[active] = best_scores.view(-1)
words = words.view(-1, beam_size * beam_size)
words = torch.gather(words, 1, index)
# words: (1, batch * beam)
words = words.view(word_view)
offset = global_offset[:source_beam.shape[0]]
source_beam += offset.unsqueeze(1)
translation[active, :] = translation[active[source_beam.view(-1)], :]
translation[active, idx] = words.view(-1)
lengths[active] = lengths[active[source_beam.view(-1)]]
context[2] = context[2].index_select(1, source_beam.view(-1))
if terminating.any():
not_terminating = ~terminating
not_terminating = not_terminating.unsqueeze(1)
not_terminating = not_terminating.expand(-1, beam_size).contiguous()
normalization_mask = active.view(-1, beam_size)[terminating]
# length normalization
norm = lengths[normalization_mask].float()
norm = (norm_const + norm) / (norm_const + 1.0)
norm = norm ** norm_factor
scores[normalization_mask] /= norm
# coverage penalty
penalty = accu_attn_scores[normalization_mask]
penalty = penalty.clamp(0, 1)
penalty = penalty.log()
penalty[penalty == float('-inf')] = 0
penalty = penalty.sum(dim=-1)
scores[normalization_mask] += cov_penalty_factor * penalty
mask = base_mask[:len(active)]
mask = mask.masked_select(not_terminating.view(-1))
words = words.index_select(ctx_batch_dim, mask)
context[0] = context[0].index_select(ctx_batch_dim, mask)
context[1] = context[1].index_select(0, mask)
context[2] = context[2].index_select(1, mask)
active = active.masked_select(not_terminating.view(-1))
scores = scores.view(batch_size, beam_size)
_, idx = scores.max(dim=1)
translation = translation[idx + global_offset, :]
lengths = lengths[idx + global_offset]
return translation, lengths, counter
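
# Illustrative helpers (not part of the original module) for the two scoring
# terms applied in beam_search() above when hypotheses terminate: GNMT length
# normalization divides the score by
# ((len_norm_const + length) / (len_norm_const + 1)) ** len_norm_factor,
# and the coverage penalty adds cov_penalty_factor * sum(log(clamp(attn, 0, 1)))
# over source positions, with log(0) terms zeroed out.
def _sketch_length_penalty(length, len_norm_const=5.0, len_norm_factor=0.6):
    return ((len_norm_const + length) / (len_norm_const + 1.0)) ** len_norm_factor

def _sketch_coverage_penalty(accu_attn_scores, cov_penalty_factor=0.1):
    # accu_attn_scores: 1D tensor of attention mass accumulated per source token
    penalty = accu_attn_scores.clamp(0, 1).log()
    penalty[penalty == float('-inf')] = 0
    return cov_penalty_factor * penalty.sum()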
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/inference/beam_search.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import collections
import itertools
import numpy as np
from pytablewriter import MarkdownTableWriter
def interleave(*args):
return list(itertools.chain(*zip(*args)))
class AccuracyTable:
def __init__(self, unit):
self.data = collections.defaultdict(dict)
self.unit = unit
def add(self, key, data):
self.data[key].update(data)
def write(self, title, write_math):
writer = MarkdownTableWriter()
writer.table_name = f'{title}'
main_header = ['**Batch Size**', '**Beam Size**']
data_header = []
if 'fp32' in write_math:
data_header += [f'**Accuracy - FP32 ({self.unit})**']
if 'tf32' in write_math:
data_header += [f'**Accuracy - TF32 ({self.unit})**']
if 'fp16' in write_math:
data_header += [f'**Accuracy - FP16 ({self.unit})**']
writer.headers = main_header + data_header
writer.value_matrix = []
for k, v in self.data.items():
batch_size, beam_size = k
row = [batch_size, beam_size]
if 'fp32' in write_math:
row.append(v['fp32'])
if 'tf32' in write_math:
row.append(v['tf32'])
if 'fp16' in write_math:
row.append(v['fp16'])
writer.value_matrix.append(row)
writer.write_table()
class PerformanceTable:
def __init__(self, percentiles, unit, reverse_percentiles=False):
self.percentiles = percentiles
self.data = collections.defaultdict(dict)
self.unit = unit
self.reverse_percentiles = reverse_percentiles
def add(self, key, value):
math, value = next(iter(value.items()))
value = np.array(value)
if self.reverse_percentiles:
percentiles = [100 - p for p in self.percentiles]
else:
percentiles = self.percentiles
stats = []
for p in percentiles:
val = np.percentile(value, p)
stats.append(val * self.unit_convert[self.unit])
avg = value.mean() * self.unit_convert[self.unit]
self.data[key].update({math: (avg, stats)})
def write(self, title, math, relative=None, reverse_speedup=False):
writer = MarkdownTableWriter()
writer.table_name = f'{title} - {math.upper()}'
main_header = ['**Batch Size**', '**Beam Size**']
data_header = [f'**Avg ({self.unit})**']
data_header += [f'**{p}% ({self.unit})**' for p in self.percentiles]
if relative:
speedup_header = ['**Speedup**'] * len(data_header)
data_header = interleave(data_header, speedup_header)
writer.headers = main_header + data_header
writer.value_matrix = []
for k, v in self.data.items():
batch_size, beam_size = k
avg, res_percentiles = v[math]
main = [batch_size, beam_size]
data = [avg, *res_percentiles]
if relative:
rel = self.data[k][relative]
rel_avg, rel_res_percentiles = rel
rel = [rel_avg, *rel_res_percentiles]
speedup = [d / r for (r, d) in zip(rel, data)]
if reverse_speedup:
speedup = [1 / s for s in speedup]
data = interleave(data, speedup)
writer.value_matrix.append(main + data)
writer.write_table()
class LatencyTable(PerformanceTable):
def __init__(self, percentiles, unit='ms'):
super().__init__(percentiles, unit)
self.unit_convert = {'s': 1, 'ms': 1e3, 'us': 1e6}
class ThroughputTable(PerformanceTable):
def __init__(self, percentiles, unit='tok/s', reverse_percentiles=True):
super().__init__(percentiles, unit, reverse_percentiles)
self.unit_convert = {'tok/s': 1}
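
# Hedged usage sketch (not part of the original module): tables are keyed by
# (batch_size, beam_size) tuples and rendered as markdown; the BLEU values
# below are made up.
def _sketch_accuracy_table_usage():
    table = AccuracyTable(unit='BLEU')
    table.add((128, 5), {'fp32': 24.5})
    table.add((128, 5), {'fp16': 24.4})
    table.write('Inference accuracy', write_math=['fp32', 'fp16'])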
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/inference/tables.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import subprocess
import time
import torch
import torch.distributed as dist
import seq2seq.data.config as config
import seq2seq.utils as utils
from seq2seq.inference.beam_search import SequenceGenerator
def gather_predictions(preds):
world_size = utils.get_world_size()
if world_size > 1:
all_preds = [preds.new(preds.size(0), preds.size(1)) for i in range(world_size)]
dist.all_gather(all_preds, preds)
preds = torch.cat(all_preds)
return preds
def run_sacrebleu(test_path, reference_path):
"""
Executes sacrebleu and returns BLEU score.
:param test_path: path to the test file
:param reference_path: path to the reference file
"""
sacrebleu_params = '--score-only -lc --tokenize intl'
logging.info(f'Running sacrebleu (parameters: {sacrebleu_params})')
sacrebleu = subprocess.run([f'sacrebleu --input {test_path} \
{reference_path} {sacrebleu_params}'],
stdout=subprocess.PIPE, shell=True)
test_bleu = round(float(sacrebleu.stdout.strip()), 2)
return test_bleu
class Translator:
"""
Translator runs validation on test dataset, executes inference, optionally
computes BLEU score using sacrebleu.
"""
def __init__(self,
model,
tokenizer,
loader=None,
beam_size=5,
len_norm_factor=0.6,
len_norm_const=5.0,
cov_penalty_factor=0.1,
max_seq_len=50,
print_freq=1,
reference=None,
):
self.model = model
self.tokenizer = tokenizer
self.loader = loader
self.insert_target_start = [config.BOS]
self.insert_src_start = [config.BOS]
self.insert_src_end = [config.EOS]
self.batch_first = model.batch_first
self.beam_size = beam_size
self.print_freq = print_freq
self.reference = reference
self.distributed = (utils.get_world_size() > 1)
self.generator = SequenceGenerator(
model=self.model,
beam_size=beam_size,
max_seq_len=max_seq_len,
len_norm_factor=len_norm_factor,
len_norm_const=len_norm_const,
cov_penalty_factor=cov_penalty_factor)
def run(self, calc_bleu=True, epoch=None, iteration=None, eval_path=None,
summary=False, warmup=0, reference_path=None):
"""
Runs translation on test dataset.
:param calc_bleu: if True compares results with reference and computes
BLEU score
:param epoch: index of the current epoch
:param iteration: index of the current iteration
:param eval_path: path to the file for saving results
        :param summary: if True prints summary
        :param warmup: number of warmup iterations for performance counters
        :param reference_path: path to the file with reference translation
"""
if reference_path is None:
reference_path = self.reference
device = next(self.model.parameters()).device
test_bleu = torch.tensor([0.], device=device)
rank = utils.get_rank()
logging.info(f'Running evaluation on test set')
self.model.eval()
output, eval_stats = self.evaluate(self.loader, epoch, iteration,
warmup, summary)
output = output[:len(self.loader.dataset)]
output = self.loader.dataset.unsort(output)
if rank == 0 and eval_path:
with open(eval_path, 'w') as eval_file:
lines = [line + '\n' for line in output]
eval_file.writelines(lines)
if calc_bleu:
test_bleu[0] = run_sacrebleu(eval_path, reference_path)
if summary:
logging.info(f'BLEU on test dataset: {test_bleu[0]:.2f}')
utils.barrier()
logging.info(f'Finished evaluation on test set')
if self.distributed:
dist.broadcast(test_bleu, 0)
if calc_bleu:
eval_stats['bleu'] = test_bleu[0].item()
else:
eval_stats['bleu'] = None
return output, eval_stats
def evaluate(self, loader, epoch=0, iteration=0, warmup=0, summary=False):
"""
        Runs evaluation on test dataset.
        :param loader: data loader
        :param epoch: index of the current epoch
        :param iteration: index of the current iteration
        :param warmup: number of warmup iterations for performance counters
        :param summary: if True prints summary
"""
device = next(self.model.parameters()).device
batch_time = utils.AverageMeter(warmup, keep=True)
tot_tok_per_sec = utils.AverageMeter(warmup, keep=True)
iterations = utils.AverageMeter()
enc_seq_len = utils.AverageMeter()
dec_seq_len = utils.AverageMeter()
stats = {}
batch_size = loader.batch_size
global_batch_size = batch_size * utils.get_world_size()
beam_size = self.beam_size
bos = [self.insert_target_start] * (batch_size * beam_size)
bos = torch.tensor(bos, dtype=torch.int64, device=device)
if self.batch_first:
bos = bos.view(-1, 1)
else:
bos = bos.view(1, -1)
if beam_size == 1:
generator = self.generator.greedy_search
else:
generator = self.generator.beam_search
output = []
for i, (src, indices) in enumerate(loader):
if device.type == 'cuda':
torch.cuda.synchronize()
translate_timer = time.time()
src, src_length = src
stats['total_enc_len'] = int(src_length.sum())
src = src.to(device)
src_length = src_length.to(device)
with torch.no_grad():
context = self.model.encode(src, src_length)
context = [context, src_length, None]
preds, lengths, counter = generator(batch_size, bos, context)
stats['total_dec_len'] = lengths.sum().item()
stats['iters'] = counter
indices = torch.tensor(indices).to(preds)
preds = preds.scatter(0, indices.unsqueeze(1).expand_as(preds), preds)
preds = gather_predictions(preds).cpu()
if self.tokenizer:
for pred in preds:
pred = pred.tolist()
detok = self.tokenizer.detokenize(pred)
output.append(detok)
if device.type == 'cuda':
torch.cuda.synchronize()
elapsed = time.time() - translate_timer
batch_time.update(elapsed, batch_size)
total_tokens = stats['total_dec_len'] + stats['total_enc_len']
ttps = total_tokens / elapsed
tot_tok_per_sec.update(ttps, elapsed)
iterations.update(stats['iters'])
enc_seq_len.update(stats['total_enc_len'] / batch_size, batch_size)
dec_seq_len.update(stats['total_dec_len'] / batch_size, batch_size)
if i % self.print_freq == self.print_freq - 1:
                log = 'TEST '
                if epoch is not None:
                    log += f'[{epoch}]'
                if iteration is not None:
                    log += f'[{iteration}]'
                log += f'[{i}/{len(loader)}]\t'
                log += f'Time {batch_time.val:.4f} ({batch_time.avg:.4f})\t'
                log += f'Decoder iters {iterations.val:.1f} ({iterations.avg:.1f})\t'
                log += f'Tok/s {tot_tok_per_sec.val:.0f} ({tot_tok_per_sec.avg:.0f})'
logging.info(log)
tot_tok_per_sec.reduce('sum')
enc_seq_len.reduce('mean')
dec_seq_len.reduce('mean')
batch_time.reduce('mean')
iterations.reduce('sum')
if summary and utils.get_rank() == 0:
time_per_sentence = (batch_time.avg / global_batch_size)
            log = 'TEST SUMMARY:\n'
            log += f'Lines translated: {len(loader.dataset)}\t'
            log += f'Avg total tokens/s: {tot_tok_per_sec.avg:.0f}\n'
            log += f'Avg time per batch: {batch_time.avg:.3f} s\t'
            log += f'Avg time per sentence: {1000*time_per_sentence:.3f} ms\n'
            log += f'Avg encoder seq len: {enc_seq_len.avg:.2f}\t'
            log += f'Avg decoder seq len: {dec_seq_len.avg:.2f}\t'
            log += f'Total decoder iterations: {int(iterations.sum)}'
logging.info(log)
eval_stats = {}
eval_stats['tokens_per_sec'] = tot_tok_per_sec.avg
eval_stats['runtimes'] = batch_time.vals
eval_stats['throughputs'] = tot_tok_per_sec.vals
return output, eval_stats
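
# Hedged usage sketch (not part of the original module): end-to-end evaluation
# with the Translator above. The model, tokenizer and loader are assumed to be
# built elsewhere; the reference path and output file name are illustrative.
def _sketch_translator_usage(model, tokenizer, test_loader):
    translator = Translator(model, tokenizer, loader=test_loader,
                            beam_size=5, max_seq_len=80,
                            reference='data/newstest2014.de')
    output, stats = translator.run(calc_bleu=True, eval_path='eval.out',
                                   summary=True)
    return stats['bleu']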
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/inference/translator.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
PAD_TOKEN = '<pad>'
UNK_TOKEN = '<unk>'
BOS_TOKEN = '<s>'
EOS_TOKEN = '<\\s>'  # same literal value as '<\s>'; escaped to avoid the invalid '\s' escape sequence
# special PAD, UNKNOWN, BEGIN-OF-STRING, END-OF-STRING tokens
PAD, UNK, BOS, EOS = [0, 1, 2, 3]
# path to the moses detokenizer, relative to the data directory
DETOKENIZER = 'mosesdecoder/scripts/tokenizer/detokenizer.perl'
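
# Illustrative sanity check (not part of the original module): the special
# tokens occupy the first four indices of every vocabulary built by
# seq2seq.data.tokenizer.Tokenizer, so tokenized sentences always take the
# form [BOS, w_1, ..., w_n, EOS], with PAD used only for batch padding.
assert [PAD, UNK, BOS, EOS] == [0, 1, 2, 3]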
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/data/config.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from operator import itemgetter
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
import seq2seq.data.config as config
from seq2seq.data.sampler import BucketingSampler
from seq2seq.data.sampler import DistributedSampler
from seq2seq.data.sampler import ShardingSampler
from seq2seq.data.sampler import StaticDistributedSampler
def build_collate_fn(batch_first=False, parallel=True, sort=False):
"""
Factory for collate_fn functions.
:param batch_first: if True returns batches in (batch, seq) format, if
False returns in (seq, batch) format
:param parallel: if True builds batches from parallel corpus (src, tgt)
:param sort: if True sorts by src sequence length within each batch
"""
def collate_seq(seq):
"""
Builds batches for training or inference.
Batches are returned as pytorch tensors, with padding.
:param seq: list of sequences
"""
lengths = torch.tensor([len(s) for s in seq], dtype=torch.int64)
batch_length = max(lengths)
shape = (len(seq), batch_length)
seq_tensor = torch.full(shape, config.PAD, dtype=torch.int64)
for i, s in enumerate(seq):
end_seq = lengths[i]
seq_tensor[i, :end_seq].copy_(s[:end_seq])
if not batch_first:
seq_tensor = seq_tensor.t()
return (seq_tensor, lengths)
def parallel_collate(seqs):
"""
Builds batches from parallel dataset (src, tgt), optionally sorts batch
by src sequence length.
:param seqs: tuple of (src, tgt) sequences
"""
src_seqs, tgt_seqs = zip(*seqs)
if sort:
indices, src_seqs = zip(*sorted(enumerate(src_seqs),
key=lambda item: len(item[1]),
reverse=True))
tgt_seqs = [tgt_seqs[idx] for idx in indices]
return tuple([collate_seq(s) for s in [src_seqs, tgt_seqs]])
def single_collate(src_seqs):
"""
Builds batches from text dataset, optionally sorts batch by src
sequence length.
:param src_seqs: source sequences
"""
if sort:
indices, src_seqs = zip(*sorted(enumerate(src_seqs),
key=lambda item: len(item[1]),
reverse=True))
else:
indices = range(len(src_seqs))
return collate_seq(src_seqs), tuple(indices)
if parallel:
return parallel_collate
else:
return single_collate
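
# Hedged example (not part of the original module) of the padding behaviour of
# build_collate_fn(): two source sequences of lengths 3 and 2 are padded with
# config.PAD into a (max_len, batch) tensor because batch_first=False, and the
# batch is sorted by decreasing source length.
def _sketch_collate_example():
    collate = build_collate_fn(batch_first=False, parallel=False, sort=True)
    seqs = [torch.tensor([2, 7, 3]), torch.tensor([2, 3])]
    (batch, lengths), indices = collate(seqs)
    # batch.shape == (3, 2), lengths == tensor([3, 2]), indices == (0, 1)
    return batch, lengths, indices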
class SyntheticDataset(Dataset):
def __init__(self, vocab_size, seq_len, nsamples):
self.vocab_size = vocab_size
self.nsamples = nsamples
self.seq_len = seq_len
def __getitem__(self, idx):
rand = torch.randint(0, self.vocab_size, size=(self.seq_len,))
return rand
def unsort(self, array):
return array
def get_loader(self, batch_size=1, num_workers=0, batch_first=False,
pad=False, repeat=1):
collate_fn = build_collate_fn(batch_first, parallel=False,
sort=True)
sampler = StaticDistributedSampler(self, batch_size, pad, repeat)
return DataLoader(self,
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=True,
drop_last=False)
def __len__(self):
return self.nsamples
class RawTextDataset(Dataset):
def __init__(self, raw_data=None, raw_datafile=None, tokenizer=None,
sort=False, max_size=None):
self.tokenizer = tokenizer
self.sorted = False
if raw_datafile:
with open(raw_datafile, 'r') as f:
self.raw_data = f.readlines()
else:
self.raw_data = raw_data
if max_size:
self.raw_data = self.raw_data[:max_size]
self.lengths = [len(s.split()) for s in self.raw_data]
if sort:
self.sort_by_length()
def __getitem__(self, idx):
raw = self.raw_data[idx]
tokenized = self.tokenizer.tokenize(raw)
return tokenized
def unsort(self, array):
"""
"Unsorts" given array (restores original order of elements before
dataset was sorted by sequence length).
:param array: array to be "unsorted"
"""
if self.sorted:
inverse = sorted(enumerate(self.indices), key=itemgetter(1))
array = [array[i[0]] for i in inverse]
return array
def sort_by_length(self):
output = sorted(
enumerate(self.raw_data),
key=lambda x: len(x[1].split()),
)
self.indices, self.raw_data = zip(*output)
self.lengths = [self.lengths[idx] for idx in self.indices]
self.sorted = True
def __len__(self):
return len(self.raw_data)
def get_loader(self, batch_size=1, num_workers=0, batch_first=False,
pad=False, repeat=1):
collate_fn = build_collate_fn(batch_first, parallel=False,
sort=True)
sampler = StaticDistributedSampler(self, batch_size, pad, repeat)
return DataLoader(self,
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=True,
drop_last=False)
class TextDataset(Dataset):
def __init__(self, src_fname, tokenizer, min_len=None, max_len=None,
sort=False, max_size=None):
"""
Constructor for the TextDataset. Builds monolingual dataset.
:param src_fname: path to the file with data
:param tokenizer: tokenizer
:param min_len: minimum sequence length
:param max_len: maximum sequence length
:param sort: sorts dataset by sequence length
:param max_size: loads at most 'max_size' samples from the input file,
if None loads the entire dataset
"""
self.min_len = min_len
self.max_len = max_len
self.parallel = False
self.sorted = False
self.src = self.process_data(src_fname, tokenizer, max_size)
if min_len is not None and max_len is not None:
self.filter_data(min_len, max_len)
lengths = [len(s) for s in self.src]
self.lengths = torch.tensor(lengths)
if sort:
self.sort_by_length()
def sort_by_length(self):
"""
Sorts dataset by the sequence length.
"""
self.lengths, indices = self.lengths.sort(descending=True)
self.src = [self.src[idx] for idx in indices]
self.indices = indices.tolist()
self.sorted = True
def unsort(self, array):
"""
"Unsorts" given array (restores original order of elements before
dataset was sorted by sequence length).
:param array: array to be "unsorted"
"""
if self.sorted:
inverse = sorted(enumerate(self.indices), key=itemgetter(1))
array = [array[i[0]] for i in inverse]
return array
def filter_data(self, min_len, max_len):
"""
Preserves only samples which satisfy the following inequality:
min_len <= sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length
"""
        logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
initial_len = len(self.src)
filtered_src = []
for src in self.src:
if min_len <= len(src) <= max_len:
filtered_src.append(src)
self.src = filtered_src
filtered_len = len(self.src)
logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')
def process_data(self, fname, tokenizer, max_size):
"""
Loads data from the input file.
:param fname: input file name
:param tokenizer: tokenizer
:param max_size: loads at most 'max_size' samples from the input file,
if None loads the entire dataset
"""
logging.info(f'Processing data from {fname}')
data = []
with open(fname) as dfile:
for idx, line in enumerate(dfile):
if max_size and idx == max_size:
break
entry = tokenizer.segment(line)
entry = torch.tensor(entry)
data.append(entry)
return data
def __len__(self):
return len(self.src)
def __getitem__(self, idx):
return self.src[idx]
def get_loader(self, batch_size=1, seeds=None, shuffle=False,
num_workers=0, batch_first=False, pad=False,
batching=None, batching_opt={}):
collate_fn = build_collate_fn(batch_first, parallel=self.parallel,
sort=True)
if shuffle:
if batching == 'random':
sampler = DistributedSampler(self, batch_size, seeds)
elif batching == 'sharding':
sampler = ShardingSampler(self, batch_size, seeds,
batching_opt['shard_size'])
elif batching == 'bucketing':
sampler = BucketingSampler(self, batch_size, seeds,
batching_opt['num_buckets'])
else:
raise NotImplementedError
else:
sampler = StaticDistributedSampler(self, batch_size, pad)
return DataLoader(self,
batch_size=batch_size,
collate_fn=collate_fn,
sampler=sampler,
num_workers=num_workers,
pin_memory=True,
drop_last=False)
class ParallelDataset(TextDataset):
def __init__(self, src_fname, tgt_fname, tokenizer,
min_len, max_len, sort=False, max_size=None):
"""
Constructor for the ParallelDataset.
Tokenization is done when the data is loaded from the disk.
:param src_fname: path to the file with src language data
:param tgt_fname: path to the file with tgt language data
:param tokenizer: tokenizer
:param min_len: minimum sequence length
:param max_len: maximum sequence length
:param sort: sorts dataset by sequence length
:param max_size: loads at most 'max_size' samples from the input file,
if None loads the entire dataset
"""
self.min_len = min_len
self.max_len = max_len
self.parallel = True
self.sorted = False
self.src = self.process_data(src_fname, tokenizer, max_size)
self.tgt = self.process_data(tgt_fname, tokenizer, max_size)
assert len(self.src) == len(self.tgt)
self.filter_data(min_len, max_len)
assert len(self.src) == len(self.tgt)
src_lengths = [len(s) for s in self.src]
tgt_lengths = [len(t) for t in self.tgt]
self.src_lengths = torch.tensor(src_lengths)
self.tgt_lengths = torch.tensor(tgt_lengths)
self.lengths = self.src_lengths + self.tgt_lengths
if sort:
self.sort_by_length()
def sort_by_length(self):
"""
Sorts dataset by the sequence length.
"""
self.lengths, indices = self.lengths.sort(descending=True)
self.src = [self.src[idx] for idx in indices]
self.tgt = [self.tgt[idx] for idx in indices]
self.src_lengths = [self.src_lengths[idx] for idx in indices]
self.tgt_lengths = [self.tgt_lengths[idx] for idx in indices]
self.indices = indices.tolist()
self.sorted = True
def filter_data(self, min_len, max_len):
"""
Preserves only samples which satisfy the following inequality:
min_len <= src sample sequence length <= max_len AND
min_len <= tgt sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length
"""
        logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
initial_len = len(self.src)
filtered_src = []
filtered_tgt = []
for src, tgt in zip(self.src, self.tgt):
if min_len <= len(src) <= max_len and \
min_len <= len(tgt) <= max_len:
filtered_src.append(src)
filtered_tgt.append(tgt)
self.src = filtered_src
self.tgt = filtered_tgt
filtered_len = len(self.src)
logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')
def __getitem__(self, idx):
return self.src[idx], self.tgt[idx]
class LazyParallelDataset(TextDataset):
def __init__(self, src_fname, tgt_fname, tokenizer,
min_len, max_len, sort=False, max_size=None):
"""
Constructor for the LazyParallelDataset.
Tokenization is done on the fly.
:param src_fname: path to the file with src language data
:param tgt_fname: path to the file with tgt language data
:param tokenizer: tokenizer
:param min_len: minimum sequence length
:param max_len: maximum sequence length
:param sort: sorts dataset by sequence length
:param max_size: loads at most 'max_size' samples from the input file,
if None loads the entire dataset
"""
self.min_len = min_len
self.max_len = max_len
self.parallel = True
self.sorted = False
self.tokenizer = tokenizer
self.raw_src = self.process_raw_data(src_fname, max_size)
self.raw_tgt = self.process_raw_data(tgt_fname, max_size)
assert len(self.raw_src) == len(self.raw_tgt)
        logging.info(f'Filtering data, min len: {min_len}, max len: {max_len}')
# Subtracting 2 because EOS and BOS are added later during tokenization
self.filter_raw_data(min_len - 2, max_len - 2)
assert len(self.raw_src) == len(self.raw_tgt)
# Adding 2 because EOS and BOS are added later during tokenization
src_lengths = [i + 2 for i in self.src_len]
tgt_lengths = [i + 2 for i in self.tgt_len]
self.src_lengths = torch.tensor(src_lengths)
self.tgt_lengths = torch.tensor(tgt_lengths)
self.lengths = self.src_lengths + self.tgt_lengths
def process_raw_data(self, fname, max_size):
"""
Loads data from the input file.
:param fname: input file name
:param max_size: loads at most 'max_size' samples from the input file,
if None loads the entire dataset
"""
logging.info(f'Processing data from {fname}')
data = []
with open(fname) as dfile:
for idx, line in enumerate(dfile):
if max_size and idx == max_size:
break
data.append(line)
return data
def filter_raw_data(self, min_len, max_len):
"""
Preserves only samples which satisfy the following inequality:
min_len <= src sample sequence length <= max_len AND
min_len <= tgt sample sequence length <= max_len
:param min_len: minimum sequence length
:param max_len: maximum sequence length
"""
initial_len = len(self.raw_src)
filtered_src = []
filtered_tgt = []
filtered_src_len = []
filtered_tgt_len = []
for src, tgt in zip(self.raw_src, self.raw_tgt):
src_len = src.count(' ') + 1
tgt_len = tgt.count(' ') + 1
if min_len <= src_len <= max_len and \
min_len <= tgt_len <= max_len:
filtered_src.append(src)
filtered_tgt.append(tgt)
filtered_src_len.append(src_len)
filtered_tgt_len.append(tgt_len)
self.raw_src = filtered_src
self.raw_tgt = filtered_tgt
self.src_len = filtered_src_len
self.tgt_len = filtered_tgt_len
filtered_len = len(self.raw_src)
logging.info(f'Pairs before: {initial_len}, after: {filtered_len}')
def __getitem__(self, idx):
src = torch.tensor(self.tokenizer.segment(self.raw_src[idx]))
tgt = torch.tensor(self.tokenizer.segment(self.raw_tgt[idx]))
return src, tgt
def __len__(self):
return len(self.raw_src)
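
# Hedged usage sketch (not part of the original module): building a bucketed
# training loader. The tokenizer and file names are illustrative; the
# batching_opt keys follow get_loader() above.
def _sketch_train_loader(tokenizer, seeds):
    dataset = LazyParallelDataset('train.en', 'train.de', tokenizer,
                                  min_len=2, max_len=50)
    return dataset.get_loader(batch_size=128, seeds=seeds, shuffle=True,
                              batch_first=False, batching='bucketing',
                              batching_opt={'num_buckets': 5})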
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/data/dataset.py |
# Copyright (c) 2017 Elad Hoffer
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
from collections import defaultdict
from functools import partial
import torch
import subword_nmt.apply_bpe
import sacremoses
import seq2seq.data.config as config
class Tokenizer:
"""
Tokenizer class.
"""
def __init__(self, vocab_fname=None, bpe_fname=None, lang=None, pad=1,
separator='@@'):
"""
Constructor for the Tokenizer class.
:param vocab_fname: path to the file with vocabulary
        :param bpe_fname: path to the file with bpe codes
        :param lang: dict with 'src' and 'tgt' language codes for Moses
        :param pad: pads vocabulary to a multiple of 'pad' tokens
:param separator: tokenization separator
"""
self.separator = separator
self.lang = lang
if bpe_fname:
with open(bpe_fname, 'r') as bpe_codes:
self.bpe = subword_nmt.apply_bpe.BPE(bpe_codes)
if vocab_fname:
self.build_vocabulary(vocab_fname, pad)
if lang:
self.init_moses(lang)
def init_moses(self, lang):
self.moses_tokenizer = sacremoses.MosesTokenizer(lang['src'])
self.moses_detokenizer = sacremoses.MosesDetokenizer(lang['tgt'])
def build_vocabulary(self, vocab_fname, pad):
logging.info(f'Building vocabulary from {vocab_fname}')
vocab = [config.PAD_TOKEN, config.UNK_TOKEN,
config.BOS_TOKEN, config.EOS_TOKEN]
with open(vocab_fname) as vfile:
for line in vfile:
vocab.append(line.strip())
self.pad_vocabulary(vocab, pad)
self.vocab_size = len(vocab)
logging.info(f'Size of vocabulary: {self.vocab_size}')
self.tok2idx = defaultdict(partial(int, config.UNK))
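        # defaultdict factory: partial(int, config.UNK) returns the UNK index
        # for any token missing from the vocabulary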
for idx, token in enumerate(vocab):
self.tok2idx[token] = idx
self.idx2tok = {}
for key, value in self.tok2idx.items():
self.idx2tok[value] = key
def pad_vocabulary(self, vocab, pad):
"""
Pads vocabulary to a multiple of 'pad' tokens.
:param vocab: list with vocabulary
:param pad: integer
"""
vocab_size = len(vocab)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:04d}'
vocab.append(token)
assert len(vocab) % pad == 0
def get_state(self):
logging.info(f'Saving state of the tokenizer')
state = {
'lang': self.lang,
'separator': self.separator,
'vocab_size': self.vocab_size,
'bpe': self.bpe,
'tok2idx': self.tok2idx,
'idx2tok': self.idx2tok,
}
return state
def set_state(self, state):
logging.info('Restoring state of the tokenizer')
self.lang = state['lang']
self.separator = state['separator']
self.vocab_size = state['vocab_size']
self.bpe = state['bpe']
self.tok2idx = state['tok2idx']
self.idx2tok = state['idx2tok']
self.init_moses(self.lang)
def segment(self, line):
"""
Converts a tokenized sentence into vocabulary indices and adds
special BOS and EOS tokens.
:param line: sentence
returns: list representing tokenized sentence
"""
line = line.strip().split()
entry = [self.tok2idx[i] for i in line]
entry = [config.BOS] + entry + [config.EOS]
return entry
def tokenize(self, line):
tokenized = self.moses_tokenizer.tokenize(line, return_str=True)
bpe = self.bpe.process_line(tokenized)
segmented = self.segment(bpe)
tensor = torch.tensor(segmented)
return tensor
def detokenize_bpe(self, inp, delim=' '):
"""
Detokenizes a single sentence and removes token separator characters.
:param inp: sequence of tokens
:param delim: tokenization delimiter
returns: string representing detokenized sentence
"""
detok = delim.join([self.idx2tok[idx] for idx in inp])
detok = detok.replace(self.separator + ' ', '')
detok = detok.replace(self.separator, '')
detok = detok.replace(config.BOS_TOKEN, '')
detok = detok.replace(config.EOS_TOKEN, '')
detok = detok.replace(config.PAD_TOKEN, '')
detok = detok.strip()
return detok
def detokenize_moses(self, inp):
output = self.moses_detokenizer.detokenize(inp.split())
return output
def detokenize(self, inp):
detok_bpe = self.detokenize_bpe(inp)
output = self.detokenize_moses(detok_bpe)
return output
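# --- Editor's note: a minimal, self-contained sketch of the vocabulary
# padding arithmetic used by pad_vocabulary() above; the vocabulary size
# below is hypothetical, only the rounding formula mirrors the real code.
def _demo_pad_vocabulary(vocab_size=31990, pad=8):
    padded_vocab_size = (vocab_size + pad - 1) // pad * pad
    # 31990 is rounded up to 31992, so 2 'madeupword' fillers are appended
    return padded_vocab_size, padded_vocab_size - vocab_size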
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/data/tokenizer.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import logging
import torch
from torch.utils.data.sampler import Sampler
from seq2seq.utils import get_rank
from seq2seq.utils import get_world_size
class DistributedSampler(Sampler):
def __init__(self, dataset, batch_size, seeds, world_size=None, rank=None):
"""
Constructor for the DistributedSampler.
:param dataset: dataset
:param batch_size: local batch size
:param seeds: list of seeds, one seed for each training epoch
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.dataset = dataset
self.world_size = world_size
self.rank = rank
self.epoch = 0
self.seeds = seeds
self.batch_size = batch_size
self.global_batch_size = batch_size * world_size
self.data_len = len(self.dataset)
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def init_rng(self):
"""
Creates new RNG, seed depends on current epoch idx.
"""
rng = torch.Generator()
seed = self.seeds[self.epoch]
logging.info(f'Sampler for epoch {self.epoch} uses seed {seed}')
rng.manual_seed(seed)
return rng
def distribute_batches(self, indices):
"""
Assigns batches to workers.
Consecutive ranks get consecutive batches.
:param indices: torch.tensor with batch indices
"""
assert len(indices) == self.num_samples
indices = indices.view(-1, self.batch_size)
indices = indices[self.rank::self.world_size].contiguous()
indices = indices.view(-1)
indices = indices.tolist()
assert len(indices) == self.num_samples // self.world_size
return indices
def reshuffle_batches(self, indices, rng):
"""
Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator
"""
indices = indices.view(-1, self.global_batch_size)
num_batches = indices.shape[0]
order = torch.randperm(num_batches, generator=rng)
indices = indices[order, :]
indices = indices.view(-1)
return indices
def __iter__(self):
rng = self.init_rng()
# generate permutation
indices = torch.randperm(self.data_len, generator=rng)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# assign batches to workers
indices = self.distribute_batches(indices)
return iter(indices)
def set_epoch(self, epoch):
"""
Sets current epoch index.
Epoch index is used to seed RNG in __iter__() function.
:param epoch: index of current epoch
"""
self.epoch = epoch
def __len__(self):
return self.num_samples // self.world_size
class ShardingSampler(DistributedSampler):
def __init__(self, dataset, batch_size, seeds, shard_size,
world_size=None, rank=None):
"""
Constructor for the ShardingSampler.
:param dataset: dataset
:param batch_size: local batch size
:param seeds: list of seeds, one seed for each training epoch
:param shard_size: number of global batches within one shard
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
super().__init__(dataset, batch_size, seeds, world_size, rank)
self.shard_size = shard_size
self.num_samples = self.data_len // self.global_batch_size \
* self.global_batch_size
def __iter__(self):
rng = self.init_rng()
# generate permutation
indices = torch.randperm(self.data_len, generator=rng)
# make indices evenly divisible by (batch_size * world_size)
indices = indices[:self.num_samples]
# splits the dataset into chunks of 'self.shard_size' global batches
# each, sorts by (src + tgt) sequence length within each chunk,
# reshuffles all global batches
shard_size = self.global_batch_size * self.shard_size
nshards = (self.num_samples + shard_size - 1) // shard_size
lengths = self.dataset.lengths[indices]
shards = [indices[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
len_shards = [lengths[i * shard_size:(i+1) * shard_size] for i in range(nshards)]
# sort by (src + tgt) sequence length within each shard
indices = []
for len_shard in len_shards:
_, ind = len_shard.sort()
indices.append(ind)
output = tuple(shard[idx] for shard, idx in zip(shards, indices))
# build batches
indices = torch.cat(output)
# perform global reshuffle of all global batches
indices = self.reshuffle_batches(indices, rng)
# distribute batches to individual workers
indices = self.distribute_batches(indices)
return iter(indices)
class BucketingSampler(DistributedSampler):
def __init__(self, dataset, batch_size, seeds, num_buckets,
world_size=None, rank=None):
"""
Constructor for the BucketingSampler.
:param dataset: dataset
:param batch_size: local batch size
:param seeds: list of seeds, one seed for each training epoch
:param num_buckets: number of buckets
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
super().__init__(dataset, batch_size, seeds, world_size, rank)
self.num_buckets = num_buckets
bucket_width = (dataset.max_len + num_buckets - 1) // num_buckets
# assign sentences to buckets based on src and tgt sequence lengths
bucket_ids = torch.max(dataset.src_lengths // bucket_width,
dataset.tgt_lengths // bucket_width)
bucket_ids.clamp_(0, num_buckets - 1)
# build buckets
all_indices = torch.arange(self.data_len)
self.buckets = []
self.num_samples = 0
global_bs = self.global_batch_size
for bid in range(num_buckets):
# gather indices for current bucket
indices = all_indices[bucket_ids == bid]
self.buckets.append(indices)
# count number of samples in current bucket
samples = len(indices) // global_bs * global_bs
self.num_samples += samples
def __iter__(self):
rng = self.init_rng()
global_bs = self.global_batch_size
indices = []
for bid in range(self.num_buckets):
# random shuffle within current bucket
perm = torch.randperm(len(self.buckets[bid]), generator=rng)
bucket_indices = self.buckets[bid][perm]
# make bucket_indices evenly divisible by global batch size
length = len(bucket_indices) // global_bs * global_bs
bucket_indices = bucket_indices[:length]
assert len(bucket_indices) % self.global_batch_size == 0
# add samples from current bucket to indices for current epoch
indices.append(bucket_indices)
indices = torch.cat(indices)
assert len(indices) % self.global_batch_size == 0
# perform global reshuffle of all global batches
indices = self.reshuffle_batches(indices, rng)
# distribute batches to individual workers
indices = self.distribute_batches(indices)
return iter(indices)
class StaticDistributedSampler(Sampler):
def __init__(self, dataset, batch_size, pad, repeat=1, world_size=None, rank=None):
"""
Constructor for the StaticDistributedSampler.
:param dataset: dataset
:param batch_size: local batch size
:param pad: if True: pads dataset to a multiple of global_batch_size
samples
:param repeat: dataset length multiplier; padded indices wrap around
the dataset with modulo
:param world_size: number of distributed workers
:param rank: rank of the current process
"""
if world_size is None:
world_size = get_world_size()
if rank is None:
rank = get_rank()
self.world_size = world_size
global_batch_size = batch_size * world_size
data_len = len(dataset)
repeated_data_len = int(len(dataset) * repeat)
num_samples = (repeated_data_len + global_batch_size - 1) \
// global_batch_size * global_batch_size
self.num_samples = num_samples
indices = list(range(repeated_data_len))
if pad:
# pad dataset to a multiple of global_batch_size samples, uses
# sample with idx 0 as pad
indices += [0] * (num_samples - len(indices))
else:
# temporary pad to a multiple of global batch size, pads with "-1"
# which is later removed from the list of indices
indices += [-1] * (num_samples - len(indices))
indices = torch.tensor(indices)
indices = indices.view(-1, batch_size)
indices = indices[rank::world_size].contiguous()
indices = indices.view(-1)
# remove temporary pad
indices = indices[indices != -1]
indices = indices % data_len
indices = indices.tolist()
self.indices = indices
def __iter__(self):
return iter(self.indices)
def __len__(self):
return len(self.indices)
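# --- Editor's note: a minimal sketch (not used by the samplers above) of
# the rank-interleaved batch assignment implemented in distribute_batches();
# all sizes below are made up for illustration.
def _demo_distribute_batches(num_samples=16, batch_size=2, world_size=2, rank=0):
    indices = torch.arange(num_samples)     # pretend these are shuffled
    indices = indices.view(-1, batch_size)  # split into local batches
    indices = indices[rank::world_size]     # take every world_size-th batch
    # with these defaults rank 0 gets batches 0, 2, 4, 6 and rank 1 gets 1, 3, 5, 7
    return indices.contiguous().view(-1).tolist()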
| DeepLearningExamples-master | PyTorch/Translation/GNMT/seq2seq/data/sampler.py |
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
from collections import Counter
def parse_args():
parser = argparse.ArgumentParser(description='Clean dataset')
parser.add_argument('-f1', '--file1', help='file1')
parser.add_argument('-f2', '--file2', help='file2')
return parser.parse_args()
def save_output(fname, data):
with open(fname, 'w') as f:
f.writelines(data)
def main():
"""
Discards all pairs of sentences which can't be encoded by the latin-1 codec.
It aims to filter out sentences with rare unicode glyphs and pairs which
are most likely not valid English-German sentences.
Examples of discarded sentences:
✿★★★Hommage au king de la pop ★★★✿ ✿★★★Que son âme repos...
Для их осуществления нам, прежде всего, необходимо преодолеть
возражения рыночных фундаменталистов, которые хотят ликвидировать или
уменьшить роль МВФ.
practised as a scientist in various medical departments of the ⇗Medical
University of Hanover , the ⇗University of Ulm , and the ⇗RWTH Aachen
(rheumatology, pharmacology, physiology, pathology, microbiology,
immunology and electron-microscopy).
The same shift】 and press 【】 【alt out with a smaller diameter
circle.
Brought to you by ABMSUBS ♥leira(Coordinator/Translator)
♥chibichan93(Timer/Typesetter) ♥ja...
Some examples: &0u - ☺ &0U - ☻ &tel - ☏ &PI - ¶ &SU - ☼ &cH- - ♥ &M2=♫
&sn - ﺵ SGML maps SGML to unicode.
"""
args = parse_args()
c = Counter()
skipped = 0
valid = 0
data1 = []
data2 = []
with open(args.file1) as f1, open(args.file2) as f2:
for idx, lines in enumerate(zip(f1, f2)):
line1, line2 = lines
if idx % 100000 == 1:
print(f'Processed {idx} lines')
try:
line1.encode('latin1')
line2.encode('latin1')
except UnicodeEncodeError:
skipped += 1
else:
data1.append(line1)
data2.append(line2)
valid += 1
c.update(line1)
ratio = valid / (skipped + valid)
print(f'Skipped: {skipped}, Valid: {valid}, Valid ratio {ratio}')
print('Character frequency:', c)
save_output(args.file1, data1)
save_output(args.file2, data2)
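# --- Editor's note: a small sketch of the latin-1 filter applied in main();
# the sample sentence pairs are hypothetical.
def _demo_latin1_filter():
    pairs = [('Hello world\n', 'Hallo Welt\n'),
             ('♥ unicode glyphs ♥\n', 'Для их осуществления\n')]
    kept = []
    for line1, line2 in pairs:
        try:
            line1.encode('latin1')
            line2.encode('latin1')
        except UnicodeEncodeError:
            continue
        kept.append((line1, line2))
    return kept  # only the plain-ASCII pair survives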
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/Translation/GNMT/scripts/filter_dataset.py |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
import argparse
from itertools import zip_longest
import os
import shutil
from fairseq.data import indexed_dataset, dictionary
from fairseq.tokenizer import Tokenizer, tokenize_line
def get_parser():
parser = argparse.ArgumentParser(
description='Data pre-processing: Create dictionary and store data in binary format')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--trainpref', metavar='FP', default=None,
help='train file prefix')
parser.add_argument('--validpref', metavar='FP', default=None,
help='comma separated, valid file prefixes')
parser.add_argument('--testpref', metavar='FP', default=None,
help='comma separated, test file prefixes')
parser.add_argument('--destdir', metavar='DIR', default='data-bin',
help='destination dir')
parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
help='map words appearing less than threshold times to unknown')
parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
help='map words appearing less than threshold times to unknown')
parser.add_argument('--tgtdict', metavar='FP', help='reuse given target dictionary')
parser.add_argument('--srcdict', metavar='FP', help='reuse given source dictionary')
parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int,
help='number of target words to retain')
parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int,
help='number of source words to retain')
parser.add_argument('--alignfile', metavar='ALIGN', default=None,
help='an alignment file (optional)')
parser.add_argument('--output-format', metavar='FORMAT', default='binary', choices=['binary', 'raw'],
help='output format (optional)')
parser.add_argument('--joined-dictionary', action='store_true', help='Generate joined dictionary')
parser.add_argument('--only-source', action='store_true', help='Only process the source language')
parser.add_argument('--padding-factor', metavar='N', default=8, type=int,
help='Pad dictionary size to be multiple of N')
return parser
def main(args):
print(args)
os.makedirs(args.destdir, exist_ok=True)
target = not args.only_source
def build_dictionary(filenames):
d = dictionary.Dictionary()
for filename in filenames:
Tokenizer.add_file_to_dictionary(filename, d, tokenize_line)
return d
def train_path(lang):
return '{}{}'.format(args.trainpref, ('.' + lang) if lang else '')
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += f'.{lang}'
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path('dict', lang) + '.txt'
def dataset_dest_path(output_prefix, lang, extension):
base = f'{args.destdir}/{output_prefix}'
lang_part = f'.{args.source_lang}-{args.target_lang}.{lang}' if lang is not None else ''
return f'{base}{lang_part}.{extension}'
if args.joined_dictionary:
assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'
assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'
src_dict = build_dictionary({
train_path(lang)
for lang in [args.source_lang, args.target_lang]
})
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = dictionary.Dictionary.load(args.srcdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)])
if target:
if args.tgtdict:
tgt_dict = dictionary.Dictionary.load(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)])
src_dict.finalize(
threshold=args.thresholdsrc,
nwords=args.nwordssrc,
padding_factor=args.padding_factor,
)
src_dict.save(dict_path(args.source_lang))
if target:
if not args.joined_dictionary:
tgt_dict.finalize(
threshold=args.thresholdtgt,
nwords=args.nwordstgt,
padding_factor=args.padding_factor,
)
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(input_prefix, output_prefix, lang):
_dict = dictionary.Dictionary.load(dict_path(lang))
print('| [{}] Dictionary: {} types'.format(lang, len(_dict) - 1))
ds = indexed_dataset.IndexedDatasetBuilder(dataset_dest_path(output_prefix, lang, 'bin'))
def consumer(tensor):
ds.add_item(tensor)
input_file = '{}{}'.format(input_prefix, ('.' + lang) if lang is not None else '')
res = Tokenizer.binarize(input_file, _dict, consumer)
print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
lang, input_file, res['nseq'], res['ntok'],
100 * res['nunk'] / res['ntok'], _dict.unk_word))
ds.finalize(dataset_dest_path(output_prefix, lang, 'idx'))
def make_dataset(input_prefix, output_prefix, lang):
if args.output_format == 'binary':
make_binary_dataset(input_prefix, output_prefix, lang)
elif args.output_format == 'raw':
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
def make_all(lang):
if args.trainpref:
make_dataset(args.trainpref, 'train', lang)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(',')):
outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
make_dataset(validpref, outprefix, lang)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(',')):
outprefix = 'test{}'.format(k) if k > 0 else 'test'
make_dataset(testpref, outprefix, lang)
make_all(args.source_lang)
if target:
make_all(args.target_lang)
print('| Wrote preprocessed data to {}'.format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
src_dict = dictionary.Dictionary.load(dict_path(args.source_lang))
tgt_dict = dictionary.Dictionary.load(dict_path(args.target_lang))
freq_map = {}
with open(args.alignfile, 'r') as align_file:
with open(src_file_name, 'r') as src_file:
with open(tgt_file_name, 'r') as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split('-')), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map:
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(os.path.join(args.destdir, 'alignment.{}-{}.txt'.format(
args.source_lang, args.target_lang)), 'w') as f:
for k, v in align_dict.items():
print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)
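# --- Editor's note: a sketch of the output naming scheme produced by
# dataset_dest_path() above, with hypothetical arguments.
def _demo_dataset_dest_path(destdir='data-bin', output_prefix='train',
                            source_lang='en', target_lang='de', lang='en'):
    lang_part = f'.{source_lang}-{target_lang}.{lang}' if lang is not None else ''
    return f'{destdir}/{output_prefix}{lang_part}.bin'  # -> 'data-bin/train.en-de.en.bin'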
if __name__ == '__main__':
parser = get_parser()
ARGS = parser.parse_args()
main(ARGS)
| DeepLearningExamples-master | PyTorch/Translation/Transformer/preprocess.py |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Extension
from torch.utils.cpp_extension import BuildExtension, CUDAExtension, CppExtension
import sys
if sys.version_info < (3,):
sys.exit('Sorry, Python 3 is required for fairseq.')
with open('README.md') as f:
readme = f.read()
with open('LICENSE') as f:
license = f.read()
with open('requirements.txt') as f:
reqs = f.read()
extra_compile_args = {'cxx' : ['-O2']}
extra_compile_args['nvcc'] = ['-O3',
'-I./cutlass/',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_70,code=compute_70',
'-gencode', 'arch=compute_80,code=sm_80',
'-gencode', 'arch=compute_80,code=compute_80',
]
strided_batched_gemm = CUDAExtension(
name='strided_batched_gemm',
sources=['fairseq/modules/strided_batched_gemm/strided_batched_gemm.cpp', 'fairseq/modules/strided_batched_gemm/strided_batched_gemm_cuda.cu'],
extra_compile_args=extra_compile_args
)
batch_utils = CppExtension(
name='fairseq.data.batch_C',
sources=['fairseq/data/csrc/make_batches.cpp'],
extra_compile_args={
'cxx': ['-O2',],
}
)
setup(
name='fairseq',
version='0.5.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
long_description=readme,
license=license,
install_requires=reqs.strip().split('\n'),
packages=find_packages(),
ext_modules=[strided_batched_gemm, batch_utils],
cmdclass={
'build_ext': BuildExtension.with_options(use_ninja=False)
},
test_suite='tests',
)
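# --- Editor's note: a sketch of how the '-gencode' pairs above are built;
# the architecture list is an assumption, extend it for other GPUs as needed.
def _demo_gencode_flags(archs=(70, 80)):
    flags = []
    for arch in archs:
        flags += ['-gencode', f'arch=compute_{arch},code=sm_{arch}',
                  '-gencode', f'arch=compute_{arch},code=compute_{arch}']
    return flags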
| DeepLearningExamples-master | PyTorch/Translation/Transformer/setup.py |
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import os
import math
import time
import ctypes
from copy import deepcopy
import torch
import sacrebleu
import dllogger as DLLogger
from fairseq import data, distributed_utils, options, utils, tokenizer
from fairseq.ddp_trainer import DDPTrainer
from fairseq.meters import StopwatchMeter
from fairseq.sequence_generator import SequenceGenerator
from fairseq.data import data_utils, load_dataset_splits
from fairseq.models import build_model
from fairseq.log_helper import setup_logger, reset_perf_meters
def main(args):
print(args)
setup_logger(args)
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
torch.cuda.set_device(args.local_rank)
if args.distributed_world_size > 1:
assert torch.distributed.is_initialized()
torch.distributed.broadcast(torch.tensor([1], device="cuda"), 0)
torch.cuda.synchronize()
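# raise the L2 fetch granularity limit (cudaLimit enum 0x05,
# cudaLimitMaxL2FetchGranularity) to 128 bytes and read it back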
pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int))
ctypes.CDLL('libcudart.so').cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
ctypes.CDLL('libcudart.so').cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
torch.manual_seed(args.seed)
src_dict, tgt_dict = data_utils.load_dictionaries(args)
add_extra_items_to_checkpoint({'src_dict': src_dict, 'tgt_dict': tgt_dict})
datasets = load_dataset_splits(args, ['train', 'valid', 'test'], src_dict, tgt_dict)
model = build_model(args)
print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))
# Build trainer
if torch.cuda.get_device_capability(0)[0] >= 7 and not args.amp:
print('| NOTICE: your device may support faster training with --amp')
trainer = DDPTrainer(args, model)
print('| model {}, criterion {}'.format(args.arch, trainer.criterion.__class__.__name__))
print('| training on {} GPUs'.format(args.distributed_world_size))
print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
epoch_itr = data.EpochBatchIterator(
dataset=datasets[args.train_subset],
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=args.max_positions,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
)
# Load the latest checkpoint if one is available
load_checkpoint(args, trainer, epoch_itr)
# Send a dummy batch to warm the caching allocator
dummy_batch = data_utils.get_dummy_batch(args.max_tokens, src_dict, tgt_dict)
trainer.dummy_train_step(dummy_batch)
# Sanity check
if args.do_sanity_check:
print('Performing sanity check...')
sanity_score = score(args, trainer, datasets['test'], src_dict, tgt_dict, 'test.raw.de')
DLLogger.log(step='SANITY_CHECK', data={'sanity_check_score': sanity_score}, verbosity=1)
# Train until the learning rate gets too small or model reaches target score
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
tgt_bleu = args.target_bleu or math.inf
current_bleu = 0.0
best_bleu = -1.0
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_losses = [None]
valid_subsets = args.valid_subset.split(',')
run_summary = {'loss': float('inf'),
'val_loss': float('inf'),
'speed': 0,
'accuracy': 0}
while lr >= args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update and current_bleu < tgt_bleu:
DLLogger.log(step=trainer.get_num_updates()+1, data={'epoch': epoch_itr.epoch}, verbosity=0)
# train for one epoch
train(args, trainer, epoch_itr)
DLLogger.log(step=trainer.get_num_updates(), data={'walltime': train_meter.sum}, verbosity=1)
DLLogger.log(step=trainer.get_num_updates(),
data={'avg_epoch_loss': trainer.avg_loss_meter.avg}, verbosity=1)
if epoch_itr.epoch % args.validate_interval == 0:
valid_losses = validate(args, trainer, datasets, valid_subsets)
valid_bleu = score(args, trainer, datasets[valid_subsets[0]], src_dict, tgt_dict, 'valid.raw.de')
DLLogger.log(step=trainer.get_num_updates(),
data={'val_loss': valid_losses[0], 'val_bleu': valid_bleu}, verbosity=1)
# Eval BLEU score
if args.online_eval or tgt_bleu != math.inf:
current_bleu = score(args, trainer, datasets[args.gen_subset], src_dict, tgt_dict, 'test.raw.de')
DLLogger.log(step=trainer.get_num_updates(), data={'test_bleu': current_bleu}, verbosity=1)
best_bleu = max(best_bleu, current_bleu)
run_summary['val_loss'] = min(run_summary['val_loss'], valid_losses[0])
run_summary['accuracy'] = best_bleu if best_bleu >= 0 else valid_bleu
run_summary['loss'] = valid_losses[0]
run_summary['speed'] = trainer.throughput_meter.u_avg
# Only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
train_meter.stop()
run_summary['walltime'] = train_meter.sum
DLLogger.log(step=(), data=run_summary, verbosity=0)
print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, epoch_itr):
"""Train the model for one epoch."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr()
# update parameters every N batches
if epoch_itr.epoch <= len(args.update_freq):
update_freq = args.update_freq[epoch_itr.epoch - 1]
else:
update_freq = args.update_freq[-1]
max_update = args.max_update or math.inf
num_batches = len(epoch_itr)
torch.cuda.synchronize()
begin = time.time()
# reset meters
DLLogger.flush()
trainer.get_throughput_meter().reset()
for i, sample in enumerate(itr):
if i < num_batches - 1 and (i + 1) % update_freq > 0:
# buffer updates according to --update-freq
trainer.train_step(sample, update_params=False, last_step=(i == len(itr)-1))
continue
else:
trainer.train_step(sample, update_params=True, last_step=(i == len(itr)-1))
# ignore the first mini-batch in words-per-second calculation
if i == 0:
trainer.get_throughput_meter().reset()
reset_perf_meters()
if (i+1) % args.log_interval == 0:
DLLogger.flush()
if trainer.get_num_updates() >= max_update:
break
torch.cuda.synchronize()
print('Epoch time:', time.time() - begin)
# Print epoch stats and reset training meters
DLLogger.log(step=trainer.get_num_updates(),
data={'speed': trainer.get_throughput_meter().avg}, verbosity=0)
DLLogger.flush()
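# --- Editor's note: a minimal sketch of the --update-freq accumulation logic
# in train() above: with update_freq=N, N-1 batches only accumulate gradients
# and every N-th (and the last) batch triggers a parameter update.
def _demo_update_schedule(num_batches=7, update_freq=3):
    steps = []
    for i in range(num_batches):
        update = not (i < num_batches - 1 and (i + 1) % update_freq > 0)
        steps.append(update)
    return steps  # e.g. [False, False, True, False, False, True, True]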
def validate(args, trainer, datasets, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
valid_losses = []
for subset in subsets:
if len(subsets) > 1:
print('Validating on \'{}\' subset'.format(subset))
# Initialize data iterator
itr = data.EpochBatchIterator(
dataset=datasets[subset],
max_tokens=args.max_tokens,
max_sentences=args.max_sentences_valid,
max_positions=args.max_positions,
required_batch_size_multiple=8,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
).next_epoch_itr(shuffle=False)
# reset validation loss meters
DLLogger.flush()
subset_losses = []
for sample in itr:
loss = trainer.valid_step(sample)
subset_losses.append(loss)
subset_loss = sum(subset_losses)/len(subset_losses)
DLLogger.flush()
valid_losses.append(subset_loss)
print(f'Validation loss on subset {subset}: {subset_loss}')
return valid_losses
def score(args, trainer, dataset, src_dict, tgt_dict, ref_file):
torch.cuda.synchronize()
begin = time.time()
src_dict = deepcopy(src_dict)  # Deep copies are necessary: generating translations
tgt_dict = deepcopy(tgt_dict)  # mutates the target dictionary, which would otherwise interfere with the rest of training
model = trainer.get_model()
# Initialize data iterator
itr = data.EpochBatchIterator(
dataset=dataset,
max_tokens=None,
max_sentences=max(8, min(math.ceil(1024/args.distributed_world_size), 128)),
max_positions=args.max_positions,
required_batch_size_multiple=8,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
translator = SequenceGenerator(
[model],
tgt_dict.get_metadata(),
maxlen=args.max_target_positions - 1, # do not include EOS token
beam_size=args.beam,
stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen, unk_penalty=args.unkpen,
sampling=args.sampling, sampling_topk=args.sampling_topk, minlen=args.min_len,
use_amp=args.amp,
)
# Generate and compute BLEU
predictions = []
translations = translator.generate_batched_itr(
itr, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b,
cuda=True, timer=gen_timer, prefix_size=args.prefix_size,
)
for sample_id, src_tokens, _, hypos in translations:
# Process input and ground truth
src_str = src_dict.string(src_tokens, args.remove_bpe)
# Process top predictions
for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]):
_, hypo_str, _ = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=None,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe
)
# Score only the top hypothesis
if i == 0:
hypo_str = tokenizer.Tokenizer.detokenize(hypo_str, 'de')
predictions.append('{}\t{}'.format(sample_id, hypo_str))
if args.distributed_world_size > 1:
predictions = _all_gather_predictions(predictions)
with open(os.path.join(args.data, ref_file), 'r') as reference:
refs = [reference.readlines()]
# reducing indexed predictions as strings is more memory efficient than reducing tuples
predictions = [tuple(item.split('\t')) for item in predictions]
predictions = [(int(item[0]), item[1]) for item in predictions]
predictions.sort(key=lambda tup: tup[0])
predictions = [hypo[1] + ('\n' if hypo[1][-1] != '\n' else '') for hypo in predictions]
sacrebleu_score = sacrebleu.corpus_bleu(predictions, refs, lowercase=not args.test_cased_bleu).score
if args.save_predictions:
os.makedirs(os.path.join(args.save_dir, 'predictions'), exist_ok=True)
fname = ref_file + '.pred.update_{}'.format(trainer.get_num_updates())
save_path = os.path.join(args.save_dir, 'predictions', fname)
with open(save_path, 'w') as f:
f.write(''.join(predictions))
DLLogger.log(step=trainer.get_num_updates(),
data={'inference tokens/s': float(args.distributed_world_size) / gen_timer.avg},
verbosity=0)
DLLogger.flush()
if gen_timer.sum != 0:
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
len(predictions),
gen_timer.n,
gen_timer.sum,
len(predictions) / gen_timer.sum,
float(args.distributed_world_size)/gen_timer.avg
))
torch.cuda.synchronize()
print('| Eval completed in: {:.2f}s | {}CASED BLEU {:.2f}'.format(
time.time()-begin,
'' if args.test_cased_bleu else 'UN',
sacrebleu_score
))
return sacrebleu_score
def _all_gather_predictions(predictions):
ready = False
all_ready = False
reduced_predictions = []
max_size = 65000
while not all_ready:
lst_len = len(predictions)
size = 2000 # headroom for pickle and framing overhead
n = 0
while n < lst_len:
str_len = len(predictions[n].encode('utf8')) + 8 # per string pickle overhead
if size + str_len >= max_size:
break
size += str_len
n += 1
chunk = predictions[:n]
predictions = predictions[n:]
if not predictions:
ready = True
chunk = (ready, chunk)
torch.cuda.synchronize()
gathered = distributed_utils.all_gather_list(chunk, max_size=max_size)
torch.cuda.synchronize()
reduced_predictions += [t[1] for t in gathered]
all_ready = all([t[0] for t in gathered])
reduced_predictions = [item for sublist in reduced_predictions for item in sublist]
return reduced_predictions
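# --- Editor's note: a sketch of the size-bounded chunking used by
# _all_gather_predictions() above, with toy strings and a tiny size limit;
# the max(n, 1) guard is an editor addition (the real code relies on headroom).
def _demo_chunk_by_size(items=('aa', 'bbbb', 'cc', 'dddddd'), max_size=20, overhead=8):
    items = list(items)
    chunks = []
    while items:
        size, n = 0, 0
        while n < len(items):
            str_len = len(items[n].encode('utf8')) + overhead
            if size + str_len >= max_size:
                break
            size += str_len
            n += 1
        n = max(n, 1)  # always make progress, even for oversized items
        chunks.append(items[:n])
        items = items[n:]
    return chunks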
def save_checkpoint(args, trainer, epoch_itr, val_loss):
if epoch_itr.epoch % args.save_interval != 0:
return
if args.no_save or not distributed_utils.is_master(args):
return
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds['checkpoint{}.pt'.format(epoch)] = end_of_epoch and not args.no_epoch_checkpoints
checkpoint_conds['checkpoint_best.pt'] = (
val_loss is not None and
(not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
)
checkpoint_conds['checkpoint_last.pt'] = True # keep this last so that it's a symlink
prev_best = getattr(save_checkpoint, 'best', val_loss)
if val_loss is not None:
save_checkpoint.best = min(val_loss, prev_best)
extra_state = {
'best': save_checkpoint.best,
'train_iterator': epoch_itr.state_dict(),
'val_loss': val_loss,
}
extra_state.update(save_checkpoint.extra_items)
checkpoints = [os.path.join(args.save_dir, 'checkpoints', fn)
for fn, cond in checkpoint_conds.items() if cond]
if checkpoints:
for cp in checkpoints:
trainer.save_checkpoint(cp, extra_state)
def add_extra_items_to_checkpoint(items):
if not hasattr(save_checkpoint, 'extra_items'):
save_checkpoint.extra_items = {}
save_checkpoint.extra_items.update(items)
def load_checkpoint(args, trainer, epoch_itr):
"""Load a checkpoint and replay dataloader to match."""
os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)
checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)
if os.path.isfile(checkpoint_path):
extra_state = trainer.load_checkpoint(checkpoint_path)
if extra_state is not None:
# replay train iterator to match checkpoint
epoch_itr.load_state_dict(extra_state['train_iterator'])
print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))
trainer.lr_step(epoch_itr.epoch)
trainer.lr_step_update(trainer.get_num_updates())
if 'best' in extra_state:
save_checkpoint.best = extra_state['best']
if __name__ == '__main__':
parser = options.get_training_parser()
ARGS = options.parse_args_and_arch(parser)
distributed_utils.distributed_init(ARGS)
main(ARGS)
| DeepLearningExamples-master | PyTorch/Translation/Transformer/train.py |
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import time
from collections import namedtuple
import numpy as np
import torch
from torch.serialization import default_restore_location
from fairseq import data, options, tokenizer, utils, log_helper
from fairseq.sequence_generator import SequenceGenerator
from fairseq.meters import StopwatchMeter
from fairseq.models.transformer import TransformerModel
import dllogger
from apply_bpe import BPE
Batch = namedtuple('Batch', 'srcs tokens lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def load_ensemble_for_inference(filenames):
"""Load an ensemble of models for inference.
Restores the model architecture and weights stored in each checkpoint
and returns the models together with the source and target dictionaries.
"""
# load model architectures and weights
states = []
for filename in filenames:
if not os.path.exists(filename):
raise IOError('Model file not found: {}'.format(filename))
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
states.append(state)
ensemble = []
for state in states:
args = state['args']
# build model for ensemble
model = TransformerModel.build_model(args)
model.load_state_dict(state['model'], strict=True)
ensemble.append(model)
src_dict = states[0]['extra_state']['src_dict']
tgt_dict = states[0]['extra_state']['tgt_dict']
return ensemble, args, src_dict, tgt_dict
def buffered_read(buffer_size, data_descriptor):
buffer = []
for src_str in data_descriptor:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if buffer:
yield buffer
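# --- Editor's note: a minimal usage sketch of buffered_read() above; any
# iterable of lines (here an in-memory list) works as the data descriptor.
def _demo_buffered_read():
    lines = ['first sentence\n', 'second sentence\n', 'third sentence\n']
    return list(buffered_read(buffer_size=2, data_descriptor=iter(lines)))
    # -> [['first sentence', 'second sentence'], ['third sentence']]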
def make_batches(lines, args, src_dict, max_positions, bpe=None):
tokens = [
tokenizer.Tokenizer.tokenize(
src_str,
src_dict,
tokenize=tokenizer.tokenize_en,
add_if_not_exist=False,
bpe=bpe
).long()
for src_str in lines
]
lengths = np.array([t.numel() for t in tokens])
itr = data.EpochBatchIterator(
dataset=data.LanguagePairDataset(tokens, lengths, src_dict),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
srcs=[lines[i] for i in batch['id']],
tokens=batch['net_input']['src_tokens'],
lengths=batch['net_input']['src_lengths'],
), batch['id']
def setup_logger(args):
if not args.no_dllogger:
dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=1, filename=args.stat_file)])
for k, v in vars(args).items():
dllogger.log(step='PARAMETER', data={k:v}, verbosity=0)
container_setup_info = log_helper.get_framework_env_vars()
dllogger.log(step='PARAMETER', data=container_setup_info, verbosity=0)
dllogger.metadata('throughput',
{'unit':'tokens/s', 'format':':.3f', 'GOAL':'MAXIMIZE', 'STAGE':'INFER'})
else:
dllogger.init(backends=[])
def main(args):
setup_logger(args)
args.interactive = sys.stdin.isatty() and not args.file # Just makes the code more understandable
if args.file:
data_descriptor = open(args.file, 'r')
else:
data_descriptor = sys.stdin
if args.interactive:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
if args.buffer_size > 50000:
print("WARNING: To prevent memory exhaustion buffer size is set to 50000", file=sys.stderr)
args.buffer_size = 50000
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
'--max-sentences/--batch-size cannot be larger than --buffer-size'
print(args, file=sys.stderr)
use_cuda = torch.cuda.is_available() and not args.cpu
torch.cuda.synchronize()
processing_start = time.time()
# Load ensemble
print('| loading model(s) from {}'.format(args.path), file=sys.stderr)
model_paths = args.path.split(':')
models, model_args, src_dict, tgt_dict = load_ensemble_for_inference(model_paths)
if args.fp16:
for model in models:
model.half()
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(need_attn=args.print_alignment)
# Initialize generator
translator = SequenceGenerator(
models,
tgt_dict.get_metadata(),
maxlen=args.max_target_positions,
beam_size=args.beam,
stop_early=(not args.no_early_stop),
normalize_scores=(not args.unnormalized),
len_penalty=args.lenpen,
unk_penalty=args.unkpen,
sampling=args.sampling,
sampling_topk=args.sampling_topk,
minlen=args.min_len,
sampling_temperature=args.sampling_temperature
)
if use_cuda:
translator.cuda()
# Load BPE codes file
bpe = None
if args.bpe_codes:
codes = open(args.bpe_codes, 'r')
bpe = BPE(codes)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
def make_result(src_str, hypos):
result = Translation(
src_str=src_str,
hypos=[],
pos_scores=[],
alignments=[],
)
# Process top predictions
for hypo in hypos[:min(len(hypos), args.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None,
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
hypo_str = tokenizer.Tokenizer.detokenize(hypo_str, 'de').strip()
result.hypos.append((hypo['score'], hypo_str))
result.pos_scores.append('P\t' + ' '.join(f'{x:.4f}' for x in hypo['positional_scores'].tolist()))
result.alignments.append('A\t' + ' '.join(str(utils.item(x)) for x in alignment)
if args.print_alignment else None
)
return result
gen_timer = StopwatchMeter()
def process_batch(batch):
tokens = batch.tokens
lengths = batch.lengths
if use_cuda:
tokens = tokens.cuda()
lengths = lengths.cuda()
torch.cuda.synchronize()
translation_start = time.time()
gen_timer.start()
translations = translator.generate(
tokens,
lengths,
maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b),
)
gen_timer.stop(sum(len(h[0]['tokens']) for h in translations))
torch.cuda.synchronize()
dllogger.log(step='infer', data={'latency': time.time() - translation_start})
return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)]
if args.interactive:
print('| Type the input sentence and press return:')
for inputs in buffered_read(args.buffer_size, data_descriptor):
indices = []
results = []
for batch, batch_indices in make_batches(inputs, args, src_dict, args.max_positions, bpe):
indices.extend(batch_indices)
results += process_batch(batch)
for i in np.argsort(indices):
result = results[i]
print(result.src_str, file=sys.stderr)
for hypo, pos_scores, align in zip(result.hypos, result.pos_scores, result.alignments):
print(f'Score {hypo[0]}', file=sys.stderr)
print(hypo[1])
if align is not None:
print(align, file=sys.stderr)
if args.file:
data_descriptor.close()
torch.cuda.synchronize()
log_dict = {
'throughput': 1./gen_timer.avg,
'latency_avg': sum(gen_timer.intervals)/len(gen_timer.intervals),
'latency_p90': gen_timer.p(90),
'latency_p95': gen_timer.p(95),
'latency_p99': gen_timer.p(99),
'total_inference_time': gen_timer.sum,
'total_run_time': time.time() - processing_start,
}
print('Translation time: {} s'.format(log_dict['total_inference_time']),
file=sys.stderr)
print('Model throughput (beam {}): {} tokens/s'.format(args.beam, log_dict['throughput']),
file=sys.stderr)
print('Latency:\n\tAverage {:.3f}s\n\tp90 {:.3f}s\n\tp95 {:.3f}s\n\tp99 {:.3f}s'.format(
log_dict['latency_avg'], log_dict['latency_p90'], log_dict['latency_p95'], log_dict['latency_p99']),
file=sys.stderr)
print('End to end time: {} s'.format(log_dict['total_run_time']), file=sys.stderr)
dllogger.log(step=(), data=log_dict)
if __name__ == '__main__':
parser = options.get_inference_parser()
parser.add_argument('--no-dllogger', action='store_true')
ARGS = options.parse_args_and_arch(parser)
main(ARGS)
| DeepLearningExamples-master | PyTorch/Translation/Transformer/inference.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import torch
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
from fairseq.criterions import CRITERION_REGISTRY
from fairseq.optim import OPTIMIZER_REGISTRY
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
def get_training_parser():
parser = get_parser('Trainer')
add_dataset_args(parser, train=True, gen=True)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
add_inference_args(parser)
add_perf_args(parser)
return parser
def get_inference_parser():
parser = get_parser('Generation')
add_dataset_args(parser, gen=True)
add_inference_args(parser)
add_perf_args(parser)
return parser
def parse_args_and_arch(parser, input_args=None, parse_known=False):
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, 'arch'):
model_specific_group = parser.add_argument_group(
'Model-specific configuration',
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
if hasattr(args, 'optimizer'):
OPTIMIZER_REGISTRY[args.optimizer].add_args(parser)
if hasattr(args, 'lr_scheduler'):
LR_SCHEDULER_REGISTRY[args.lr_scheduler].add_args(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
args.max_positions = (args.max_source_positions, args.max_target_positions)
if hasattr(args, 'target_bleu') and (args.online_eval or args.target_bleu) and not args.remove_bpe:
args.remove_bpe = '@@ '
# Apply architecture configuration.
if hasattr(args, 'arch'):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
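# --- Editor's note: a self-contained sketch of the two-pass parsing pattern
# used by parse_args_and_arch() above, with a made-up '--arch' choice and a
# hypothetical '--demo-layers' architecture-specific flag.
def _demo_two_pass_parsing(argv=('--arch', 'demo', '--demo-layers', '4')):
    parser = argparse.ArgumentParser()
    parser.add_argument('--arch', choices=['demo'])
    # first pass: discover the architecture, ignoring unknown args
    args, _ = parser.parse_known_args(list(argv))
    if args.arch == 'demo':
        # second pass: register arch-specific args, then parse everything
        parser.add_argument('--demo-layers', type=int, default=6)
    return parser.parse_args(list(argv))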
def get_parser(desc):
parser = argparse.ArgumentParser(
description='Facebook AI Research Sequence-to-Sequence Toolkit -- ' + desc)
parser.add_argument('--log-interval', type=int, default=500, metavar='N',
help='print aggregated stats and flush json log every N iterations')
parser.add_argument('--seed', default=1, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--amp', action='store_true',
help='use Automatic Mixed Precision')
parser.add_argument('--stat-file', type=str, default='run_log.json',
help='Name of the file containing DLLogger output')
parser.add_argument('--save-dir', metavar='DIR', default='results',
help='path to save checkpoints and logs')
parser.add_argument('--do-sanity-check', action='store_true',
help='Perform evaluation on test set before running the training')
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad-source', default=True, type=bool, metavar='BOOL',
help='pad the source on the left (default: True)')
parser.add_argument('--left-pad-target', default=False, type=bool, metavar='BOOL',
help='pad the target on the left (default: False)')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--pad-sequence', default=1, type=int, metavar='N',
help='Pad sequences to a multiple of N')
if train:
parser.add_argument('data', metavar='DIR', help='path to data directory')
group.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (train, valid, valid1, test, test1)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
return group
def add_optimization_args(parser):
group = parser.add_argument_group('Optimization')
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--target-bleu', default=0.0, type=float, metavar='TARGET',
help='force stop training after reaching target bleu')
group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--update-freq', default=[1], nargs='+', type=int,
help='update parameters every N_i batches, when in epoch i')
# Optimizer definitions can be found under fairseq/optim/
group.add_argument('--optimizer', default='nag', metavar='OPT',
choices=OPTIMIZER_REGISTRY.keys(),
help='optimizer: {} (default: nag)'.format(', '.join(OPTIMIZER_REGISTRY.keys())))
group.add_argument('--lr', '--learning-rate', default=[0.25], nargs='+', type=float,
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--momentum', default=0.99, type=float, metavar='M',
help='momentum factor')
group.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
# Learning rate schedulers can be found under fairseq/optim/lr_scheduler/
group.add_argument('--lr-scheduler', default='reduce_lr_on_plateau',
help='learning rate scheduler: {} (default: reduce_lr_on_plateau)'.format(
', '.join(LR_SCHEDULER_REGISTRY.keys())))
group.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
help='learning rate shrink factor for annealing, lr_new = (lr * lr_shrink)')
group.add_argument('--min-lr', default=1e-5, type=float, metavar='LR',
help='minimum learning rate')
# Criterion args
parser.add_argument('--label-smoothing', default=0., type=float, metavar='D',
help='epsilon for label smoothing, 0 means no label smoothing')
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group('Checkpointing')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename in save-dir from which to load checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
return group
def add_common_eval_args(group):
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--file', metavar='FILE', default=None, type=str,
help='path to a file with input data for inference')
group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring')
group.add_argument('--cpu', action='store_true', help='generate on CPU')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
def add_inference_args(parser):
group = parser.add_argument_group('Generation')
add_common_eval_args(group)
group.add_argument('--beam', default=4, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--no-early-stop', action='store_true',
help=('continue searching even after finalizing k=beam '
'hypotheses; this is more correct, but increases '
'generation time by 50%%'))
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-temperature', default=1, type=float, metavar='N',
help='temperature for random sampling')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--online-eval', action='store_true',
help='score model at the end of epoch')
group.add_argument('--save-predictions', action='store_true',
help='Save predictions produced with online evaluation')
group.add_argument('--test-cased-bleu', action='store_true',
help='Use cased bleu for online eval')
group.add_argument('--bpe-codes', default=None, type=str, metavar='CODES',
help='file with bpe codes')
group.add_argument('--buffer-size', default=64, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
group.add_argument('--fp16', action='store_true', help='use fp16 precision')
return group
def add_model_args(parser):
group = parser.add_argument_group('Model configuration')
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
group.add_argument(
'--arch', '-a', default='fconv', metavar='ARCH', required=True,
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture: {} (default: fconv)'.format(
', '.join(ARCH_MODEL_REGISTRY.keys())),
)
# Criterion definitions can be found under fairseq/criterions/
group.add_argument(
'--criterion', default='cross_entropy', metavar='CRIT',
choices=CRITERION_REGISTRY.keys(),
help='training criterion: {} (default: cross_entropy)'.format(
', '.join(CRITERION_REGISTRY.keys())),
)
return group
def add_perf_args(parser):
group = parser.add_argument_group('Performance')
group.add_argument('--fuse-dropout-add', action='store_true',
help='Fuse dropout and residual adds.')
group.add_argument('--fuse-relu-dropout', action='store_true',
help='Fuse Relu and Dropout.')
group.add_argument('--fuse-layer-norm', action='store_true',
help='Use APEX\'s FusedLayerNorm instead of torch.nn.LayerNorm')
return group
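# Illustrative sketch (not part of the original file): the add_*_args helpers
# above each attach one argument group to a shared ArgumentParser, so a caller
# can compose exactly the groups it needs. The function name and parser
# description below are assumptions made for the example.
def _example_build_parser():
    import argparse
    parser = argparse.ArgumentParser(description='example: composing option groups')
    add_optimization_args(parser)
    add_checkpoint_args(parser)
    add_inference_args(parser)
    add_model_args(parser)
    add_perf_args(parser)
    return parser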
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/options.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import time
import torch
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
torch.cuda.synchronize()
self.start = time.time()
self.n = 0
self.last_update = time.time()
def update(self, val=1):
self.n += val
torch.cuda.synchronize()
self.last_update = time.time()
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
torch.cuda.synchronize()
return self.init + (time.time() - self.start)
@property
def u_avg(self):
return self.n / (self.last_update - self.start)
class StopwatchMeter(object):
"""Computes the sum/avg duration of some event in seconds"""
    def __init__(self):
        self.reset()
def start(self):
torch.cuda.synchronize()
self.start_time = time.time()
def stop(self, n=1):
torch.cuda.synchronize()
if self.start_time is not None:
delta = time.time() - self.start_time
self.intervals.append(delta)
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
self.intervals = []
@property
def avg(self):
return self.sum / self.n
    def p(self, i):
        """Return the i-th percentile of the recorded interval durations."""
        assert 0 <= i <= 100
        idx = min(int(len(self.intervals) * i / 100), len(self.intervals) - 1)
        return sorted(self.intervals)[idx]
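# Illustrative usage sketch (not part of the original file): drives the meters
# above from a toy loop. Assumes a CUDA device, since these meters call
# torch.cuda.synchronize(); the loop bounds and values are invented.
def _example_meters():
    loss_meter = AverageMeter()
    batch_timer = StopwatchMeter()
    for step in range(5):
        batch_timer.start()
        loss_meter.update(val=1.0 / (step + 1), n=32)  # mean loss over a 32-sample batch
        batch_timer.stop()
    # size-weighted running average of the loss, and the median step duration
    return loss_meter.avg, batch_timer.p(50)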
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/meters.py |
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
class CrossEntropyCriterion(_Loss):
def __init__(self, args):
super().__init__()
self.padding_idx = args.padding_idx
def forward(self, norm_probs, target, reduce=True):
"""Compute the loss for the given sample.
"""
lprobs = norm_probs.view(-1, norm_probs.size(-1))
target = target.view(-1)
        loss = F.nll_loss(lprobs, target, ignore_index=self.padding_idx,
                          reduction='sum' if reduce else 'none')
return loss
class LabelSmoothedCrossEntropyCriterion(_Loss):
def __init__(self, args):
super().__init__()
self.eps = args.label_smoothing
self.padding_idx = args.padding_idx
def forward(self, norm_probs, target, reduce=True):
"""Compute the loss for the given sample.
"""
target = target.view(-1, 1)
lprobs = norm_probs.view(-1, norm_probs.size(-1))
non_pad_mask = target.ne(self.padding_idx)
nll_loss = -lprobs.gather(dim=-1, index=target)[non_pad_mask]
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)[non_pad_mask]
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = self.eps / lprobs.size(-1)
loss = (1. - self.eps) * nll_loss + eps_i * smooth_loss
return loss
CRITERION_REGISTRY = {
'label_smoothed_cross_entropy' : LabelSmoothedCrossEntropyCriterion,
'cross_entropy' : CrossEntropyCriterion,
}
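# Illustrative sketch (not part of the original file): computes the
# label-smoothed loss on dummy data. The vocabulary size, batch shape and
# padding index below are assumptions chosen for the example.
def _example_label_smoothed_loss():
    import argparse
    import torch
    args = argparse.Namespace(label_smoothing=0.1, padding_idx=1)
    criterion = LabelSmoothedCrossEntropyCriterion(args)
    logits = torch.randn(2, 3, 8)            # (batch, time, vocab)
    norm_probs = F.log_softmax(logits, dim=-1)
    target = torch.randint(2, 8, (2, 3))     # sampled away from the pad index
    return criterion(norm_probs, target)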
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/criterions.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .multiprocessing_pdb import pdb
__all__ = ['pdb']
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
import torch.nn.functional as F
from torch.cuda import amp
from fairseq import utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
self, models, vocab_meta, maxlen, beam_size=1, minlen=1, stop_early=True,
normalize_scores=True, len_penalty=1, unk_penalty=0, retain_dropout=False,
sampling=False, sampling_topk=-1, sampling_temperature=1, use_amp=False
):
"""Generates translations of a given source sentence.
Args:
min/maxlen: The length of the generated output will be bounded by
minlen and maxlen (not including the end-of-sentence marker).
stop_early: Stop generation immediately after we finalize beam_size
hypotheses, even though longer hypotheses might have better
normalized scores.
normalize_scores: Normalize scores by the length of the output.
"""
self.models = models
self.pad = vocab_meta['pad']
self.unk = vocab_meta['unk']
self.eos = vocab_meta['eos']
self.vocab_size = vocab_meta['len']
self.beam_size = beam_size
self.minlen = minlen
#max_decoder_len = min(m.max_decoder_positions() for m in self.models)
#max_decoder_len -= 1 # we define maxlen not including the EOS marker
#self.maxlen = max_decoder_len if maxlen is None else min(maxlen, max_decoder_len)
self.maxlen = maxlen
self.stop_early = stop_early
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.sampling = sampling
self.sampling_topk = sampling_topk
self.sampling_temperature = sampling_temperature
self.use_amp = use_amp
def cuda(self):
for model in self.models:
model.cuda()
return self
def generate_batched_itr(
self, data_itr, beam_size=None, maxlen_a=0.0, maxlen_b=None,
cuda=False, timer=None, prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
if maxlen_b is None:
maxlen_b = self.maxlen
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if 'net_input' not in s:
continue
input = s['net_input']
srclen = input['src_tokens'].size(1)
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
input['src_tokens'],
input['src_lengths'],
beam_size=beam_size,
maxlen=int(maxlen_a * srclen + maxlen_b),
prefix_tokens=s['target'][:, :prefix_size] if prefix_size > 0 else None,
)
if timer is not None:
timer.stop(sum(len(h[0]['tokens']) for h in hypos))
for i, id in enumerate(s['id'].data):
# remove padding
src = utils.strip_pad(input['src_tokens'].data[i, :], self.pad)
ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
yield id, src, ref, hypos[i]
def generate(self, src_tokens, src_lengths, beam_size=None, maxlen=None, prefix_tokens=None):
"""Generate a batch of translations."""
with torch.no_grad():
with amp.autocast(enabled=self.use_amp):
return self._generate(src_tokens, src_lengths, beam_size, maxlen, prefix_tokens)
def _generate(self, src_tokens, src_lengths, beam_size=None, maxlen=None, prefix_tokens=None):
bsz, srclen = src_tokens.size()
maxlen = min(maxlen, self.maxlen) if maxlen is not None else self.maxlen
# the max beam size is the dictionary size - 1, since we never select pad
beam_size = beam_size if beam_size is not None else self.beam_size
beam_size = min(beam_size, self.vocab_size - 1)
encoder_outs = []
incremental_states = {}
for model in self.models:
if not self.retain_dropout:
model.eval()
if isinstance(model.decoder, FairseqIncrementalDecoder):
incremental_states[model] = {}
else:
incremental_states[model] = None
# compute the encoder output for each beam
encoder_out = model.encoder(
src_tokens.repeat(1, beam_size).view(-1, srclen),
src_lengths.expand(beam_size, src_lengths.numel()).t().contiguous().view(-1),
)
encoder_outs.append(encoder_out)
# initialize buffers
scores = src_tokens.data.new(bsz * beam_size, maxlen + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.data.new(bsz * beam_size, maxlen + 2).fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos
attn, attn_buf = None, None
nonpad_idxs = None
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
worst_finalized = [{'idx': None, 'score': -math.inf} for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfinalized_scores=None):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size:
if self.stop_early or step == maxlen or unfinalized_scores is None:
return True
# stop if the best unfinalized score is worse than the worst
# finalized one
best_unfinalized_score = unfinalized_scores[sent].max()
if self.normalize_scores:
best_unfinalized_score /= maxlen ** self.len_penalty
if worst_finalized[sent]['score'] >= best_unfinalized_score:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
unfinalized_scores: A vector containing scores for all
unfinalized hypotheses
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step + 2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i][nonpad_idxs[sent]]
_, alignment = hypo_attn.max(dim=0)
else:
hypo_attn = None
alignment = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': alignment,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
elif not self.stop_early and score > worst_finalized[sent]['score']:
# replace worst hypo for this sentence with new/better one
worst_idx = worst_finalized[sent]['idx']
if worst_idx is not None:
finalized[sent][worst_idx] = get_hypo()
# find new worst finalized hypo for this sentence
idx, s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score'])
worst_finalized[sent] = {
'score': s['score'],
'idx': idx,
}
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfinalized_scores):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
for step in range(maxlen + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
for i, model in enumerate(self.models):
if isinstance(model.decoder, FairseqIncrementalDecoder):
model.decoder.reorder_incremental_state(incremental_states[model], reorder_state)
encoder_outs[i] = model.encoder.reorder_encoder_out(*encoder_outs[i], reorder_state)
probs, avg_attn_scores = self._decode(tokens[:, :step + 1], encoder_outs, incremental_states)
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
probs = probs.unfold(0, 1, beam_size).squeeze(2).contiguous()
scores = scores.type_as(probs)
scores_buf = scores_buf.type_as(probs)
elif not self.sampling:
# make probs contain cumulative scores for each hypothesis
probs.add_(scores[:, step - 1].view(-1, 1))
probs[:, self.pad] = -math.inf # never select pad
probs[:, self.unk] -= self.unk_penalty # apply unk penalty
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), maxlen + 2)
attn_buf = attn.clone()
nonpad_idxs = src_tokens.ne(self.pad)
attn[:, :, step + 1].copy_(avg_attn_scores)
cand_beams = buffer('cand_beams')
if step < maxlen:
if prefix_tokens is not None and step < prefix_tokens.size(1):
probs_slice = probs.view(bsz, -1, probs.size(-1))[:, 0, :]
cand_scores = torch.gather(
probs_slice, dim=1,
index=prefix_tokens[:, step].view(-1, 1).data
).expand(-1, cand_size)
cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, cand_size).data
cand_beams.resize_as_(cand_indices).fill_(0)
elif self.sampling:
assert self.pad == 1, 'sampling assumes the first two symbols can be ignored'
if self.sampling_topk > 0:
values, indices = probs[:, 2:].topk(self.sampling_topk)
exp_probs = values.div_(self.sampling_temperature).exp()
if step == 0:
cand_indices = torch.multinomial(exp_probs, beam_size, replacement=True)
else:
cand_indices = torch.multinomial(exp_probs, 1, replacement=True)
cand_scores = torch.gather(exp_probs, dim=1, index=cand_indices)
cand_indices = torch.gather(indices, dim=1, index=cand_indices)
cand_indices.add_(2)
else:
exp_probs = probs.div_(self.sampling_temperature).exp_().view(-1, self.vocab_size)
if step == 0:
# we exclude the first two vocab items, one of which is pad
cand_indices = torch.multinomial(exp_probs[:, 2:], beam_size, replacement=True)
else:
cand_indices = torch.multinomial(exp_probs[:, 2:], 1, replacement=True)
cand_indices.add_(2)
cand_scores = torch.gather(exp_probs, dim=1, index=cand_indices)
cand_scores.log_()
cand_indices = cand_indices.view(bsz, -1).repeat(1, 2)
cand_scores = cand_scores.view(bsz, -1).repeat(1, 2)
if step == 0:
cand_beams = torch.zeros(bsz, cand_size).type_as(cand_indices)
else:
cand_beams = torch.arange(0, beam_size).repeat(bsz, 2).type_as(cand_indices)
# make scores cumulative
cand_scores.add_(
torch.gather(
scores[:, step - 1].view(bsz, beam_size), dim=1,
index=cand_beams,
)
)
else:
# take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
cand_scores, cand_indices = torch.topk(
probs.view(bsz, -1),
k=min(cand_size, probs.view(bsz, -1).size(1) - 1), # -1 so we never select pad
)
cand_beams = torch.div(cand_indices, self.vocab_size, rounding_mode='trunc')
cand_indices.fmod_(self.vocab_size)
else:
# finalize all active hypotheses once we hit maxlen
# pick the hypothesis with the highest prob of EOS right now
eos_scores, eos_bbsz_idx = torch.sort(
probs[:, self.eos],
descending=True,
)
num_remaining_sent -= len(finalize_hypos(
step, eos_bbsz_idx, eos_scores))
assert num_remaining_sent == 0
break
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
eos_mask = cand_indices.eq(self.eos)
finalized_sents = set()
if step >= self.minlen:
# only consider eos when it's among the top beam_size indices
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
)
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
)
finalized_sents = finalize_hypos(
step, eos_bbsz_idx, eos_scores, cand_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < maxlen
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(bsz).type_as(cand_indices)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
)
active_bbsz_idx = torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
def _decode(self, tokens, encoder_outs, incremental_states):
if len(self.models) == 1:
return self._decode_one(tokens, self.models[0], encoder_outs[0], incremental_states, log_probs=True)
avg_probs = None
avg_attn = None
for model, encoder_out in zip(self.models, encoder_outs):
probs, attn = self._decode_one(tokens, model, encoder_out, incremental_states, log_probs=False)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs.div_(len(self.models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(self.models))
return avg_probs, avg_attn
def _decode_one(self, tokens, model, encoder_out, incremental_states, log_probs):
with torch.no_grad():
if incremental_states[model] is not None:
decoder_out = list(model.decoder(tokens, encoder_out[0], encoder_out[1], incremental_state=incremental_states[model]))
else:
decoder_out = list(model.decoder(tokens, encoder_out[0], encoder_out[1]))
decoder_out[0] = decoder_out[0][:, -1, :]
attn = decoder_out[1]
if isinstance(attn, torch.Tensor) and attn.numel() == 0:
attn = None
if attn is not None:
attn = attn[:, -1, :]
logits = decoder_out[0]
if log_probs:
probs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
else:
probs = F.softmax(logits, dim=-1, dtype=torch.float32)
return probs, attn
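# Illustrative sketch (not part of the original file): _decode above ensembles
# models by averaging per-model softmax probabilities and taking the log at
# the end. This standalone helper reproduces that reduction; the (batch, vocab)
# logit shapes are assumptions.
def _example_ensemble_average(per_model_logits):
    """per_model_logits: list of (batch, vocab) tensors, one per model."""
    avg_probs = None
    for logits in per_model_logits:
        probs = F.softmax(logits, dim=-1, dtype=torch.float32)
        avg_probs = probs if avg_probs is None else avg_probs.add_(probs)
    avg_probs.div_(len(per_model_logits))
    return avg_probs.log_()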
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/sequence_generator.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Train a network across multiple GPUs.
"""
import math
from collections import defaultdict
from itertools import chain
import torch
import torch.nn.functional as F
from torch.cuda import amp
from apex.parallel import DistributedDataParallel as DDP
from fairseq import distributed_utils, optim, utils
from fairseq.optim import lr_scheduler
from fairseq.meters import TimeMeter, AverageMeter
from fairseq.criterions import CRITERION_REGISTRY
import dllogger as DLLogger
class DDPTrainer():
"""Main class for data parallel training.
This class supports data parallel training, where multiple workers each
have a full model replica and gradients are accumulated synchronously via
torch.distributed.all_reduce.
"""
def __init__(self, args, model):
if not torch.cuda.is_available():
raise NotImplementedError('Training on CPU is not supported')
self.args = args
self.model = model.cuda()
self.criterion = CRITERION_REGISTRY[args.criterion](args).cuda()
self.optimizer = optim.build_optimizer(self.args, self.model.parameters())
self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
self.scaler = amp.GradScaler(enabled=self.args.amp, init_scale=2**15)
if self.args.distributed_world_size > 1:
self.model = DDP(model)
self._buffered_stats = defaultdict(lambda: [])
self._num_updates = 0
self._optim_history = None
self.throughput_meter = TimeMeter()
self.avg_loss_meter = AverageMeter()
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if distributed_utils.is_master(self.args): # only save one checkpoint
utils.save_state(
filename, self.args, self.get_model(), self.criterion, self.optimizer,
self.lr_scheduler, self._num_updates, self._optim_history, extra_state,
)
def load_checkpoint(self, filename, load_optim=True):
"""Load all training state from a checkpoint file."""
extra_state, optim_history, last_optim_state = \
utils.load_model_state(filename, self.get_model())
if last_optim_state is not None:
            # rebuilding the optimizer after loading the model is disabled here;
            # only the lr scheduler is rebuilt, since params may have changed
            #self.optimizer = optim.build_optimizer(self.args, self.model.parameters())
self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
if load_optim:
self._optim_history = optim_history
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
if last_optim['criterion_name'] == self.criterion.__class__.__name__:
self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])
if last_optim['optimizer_name'] == self.optimizer.__class__.__name__:
self.optimizer.load_state_dict(last_optim_state)
self._num_updates = last_optim['num_updates']
return extra_state
def train_step(self, sample, update_params=True, last_step=False):
"""Do forward, backward and parameter update."""
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
self.model.train()
if isinstance(self.model, DDP):
if last_step:
self.model.disable_allreduce()
else:
self.model.enable_allreduce()
# forward and backward pass
sample = self._prepare_sample(sample)
loss, oom_fwd = self._forward(sample)
        # On the last batch the forward pass may have been skipped on some workers.
        # A batch with sample_size 0 is not accounted for in the weighted loss.
logging_output = {
'ntokens': sample['ntokens'] if sample is not None else 0,
'nsentences': sample['target'].size(0) if sample is not None else 0,
'loss': utils.item(loss.data) if loss is not None else 0,
}
sample_size = sample['ntokens'] if sample is not None else 0
oom_bwd = self._backward(loss)
# buffer stats and logging outputs
self._buffered_stats['sample_sizes'].append(sample_size)
self._buffered_stats['logging_outputs'].append(logging_output)
self._buffered_stats['ooms_fwd'].append(oom_fwd)
self._buffered_stats['ooms_bwd'].append(oom_bwd)
# update parameters
if update_params and not last_step:
# gather logging outputs from all replicas
sample_sizes = self._buffered_stats['sample_sizes']
logging_outputs = self._buffered_stats['logging_outputs']
ooms_fwd = self._buffered_stats['ooms_fwd']
ooms_bwd = self._buffered_stats['ooms_bwd']
if self.args.distributed_world_size > 1:
sample_sizes, logging_outputs, ooms_fwd, ooms_bwd = map(
lambda l: list(chain.from_iterable(l)),
zip(*distributed_utils.all_gather_list(
(sample_sizes, logging_outputs, ooms_fwd, ooms_bwd)
))
)
ooms_fwd = sum(ooms_fwd)
ooms_bwd = sum(ooms_bwd)
ooms = ooms_fwd + ooms_bwd # this is always <= distributed_world_size
if ooms == self.args.distributed_world_size:
print('| WARNING: OOM in all workers, skipping batch')
self.zero_grad()
return
# aggregate stats and logging outputs
grad_denom = sum(sample_sizes)
for p in self.model.parameters():
if p.requires_grad and p.grad is not None:
p.grad /= grad_denom
self._opt()
# Handle logging
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
self.throughput_meter.update(ntokens)
info_log_data = {
'tokens/s': self.throughput_meter.avg,
'tokens': ntokens,
'loss': sum(log.get('loss', 0) for log in logging_outputs) / ntokens / math.log(2)
}
self.avg_loss_meter.update(info_log_data['loss'])
debug_log_data = {
'batch_size': sum(log.get('nsentences', 0) for log in logging_outputs),
'lr': self.get_lr(),
'grad_denom': grad_denom,
'updates': 1
}
DLLogger.log(step=self._num_updates, data=info_log_data, verbosity=0)
DLLogger.log(step=self._num_updates, data=debug_log_data, verbosity=1)
self.clear_buffered_stats()
def _forward(self, sample):
loss = None
oom = 0
try:
if sample is not None:
with amp.autocast(enabled=self.args.amp):
# calculate loss and sample size
logits, _ = self.model(**sample['net_input'])
target = sample['target']
probs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = self.criterion(probs, target)
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory in worker {}, skipping batch'.format(
self.args.distributed_rank), force=True)
oom = 1
loss = None
else:
raise e
return loss, oom
def _backward(self, loss):
oom = 0
if loss is not None:
try:
self.scaler.scale(loss).backward()
except RuntimeError as e:
if 'out of memory' in str(e):
print('| WARNING: ran out of memory in worker {}, skipping batch'.format(
self.args.distributed_rank), force=True)
oom = 1
self.zero_grad()
else:
raise e
return oom
def _opt(self):
# take an optimization step
self.scaler.step(self.optimizer.optimizer)
self.scaler.update()
self.zero_grad()
self._num_updates += 1
# update learning rate
self.lr_scheduler.step_update(self._num_updates)
def valid_step(self, sample):
"""Do forward pass in evaluation mode."""
self.model.eval()
# forward pass
sample = self._prepare_sample(sample)
with torch.no_grad():
loss, oom_fwd = self._forward(sample)
logging_output = {
'ntokens': sample['ntokens'] if sample is not None else 0,
'nsentences': sample['target'].size(0) if sample is not None else 0,
}
loss = loss.item() if loss is not None else 0
assert not oom_fwd, 'Ran out of memory during validation'
# gather logging outputs from all GPUs
if self.args.distributed_world_size > 1:
losses, logging_outputs = zip(*distributed_utils.all_gather_list(
(loss, logging_output)
))
else:
losses = [loss]
logging_outputs = [logging_output]
weight = sum(log.get('ntokens', 0) for log in logging_outputs)
scaled_loss = sum(losses) / weight / math.log(2)
return scaled_loss
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
self.train_step(dummy_batch, update_params=False)
self.zero_grad()
self.clear_buffered_stats()
def zero_grad(self):
self.optimizer.zero_grad()
def clear_buffered_stats(self):
self._buffered_stats.clear()
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate based on the validation loss."""
return self.lr_scheduler.step(epoch, val_loss)
def lr_step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.lr_scheduler.step_update(num_updates)
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_throughput_meter(self):
"""Get the throughput meter"""
return self.throughput_meter
def get_model(self):
"""Get the model replica."""
return self.model.module if isinstance(self.model, DDP) else self.model
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def _prepare_sample(self, sample):
if not sample:
return None
return utils.move_to_cuda(sample)
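# Illustrative sketch (not part of the original file): train_step above divides
# accumulated gradients by the total token count gathered across workers before
# stepping. This single-worker toy shows the same scaling; the linear model and
# token counts are invented.
def _example_grad_normalization():
    model = torch.nn.Linear(4, 2)
    batches = [(torch.randn(3, 4), 12), (torch.randn(5, 4), 20)]  # (inputs, ntokens)
    for inputs, _ in batches:
        model(inputs).sum().backward()       # gradients accumulate across batches
    grad_denom = sum(ntokens for _, ntokens in batches)
    for p in model.parameters():
        if p.requires_grad and p.grad is not None:
            p.grad /= grad_denom             # same normalization as train_step
    return grad_denom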
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/ddp_trainer.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import Counter
import re
import torch
SPACE_NORMALIZER = re.compile("\s+")
path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'prefixes/nonbreaking_prefix.en')
prefixes ={}
with open(path, 'r') as f:
for line in f:
line = line.strip()
if line and not line[0] == '#':
match = re.search(r'(.*)[\s]+(\#NUMERIC_ONLY\#)', line)
if match:
prefixes[match.group(1)] = 2
else:
prefixes[line] = 1
def get_unicode_categories():
import sys
from collections import defaultdict
import unicodedata
cats = defaultdict(list)
for c in map(chr, range(sys.maxunicode + 1)):
cats[unicodedata.category(c)].append(c)
return cats
NUMERICS = ''.join(get_unicode_categories()['No'])
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line
def tokenize_en(line):
line = line.strip()
line = ' ' + line + ' '
# remove ASCII junk
line = re.sub(r'\s+', ' ', line)
line = re.sub(r'[\x00-\x1F]', '', line)
    # fix whitespace
    line = re.sub(r' +', ' ', line)
    line = re.sub(r'^ ', '', line)
    line = re.sub(r' $', '', line)
#separate other special characters
line = re.sub(r'([^\s\.\'\`\,\-\w]|[_'+NUMERICS+'])', r' \g<1> ', line)
line = re.sub(r'(\w)\-(?=\w)', r'\g<1> @-@ ', line)
#multidots stay together
line = re.sub(r'\.([\.]+)', r' DOTMULTI\g<1>', line)
while re.search(r'DOTMULTI\.', line):
line = re.sub(r'DOTMULTI\.([^\.])', r'DOTDOTMULTI \g<1>', line)
line = re.sub(r'DOTMULTI\.', r'DOTDOTMULTI', line)
# separate out "," except if within numbers (5,300)
line = re.sub(r'([\D])[,]', r'\g<1> , ', line)
line = re.sub(r'[,]([\D])', r' , \g<1>', line)
# separate "," after a number if it's the end of sentence
line = re.sub(r'(\d)[,]$', r'\g<1> ,', line)
# split contractions right
    line = re.sub(r'([\W\d])[\']([\W\d])', r"\g<1> ' \g<2>", line)
    line = re.sub(r'(\W)[\']([\w\D])', r"\g<1> ' \g<2>", line)
    line = re.sub(r'([\w\D])[\']([\W\d])', r"\g<1> ' \g<2>", line)
    line = re.sub(r'([\w\D])[\']([\w\D])', r"\g<1> '\g<2>", line)
    # special case for "1990's"
    line = re.sub(r'([\W\d])[\']([s])', r"\g<1> '\g<2>", line)
# apply nonbreaking prefixes
words = line.split()
line = ''
for i in range(len(words)):
word = words[i]
match = re.search(r'^(\S+)\.$', word)
if match:
pre = match.group(1)
if i==len(words)-1:
# split last words independently as they are unlikely to be non-breaking prefixes
word = pre+' .'
elif ((re.search(r'\.', pre) and re.search(r'[^\.\W\d]', pre))
or (pre in prefixes and prefixes[pre]==1)
or re.search(r'^[a-z]', words[i+1])
or (pre in prefixes and prefixes[pre]==2 and re.search(r'^[0-9]+', words[i+1]))):
pass
else:
word = pre+' .'
word +=' '
line += word
# clean up extraneous spaces
line = re.sub(' +', ' ', line)
line = re.sub('^ ', '', line)
line = re.sub(' $', '', line)
# .' at end of sentence is missed
line = re.sub(r'\.\' ?$', ' . \' ', line)
#restore multi-dots
while re.search('DOTDOTMULTI', line):
line = re.sub('DOTDOTMULTI', 'DOTMULTI.', line)
line = re.sub('DOTMULTI', '.', line)
    # escape special characters as Moses-style HTML entities
    line = re.sub(r'\&', r'&amp;', line)
    line = re.sub(r'\|', r'&#124;', line)
    line = re.sub(r'\<', r'&lt;', line)
    line = re.sub(r'\>', r'&gt;', line)
    line = re.sub(r'\'', r'&apos;', line)
    line = re.sub(r'\"', r'&quot;', line)
    line = re.sub(r'\[', r'&#91;', line)
    line = re.sub(r'\]', r'&#93;', line)
#ensure final line breaks
if line[-1] != '\n':
line += '\n'
return line
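# Illustrative example (not part of the original file): tokenize_en applied to
# a short sentence; the input string is invented. Punctuation is split off,
# intra-word hyphens become ' @-@ ', and contractions split at the apostrophe.
def _example_tokenize_en():
    return tokenize_en("Mr. Smith's well-known co-author arrived, didn't he?")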
def deescape(line):
    line = re.sub(r'&#124;', r'|', line)
    line = re.sub(r'&lt;', r'<', line)
    line = re.sub(r'&gt;', r'>', line)
    line = re.sub(r'&quot;', r'"', line)
    line = re.sub(r'&apos;', r"'", line)
    line = re.sub(r'&#91;', r'[', line)
    line = re.sub(r'&#93;', r']', line)
    line = re.sub(r'&amp;', r'&', line)
return line
class Tokenizer:
@staticmethod
def add_file_to_dictionary(filename, dict, tokenize):
with open(filename, 'r') as f:
for line in f:
for word in tokenize(line).split():
dict.add_symbol(word)
dict.add_symbol(dict.eos_word)
@staticmethod
def binarize(filename, dict, consumer, tokenize=tokenize_line,
append_eos=True, reverse_order=False):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(filename, 'r') as f:
for line in f:
ids = Tokenizer.tokenize(
line=line,
dictionary=dict,
tokenize=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
nseq += 1
consumer(ids)
ntok += len(ids)
return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': len(replaced)}
@staticmethod
def tokenize(line, dictionary, tokenize=tokenize_line, add_if_not_exist=True,
consumer=None, append_eos=True, reverse_order=False, bpe=None):
line = tokenize(line)
if bpe:
line = bpe.process_line(line)
words = line.split()
if reverse_order:
words = list(reversed(words))
nwords = len(words)
ids = torch.IntTensor(nwords + 1 if append_eos else nwords)
for i, word in enumerate(words):
if add_if_not_exist:
idx = dictionary.add_symbol(word)
else:
idx = dictionary.index(word)
if consumer is not None:
consumer(word, idx)
ids[i] = idx
if append_eos:
ids[nwords] = dictionary.eos_index
return ids
@staticmethod
def detokenize(line, lang):
#don't try to detokenize XML/HTML tag lines
if re.search(r'^<.+>$', line) or re.search(r'^\s*$', line):
return line
line = line.strip()
line = ' '+line+' '
line = re.sub(r' @-@ ', '-', line)
line = deescape(line)
words = line.split()
line = ''
quote_count = {'\'':0, '\"':0}
prepend_space = ' '
for i in range(len(words)):
            # perform right shift of currency symbols and opening punctuation
if re.search(r'^[\u20ac\x24\(\[\{]+$', words[i]):
line += prepend_space + words[i]
prepend_space = ''
elif re.search(r'^[\,\.\?\!\:\;\\\%\}\]\)]+$', words[i]):
if lang=='fr' and re.search(r'^[\?\!\:\;\\\%]$', words[i]):
line += ' '
line += words[i]
prepend_space = ' '
elif lang=='en' and i>0 and re.search(r'^[\'][\w\D]', words[i]) and re.search(r'\w$', words[i-1]):
line += words[i]
prepend_space = ' '
elif lang=='cs' and i>1 and re.search(r'^\d+$', words[i-2]) and re.search(r'^[.,]$', words[i-1]) and re.search(r'^\w+$', words[i]):
line += words[i]
prepend_space = ' '
elif (lang=='fr' or lang=='it') and i<len(words)-1 and re.search(r'[\w\D][\']$', words[i]) and re.search(r'^[\w\D]', words[i+1]):
line += prepend_space + words[i]
prepend_space = ''
elif lang=='cs' and i<len(words)-3 and \
re.search(r'[\w\D]$', words[i]) and \
re.search(r'^-$', words[i+1]) and \
re.search(r'^li$|^mail.*', words[i+2], re.I):
#line += ' '+words[i]+words[i+1]
pass #TODO: skip one word
elif re.search(r'^[\'\"\x60\u201c\u201d]+$', words[i]):
normalized_quo = '\"' if re.search(r'^[\u201c\u201d]+$', words[i]) else words[i]
quote_count[normalized_quo] = 0 if normalized_quo not in quote_count.keys() else quote_count[normalized_quo]
if lang=='cs' and words[i] == '\u201c':
quote_count[normalized_quo] = 0
if lang=='cs' and words[i] == '\u201d':
quote_count[normalized_quo] = 1
if quote_count[normalized_quo] % 2 == 0:
if lang=='en' and words[i]=='\'' and i > 0 and re.search(r'[s]$', words[i-1]):
                        # single quote for possessives ending in s... "The Jones' house"
#left shift
line += words[i]
prepend_space = ' '
else:
#right shift
line += prepend_space + words[i]
prepend_space = ''
quote_count[normalized_quo] += 1
else:
#left shift
line += words[i]
prepend_space = ' '
quote_count[normalized_quo] += 1
elif lang=='fi' and re.search(r':$', words[i-1]) and re.search(r'^(N|n|A|a|Ä|ä|ssa|Ssa|ssä|Ssä|sta|stä|Sta|Stä|hun|Hun|hyn|Hyn|han|Han|hän|Hän|hön|Hön|un|Un|yn|Yn|an|An|än|Än|ön|Ön|seen|Seen|lla|Lla|llä|Llä|lta|Lta|ltä|Ltä|lle|Lle|ksi|Ksi|kse|Kse|tta|Tta|ine|Ine)(ni|si|mme|nne|nsa)?(ko|kö|han|hän|pa|pä|kaan|kään|kin)?$', words[i]):
line += words[i].lower()
prepend_space = ' '
else:
line += prepend_space + words[i]
prepend_space = ' '
#clean up spaces at head and tail of each line as well as any double-spacing
line = re.sub(r' +', ' ', line)
line = re.sub(r'\n ', '\n', line)
line = re.sub(r' \n', '\n', line)
line = re.sub(r'^ ', '', line)
line = re.sub(r' $', '', line)
#add trailing break
line += '\n' if line[-1] != '\n' else ''
return line
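# Illustrative example (not part of the original file): Tokenizer.detokenize
# reattaches punctuation, possessives and '@-@' hyphens; the sample line is
# invented.
def _example_detokenize():
    line = "the well @-@ known author 's book , finally .\n"
    return Tokenizer.detokenize(line, 'en')  # -> "the well-known author's book, finally.\n"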
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/tokenizer.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import multiprocessing
import os
import pdb
import sys
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
_stdin_fd = sys.stdin.fileno()
_stdin = None
_stdin_lock = multiprocessing.Lock()
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with self._stdin_lock:
try:
if not self._stdin:
self._stdin = os.fdopen(self._stdin_fd)
sys.stdin = self._stdin
self.cmdloop()
finally:
sys.stdin = stdin_bak
pdb = MultiprocessingPdb()
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/multiprocessing_pdb.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#--------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict, OrderedDict
import logging
import os
import re
import torch
import traceback
from torch.serialization import default_restore_location
def torch_persistent_save(*args, **kwargs):
for i in range(3):
try:
return torch.save(*args, **kwargs)
except Exception:
if i == 2:
logging.error(traceback.format_exc())
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for k, v in state_dict.items():
cpu_dict[k] = convert_state_dict_type(v)
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
def save_state(filename, args, model, criterion, optimizer, lr_scheduler,
num_updates, optim_history=None, extra_state=None):
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
'args': args,
'model': convert_state_dict_type(model.state_dict()),
'optimizer_history': optim_history + [
{
'criterion_name': criterion.__class__.__name__,
'optimizer_name': optimizer.__class__.__name__,
'lr_scheduler_state': lr_scheduler.state_dict(),
'num_updates': num_updates,
}
],
'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()),
'extra_state': extra_state,
}
torch_persistent_save(state_dict, filename)
def load_model_state(filename, model):
if not os.path.exists(filename):
return None, [], None
state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu'))
# load model parameters
try:
model.load_state_dict(state['model'], strict=True)
except Exception:
raise Exception('Cannot load model parameters from checkpoint, '
'please ensure that the architectures match')
return state['extra_state'], state['optimizer_history'], state['last_optimizer_state']
def move_to_cuda(sample):
if len(sample) == 0:
return {}
def _move_to_cuda(maybe_tensor):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.cuda()
elif isinstance(maybe_tensor, dict):
return {
key: _move_to_cuda(value)
for key, value in maybe_tensor.items()
}
elif isinstance(maybe_tensor, list):
return [_move_to_cuda(x) for x in maybe_tensor]
else:
return maybe_tensor
return _move_to_cuda(sample)
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_fairseq_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str):
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, 'r') as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying
# the original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
print("| Found {}/{} types in embedding file.".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor([float(weight) for weight in pieces[1:]])
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>']
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return ' '.join(hypo_tokens)
def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe):
from fairseq import tokenizer
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe)
if align_dict is not None:
hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string())
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tokenizer.Tokenizer.tokenize(hypo_str, tgt_dict, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx, left_pad):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1.
Padding symbols are ignored, but it is necessary to specify whether padding
is added on the left side (left_pad=True) or right side (left_pad=False).
"""
max_pos = padding_idx + 1 + tensor.size(1)
if not hasattr(make_positions, 'range_buf'):
make_positions.range_buf = torch.arange(padding_idx + 1, 768,
dtype=tensor.dtype, device=tensor.device)
make_positions.range_buf = make_positions.range_buf.type_as(tensor)
if make_positions.range_buf.numel() < max_pos:
torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf)
mask = tensor.ne(padding_idx)
positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor)
if left_pad:
positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1)
return tensor.clone().masked_scatter_(mask, positions[mask])
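# Illustrative example (not part of the original file): make_positions on a
# tiny right-padded batch; the token values and padding_idx=1 are assumptions.
def _example_make_positions():
    tokens = torch.tensor([[5, 6, 7, 1],
                           [5, 6, 1, 1]])
    # positions count up from padding_idx + 1; pad slots keep padding_idx
    return make_positions(tokens, padding_idx=1, left_pad=False)
    # -> tensor([[2, 3, 4, 1],
    #            [2, 3, 1, 1]])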
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, 'buf'):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
range = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens)
num_pads = pad_mask.long().sum(dim=1, keepdim=True)
if right_to_left:
index = torch.remainder(range - num_pads, max_len)
else:
index = torch.remainder(range + num_pads, max_len)
return src_tokens.gather(1, index)
def item(tensor):
if hasattr(tensor, 'item'):
return tensor.item()
if hasattr(tensor, '__getitem__'):
return tensor[0]
return tensor
def clip_grad_norm_(tensor, max_norm):
grad_norm = item(torch.norm(tensor))
if grad_norm > max_norm > 0:
clip_coef = max_norm / (grad_norm + 1e-6)
tensor.mul_(clip_coef)
return grad_norm
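# Illustrative example (not part of the original file): clip_grad_norm_ above
# rescales a flat gradient tensor in place when its norm exceeds max_norm; the
# tensor below is invented.
def _example_clip():
    flat_grads = torch.full((4,), 2.0)           # L2 norm = 4.0
    norm = clip_grad_norm_(flat_grads, max_norm=1.0)
    # flat_grads now has norm ~1.0; the pre-clip norm (4.0) is returned
    return norm, flat_grads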
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def checkpoint_paths(path, pattern=r'checkpoint(\d+)\.pt'):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = int(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/utils.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import os
import socket
import torch.distributed
from fairseq import utils
def is_master(args):
return args.distributed_rank == 0
def distributed_init(args):
args.distributed_world_size = int(os.environ.get('WORLD_SIZE',1))
args.distributed_rank = int(os.environ.get('RANK',0))
args.local_rank = int(os.environ.get('LOCAL_RANK', 0))
if args.distributed_world_size > 1:
print('| distributed init (rank {}): env://'.format(args.distributed_rank), flush=True)
print(f"| distributed env init. MASTER_ADDR: {os.environ['MASTER_ADDR']}, MASTER_PORT: {os.environ['MASTER_PORT']}" +
f", WORLD_SIZE: {os.environ['WORLD_SIZE']}, RANK: {os.environ['RANK']}", flush=True)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
print("| distributed init done!", flush=True)
suppress_output(args)
print('| initialized host {} as rank {} and device id {}'
.format(socket.gethostname(), args.distributed_rank, args.local_rank))
def suppress_output(main_args):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print_master(*args, **kwargs):
if 'force' in kwargs:
kwargs.pop('force')
builtin_print(*args, **kwargs)
def print(*args, **kwargs):
if 'force' in kwargs:
force = kwargs.pop('force')
if force:
builtin_print(*args, **kwargs)
if is_master(main_args):
__builtin__.print = print_master
else:
__builtin__.print = print
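# Hedged sketch of the resulting behavior on a non-master rank: plain print()
# becomes a no-op, while force=True still reaches stdout.
#
#   print('visible on the master rank only')
#   print('visible on every rank', force=True)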
def all_gather_list(data, max_size=16384):
"""Gathers arbitrary data from all nodes into a list."""
world_size = torch.distributed.get_world_size()
if not hasattr(all_gather_list, '_in_buffer') or \
max_size != len(all_gather_list._in_buffer):
all_gather_list._in_buffer = torch.cuda.ByteTensor(max_size)
all_gather_list._out_buffers = [
torch.cuda.ByteTensor(max_size)
for i in range(world_size)
]
in_buffer = all_gather_list._in_buffer
out_buffers = all_gather_list._out_buffers
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError('encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255 * 256
in_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
in_buffer[1] = enc_size % 255
in_buffer[2:enc_size + 2] = torch.ByteTensor(list(enc))
torch.distributed.all_gather(out_buffers, in_buffer.cuda())
result = []
for i in range(world_size):
out_buffer = out_buffers[i]
size = (255 * utils.item(out_buffer[0])) + utils.item(out_buffer[1])
result.append(
pickle.loads(bytes(out_buffer[2:size + 2].tolist()))
)
return result
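# Hedged usage sketch (the dict below is an illustrative assumption): every
# rank contributes a picklable object and receives the rank-ordered list back.
#
#   stats = {'rank': torch.distributed.get_rank(), 'ntokens': 1024}
#   gathered = all_gather_list(stats)            # len(gathered) == world_size
#   total_tokens = sum(s['ntokens'] for s in gathered)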
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/distributed_utils.py |
import os
import atexit
import time
import itertools
from collections import OrderedDict
import dllogger
from dllogger import Backend, JSONStreamBackend
from tensorboardX import SummaryWriter
import torch
class AverageMeter():
def __init__(self):
self.reset()
def reset(self):
self.updated = False
self.avg = 0
self.sum = 0
self.count = 0
def update(self, value):
self.updated = True
if isinstance(value, (tuple, list)):
val = value[0]
n = value[1]
else:
val = value
n = 1
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
@property
def value(self):
return self.avg
class PerformanceMeter():
def __init__(self):
self.reset()
def reset(self):
self.updated = False
torch.cuda.synchronize()
self.start = time.time()
self.n = 0
def update(self, val=1):
self.updated = True
self.n += val
@property
def value(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
torch.cuda.synchronize()
return time.time() - self.start
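# Minimal sketch (assumed values) of the two meter types defined above:
#
#   avg = AverageMeter()
#   avg.update((2.5, 10))    # value 2.5 averaged over 10 items
#   avg.update(3.0)          # single observation
#   avg.value                # -> (2.5 * 10 + 3.0) / 11
#
#   perf = PerformanceMeter()
#   perf.update(4096)        # e.g. tokens processed since reset()
#   perf.value               # -> tokens per second since reset()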
METRIC = {'average': AverageMeter, 'performance': PerformanceMeter}
class AggregatorBackend(Backend):
def __init__(self, verbosity, agg_dict):
super().__init__(verbosity=verbosity)
agg_dict = OrderedDict({k: v if isinstance(v, (tuple, list)) else (v,) for k, v in agg_dict.items()})
self.metrics = OrderedDict({k: [METRIC[x]() for x in v] for k, v in agg_dict.items()})
self.metrics.flushed = True
self.step = 0
self.epoch = 0
torch.cuda.synchronize()
self.start_time = time.time()
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def _reset_perf_meter(self, name):
for agg in self.metrics[name]:
if isinstance(agg, PerformanceMeter):
agg.reset()
def reset_perf_meters(self):
for name in self.metrics.keys():
self._reset_perf_meter(name)
def log(self, timestamp, elapsedtime, step, data):
self.step = step
if 'epoch' in data.keys():
self.epoch = data['epoch']
for k, v in data.items():
if k not in self.metrics.keys():
continue
self.metrics.flushed = False
for ag in self.metrics[k]:
ag.update(v)
def flush(self):
if self.metrics.flushed:
return
result_string = 'Transformer | epoch {} | step {} |'.format(self.epoch, self.step)
for name, aggregators in self.metrics.items():
for agg in aggregators:
if not agg.updated:
continue
if isinstance(agg, AverageMeter):
_name = 'avg ' + name
elif isinstance(agg, PerformanceMeter):
_name = name + '/s'
result_string += _name + ' {:.3f} |'.format(agg.value)
agg.reset()
torch.cuda.synchronize()
result_string += 'walltime {:.3f} |'.format(time.time() - self.start_time)
self.metrics.flushed = True
print(result_string)
class TensorBoardBackend(Backend):
def __init__(self, verbosity, log_dir):
super().__init__(verbosity=verbosity)
self.summary_writer = SummaryWriter(log_dir=os.path.join(log_dir, 'TB_summary'),
flush_secs=120,
max_queue=200
)
atexit.register(self.summary_writer.close)
@property
def log_level(self):
return self._log_level
def metadata(self, timestamp, elapsedtime, metric, metadata):
pass
def log(self, timestamp, elapsedtime, step, data):
if not isinstance(step, int):
return
for k, v in data.items():
self.summary_writer.add_scalar(k, v, step)
def flush(self):
pass
def setup_logger(args):
aggregator_dict = OrderedDict([
('loss', 'average'),
('weighted_loss', 'average'),
('tokens', ('average', 'performance')),
('updates', 'performance'),
('gnorm', 'average')
])
os.makedirs(args.save_dir, exist_ok=True)
log_path = os.path.join(args.save_dir, args.stat_file)
if os.path.exists(log_path):
for i in itertools.count():
s_fname = args.stat_file.split('.')
fname = '.'.join(s_fname[:-1]) + f'_{i}.' + s_fname[-1] if len(s_fname) > 1 else args.stat_file + f'.{i}'
log_path = os.path.join(args.save_dir, fname)
if not os.path.exists(log_path):
break
    if args.distributed_world_size <= 1 or args.distributed_rank == 0:
dllogger.init(backends=[JSONStreamBackend(verbosity=1, filename=log_path),
AggregatorBackend(verbosity=0, agg_dict=aggregator_dict),
TensorBoardBackend(verbosity=1, log_dir=args.save_dir)])
else:
dllogger.init(backends=[])
for k, v in vars(args).items():
dllogger.log(step='PARAMETER', data={k: v}, verbosity=0)
container_setup_info = get_framework_env_vars()
dllogger.log(step='PARAMETER', data=container_setup_info, verbosity=0)
dllogger.metadata('loss', {'unit': 'nat', 'GOAL': 'MINIMIZE', 'STAGE': 'TRAIN'})
dllogger.metadata('val_loss', {'unit': 'nat', 'GOAL': 'MINIMIZE', 'STAGE': 'VAL'})
dllogger.metadata('speed', {'unit': 'tokens/s', 'format': ':.3f', 'GOAL': 'MAXIMIZE', 'STAGE': 'TRAIN'})
dllogger.metadata('accuracy', {'unit': 'bleu', 'format': ':.2f', 'GOAL': 'MAXIMIZE', 'STAGE': 'VAL'})
def get_framework_env_vars():
return {
'NVIDIA_PYTORCH_VERSION': os.environ.get('NVIDIA_PYTORCH_VERSION'),
'PYTORCH_VERSION': os.environ.get('PYTORCH_VERSION'),
'CUBLAS_VERSION': os.environ.get('CUBLAS_VERSION'),
'NCCL_VERSION': os.environ.get('NCCL_VERSION'),
'CUDA_DRIVER_VERSION': os.environ.get('CUDA_DRIVER_VERSION'),
'CUDNN_VERSION': os.environ.get('CUDNN_VERSION'),
'CUDA_VERSION': os.environ.get('CUDA_VERSION'),
'NVIDIA_PIPELINE_ID': os.environ.get('NVIDIA_PIPELINE_ID'),
'NVIDIA_BUILD_ID': os.environ.get('NVIDIA_BUILD_ID'),
'NVIDIA_TF32_OVERRIDE': os.environ.get('NVIDIA_TF32_OVERRIDE'),
}
def reset_perf_meters():
for backend in dllogger.GLOBAL_LOGGER.backends:
if isinstance(backend, AggregatorBackend):
backend.reset_perf_meters()
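# Hedged usage sketch; the argparse fields shown are assumptions inferred from
# how setup_logger reads `args`, not a confirmed CLI surface:
#
#   import argparse
#   args = argparse.Namespace(save_dir='results', stat_file='train_log.json',
#                             distributed_world_size=1, distributed_rank=0)
#   setup_logger(args)
#   dllogger.log(step=0, data={'loss': 9.7, 'tokens': 4096})
#   dllogger.flush()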
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/log_helper.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from torch.optim.optimizer import Optimizer, required
from . import FairseqOptimizer, register_optimizer
@register_optimizer('nag')
class FairseqNAG(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args, params)
self._optimizer = NAG(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'momentum': self.args.momentum,
'weight_decay': self.args.weight_decay,
}
class NAG(Optimizer):
def __init__(self, params, lr=required, momentum=0, weight_decay=0):
defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
super(NAG, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
weight_decay = group['weight_decay']
momentum = group['momentum']
lr = group['lr']
lr_old = group.get('lr_old', lr)
lr_correct = lr / lr_old
for p in group['params']:
if p.grad is None:
continue
d_p = p.grad.data
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
param_state['momentum_buffer'] = d_p.clone().zero_()
buf = param_state['momentum_buffer']
if weight_decay != 0:
p.data.mul_(1 - lr * weight_decay)
p.data.add_(momentum * momentum * lr_correct, buf)
p.data.add_(-(1 + momentum) * lr, d_p)
buf.mul_(momentum * lr_correct).add_(-lr, d_p)
group['lr_old'] = lr
return loss
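# A sketch of the update implemented above (our reading, not an authoritative
# derivation), with momentum m, gradient g, and lr correction c = lr / lr_old:
#
#   p   <- p * (1 - lr * wd)                        # decoupled weight decay
#   p   <- p + m * m * c * buf - (1 + m) * lr * g   # Nesterov look-ahead step
#   buf <- m * c * buf - lr * g                     # velocity update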
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/nag.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim
from . import FairseqOptimizer, register_optimizer
@register_optimizer('sgd')
class SGD(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args, params)
self._optimizer = torch.optim.SGD(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'momentum': self.args.momentum,
'weight_decay': self.args.weight_decay,
}
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/sgd.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch.optim
class FairseqOptimizer(object):
def __init__(self, args, params):
super().__init__()
self.args = args
self.params = params
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
pass
@property
def optimizer(self):
"""Return a torch.optim.optimizer.Optimizer instance."""
if not hasattr(self, '_optimizer'):
raise NotImplementedError
if not isinstance(self._optimizer, torch.optim.Optimizer):
raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
return self._optimizer
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
raise NotImplementedError
def get_lr(self):
"""Return the current learning rate."""
return self.optimizer.param_groups[0]['lr']
def set_lr(self, lr):
"""Set the learning rate."""
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def state_dict(self):
"""Return the optimizer's state dict."""
return self.optimizer.state_dict()
def load_state_dict(self, state_dict):
"""Load an optimizer state dict.
In general we should prefer the configuration of the existing optimizer
instance (e.g., learning rate) over that found in the state_dict. This
allows us to resume training from a checkpoint using a new set of
optimizer args.
"""
self.optimizer.load_state_dict(state_dict)
# override learning rate, momentum, etc. with latest values
for group in self.optimizer.param_groups:
group.update(self.optimizer_config)
def step(self, closure=None):
"""Performs a single optimization step."""
return self.optimizer.step(closure)
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for group in self.optimizer.param_groups:
for p in group['params']:
p.grad = None
return self.optimizer.zero_grad()
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/fairseq_optimizer.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_optimizer import FairseqOptimizer
OPTIMIZER_REGISTRY = {}
OPTIMIZER_CLASS_NAMES = set()
def build_optimizer(args, params):
params = filter(lambda p: p.requires_grad, params)
return OPTIMIZER_REGISTRY[args.optimizer](args, params)
def register_optimizer(name):
"""Decorator to register a new optimizer."""
def register_optimizer_cls(cls):
if name in OPTIMIZER_REGISTRY:
raise ValueError('Cannot register duplicate optimizer ({})'.format(name))
if not issubclass(cls, FairseqOptimizer):
raise ValueError('Optimizer ({}: {}) must extend FairseqOptimizer'.format(name, cls.__name__))
if cls.__name__ in OPTIMIZER_CLASS_NAMES:
# We use the optimizer class name as a unique identifier in
            # checkpoints, so all optimizers must have unique class names.
raise ValueError('Cannot register optimizer with duplicate class name ({})'.format(cls.__name__))
OPTIMIZER_REGISTRY[name] = cls
OPTIMIZER_CLASS_NAMES.add(cls.__name__)
return cls
return register_optimizer_cls
# automatically import any Python files in the optim/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.optim.' + module)
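# Hedged sketch of extending this registry; 'my_sgd' and MySGD are hypothetical
# names, not part of the codebase:
#
#   @register_optimizer('my_sgd')
#   class MySGD(FairseqOptimizer):
#       def __init__(self, args, params):
#           super().__init__(args, params)
#           self._optimizer = torch.optim.SGD(params, lr=args.lr[0])
#
#   # selected at runtime when args.optimizer == 'my_sgd':
#   # optimizer = build_optimizer(args, model.parameters())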
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim
from . import FairseqOptimizer, register_optimizer
@register_optimizer('adagrad')
class Adagrad(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args, params)
self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config)
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'weight_decay': self.args.weight_decay,
}
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/adagrad.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
#-------------------------------------------------------------------------
#
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import FairseqOptimizer, register_optimizer
from apex.optimizers.fused_adam import FusedAdam
@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args, params)
self._optimizer = FusedAdam(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
parser.add_argument('--adam-betas', default=(0.9, 0.999), nargs=2, type=float, metavar='B1 B2',
help='betas for Adam optimizer')
parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
help='epsilon for Adam optimizer')
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'betas': self.args.adam_betas,
'eps': self.args.adam_eps,
'weight_decay': self.args.weight_decay,
}
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/adam.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('fixed')
class FixedSchedule(FairseqLRScheduler):
"""Decay the LR on a fixed schedule."""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
# set defaults
args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0
self.lr = args.lr[0]
if args.warmup_updates > 0:
self.warmup_factor = 1. / args.warmup_updates
else:
self.warmup_factor = 1
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
parser.add_argument('--force-anneal', '--fa', type=int, metavar='N',
help='force annealing at specified epoch')
parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
def get_next_lr(self, epoch):
lrs = self.args.lr
if self.args.force_anneal is None or epoch < self.args.force_anneal:
# use fixed LR schedule
next_lr = lrs[min(epoch, len(lrs) - 1)]
else:
            # anneal based on lr_shrink
next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal)
return next_lr
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
self.lr = self.get_next_lr(epoch)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates:
self.warmup_factor = num_updates / float(self.args.warmup_updates)
self.optimizer.set_lr(self.warmup_factor * self.lr)
return self.optimizer.get_lr()
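# Worked numbers for get_next_lr (assumptions: lr = [0.1, 0.05],
# lr_shrink = 0.5, force_anneal = 3):
#
#   epoch 0 -> 0.1                                  # from the fixed list
#   epoch 1 -> 0.05
#   epoch 2 -> 0.05                                 # list exhausted, last entry reused
#   epoch 3 -> 0.05 * 0.5 ** (3 + 1 - 3) = 0.025    # annealing kicks in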
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler/fixed_schedule.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch.optim.lr_scheduler
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('reduce_lr_on_plateau')
class ReduceLROnPlateau(FairseqLRScheduler):
"""Decay the LR by a factor every time the validation loss plateaus."""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.'
' Consider --lr-scheduler=fixed instead.'
)
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.optimizer.optimizer, patience=0, factor=args.lr_shrink)
def state_dict(self):
"""Return the LR scheduler state dict."""
return {
'best': self.lr_scheduler.best,
'last_epoch': self.lr_scheduler.last_epoch,
}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.lr_scheduler.best = state_dict['best']
if 'last_epoch' in state_dict:
self.lr_scheduler.last_epoch = state_dict['last_epoch']
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
self.lr_scheduler.step(val_loss, epoch)
else:
self.lr_scheduler.last_epoch = epoch
return self.optimizer.get_lr()
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_lr_scheduler import FairseqLRScheduler
LR_SCHEDULER_REGISTRY = {}
def build_lr_scheduler(args, optimizer):
return LR_SCHEDULER_REGISTRY[args.lr_scheduler](args, optimizer)
def register_lr_scheduler(name):
"""Decorator to register a new LR scheduler."""
def register_lr_scheduler_cls(cls):
if name in LR_SCHEDULER_REGISTRY:
raise ValueError('Cannot register duplicate LR scheduler ({})'.format(name))
if not issubclass(cls, FairseqLRScheduler):
raise ValueError('LR Scheduler ({}: {}) must extend FairseqLRScheduler'.format(name, cls.__name__))
LR_SCHEDULER_REGISTRY[name] = cls
return cls
return register_lr_scheduler_cls
# automatically import any Python files in the optim/lr_scheduler/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.optim.lr_scheduler.' + module)
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler/__init__.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from . import FairseqLRScheduler, register_lr_scheduler
@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
"""Decay the LR based on the inverse square root of the update number.
We also support a warmup phase where we linearly increase the learning rate
from some initial learning rate (`--warmup-init-lr`) until the configured
learning rate (`--lr`). Thereafter we decay proportional to the number of
updates, with a decay factor set to align with the configured learning rate.
During warmup:
lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
lr = lrs[update_num]
After warmup:
lr = decay_factor / sqrt(update_num)
where
decay_factor = args.lr * sqrt(args.warmup_updates)
"""
def __init__(self, args, optimizer):
super().__init__(args, optimizer)
if len(args.lr) > 1:
raise ValueError(
'Cannot use a fixed learning rate schedule with inverse_sqrt.'
' Consider --lr-scheduler=fixed instead.'
)
warmup_end_lr = args.lr[0]
if args.warmup_init_lr < 0:
args.warmup_init_lr = warmup_end_lr
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
# then, decay prop. to the inverse square root of the update number
self.decay_factor = warmup_end_lr * args.warmup_updates**0.5
# initial learning rate
self.lr = args.warmup_init_lr
self.optimizer.set_lr(self.lr)
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
help='warmup the learning rate linearly for the first N updates')
parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
help='initial learning rate during warmup phase; default is args.lr')
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
super().step(epoch, val_loss)
# we don't change the learning rate at epoch boundaries
return self.optimizer.get_lr()
def step_update(self, num_updates):
"""Update the learning rate after each update."""
if num_updates < self.args.warmup_updates:
self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
else:
self.lr = self.decay_factor * num_updates**-0.5
self.optimizer.set_lr(self.lr)
return self.lr
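# Worked numbers for the schedule above (assumptions: lr = 5e-4,
# warmup_updates = 4000, warmup_init_lr = 0):
#
#   update   100: lr = 100 * (5e-4 / 4000)              = 1.25e-5   # warmup
#   update  4000: lr = 5e-4 * sqrt(4000) / sqrt(4000)   = 5e-4      # peak
#   update 16000: lr = 5e-4 * sqrt(4000) / sqrt(16000)  = 2.5e-4    # decay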
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from .. import FairseqOptimizer
class FairseqLRScheduler(object):
def __init__(self, args, optimizer):
super().__init__()
if not isinstance(optimizer, FairseqOptimizer):
raise ValueError('optimizer must be an instance of FairseqOptimizer')
self.args = args
self.optimizer = optimizer
self.best = None
@staticmethod
def add_args(parser):
"""Add arguments to the parser for this LR scheduler."""
pass
def state_dict(self):
"""Return the LR scheduler state dict."""
return {'best': self.best}
def load_state_dict(self, state_dict):
"""Load an LR scheduler state dict."""
self.best = state_dict['best']
def step(self, epoch, val_loss=None):
"""Update the learning rate at the end of the given epoch."""
if val_loss is not None:
if self.best is None:
self.best = val_loss
else:
self.best = min(self.best, val_loss)
def step_update(self, num_updates):
"""Update the learning rate after each update."""
return self.optimizer.get_lr()
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py |
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import importlib
import os
from .fairseq_incremental_decoder import FairseqIncrementalDecoder # noqa: F401
MODEL_REGISTRY = {}
ARCH_MODEL_REGISTRY = {}
ARCH_CONFIG_REGISTRY = {}
def build_model(args):
return ARCH_MODEL_REGISTRY[args.arch].build_model(args)
def register_model(name):
"""Decorator to register a new model (e.g., LSTM)."""
def register_model_cls(cls):
if name in MODEL_REGISTRY:
raise ValueError('Cannot register duplicate model ({})'.format(name))
MODEL_REGISTRY[name] = cls
return cls
return register_model_cls
def register_model_architecture(model_name, arch_name):
"""Decorator to register a new model architecture (e.g., lstm_luong_wmt_en_de)."""
def register_model_arch_fn(fn):
if model_name not in MODEL_REGISTRY:
raise ValueError('Cannot register model architecture for unknown model type ({})'.format(model_name))
if arch_name in ARCH_MODEL_REGISTRY:
raise ValueError('Cannot register duplicate model architecture ({})'.format(arch_name))
if not callable(fn):
raise ValueError('Model architecture must be callable ({})'.format(arch_name))
ARCH_MODEL_REGISTRY[arch_name] = MODEL_REGISTRY[model_name]
ARCH_CONFIG_REGISTRY[arch_name] = fn
return fn
return register_model_arch_fn
# automatically import any Python files in the models/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('fairseq.models.' + module)
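# Hedged sketch of the two-level registry above; 'rnn' and 'rnn_tiny' are
# hypothetical names. A model class is registered once, and named architectures
# then map onto it with their own default hyperparameters:
#
#   @register_model('rnn')
#   class RNNModel(...):               # base class elided
#       @classmethod
#       def build_model(cls, args):
#           return cls(...)
#
#   @register_model_architecture('rnn', 'rnn_tiny')
#   def rnn_tiny(args):
#       args.hidden_dim = getattr(args, 'hidden_dim', 128)
#
#   # build_model(args) then dispatches on args.arch, e.g. 'rnn_tiny'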
| DeepLearningExamples-master | PyTorch/Translation/Transformer/fairseq/models/__init__.py |