python_code (stringlengths 0–679k) | repo_name (stringlengths 9–41) | file_path (stringlengths 6–149)
---|---|---
# *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import argparse
import sys
sys.path.append('./')
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str, required=True,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, default="trtis_repo/tacotron/1/model.pt",
help='filename for the Tacotron 2 TorchScript model')
parser.add_argument('--fp16', action='store_true',
help='inference with mixed precision')
return parser
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args = parser.parse_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False,
forward_is_infer=True)
jitted_tacotron2 = torch.jit.script(tacotron2)
torch.jit.save(jitted_tacotron2, args.output)
if __name__ == '__main__':
main()
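# Illustrative invocation of this export script (the checkpoint path is
# hypothetical; the output default matches the argparse default above):
#   python export_tacotron2_ts.py --tacotron2 checkpoints/tacotron2.pt \
#       -o trtis_repo/tacotron/1/model.pt --fp16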
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/export_tacotron2_ts.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
import six
from io import open
logger = logging.getLogger(__name__)
PRETRAINED_VOCAB_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt",
}
PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
}
VOCAB_NAME = 'vocab.txt'
def convert_to_unicode(text):
"""Converts `text` to Unicode (if it's not already), assuming utf-8 input."""
if six.PY3:
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
elif six.PY2:
if isinstance(text, str):
return text.decode("utf-8", "ignore")
elif isinstance(text, unicode):
return text
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
else:
raise ValueError("Not running on Python2 or Python 3?")
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
vocab[token] = index
index += 1
return vocab
def whitespace_tokenize(text):
"""Runs basic whitespace cleaning and splitting on a piece of text."""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
class BertTokenizer(object):
"""Runs end-to-end tokenization: punctuation splitting + wordpiece"""
def __init__(self, vocab_file, do_lower_case=True, max_len=None,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
if not os.path.isfile(vocab_file):
raise ValueError(
"Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file))
self.vocab = load_vocab(vocab_file)
self.ids_to_tokens = collections.OrderedDict(
[(ids, tok) for tok, ids in self.vocab.items()])
self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case,
never_split=never_split)
self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab)
self.max_len = max_len if max_len is not None else int(1e12)
def tokenize(self, text):
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token)
return split_tokens
def convert_tokens_to_ids(self, tokens):
"""Converts a sequence of tokens into ids using the vocab."""
ids = []
for token in tokens:
ids.append(self.vocab[token])
if len(ids) > self.max_len:
raise ValueError(
"Token indices sequence length is longer than the specified maximum "
" sequence length for this BERT model ({} > {}). Running this"
" sequence through BERT will result in indexing errors".format(len(ids), self.max_len)
)
return ids
def convert_ids_to_tokens(self, ids):
"""Converts a sequence of ids in wordpiece tokens using the vocab."""
tokens = []
for i in ids:
tokens.append(self.ids_to_tokens[i])
return tokens
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file.
Download and cache the pre-trained model file if needed.
"""
if pretrained_model_name_or_path in PRETRAINED_VOCAB_ARCHIVE_MAP:
vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
vocab_file = pretrained_model_name_or_path
if os.path.isdir(vocab_file):
vocab_file = os.path.join(vocab_file, VOCAB_NAME)
# redirect to the cache, if necessary
try:
resolved_vocab_file = vocab_file
except EnvironmentError:
logger.error(
"Model name '{}' was not found in model name list ({}). "
"We assumed '{}' was a path or url but couldn't find any file "
"associated to this path or url.".format(
pretrained_model_name_or_path,
', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()),
vocab_file))
return None
if resolved_vocab_file == vocab_file:
logger.info("loading vocabulary file {}".format(vocab_file))
else:
logger.info("loading vocabulary file {} from cache at {}".format(
vocab_file, resolved_vocab_file))
if pretrained_model_name_or_path in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP:
# if we're using a pretrained model, ensure the tokenizer won't index sequences longer
# than the number of positional embeddings
max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name_or_path]
kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len)
# Instantiate tokenizer.
tokenizer = cls(resolved_vocab_file, *inputs, **kwargs)
return tokenizer
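# A minimal usage sketch of BertTokenizer (the vocab path is hypothetical; the
# exact word pieces produced depend on the vocabulary file that is loaded):
#   tokenizer = BertTokenizer('vocab.txt', do_lower_case=True)
#   tokens = tokenizer.tokenize("unaffable weather")
#   ids = tokenizer.convert_tokens_to_ids(tokens)
#   assert tokenizer.convert_ids_to_tokens(ids) == tokens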
class BasicTokenizer(object):
"""Runs basic tokenization (punctuation splitting, lower casing, etc.)."""
def __init__(self,
do_lower_case=True,
never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")):
"""Constructs a BasicTokenizer.
Args:
do_lower_case: Whether to lower case the input.
"""
self.do_lower_case = do_lower_case
self.never_split = never_split
def tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.do_lower_case and token not in self.never_split:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = whitespace_tokenize(" ".join(split_tokens))
return output_tokens
def _run_strip_accents(self, text):
"""Strips accents from a piece of text."""
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output)
def _run_split_on_punc(self, text):
"""Splits punctuation on a piece of text."""
if text in self.never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _tokenize_chinese_chars(self, text):
"""Adds whitespace around any CJK character."""
output = []
for char in text:
cp = ord(char)
if self._is_chinese_char(cp):
output.append(" ")
output.append(char)
output.append(" ")
else:
output.append(char)
return "".join(output)
def _is_chinese_char(self, cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
def _clean_text(self, text):
"""Performs invalid character removal and whitespace cleanup on text."""
output = []
for char in text:
cp = ord(char)
if cp == 0 or cp == 0xfffd or _is_control(char):
continue
if _is_whitespace(char):
output.append(" ")
else:
output.append(char)
return "".join(output)
class WordpieceTokenizer(object):
"""Runs WordPiece tokenization."""
def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100):
self.vocab = vocab
self.unk_token = unk_token
self.max_input_chars_per_word = max_input_chars_per_word
def tokenize(self, text):
"""Tokenizes a piece of text into its word pieces.
This uses a greedy longest-match-first algorithm to perform tokenization
using the given vocabulary.
For example:
input = "unaffable"
output = ["un", "##aff", "##able"]
Args:
text: A single token or whitespace separated tokens. This should have
already been passed through `BasicTokenizer`.
Returns:
A list of wordpiece tokens.
"""
output_tokens = []
for token in whitespace_tokenize(text):
chars = list(token)
if len(chars) > self.max_input_chars_per_word:
output_tokens.append(self.unk_token)
continue
is_bad = False
start = 0
sub_tokens = []
while start < len(chars):
end = len(chars)
cur_substr = None
while start < end:
substr = "".join(chars[start:end])
if start > 0:
substr = "##" + substr
if substr in self.vocab:
cur_substr = substr
break
end -= 1
if cur_substr is None:
is_bad = True
break
sub_tokens.append(cur_substr)
start = end
if is_bad:
output_tokens.append(self.unk_token)
else:
output_tokens.extend(sub_tokens)
return output_tokens
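# Worked trace of the greedy longest-match-first loop above, assuming a toy
# vocabulary {"un", "##aff", "##able"} and the input token "unaffable":
#   start=0: longest prefix found in the vocab is "un"    -> start moves to 2
#   start=2: longest "##"-prefixed match is "##aff"       -> start moves to 5
#   start=5: "##able" is in the vocab                     -> start moves to 9 (done)
#   result: ["un", "##aff", "##able"]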
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/bert/tokenization.py |
# coding=utf-8
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import json
import numpy as np
import collections
from utils.bert.tokenization import (BasicTokenizer, BertTokenizer, whitespace_tokenize)
class SquadExample(object):
"""
A single training/test example for the Squad dataset.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_example(question_text, context, version_2_with_negative):
""" reads a question and a context, and turns it into a SquadExample """
#
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
#
doc_tokens = []
prev_is_whitespace = True
for c in context:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
#
example = SquadExample(
qas_id=0,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False
)
return example
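# Example of the whitespace tokenization above: the context "Who wrote  Hamlet?"
# yields doc_tokens == ["Who", "wrote", "Hamlet?"] (punctuation stays attached;
# the WordPiece split happens later in convert_example_to_feature).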
def convert_example_to_feature(example, tokenizer, max_seq_length,
doc_stride, max_query_length):
""" converts an example into a feature """
unique_id = 1000000000
examples = [example]
features = []
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible))
unique_id += 1
assert len(features) == 1, "too large input"
return features[0]
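# Sliding-window sketch for the doc_spans loop above: with 150 document word
# pieces, max_seq_length=384 and a 20-token query, max_tokens_for_doc is
# 384 - 20 - 3 = 361, so the whole context fits in a single
# DocSpan(start=0, length=150) and exactly one feature is built (the assert
# above requires this single-span case).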
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
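# Example: for spans covering token positions 0-3 and 2-5, the token at
# position 2 has (left=2, right=1) context in the first span and
# (left=0, right=3) in the second; min(left, right) + 0.01 * length gives
# scores 1.04 vs 0.04, so the first span is its "max context" span.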
RawResult = collections.namedtuple("RawResult", ["unique_id", "start_logits", "end_logits"])
def get_predictions(example, feature, start_logits, end_logits, n_best_size,
max_answer_length, do_lower_case,
version_2_with_negative, null_score_diff_threshold):
"""Write final predictions to the json file and log-odds of null if needed."""
all_examples = [example]
all_features = [feature]
all_results = [RawResult(unique_id=1000000000,start_logits=start_logits,end_logits=end_logits)]
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_indices_of_largest_logits(result.start_logits)
end_indexes = _get_indices_of_largest_logits(result.end_logits)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
return nbest_json
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
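# Quick numeric check of _compute_softmax (values rounded for illustration):
#   _compute_softmax([1.0, 2.0, 3.0])
#   -> exp([-2, -1, 0]) = [0.135, 0.368, 1.000], sum = 1.503
#   -> probabilities  ~ [0.090, 0.245, 0.665]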
def _get_indices_of_largest_logits(logits):
""" sort logits and return the indices of the sorted array """
indices_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
indices = map(lambda x: x[0], indices_and_score)
indices = list(indices)
return indices
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/bert/preprocessing.py |
#!/usr/bin/python
# Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import librosa
import soundfile as sf
import math
from os import system
import numpy as np
from tensorrtserver.api import *
import tensorrtserver.api.model_config_pb2 as model_config
import grpc
from tensorrtserver.api import api_pb2
from tensorrtserver.api import grpc_service_pb2
from tensorrtserver.api import grpc_service_pb2_grpc
WINDOWS_FNS = {"hanning": np.hanning, "hamming": np.hamming, "none": None}
def model_dtype_to_np(model_dtype):
if model_dtype == model_config.TYPE_BOOL:
return np.bool
elif model_dtype == model_config.TYPE_INT8:
return np.int8
elif model_dtype == model_config.TYPE_INT16:
return np.int16
elif model_dtype == model_config.TYPE_INT32:
return np.int32
elif model_dtype == model_config.TYPE_INT64:
return np.int64
elif model_dtype == model_config.TYPE_UINT8:
return np.uint8
elif model_dtype == model_config.TYPE_UINT16:
return np.uint16
elif model_dtype == model_config.TYPE_UINT32:
return np.uint32
elif model_dtype == model_config.TYPE_FP16:
return np.float16
elif model_dtype == model_config.TYPE_FP32:
return np.float32
elif model_dtype == model_config.TYPE_FP64:
return np.float64
elif model_dtype == model_config.TYPE_STRING:
return np.dtype(object)
return None
def ctc_decoder_predictions_tensor(prediction_cpu_tensor, batch_size, labels):
"""
Takes the output of the greedy CTC decoder and performs the CTC decoding step:
collapses repeated symbols and removes the blank symbol.
Args:
prediction_cpu_tensor: model output tensor
labels: a list of labels
Returns:
a list of decoded hypotheses, one per batch element
"""
blank_id = len(labels) - 1
hypotheses = []
labels_map = dict([(i, labels[i]) for i in range(len(labels))])
# iterate over batch
prediction_cpu_tensor = prediction_cpu_tensor.reshape((batch_size, int(prediction_cpu_tensor.size/batch_size)))
for ind in range(batch_size):
prediction = prediction_cpu_tensor[ind].tolist()
# CTC decoding procedure
decoded_prediction = []
previous = len(labels) - 1 # id of a blank symbol
for p in prediction:
if (p != previous or previous == blank_id) and p != blank_id:
decoded_prediction.append(p)
previous = p
hypothesis = ''.join([labels_map[c] for c in decoded_prediction])
hypotheses.append(hypothesis)
return hypotheses
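# Illustrative collapse with the label set used by SpeechClient below
# (index 8='h', 5='e', 12='l', 15='o', 28='<BLANK>'):
#   frame-wise argmax [8, 8, 5, 12, 28, 12, 15, 15]
#   -> repeats merge, blanks drop, repeats after a blank are kept
#   -> [8, 5, 12, 12, 15] -> "hello"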
class SpeechClient(object):
def __init__(self, url, protocol, model_name, model_version, batch_size,
model_platform=None, verbose=False,
mode="batch",
from_features=True):
self.model_name = model_name
self.model_version = model_version
self.verbose = verbose
self.batch_size = batch_size
self.transpose_audio_features = False
self.grpc_stub = None
self.ctx = None
self.correlation_id = 0
self.first_run = True
if mode == "streaming" or mode == "asynchronous":
self.correlation_id = 1
self.buffer = []
self.ctx = InferContext(url, protocol, model_name, model_version,
verbose, self.correlation_id, False)
server_ctx = ServerStatusContext(url, protocol, model_name,
verbose)
server_status = server_ctx.get_server_status()
self.audio_signals_name, self.num_samples_name, self.transcripts_name, \
self.audio_signals_type, self.num_samples_type, self.transcripts_type = self.parse_model(server_status, model_name,
batch_size, model_platform, verbose)
self.labels = [" ", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z", "'", "<BLANK>"]
def postprocess(self, results, labels):
if len(results) != 1:
raise Exception("expected 1 result, got {}".format(len(results)))
transcript_values = results['TRANSCRIPT']
for transcript, filename in zip(transcript_values,
labels):
hypotheses = ctc_decoder_predictions_tensor(transcript, self.batch_size, self.labels)
print('---')
print('File: ', filename)
print("Final transcript: ", hypotheses)
print('---')
return hypotheses
def check_num_samples(self, num_samples):
if num_samples.data_type != model_config.TYPE_UINT32 and num_samples.data_type != model_config.TYPE_INT32:
raise Exception(
"expecting num_samples datatype to be TYPE_UINT32/TYPE_INT32, "
"model '" + self.model_name + "' output type is " +
model_config.DataType.Name(num_samples.data_type))
if len(num_samples.dims) != 1:
raise Exception("Expecting num_samples to have 1 dimension, "
"model '{}' num_samples has {}".format(
self.model_name, len(num_samples.dims)))
def parse_model(self, server_status,
model_name, batch_size,
model_platform=None, verbose=False):
"""
Check the configuration of the ensemble model
"""
if model_name not in server_status.model_status:
raise Exception("unable to get status for '" + model_name + "'")
status = server_status.model_status[model_name]
config = status.config
self.model_platform = model_platform
# Inputs are:
# 1) audio_signal: raw audio samples [num_samples]
# 2) sample_rate: sample rate of audio
# 3) num_samples: length of audio
if len(config.input) < 2:
raise Exception(
"expecting 2-3 inputs, got {}".format(len(config.input)))
# Outputs are:
# 1) transcripts: candidate transcripts
if len(config.output) != 1:
raise Exception(
"expecting 1 output, got {}".format(len(config.output)))
audio_signal = config.input[0]
if len(config.input) > 1:
num_samples = config.input[1]
self.check_num_samples(num_samples);
transcripts = config.output[0]
expected_audio_signal_dim = 1
expected_audio_signal_type = model_config.TYPE_FP32
if audio_signal.data_type != expected_audio_signal_type:
raise Exception("expecting audio_signal datatype to be " +
model_config.DataType.Name(
expected_audio_signal_type) +
"model '" + model_name + "' output type is " +
model_config.DataType.Name(audio_signal.data_type))
# Model specifying maximum batch size of 0 indicates that batching
# is not supported and so the input tensors do not expect an "N"
# dimension (and 'batch_size' should be 1 so that only a single
# audio instance is inferred at a time).
max_batch_size = config.max_batch_size
if max_batch_size == 0:
if batch_size != 1:
raise Exception(
"batching not supported for model '" + model_name + "'")
else: # max_batch_size > 0
if batch_size > max_batch_size:
raise Exception(
"expecting batch size <= {} for model {}".format(
max_batch_size, model_name))
if len(audio_signal.dims) != expected_audio_signal_dim:
raise Exception("Expecting audio signal to have {} dimensions, "
"model '{}' audio_signal has {}".format(
expected_audio_signal_dim,
model_name,
len(audio_signal.dims)))
return (audio_signal.name, num_samples.name, transcripts.name,
model_dtype_to_np(audio_signal.data_type),
model_dtype_to_np(num_samples.data_type),
model_dtype_to_np(transcripts.data_type),
)
def update_audio_request(self, request, audio_generator):
for audio_signal, sample_rate, start, end in audio_generator:
# Delete the current inputs
input_batch = [audio_signal.astype(self.audio_signals_type)]
num_samples_batch = audio_signal.shape[0]
num_samples_batch = [np.asarray([num_samples_batch],
dtype=self.num_samples_type)]
flags = InferRequestHeader.FLAG_NONE
input_batch[0] = np.expand_dims(input_batch[0], axis=0)
audio_bytes = input_batch[0].tobytes()
num_samples_bytes = num_samples_batch[0].tobytes()
request.meta_data.input[0].dims[0] = audio_signal.shape[0]
request.meta_data.input[0].batch_byte_size = len(audio_bytes)
request.meta_data.input[1].dims[0] = 1
request.meta_data.input[1].batch_byte_size = len(num_samples_bytes)
if start:
request.meta_data.flags = flags | \
InferRequestHeader.FLAG_SEQUENCE_START
else:
request.meta_data.flags = flags;
# Send request with audio signal
del request.raw_input[:]
request.raw_input.extend([audio_bytes])
request.raw_input.extend([num_samples_bytes])
yield request
# If end, send empty request to flush out remaining audio
if end:
request.meta_data.flags = flags | \
InferRequestHeader.FLAG_SEQUENCE_END
zero_bytes = np.zeros(shape=input_batch[0].shape,
dtype=input_batch[0].dtype).tobytes()
del request.raw_input[:]
request.raw_input.extend([zero_bytes])
request.raw_input.extend([num_samples_bytes])
yield request
def recognize(self, audio_signal, filenames):
# Send a request with self.batch_size audio signals. Shorter signals in
# the batch are padded below (with Gaussian noise) to the length of the
# longest signal so that all inputs share a common shape.
flags = InferRequestHeader.FLAG_NONE
flags = flags | InferRequestHeader.FLAG_SEQUENCE_START
input_batch = []
input_filenames = []
max_num_samples_batch = 0
for idx in range(self.batch_size):
input_batch.append(audio_signal[idx].astype(
self.audio_signals_type))
input_filenames.append(filenames[idx])
num_samples = audio_signal[idx].shape[0]
if (num_samples > max_num_samples_batch):
max_num_samples_batch = num_samples
for idx in range(self.batch_size):
num_samples = input_batch[idx].shape[0]
print("num_samples : ", num_samples)
# input_batch[idx] = np.pad(input_batch[idx],
# ((0,
# max_num_samples_batch -
# num_samples)),
# mode='constant')
mean = np.mean(input_batch[idx])
std_var = np.std(input_batch[idx])
gauss_noise = np.random.normal(
mean,std_var,
max_num_samples_batch-num_samples)
input_batch[idx]= np.concatenate(
(input_batch[idx], gauss_noise.astype(
self.audio_signals_type)))
max_num_samples_batch = np.asarray([max_num_samples_batch],
dtype=self.num_samples_type)
num_samples_batch = [max_num_samples_batch] * self.batch_size
#print(num_samples_batch)
#print(input_batch)
#print(input_sample_rates)
# Send request
print("Sending request to transcribe file(s):", ",".join(
input_filenames))
if (self.model_platform == "obsolete_pyt"):
result = self.ctx.run(
{self.audio_signals_name: input_batch,
self.num_samples_name: num_samples_batch},
{self.transcripts_name: InferContext.ResultFormat.RAW},
self.batch_size, flags)
else:
result = self.ctx.run(
{self.audio_signals_name: input_batch,
self.num_samples_name: num_samples_batch},
{self.transcripts_name: InferContext.ResultFormat.RAW},
self.batch_size, flags)
hypotheses = self.postprocess(result, input_filenames)
return hypotheses
def preemphasis(signal, coeff=0.97):
return np.append(signal[0], signal[1:] - coeff * signal[:-1])
def normalize_signal(signal, gain=None):
"""
Normalize float32 signal to [-1, 1] range
"""
if gain is None:
gain = 1.0 / (np.max(np.abs(signal)) + 1e-5)
return signal * gain
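# Example: normalize_signal(np.array([0.5, -2.0, 1.0], dtype=np.float32))
# uses gain = 1 / (2.0 + 1e-5) and returns approximately [0.25, -1.0, 0.5].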
class AudioSegment(object):
"""Monaural audio segment abstraction.
:param samples: Audio samples [num_samples x num_channels].
:type samples: ndarray.float32
:param sample_rate: Audio sample rate.
:type sample_rate: int
:raises TypeError: If the sample data type is not float or int.
"""
def __init__(self, samples, sample_rate, target_sr=16000, trim=False,
trim_db=60):
"""Create audio segment from samples.
Samples are converted to float32 internally, with ints scaled to [-1, 1].
"""
samples = self._convert_samples_to_float32(samples)
if target_sr is not None and target_sr != sample_rate:
samples = librosa.core.resample(samples, sample_rate, target_sr)
sample_rate = target_sr
if trim:
samples, _ = librosa.effects.trim(samples, trim_db)
self._samples = samples
self._sample_rate = sample_rate
if self._samples.ndim >= 2:
self._samples = np.mean(self._samples, 1)
@staticmethod
def _convert_samples_to_float32(samples):
"""Convert sample type to float32.
Audio sample type is usually integer or float-point.
Integers will be scaled to [-1, 1] in float32.
"""
float32_samples = samples.astype('float32')
if samples.dtype in np.sctypes['int']:
bits = np.iinfo(samples.dtype).bits
float32_samples *= (1. / 2 ** (bits - 1))
elif samples.dtype in np.sctypes['float']:
pass
else:
raise TypeError("Unsupported sample type: %s." % samples.dtype)
return float32_samples
@classmethod
def from_file(cls, filename, target_sr=16000, int_values=False, offset=0,
duration=0, trim=False):
"""
Load a file supported by librosa and return as an AudioSegment.
:param filename: path of file to load
:param target_sr: the desired sample rate
:param int_values: if true, load samples as 32-bit integers
:param offset: offset in seconds when loading audio
:param duration: duration in seconds when loading audio
:return: AudioSegment instance containing the loaded samples
"""
with sf.SoundFile(filename, 'r') as f:
dtype = 'int32' if int_values else 'float32'
sample_rate = f.samplerate
if offset > 0:
f.seek(int(offset * sample_rate))
if duration > 0:
samples = f.read(int(duration * sample_rate), dtype=dtype)
else:
samples = f.read(dtype=dtype)
samples = samples.transpose()
return cls(samples, sample_rate, target_sr=target_sr, trim=trim)
@property
def samples(self):
return self._samples.copy()
@property
def sample_rate(self):
return self._sample_rate
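# Minimal usage sketch (the wav path is hypothetical): load a file resampled to
# 16 kHz and hand its float32 samples to SpeechClient.recognize above.
#   segment = AudioSegment.from_file("example.wav", target_sr=16000)
#   audio = segment.samples        # 1-D float32 numpy array
#   rate = segment.sample_rate     # 16000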
# define our clear function
def clear_screen():
_ = system('clear')
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/jasper/speech_utils.py |
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
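# Illustrative lookup (assumes a local copy of the CMU dictionary file; the
# path and the exact pronunciation variants depend on the dictionary version):
#   d = CMUDict('cmudict.dict')
#   d.lookup('hello')   # -> e.g. ['HH AH0 L OW1', 'HH EH0 L OW1']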
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/cmudict.py |
""" from https://github.com/keithito/tacotron """
import re
from utils.tacotron2 import cleaners
from utils.tacotron2.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
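# Round-trip sketch using the docstring example above (the integer ids depend
# on the symbols table, so only the decoded text is shown):
#   seq = text_to_sequence("Turn left on {HH AW1 S S T AH0 N} Street.", ["english_cleaners"])
#   sequence_to_text(seq)  # -> "turn left on {HH AW1 S S T AH0 N} street."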
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/__init__.py |
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
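# Worked example of the full pipeline (output is illustrative and depends on
# the installed inflect version):
#   normalize_numbers("I paid $12.50 for 1,000 tickets in 2008.")
#   -> "I paid twelve dollars, fifty cents for one thousand tickets in two thousand eight."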
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/numbers.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run
through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
'''
from utils.tacotron2 import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/symbols.py |
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .numbers import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecoder(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
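# Quick sketch of english_cleaners combining the steps above (assumes the
# unidecoder pass leaves plain ASCII unchanged):
#   english_cleaners("Dr. Jones bought 15 apples.")
#   -> "doctor jones bought fifteen apples."
# (numbers are expanded before abbreviations, and the text is lowercased before
# the case-insensitive abbreviation regexes run)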
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/cleaners.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Sindre Sorhus <[email protected]> (https://sindresorhus.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/sindresorhus/transliterate/blob/main/replacements.js
#
replacements = [
# German umlauts
['ß', 'ss'],
['ẞ', 'Ss'],
['ä', 'ae'],
['Ä', 'Ae'],
['ö', 'oe'],
['Ö', 'Oe'],
['ü', 'ue'],
['Ü', 'Ue'],
# Latin
['À', 'A'],
['Á', 'A'],
['Â', 'A'],
['Ã', 'A'],
['Ä', 'Ae'],
['Å', 'A'],
['Æ', 'AE'],
['Ç', 'C'],
['È', 'E'],
['É', 'E'],
['Ê', 'E'],
['Ë', 'E'],
['Ì', 'I'],
['Í', 'I'],
['Î', 'I'],
['Ï', 'I'],
['Ð', 'D'],
['Ñ', 'N'],
['Ò', 'O'],
['Ó', 'O'],
['Ô', 'O'],
['Õ', 'O'],
['Ö', 'Oe'],
['Ő', 'O'],
['Ø', 'O'],
['Ù', 'U'],
['Ú', 'U'],
['Û', 'U'],
['Ü', 'Ue'],
['Ű', 'U'],
['Ý', 'Y'],
['Þ', 'TH'],
['ß', 'ss'],
['à', 'a'],
['á', 'a'],
['â', 'a'],
['ã', 'a'],
['ä', 'ae'],
['å', 'a'],
['æ', 'ae'],
['ç', 'c'],
['è', 'e'],
['é', 'e'],
['ê', 'e'],
['ë', 'e'],
['ì', 'i'],
['í', 'i'],
['î', 'i'],
['ï', 'i'],
['ð', 'd'],
['ñ', 'n'],
['ò', 'o'],
['ó', 'o'],
['ô', 'o'],
['õ', 'o'],
['ö', 'oe'],
['ő', 'o'],
['ø', 'o'],
['ù', 'u'],
['ú', 'u'],
['û', 'u'],
['ü', 'ue'],
['ű', 'u'],
['ý', 'y'],
['þ', 'th'],
['ÿ', 'y'],
['ẞ', 'SS'],
# Vietnamese
['à', 'a'],
['À', 'A'],
['á', 'a'],
['Á', 'A'],
['â', 'a'],
['Â', 'A'],
['ã', 'a'],
['Ã', 'A'],
['è', 'e'],
['È', 'E'],
['é', 'e'],
['É', 'E'],
['ê', 'e'],
['Ê', 'E'],
['ì', 'i'],
['Ì', 'I'],
['í', 'i'],
['Í', 'I'],
['ò', 'o'],
['Ò', 'O'],
['ó', 'o'],
['Ó', 'O'],
['ô', 'o'],
['Ô', 'O'],
['õ', 'o'],
['Õ', 'O'],
['ù', 'u'],
['Ù', 'U'],
['ú', 'u'],
['Ú', 'U'],
['ý', 'y'],
['Ý', 'Y'],
['ă', 'a'],
['Ă', 'A'],
['Đ', 'D'],
['đ', 'd'],
['ĩ', 'i'],
['Ĩ', 'I'],
['ũ', 'u'],
['Ũ', 'U'],
['ơ', 'o'],
['Ơ', 'O'],
['ư', 'u'],
['Ư', 'U'],
['ạ', 'a'],
['Ạ', 'A'],
['ả', 'a'],
['Ả', 'A'],
['ấ', 'a'],
['Ấ', 'A'],
['ầ', 'a'],
['Ầ', 'A'],
['ẩ', 'a'],
['Ẩ', 'A'],
['ẫ', 'a'],
['Ẫ', 'A'],
['ậ', 'a'],
['Ậ', 'A'],
['ắ', 'a'],
['Ắ', 'A'],
['ằ', 'a'],
['Ằ', 'A'],
['ẳ', 'a'],
['Ẳ', 'A'],
['ẵ', 'a'],
['Ẵ', 'A'],
['ặ', 'a'],
['Ặ', 'A'],
['ẹ', 'e'],
['Ẹ', 'E'],
['ẻ', 'e'],
['Ẻ', 'E'],
['ẽ', 'e'],
['Ẽ', 'E'],
['ế', 'e'],
['Ế', 'E'],
['ề', 'e'],
['Ề', 'E'],
['ể', 'e'],
['Ể', 'E'],
['ễ', 'e'],
['Ễ', 'E'],
['ệ', 'e'],
['Ệ', 'E'],
['ỉ', 'i'],
['Ỉ', 'I'],
['ị', 'i'],
['Ị', 'I'],
['ọ', 'o'],
['Ọ', 'O'],
['ỏ', 'o'],
['Ỏ', 'O'],
['ố', 'o'],
['Ố', 'O'],
['ồ', 'o'],
['Ồ', 'O'],
['ổ', 'o'],
['Ổ', 'O'],
['ỗ', 'o'],
['Ỗ', 'O'],
['ộ', 'o'],
['Ộ', 'O'],
['ớ', 'o'],
['Ớ', 'O'],
['ờ', 'o'],
['Ờ', 'O'],
['ở', 'o'],
['Ở', 'O'],
['ỡ', 'o'],
['Ỡ', 'O'],
['ợ', 'o'],
['Ợ', 'O'],
['ụ', 'u'],
['Ụ', 'U'],
['ủ', 'u'],
['Ủ', 'U'],
['ứ', 'u'],
['Ứ', 'U'],
['ừ', 'u'],
['Ừ', 'U'],
['ử', 'u'],
['Ử', 'U'],
['ữ', 'u'],
['Ữ', 'U'],
['ự', 'u'],
['Ự', 'U'],
['ỳ', 'y'],
['Ỳ', 'Y'],
['ỵ', 'y'],
['Ỵ', 'Y'],
['ỷ', 'y'],
['Ỷ', 'Y'],
['ỹ', 'y'],
['Ỹ', 'Y'],
# Arabic
['ء', 'e'],
['آ', 'a'],
['أ', 'a'],
['ؤ', 'w'],
['إ', 'i'],
['ئ', 'y'],
['ا', 'a'],
['ب', 'b'],
['ة', 't'],
['ت', 't'],
['ث', 'th'],
['ج', 'j'],
['ح', 'h'],
['خ', 'kh'],
['د', 'd'],
['ذ', 'dh'],
['ر', 'r'],
['ز', 'z'],
['س', 's'],
['ش', 'sh'],
['ص', 's'],
['ض', 'd'],
['ط', 't'],
['ظ', 'z'],
['ع', 'e'],
['غ', 'gh'],
['ـ', '_'],
['ف', 'f'],
['ق', 'q'],
['ك', 'k'],
['ل', 'l'],
['م', 'm'],
['ن', 'n'],
['ه', 'h'],
['و', 'w'],
['ى', 'a'],
['ي', 'y'],
['َ', 'a'],
['ُ', 'u'],
['ِ', 'i'],
['٠', '0'],
['١', '1'],
['٢', '2'],
['٣', '3'],
['٤', '4'],
['٥', '5'],
['٦', '6'],
['٧', '7'],
['٨', '8'],
['٩', '9'],
# Persian / Farsi
['چ', 'ch'],
['ک', 'k'],
['گ', 'g'],
['پ', 'p'],
['ژ', 'zh'],
['ی', 'y'],
['۰', '0'],
['۱', '1'],
['۲', '2'],
['۳', '3'],
['۴', '4'],
['۵', '5'],
['۶', '6'],
['۷', '7'],
['۸', '8'],
['۹', '9'],
# Pashto
['ټ', 'p'],
['ځ', 'z'],
['څ', 'c'],
['ډ', 'd'],
['ﺫ', 'd'],
['ﺭ', 'r'],
['ړ', 'r'],
['ﺯ', 'z'],
['ږ', 'g'],
['ښ', 'x'],
['ګ', 'g'],
['ڼ', 'n'],
['ۀ', 'e'],
['ې', 'e'],
['ۍ', 'ai'],
# Urdu
['ٹ', 't'],
['ڈ', 'd'],
['ڑ', 'r'],
['ں', 'n'],
['ہ', 'h'],
['ھ', 'h'],
['ے', 'e'],
# Russian
['А', 'A'],
['а', 'a'],
['Б', 'B'],
['б', 'b'],
['В', 'V'],
['в', 'v'],
['Г', 'G'],
['г', 'g'],
['Д', 'D'],
['д', 'd'],
['ъе', 'ye'],
['Ъе', 'Ye'],
['ъЕ', 'yE'],
['ЪЕ', 'YE'],
['Е', 'E'],
['е', 'e'],
['Ё', 'Yo'],
['ё', 'yo'],
['Ж', 'Zh'],
['ж', 'zh'],
['З', 'Z'],
['з', 'z'],
['И', 'I'],
['и', 'i'],
['ый', 'iy'],
['Ый', 'Iy'],
['ЫЙ', 'IY'],
['ыЙ', 'iY'],
['Й', 'Y'],
['й', 'y'],
['К', 'K'],
['к', 'k'],
['Л', 'L'],
['л', 'l'],
['М', 'M'],
['м', 'm'],
['Н', 'N'],
['н', 'n'],
['О', 'O'],
['о', 'o'],
['П', 'P'],
['п', 'p'],
['Р', 'R'],
['р', 'r'],
['С', 'S'],
['с', 's'],
['Т', 'T'],
['т', 't'],
['У', 'U'],
['у', 'u'],
['Ф', 'F'],
['ф', 'f'],
['Х', 'Kh'],
['х', 'kh'],
['Ц', 'Ts'],
['ц', 'ts'],
['Ч', 'Ch'],
['ч', 'ch'],
['Ш', 'Sh'],
['ш', 'sh'],
['Щ', 'Sch'],
['щ', 'sch'],
['Ъ', ''],
['ъ', ''],
['Ы', 'Y'],
['ы', 'y'],
['Ь', ''],
['ь', ''],
['Э', 'E'],
['э', 'e'],
['Ю', 'Yu'],
['ю', 'yu'],
['Я', 'Ya'],
['я', 'ya'],
# Romanian
['ă', 'a'],
['Ă', 'A'],
['ș', 's'],
['Ș', 'S'],
['ț', 't'],
['Ț', 'T'],
['ţ', 't'],
['Ţ', 'T'],
# Turkish
['ş', 's'],
['Ş', 'S'],
['ç', 'c'],
['Ç', 'C'],
['ğ', 'g'],
['Ğ', 'G'],
['ı', 'i'],
['İ', 'I'],
# Armenian
['ա', 'a'],
['Ա', 'A'],
['բ', 'b'],
['Բ', 'B'],
['գ', 'g'],
['Գ', 'G'],
['դ', 'd'],
['Դ', 'D'],
['ե', 'ye'],
['Ե', 'Ye'],
['զ', 'z'],
['Զ', 'Z'],
['է', 'e'],
['Է', 'E'],
['ը', 'y'],
['Ը', 'Y'],
['թ', 't'],
['Թ', 'T'],
['ժ', 'zh'],
['Ժ', 'Zh'],
['ի', 'i'],
['Ի', 'I'],
['լ', 'l'],
['Լ', 'L'],
['խ', 'kh'],
['Խ', 'Kh'],
['ծ', 'ts'],
['Ծ', 'Ts'],
['կ', 'k'],
['Կ', 'K'],
['հ', 'h'],
['Հ', 'H'],
['ձ', 'dz'],
['Ձ', 'Dz'],
['ղ', 'gh'],
['Ղ', 'Gh'],
['ճ', 'tch'],
['Ճ', 'Tch'],
['մ', 'm'],
['Մ', 'M'],
['յ', 'y'],
['Յ', 'Y'],
['ն', 'n'],
['Ն', 'N'],
['շ', 'sh'],
['Շ', 'Sh'],
['ո', 'vo'],
['Ո', 'Vo'],
['չ', 'ch'],
['Չ', 'Ch'],
['պ', 'p'],
['Պ', 'P'],
['ջ', 'j'],
['Ջ', 'J'],
['ռ', 'r'],
['Ռ', 'R'],
['ս', 's'],
['Ս', 'S'],
['վ', 'v'],
['Վ', 'V'],
['տ', 't'],
['Տ', 'T'],
['ր', 'r'],
['Ր', 'R'],
['ց', 'c'],
['Ց', 'C'],
['ու', 'u'],
['ՈՒ', 'U'],
['Ու', 'U'],
['փ', 'p'],
['Փ', 'P'],
['ք', 'q'],
['Ք', 'Q'],
['օ', 'o'],
['Օ', 'O'],
['ֆ', 'f'],
['Ֆ', 'F'],
['և', 'yev'],
# Georgian
['ა', 'a'],
['ბ', 'b'],
['გ', 'g'],
['დ', 'd'],
['ე', 'e'],
['ვ', 'v'],
['ზ', 'z'],
['თ', 't'],
['ი', 'i'],
['კ', 'k'],
['ლ', 'l'],
['მ', 'm'],
['ნ', 'n'],
['ო', 'o'],
['პ', 'p'],
['ჟ', 'zh'],
['რ', 'r'],
['ს', 's'],
['ტ', 't'],
['უ', 'u'],
['ფ', 'ph'],
['ქ', 'q'],
['ღ', 'gh'],
['ყ', 'k'],
['შ', 'sh'],
['ჩ', 'ch'],
['ც', 'ts'],
['ძ', 'dz'],
['წ', 'ts'],
['ჭ', 'tch'],
['ხ', 'kh'],
['ჯ', 'j'],
['ჰ', 'h'],
# Czech
['č', 'c'],
['ď', 'd'],
['ě', 'e'],
['ň', 'n'],
['ř', 'r'],
['š', 's'],
['ť', 't'],
['ů', 'u'],
['ž', 'z'],
['Č', 'C'],
['Ď', 'D'],
['Ě', 'E'],
['Ň', 'N'],
['Ř', 'R'],
['Š', 'S'],
['Ť', 'T'],
['Ů', 'U'],
['Ž', 'Z'],
# Dhivehi
['ހ', 'h'],
['ށ', 'sh'],
['ނ', 'n'],
['ރ', 'r'],
['ބ', 'b'],
['ޅ', 'lh'],
['ކ', 'k'],
['އ', 'a'],
['ވ', 'v'],
['މ', 'm'],
['ފ', 'f'],
['ދ', 'dh'],
['ތ', 'th'],
['ލ', 'l'],
['ގ', 'g'],
['ޏ', 'gn'],
['ސ', 's'],
['ޑ', 'd'],
['ޒ', 'z'],
['ޓ', 't'],
['ޔ', 'y'],
['ޕ', 'p'],
['ޖ', 'j'],
['ޗ', 'ch'],
['ޘ', 'tt'],
['ޙ', 'hh'],
['ޚ', 'kh'],
['ޛ', 'th'],
['ޜ', 'z'],
['ޝ', 'sh'],
['ޞ', 's'],
['ޟ', 'd'],
['ޠ', 't'],
['ޡ', 'z'],
['ޢ', 'a'],
['ޣ', 'gh'],
['ޤ', 'q'],
['ޥ', 'w'],
['ަ', 'a'],
['ާ', 'aa'],
['ި', 'i'],
['ީ', 'ee'],
['ު', 'u'],
['ޫ', 'oo'],
['ެ', 'e'],
['ޭ', 'ey'],
['ޮ', 'o'],
['ޯ', 'oa'],
['ް', ''],
# Greek
['α', 'a'],
['β', 'v'],
['γ', 'g'],
['δ', 'd'],
['ε', 'e'],
['ζ', 'z'],
['η', 'i'],
['θ', 'th'],
['ι', 'i'],
['κ', 'k'],
['λ', 'l'],
['μ', 'm'],
['ν', 'n'],
['ξ', 'ks'],
['ο', 'o'],
['π', 'p'],
['ρ', 'r'],
['σ', 's'],
['τ', 't'],
['υ', 'y'],
['φ', 'f'],
['χ', 'x'],
['ψ', 'ps'],
['ω', 'o'],
['ά', 'a'],
['έ', 'e'],
['ί', 'i'],
['ό', 'o'],
['ύ', 'y'],
['ή', 'i'],
['ώ', 'o'],
['ς', 's'],
['ϊ', 'i'],
['ΰ', 'y'],
['ϋ', 'y'],
['ΐ', 'i'],
['Α', 'A'],
['Β', 'B'],
['Γ', 'G'],
['Δ', 'D'],
['Ε', 'E'],
['Ζ', 'Z'],
['Η', 'I'],
['Θ', 'TH'],
['Ι', 'I'],
['Κ', 'K'],
['Λ', 'L'],
['Μ', 'M'],
['Ν', 'N'],
['Ξ', 'KS'],
['Ο', 'O'],
['Π', 'P'],
['Ρ', 'R'],
['Σ', 'S'],
['Τ', 'T'],
['Υ', 'Y'],
['Φ', 'F'],
['Χ', 'X'],
['Ψ', 'PS'],
['Ω', 'O'],
['Ά', 'A'],
['Έ', 'E'],
['Ί', 'I'],
['Ό', 'O'],
['Ύ', 'Y'],
['Ή', 'I'],
['Ώ', 'O'],
['Ϊ', 'I'],
['Ϋ', 'Y'],
# Disabled as it conflicts with German and Latin.
# Hungarian
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ö', 'o'],
# ['Ö', 'O'],
# ['ü', 'u'],
# ['Ü', 'U'],
# ['ű', 'u'],
# ['Ű', 'U'],
# Latvian
['ā', 'a'],
['ē', 'e'],
['ģ', 'g'],
['ī', 'i'],
['ķ', 'k'],
['ļ', 'l'],
['ņ', 'n'],
['ū', 'u'],
['Ā', 'A'],
['Ē', 'E'],
['Ģ', 'G'],
['Ī', 'I'],
['Ķ', 'K'],
['Ļ', 'L'],
['Ņ', 'N'],
['Ū', 'U'],
['č', 'c'],
['š', 's'],
['ž', 'z'],
['Č', 'C'],
['Š', 'S'],
['Ž', 'Z'],
# Lithuanian
['ą', 'a'],
['č', 'c'],
['ę', 'e'],
['ė', 'e'],
['į', 'i'],
['š', 's'],
['ų', 'u'],
['ū', 'u'],
['ž', 'z'],
['Ą', 'A'],
['Č', 'C'],
['Ę', 'E'],
['Ė', 'E'],
['Į', 'I'],
['Š', 'S'],
['Ų', 'U'],
['Ū', 'U'],
# Macedonian
['Ќ', 'Kj'],
['ќ', 'kj'],
['Љ', 'Lj'],
['љ', 'lj'],
['Њ', 'Nj'],
['њ', 'nj'],
['Тс', 'Ts'],
['тс', 'ts'],
# Polish
['ą', 'a'],
['ć', 'c'],
['ę', 'e'],
['ł', 'l'],
['ń', 'n'],
['ś', 's'],
['ź', 'z'],
['ż', 'z'],
['Ą', 'A'],
['Ć', 'C'],
['Ę', 'E'],
['Ł', 'L'],
['Ń', 'N'],
['Ś', 'S'],
['Ź', 'Z'],
['Ż', 'Z'],
# Disabled as it conflicts with Vietnamese.
# Serbian
# ['љ', 'lj'],
# ['њ', 'nj'],
# ['Љ', 'Lj'],
# ['Њ', 'Nj'],
# ['đ', 'dj'],
# ['Đ', 'Dj'],
# ['ђ', 'dj'],
# ['ј', 'j'],
# ['ћ', 'c'],
# ['џ', 'dz'],
# ['Ђ', 'Dj'],
# ['Ј', 'j'],
# ['Ћ', 'C'],
# ['Џ', 'Dz'],
# Disabled as it conflicts with German and Latin.
# Slovak
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ľ', 'l'],
# ['ĺ', 'l'],
# ['ŕ', 'r'],
# ['Ľ', 'L'],
# ['Ĺ', 'L'],
# ['Ŕ', 'R'],
# Disabled as it conflicts with German and Latin.
# Swedish
# ['å', 'o'],
# ['Å', 'o'],
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ë', 'e'],
# ['Ë', 'E'],
# ['ö', 'o'],
# ['Ö', 'O'],
# Ukrainian
['Є', 'Ye'],
['І', 'I'],
['Ї', 'Yi'],
['Ґ', 'G'],
['є', 'ye'],
['і', 'i'],
['ї', 'yi'],
['ґ', 'g'],
# Dutch
['IJ', 'IJ'],
['ij', 'ij'],
# Danish
# ['Æ', 'Ae'],
# ['Ø', 'Oe'],
# ['Å', 'Aa'],
# ['æ', 'ae'],
# ['ø', 'oe'],
# ['å', 'aa']
# Currencies
['¢', 'c'],
['¥', 'Y'],
['߿', 'b'],
['৳', 't'],
['૱', 'Bo'],
['฿', 'B'],
['₠', 'CE'],
['₡', 'C'],
['₢', 'Cr'],
['₣', 'F'],
['₥', 'm'],
['₦', 'N'],
['₧', 'Pt'],
['₨', 'Rs'],
['₩', 'W'],
['₫', 's'],
['€', 'E'],
['₭', 'K'],
['₮', 'T'],
['₯', 'Dp'],
['₰', 'S'],
['₱', 'P'],
['₲', 'G'],
['₳', 'A'],
['₴', 'S'],
['₵', 'C'],
['₶', 'tt'],
['₷', 'S'],
['₸', 'T'],
['₹', 'R'],
['₺', 'L'],
['₽', 'P'],
['₿', 'B'],
['﹩', '$'],
['¢', 'c'],
['¥', 'Y'],
['₩', 'W'],
# Latin
['𝐀', 'A'],
['𝐁', 'B'],
['𝐂', 'C'],
['𝐃', 'D'],
['𝐄', 'E'],
['𝐅', 'F'],
['𝐆', 'G'],
['𝐇', 'H'],
['𝐈', 'I'],
['𝐉', 'J'],
['𝐊', 'K'],
['𝐋', 'L'],
['𝐌', 'M'],
['𝐍', 'N'],
['𝐎', 'O'],
['𝐏', 'P'],
['𝐐', 'Q'],
['𝐑', 'R'],
['𝐒', 'S'],
['𝐓', 'T'],
['𝐔', 'U'],
['𝐕', 'V'],
['𝐖', 'W'],
['𝐗', 'X'],
['𝐘', 'Y'],
['𝐙', 'Z'],
['𝐚', 'a'],
['𝐛', 'b'],
['𝐜', 'c'],
['𝐝', 'd'],
['𝐞', 'e'],
['𝐟', 'f'],
['𝐠', 'g'],
['𝐡', 'h'],
['𝐢', 'i'],
['𝐣', 'j'],
['𝐤', 'k'],
['𝐥', 'l'],
['𝐦', 'm'],
['𝐧', 'n'],
['𝐨', 'o'],
['𝐩', 'p'],
['𝐪', 'q'],
['𝐫', 'r'],
['𝐬', 's'],
['𝐭', 't'],
['𝐮', 'u'],
['𝐯', 'v'],
['𝐰', 'w'],
['𝐱', 'x'],
['𝐲', 'y'],
['𝐳', 'z'],
['𝐴', 'A'],
['𝐵', 'B'],
['𝐶', 'C'],
['𝐷', 'D'],
['𝐸', 'E'],
['𝐹', 'F'],
['𝐺', 'G'],
['𝐻', 'H'],
['𝐼', 'I'],
['𝐽', 'J'],
['𝐾', 'K'],
['𝐿', 'L'],
['𝑀', 'M'],
['𝑁', 'N'],
['𝑂', 'O'],
['𝑃', 'P'],
['𝑄', 'Q'],
['𝑅', 'R'],
['𝑆', 'S'],
['𝑇', 'T'],
['𝑈', 'U'],
['𝑉', 'V'],
['𝑊', 'W'],
['𝑋', 'X'],
['𝑌', 'Y'],
['𝑍', 'Z'],
['𝑎', 'a'],
['𝑏', 'b'],
['𝑐', 'c'],
['𝑑', 'd'],
['𝑒', 'e'],
['𝑓', 'f'],
['𝑔', 'g'],
['𝑖', 'i'],
['𝑗', 'j'],
['𝑘', 'k'],
['𝑙', 'l'],
['𝑚', 'm'],
['𝑛', 'n'],
['𝑜', 'o'],
['𝑝', 'p'],
['𝑞', 'q'],
['𝑟', 'r'],
['𝑠', 's'],
['𝑡', 't'],
['𝑢', 'u'],
['𝑣', 'v'],
['𝑤', 'w'],
['𝑥', 'x'],
['𝑦', 'y'],
['𝑧', 'z'],
['𝑨', 'A'],
['𝑩', 'B'],
['𝑪', 'C'],
['𝑫', 'D'],
['𝑬', 'E'],
['𝑭', 'F'],
['𝑮', 'G'],
['𝑯', 'H'],
['𝑰', 'I'],
['𝑱', 'J'],
['𝑲', 'K'],
['𝑳', 'L'],
['𝑴', 'M'],
['𝑵', 'N'],
['𝑶', 'O'],
['𝑷', 'P'],
['𝑸', 'Q'],
['𝑹', 'R'],
['𝑺', 'S'],
['𝑻', 'T'],
['𝑼', 'U'],
['𝑽', 'V'],
['𝑾', 'W'],
['𝑿', 'X'],
['𝒀', 'Y'],
['𝒁', 'Z'],
['𝒂', 'a'],
['𝒃', 'b'],
['𝒄', 'c'],
['𝒅', 'd'],
['𝒆', 'e'],
['𝒇', 'f'],
['𝒈', 'g'],
['𝒉', 'h'],
['𝒊', 'i'],
['𝒋', 'j'],
['𝒌', 'k'],
['𝒍', 'l'],
['𝒎', 'm'],
['𝒏', 'n'],
['𝒐', 'o'],
['𝒑', 'p'],
['𝒒', 'q'],
['𝒓', 'r'],
['𝒔', 's'],
['𝒕', 't'],
['𝒖', 'u'],
['𝒗', 'v'],
['𝒘', 'w'],
['𝒙', 'x'],
['𝒚', 'y'],
['𝒛', 'z'],
['𝒜', 'A'],
['𝒞', 'C'],
['𝒟', 'D'],
['𝒢', 'G'],
['𝒥', 'J'],
['𝒦', 'K'],
['𝒩', 'N'],
['𝒪', 'O'],
['𝒫', 'P'],
['𝒬', 'Q'],
['𝒮', 'S'],
['𝒯', 'T'],
['𝒰', 'U'],
['𝒱', 'V'],
['𝒲', 'W'],
['𝒳', 'X'],
['𝒴', 'Y'],
['𝒵', 'Z'],
['𝒶', 'a'],
['𝒷', 'b'],
['𝒸', 'c'],
['𝒹', 'd'],
['𝒻', 'f'],
['𝒽', 'h'],
['𝒾', 'i'],
['𝒿', 'j'],
['𝓀', 'k'],
['𝓁', 'l'],
['𝓂', 'm'],
['𝓃', 'n'],
['𝓅', 'p'],
['𝓆', 'q'],
['𝓇', 'r'],
['𝓈', 's'],
['𝓉', 't'],
['𝓊', 'u'],
['𝓋', 'v'],
['𝓌', 'w'],
['𝓍', 'x'],
['𝓎', 'y'],
['𝓏', 'z'],
['𝓐', 'A'],
['𝓑', 'B'],
['𝓒', 'C'],
['𝓓', 'D'],
['𝓔', 'E'],
['𝓕', 'F'],
['𝓖', 'G'],
['𝓗', 'H'],
['𝓘', 'I'],
['𝓙', 'J'],
['𝓚', 'K'],
['𝓛', 'L'],
['𝓜', 'M'],
['𝓝', 'N'],
['𝓞', 'O'],
['𝓟', 'P'],
['𝓠', 'Q'],
['𝓡', 'R'],
['𝓢', 'S'],
['𝓣', 'T'],
['𝓤', 'U'],
['𝓥', 'V'],
['𝓦', 'W'],
['𝓧', 'X'],
['𝓨', 'Y'],
['𝓩', 'Z'],
['𝓪', 'a'],
['𝓫', 'b'],
['𝓬', 'c'],
['𝓭', 'd'],
['𝓮', 'e'],
['𝓯', 'f'],
['𝓰', 'g'],
['𝓱', 'h'],
['𝓲', 'i'],
['𝓳', 'j'],
['𝓴', 'k'],
['𝓵', 'l'],
['𝓶', 'm'],
['𝓷', 'n'],
['𝓸', 'o'],
['𝓹', 'p'],
['𝓺', 'q'],
['𝓻', 'r'],
['𝓼', 's'],
['𝓽', 't'],
['𝓾', 'u'],
['𝓿', 'v'],
['𝔀', 'w'],
['𝔁', 'x'],
['𝔂', 'y'],
['𝔃', 'z'],
['𝔄', 'A'],
['𝔅', 'B'],
['𝔇', 'D'],
['𝔈', 'E'],
['𝔉', 'F'],
['𝔊', 'G'],
['𝔍', 'J'],
['𝔎', 'K'],
['𝔏', 'L'],
['𝔐', 'M'],
['𝔑', 'N'],
['𝔒', 'O'],
['𝔓', 'P'],
['𝔔', 'Q'],
['𝔖', 'S'],
['𝔗', 'T'],
['𝔘', 'U'],
['𝔙', 'V'],
['𝔚', 'W'],
['𝔛', 'X'],
['𝔜', 'Y'],
['𝔞', 'a'],
['𝔟', 'b'],
['𝔠', 'c'],
['𝔡', 'd'],
['𝔢', 'e'],
['𝔣', 'f'],
['𝔤', 'g'],
['𝔥', 'h'],
['𝔦', 'i'],
['𝔧', 'j'],
['𝔨', 'k'],
['𝔩', 'l'],
['𝔪', 'm'],
['𝔫', 'n'],
['𝔬', 'o'],
['𝔭', 'p'],
['𝔮', 'q'],
['𝔯', 'r'],
['𝔰', 's'],
['𝔱', 't'],
['𝔲', 'u'],
['𝔳', 'v'],
['𝔴', 'w'],
['𝔵', 'x'],
['𝔶', 'y'],
['𝔷', 'z'],
['𝔸', 'A'],
['𝔹', 'B'],
['𝔻', 'D'],
['𝔼', 'E'],
['𝔽', 'F'],
['𝔾', 'G'],
['𝕀', 'I'],
['𝕁', 'J'],
['𝕂', 'K'],
['𝕃', 'L'],
['𝕄', 'M'],
['𝕆', 'O'],
['𝕊', 'S'],
['𝕋', 'T'],
['𝕌', 'U'],
['𝕍', 'V'],
['𝕎', 'W'],
['𝕏', 'X'],
['𝕐', 'Y'],
['𝕒', 'a'],
['𝕓', 'b'],
['𝕔', 'c'],
['𝕕', 'd'],
['𝕖', 'e'],
['𝕗', 'f'],
['𝕘', 'g'],
['𝕙', 'h'],
['𝕚', 'i'],
['𝕛', 'j'],
['𝕜', 'k'],
['𝕝', 'l'],
['𝕞', 'm'],
['𝕟', 'n'],
['𝕠', 'o'],
['𝕡', 'p'],
['𝕢', 'q'],
['𝕣', 'r'],
['𝕤', 's'],
['𝕥', 't'],
['𝕦', 'u'],
['𝕧', 'v'],
['𝕨', 'w'],
['𝕩', 'x'],
['𝕪', 'y'],
['𝕫', 'z'],
['𝕬', 'A'],
['𝕭', 'B'],
['𝕮', 'C'],
['𝕯', 'D'],
['𝕰', 'E'],
['𝕱', 'F'],
['𝕲', 'G'],
['𝕳', 'H'],
['𝕴', 'I'],
['𝕵', 'J'],
['𝕶', 'K'],
['𝕷', 'L'],
['𝕸', 'M'],
['𝕹', 'N'],
['𝕺', 'O'],
['𝕻', 'P'],
['𝕼', 'Q'],
['𝕽', 'R'],
['𝕾', 'S'],
['𝕿', 'T'],
['𝖀', 'U'],
['𝖁', 'V'],
['𝖂', 'W'],
['𝖃', 'X'],
['𝖄', 'Y'],
['𝖅', 'Z'],
['𝖆', 'a'],
['𝖇', 'b'],
['𝖈', 'c'],
['𝖉', 'd'],
['𝖊', 'e'],
['𝖋', 'f'],
['𝖌', 'g'],
['𝖍', 'h'],
['𝖎', 'i'],
['𝖏', 'j'],
['𝖐', 'k'],
['𝖑', 'l'],
['𝖒', 'm'],
['𝖓', 'n'],
['𝖔', 'o'],
['𝖕', 'p'],
['𝖖', 'q'],
['𝖗', 'r'],
['𝖘', 's'],
['𝖙', 't'],
['𝖚', 'u'],
['𝖛', 'v'],
['𝖜', 'w'],
['𝖝', 'x'],
['𝖞', 'y'],
['𝖟', 'z'],
['𝖠', 'A'],
['𝖡', 'B'],
['𝖢', 'C'],
['𝖣', 'D'],
['𝖤', 'E'],
['𝖥', 'F'],
['𝖦', 'G'],
['𝖧', 'H'],
['𝖨', 'I'],
['𝖩', 'J'],
['𝖪', 'K'],
['𝖫', 'L'],
['𝖬', 'M'],
['𝖭', 'N'],
['𝖮', 'O'],
['𝖯', 'P'],
['𝖰', 'Q'],
['𝖱', 'R'],
['𝖲', 'S'],
['𝖳', 'T'],
['𝖴', 'U'],
['𝖵', 'V'],
['𝖶', 'W'],
['𝖷', 'X'],
['𝖸', 'Y'],
['𝖹', 'Z'],
['𝖺', 'a'],
['𝖻', 'b'],
['𝖼', 'c'],
['𝖽', 'd'],
['𝖾', 'e'],
['𝖿', 'f'],
['𝗀', 'g'],
['𝗁', 'h'],
['𝗂', 'i'],
['𝗃', 'j'],
['𝗄', 'k'],
['𝗅', 'l'],
['𝗆', 'm'],
['𝗇', 'n'],
['𝗈', 'o'],
['𝗉', 'p'],
['𝗊', 'q'],
['𝗋', 'r'],
['𝗌', 's'],
['𝗍', 't'],
['𝗎', 'u'],
['𝗏', 'v'],
['𝗐', 'w'],
['𝗑', 'x'],
['𝗒', 'y'],
['𝗓', 'z'],
['𝗔', 'A'],
['𝗕', 'B'],
['𝗖', 'C'],
['𝗗', 'D'],
['𝗘', 'E'],
['𝗙', 'F'],
['𝗚', 'G'],
['𝗛', 'H'],
['𝗜', 'I'],
['𝗝', 'J'],
['𝗞', 'K'],
['𝗟', 'L'],
['𝗠', 'M'],
['𝗡', 'N'],
['𝗢', 'O'],
['𝗣', 'P'],
['𝗤', 'Q'],
['𝗥', 'R'],
['𝗦', 'S'],
['𝗧', 'T'],
['𝗨', 'U'],
['𝗩', 'V'],
['𝗪', 'W'],
['𝗫', 'X'],
['𝗬', 'Y'],
['𝗭', 'Z'],
['𝗮', 'a'],
['𝗯', 'b'],
['𝗰', 'c'],
['𝗱', 'd'],
['𝗲', 'e'],
['𝗳', 'f'],
['𝗴', 'g'],
['𝗵', 'h'],
['𝗶', 'i'],
['𝗷', 'j'],
['𝗸', 'k'],
['𝗹', 'l'],
['𝗺', 'm'],
['𝗻', 'n'],
['𝗼', 'o'],
['𝗽', 'p'],
['𝗾', 'q'],
['𝗿', 'r'],
['𝘀', 's'],
['𝘁', 't'],
['𝘂', 'u'],
['𝘃', 'v'],
['𝘄', 'w'],
['𝘅', 'x'],
['𝘆', 'y'],
['𝘇', 'z'],
['𝘈', 'A'],
['𝘉', 'B'],
['𝘊', 'C'],
['𝘋', 'D'],
['𝘌', 'E'],
['𝘍', 'F'],
['𝘎', 'G'],
['𝘏', 'H'],
['𝘐', 'I'],
['𝘑', 'J'],
['𝘒', 'K'],
['𝘓', 'L'],
['𝘔', 'M'],
['𝘕', 'N'],
['𝘖', 'O'],
['𝘗', 'P'],
['𝘘', 'Q'],
['𝘙', 'R'],
['𝘚', 'S'],
['𝘛', 'T'],
['𝘜', 'U'],
['𝘝', 'V'],
['𝘞', 'W'],
['𝘟', 'X'],
['𝘠', 'Y'],
['𝘡', 'Z'],
['𝘢', 'a'],
['𝘣', 'b'],
['𝘤', 'c'],
['𝘥', 'd'],
['𝘦', 'e'],
['𝘧', 'f'],
['𝘨', 'g'],
['𝘩', 'h'],
['𝘪', 'i'],
['𝘫', 'j'],
['𝘬', 'k'],
['𝘭', 'l'],
['𝘮', 'm'],
['𝘯', 'n'],
['𝘰', 'o'],
['𝘱', 'p'],
['𝘲', 'q'],
['𝘳', 'r'],
['𝘴', 's'],
['𝘵', 't'],
['𝘶', 'u'],
['𝘷', 'v'],
['𝘸', 'w'],
['𝘹', 'x'],
['𝘺', 'y'],
['𝘻', 'z'],
['𝘼', 'A'],
['𝘽', 'B'],
['𝘾', 'C'],
['𝘿', 'D'],
['𝙀', 'E'],
['𝙁', 'F'],
['𝙂', 'G'],
['𝙃', 'H'],
['𝙄', 'I'],
['𝙅', 'J'],
['𝙆', 'K'],
['𝙇', 'L'],
['𝙈', 'M'],
['𝙉', 'N'],
['𝙊', 'O'],
['𝙋', 'P'],
['𝙌', 'Q'],
['𝙍', 'R'],
['𝙎', 'S'],
['𝙏', 'T'],
['𝙐', 'U'],
['𝙑', 'V'],
['𝙒', 'W'],
['𝙓', 'X'],
['𝙔', 'Y'],
['𝙕', 'Z'],
['𝙖', 'a'],
['𝙗', 'b'],
['𝙘', 'c'],
['𝙙', 'd'],
['𝙚', 'e'],
['𝙛', 'f'],
['𝙜', 'g'],
['𝙝', 'h'],
['𝙞', 'i'],
['𝙟', 'j'],
['𝙠', 'k'],
['𝙡', 'l'],
['𝙢', 'm'],
['𝙣', 'n'],
['𝙤', 'o'],
['𝙥', 'p'],
['𝙦', 'q'],
['𝙧', 'r'],
['𝙨', 's'],
['𝙩', 't'],
['𝙪', 'u'],
['𝙫', 'v'],
['𝙬', 'w'],
['𝙭', 'x'],
['𝙮', 'y'],
['𝙯', 'z'],
['𝙰', 'A'],
['𝙱', 'B'],
['𝙲', 'C'],
['𝙳', 'D'],
['𝙴', 'E'],
['𝙵', 'F'],
['𝙶', 'G'],
['𝙷', 'H'],
['𝙸', 'I'],
['𝙹', 'J'],
['𝙺', 'K'],
['𝙻', 'L'],
['𝙼', 'M'],
['𝙽', 'N'],
['𝙾', 'O'],
['𝙿', 'P'],
['𝚀', 'Q'],
['𝚁', 'R'],
['𝚂', 'S'],
['𝚃', 'T'],
['𝚄', 'U'],
['𝚅', 'V'],
['𝚆', 'W'],
['𝚇', 'X'],
['𝚈', 'Y'],
['𝚉', 'Z'],
['𝚊', 'a'],
['𝚋', 'b'],
['𝚌', 'c'],
['𝚍', 'd'],
['𝚎', 'e'],
['𝚏', 'f'],
['𝚐', 'g'],
['𝚑', 'h'],
['𝚒', 'i'],
['𝚓', 'j'],
['𝚔', 'k'],
['𝚕', 'l'],
['𝚖', 'm'],
['𝚗', 'n'],
['𝚘', 'o'],
['𝚙', 'p'],
['𝚚', 'q'],
['𝚛', 'r'],
['𝚜', 's'],
['𝚝', 't'],
['𝚞', 'u'],
['𝚟', 'v'],
['𝚠', 'w'],
['𝚡', 'x'],
['𝚢', 'y'],
['𝚣', 'z'],
# Dotless letters
['𝚤', 'l'],
['𝚥', 'j'],
# Greek
['𝛢', 'A'],
['𝛣', 'B'],
['𝛤', 'G'],
['𝛥', 'D'],
['𝛦', 'E'],
['𝛧', 'Z'],
['𝛨', 'I'],
['𝛩', 'TH'],
['𝛪', 'I'],
['𝛫', 'K'],
['𝛬', 'L'],
['𝛭', 'M'],
['𝛮', 'N'],
['𝛯', 'KS'],
['𝛰', 'O'],
['𝛱', 'P'],
['𝛲', 'R'],
['𝛳', 'TH'],
['𝛴', 'S'],
['𝛵', 'T'],
['𝛶', 'Y'],
['𝛷', 'F'],
['𝛸', 'X'],
['𝛹', 'PS'],
['𝛺', 'O'],
['𝛻', 'D'],
['𝛼', 'a'],
['𝛽', 'b'],
['𝛾', 'g'],
['𝛿', 'd'],
['𝜀', 'e'],
['𝜁', 'z'],
['𝜂', 'i'],
['𝜃', 'th'],
['𝜄', 'i'],
['𝜅', 'k'],
['𝜆', 'l'],
['𝜇', 'm'],
['𝜈', 'n'],
['𝜉', 'ks'],
['𝜊', 'o'],
['𝜋', 'p'],
['𝜌', 'r'],
['𝜍', 's'],
['𝜎', 's'],
['𝜏', 't'],
['𝜐', 'y'],
['𝜑', 'f'],
['𝜒', 'x'],
['𝜓', 'ps'],
['𝜔', 'o'],
['𝜕', 'd'],
['𝜖', 'E'],
['𝜗', 'TH'],
['𝜘', 'K'],
['𝜙', 'f'],
['𝜚', 'r'],
['𝜛', 'p'],
['𝜜', 'A'],
['𝜝', 'V'],
['𝜞', 'G'],
['𝜟', 'D'],
['𝜠', 'E'],
['𝜡', 'Z'],
['𝜢', 'I'],
['𝜣', 'TH'],
['𝜤', 'I'],
['𝜥', 'K'],
['𝜦', 'L'],
['𝜧', 'M'],
['𝜨', 'N'],
['𝜩', 'KS'],
['𝜪', 'O'],
['𝜫', 'P'],
['𝜬', 'S'],
['𝜭', 'TH'],
['𝜮', 'S'],
['𝜯', 'T'],
['𝜰', 'Y'],
['𝜱', 'F'],
['𝜲', 'X'],
['𝜳', 'PS'],
['𝜴', 'O'],
['𝜵', 'D'],
['𝜶', 'a'],
['𝜷', 'v'],
['𝜸', 'g'],
['𝜹', 'd'],
['𝜺', 'e'],
['𝜻', 'z'],
['𝜼', 'i'],
['𝜽', 'th'],
['𝜾', 'i'],
['𝜿', 'k'],
['𝝀', 'l'],
['𝝁', 'm'],
['𝝂', 'n'],
['𝝃', 'ks'],
['𝝄', 'o'],
['𝝅', 'p'],
['𝝆', 'r'],
['𝝇', 's'],
['𝝈', 's'],
['𝝉', 't'],
['𝝊', 'y'],
['𝝋', 'f'],
['𝝌', 'x'],
['𝝍', 'ps'],
['𝝎', 'o'],
['𝝏', 'a'],
['𝝐', 'e'],
['𝝑', 'i'],
['𝝒', 'k'],
['𝝓', 'f'],
['𝝔', 'r'],
['𝝕', 'p'],
['𝝖', 'A'],
['𝝗', 'B'],
['𝝘', 'G'],
['𝝙', 'D'],
['𝝚', 'E'],
['𝝛', 'Z'],
['𝝜', 'I'],
['𝝝', 'TH'],
['𝝞', 'I'],
['𝝟', 'K'],
['𝝠', 'L'],
['𝝡', 'M'],
['𝝢', 'N'],
['𝝣', 'KS'],
['𝝤', 'O'],
['𝝥', 'P'],
['𝝦', 'R'],
['𝝧', 'TH'],
['𝝨', 'S'],
['𝝩', 'T'],
['𝝪', 'Y'],
['𝝫', 'F'],
['𝝬', 'X'],
['𝝭', 'PS'],
['𝝮', 'O'],
['𝝯', 'D'],
['𝝰', 'a'],
['𝝱', 'v'],
['𝝲', 'g'],
['𝝳', 'd'],
['𝝴', 'e'],
['𝝵', 'z'],
['𝝶', 'i'],
['𝝷', 'th'],
['𝝸', 'i'],
['𝝹', 'k'],
['𝝺', 'l'],
['𝝻', 'm'],
['𝝼', 'n'],
['𝝽', 'ks'],
['𝝾', 'o'],
['𝝿', 'p'],
['𝞀', 'r'],
['𝞁', 's'],
['𝞂', 's'],
['𝞃', 't'],
['𝞄', 'y'],
['𝞅', 'f'],
['𝞆', 'x'],
['𝞇', 'ps'],
['𝞈', 'o'],
['𝞉', 'a'],
['𝞊', 'e'],
['𝞋', 'i'],
['𝞌', 'k'],
['𝞍', 'f'],
['𝞎', 'r'],
['𝞏', 'p'],
['𝞐', 'A'],
['𝞑', 'V'],
['𝞒', 'G'],
['𝞓', 'D'],
['𝞔', 'E'],
['𝞕', 'Z'],
['𝞖', 'I'],
['𝞗', 'TH'],
['𝞘', 'I'],
['𝞙', 'K'],
['𝞚', 'L'],
['𝞛', 'M'],
['𝞜', 'N'],
['𝞝', 'KS'],
['𝞞', 'O'],
['𝞟', 'P'],
['𝞠', 'S'],
['𝞡', 'TH'],
['𝞢', 'S'],
['𝞣', 'T'],
['𝞤', 'Y'],
['𝞥', 'F'],
['𝞦', 'X'],
['𝞧', 'PS'],
['𝞨', 'O'],
['𝞩', 'D'],
['𝞪', 'a'],
['𝞫', 'v'],
['𝞬', 'g'],
['𝞭', 'd'],
['𝞮', 'e'],
['𝞯', 'z'],
['𝞰', 'i'],
['𝞱', 'th'],
['𝞲', 'i'],
['𝞳', 'k'],
['𝞴', 'l'],
['𝞵', 'm'],
['𝞶', 'n'],
['𝞷', 'ks'],
['𝞸', 'o'],
['𝞹', 'p'],
['𝞺', 'r'],
['𝞻', 's'],
['𝞼', 's'],
['𝞽', 't'],
['𝞾', 'y'],
['𝞿', 'f'],
['𝟀', 'x'],
['𝟁', 'ps'],
['𝟂', 'o'],
['𝟃', 'a'],
['𝟄', 'e'],
['𝟅', 'i'],
['𝟆', 'k'],
['𝟇', 'f'],
['𝟈', 'r'],
['𝟉', 'p'],
['𝟊', 'F'],
['𝟋', 'f'],
['⒜', '(a)'],
['⒝', '(b)'],
['⒞', '(c)'],
['⒟', '(d)'],
['⒠', '(e)'],
['⒡', '(f)'],
['⒢', '(g)'],
['⒣', '(h)'],
['⒤', '(i)'],
['⒥', '(j)'],
['⒦', '(k)'],
['⒧', '(l)'],
['⒨', '(m)'],
['⒩', '(n)'],
['⒪', '(o)'],
['⒫', '(p)'],
['⒬', '(q)'],
['⒭', '(r)'],
['⒮', '(s)'],
['⒯', '(t)'],
['⒰', '(u)'],
['⒱', '(v)'],
['⒲', '(w)'],
['⒳', '(x)'],
['⒴', '(y)'],
['⒵', '(z)'],
['Ⓐ', '(A)'],
['Ⓑ', '(B)'],
['Ⓒ', '(C)'],
['Ⓓ', '(D)'],
['Ⓔ', '(E)'],
['Ⓕ', '(F)'],
['Ⓖ', '(G)'],
['Ⓗ', '(H)'],
['Ⓘ', '(I)'],
['Ⓙ', '(J)'],
['Ⓚ', '(K)'],
['Ⓛ', '(L)'],
['Ⓜ', '(M)'],
['Ⓝ', '(N)'],
['Ⓞ', '(O)'],
['Ⓟ', '(P)'],
['Ⓠ', '(Q)'],
['Ⓡ', '(R)'],
['Ⓢ', '(S)'],
['Ⓣ', '(T)'],
['Ⓤ', '(U)'],
['Ⓥ', '(V)'],
['Ⓦ', '(W)'],
['Ⓧ', '(X)'],
['Ⓨ', '(Y)'],
['Ⓩ', '(Z)'],
['ⓐ', '(a)'],
['ⓑ', '(b)'],
['ⓒ', '(c)'],
['ⓓ', '(d)'],
['ⓔ', '(e)'],
['ⓕ', '(f)'],
['ⓖ', '(g)'],
['ⓗ', '(h)'],
['ⓘ', '(i)'],
['ⓙ', '(j)'],
['ⓚ', '(k)'],
['ⓛ', '(l)'],
['ⓜ', '(m)'],
['ⓝ', '(n)'],
['ⓞ', '(o)'],
['ⓟ', '(p)'],
['ⓠ', '(q)'],
['ⓡ', '(r)'],
['ⓢ', '(s)'],
['ⓣ', '(t)'],
['ⓤ', '(u)'],
['ⓥ', '(v)'],
['ⓦ', '(w)'],
['ⓧ', '(x)'],
['ⓨ', '(y)'],
['ⓩ', '(z)'],
# Numbers
['𝟎', '0'],
['𝟏', '1'],
['𝟐', '2'],
['𝟑', '3'],
['𝟒', '4'],
['𝟓', '5'],
['𝟔', '6'],
['𝟕', '7'],
['𝟖', '8'],
['𝟗', '9'],
['𝟘', '0'],
['𝟙', '1'],
['𝟚', '2'],
['𝟛', '3'],
['𝟜', '4'],
['𝟝', '5'],
['𝟞', '6'],
['𝟟', '7'],
['𝟠', '8'],
['𝟡', '9'],
['𝟢', '0'],
['𝟣', '1'],
['𝟤', '2'],
['𝟥', '3'],
['𝟦', '4'],
['𝟧', '5'],
['𝟨', '6'],
['𝟩', '7'],
['𝟪', '8'],
['𝟫', '9'],
['𝟬', '0'],
['𝟭', '1'],
['𝟮', '2'],
['𝟯', '3'],
['𝟰', '4'],
['𝟱', '5'],
['𝟲', '6'],
['𝟳', '7'],
['𝟴', '8'],
['𝟵', '9'],
['𝟶', '0'],
['𝟷', '1'],
['𝟸', '2'],
['𝟹', '3'],
['𝟺', '4'],
['𝟻', '5'],
['𝟼', '6'],
['𝟽', '7'],
['𝟾', '8'],
['𝟿', '9'],
['①', '1'],
['②', '2'],
['③', '3'],
['④', '4'],
['⑤', '5'],
['⑥', '6'],
['⑦', '7'],
['⑧', '8'],
['⑨', '9'],
['⑩', '10'],
['⑪', '11'],
['⑫', '12'],
['⑬', '13'],
['⑭', '14'],
['⑮', '15'],
['⑯', '16'],
['⑰', '17'],
['⑱', '18'],
['⑲', '19'],
['⑳', '20'],
['⑴', '1'],
['⑵', '2'],
['⑶', '3'],
['⑷', '4'],
['⑸', '5'],
['⑹', '6'],
['⑺', '7'],
['⑻', '8'],
['⑼', '9'],
['⑽', '10'],
['⑾', '11'],
['⑿', '12'],
['⒀', '13'],
['⒁', '14'],
['⒂', '15'],
['⒃', '16'],
['⒄', '17'],
['⒅', '18'],
['⒆', '19'],
['⒇', '20'],
['⒈', '1.'],
['⒉', '2.'],
['⒊', '3.'],
['⒋', '4.'],
['⒌', '5.'],
['⒍', '6.'],
['⒎', '7.'],
['⒏', '8.'],
['⒐', '9.'],
['⒑', '10.'],
['⒒', '11.'],
['⒓', '12.'],
['⒔', '13.'],
['⒕', '14.'],
['⒖', '15.'],
['⒗', '16.'],
['⒘', '17.'],
['⒙', '18.'],
['⒚', '19.'],
['⒛', '20.'],
['⓪', '0'],
['⓫', '11'],
['⓬', '12'],
['⓭', '13'],
['⓮', '14'],
['⓯', '15'],
['⓰', '16'],
['⓱', '17'],
['⓲', '18'],
['⓳', '19'],
['⓴', '20'],
['⓵', '1'],
['⓶', '2'],
['⓷', '3'],
['⓸', '4'],
['⓹', '5'],
['⓺', '6'],
['⓻', '7'],
['⓼', '8'],
['⓽', '9'],
['⓾', '10'],
['⓿', '0'],
# Punctuation
['🙰', '&'],
['🙱', '&'],
['🙲', '&'],
['🙳', '&'],
['🙴', '&'],
['🙵', '&'],
['🙶', '"'],
['🙷', '"'],
['🙸', '"'],
['‽', '?!'],
['🙹', '?!'],
['🙺', '?!'],
['🙻', '?!'],
['🙼', '/'],
['🙽', '\\'],
# Alchemy
['🜇', 'AR'],
['🜈', 'V'],
['🜉', 'V'],
['🜆', 'VR'],
['🜅', 'VF'],
['🜩', '2'],
['🜪', '5'],
['🝡', 'f'],
['🝢', 'W'],
['🝣', 'U'],
['🝧', 'V'],
['🝨', 'T'],
['🝪', 'V'],
['🝫', 'MB'],
['🝬', 'VB'],
['🝲', '3B'],
['🝳', '3B'],
# Emojis
['💯', '100'],
['🔙', 'BACK'],
['🔚', 'END'],
['🔛', 'ON!'],
['🔜', 'SOON'],
['🔝', 'TOP'],
['🔞', '18'],
['🔤', 'abc'],
['🔠', 'ABCD'],
['🔡', 'abcd'],
['🔢', '1234'],
['🔣', 'T&@%'],
['#️⃣', '#'],
['*️⃣', '*'],
['0️⃣', '0'],
['1️⃣', '1'],
['2️⃣', '2'],
['3️⃣', '3'],
['4️⃣', '4'],
['5️⃣', '5'],
['6️⃣', '6'],
['7️⃣', '7'],
['8️⃣', '8'],
['9️⃣', '9'],
['🔟', '10'],
['🅰️', 'A'],
['🅱️', 'B'],
['🆎', 'AB'],
['🆑', 'CL'],
['🅾️', 'O'],
['🅿', 'P'],
['🆘', 'SOS'],
['🅲', 'C'],
['🅳', 'D'],
['🅴', 'E'],
['🅵', 'F'],
['🅶', 'G'],
['🅷', 'H'],
['🅸', 'I'],
['🅹', 'J'],
['🅺', 'K'],
['🅻', 'L'],
['🅼', 'M'],
['🅽', 'N'],
['🆀', 'Q'],
['🆁', 'R'],
['🆂', 'S'],
['🆃', 'T'],
['🆄', 'U'],
['🆅', 'V'],
['🆆', 'W'],
['🆇', 'X'],
['🆈', 'Y'],
['🆉', 'Z'],
]
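# Editor's note: this table is consumed by unidecoder/__init__.py, which folds
# it into a single dict:
#     _replacements = {uni: asc for uni, asc in replacements}
# When a character appears more than once (e.g. 'ẞ' is listed under both the
# German umlauts and the general Latin group), the later entry wins, so 'ẞ'
# ultimately maps to 'SS'.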
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/unidecoder/replacements.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
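# Editor's sketch: a quick demonstration of the two lookup orders implemented
# above. Characters found in neither table emit the warning once per call and
# are dropped from the output. The module uses package-relative imports, so run
# it through the package rather than as a standalone script.
if __name__ == '__main__':
    print(unidecoder('Straße déjà vu'))           # transliteration table first
    print(unidecoder('Αθήνα', homoglyphs=True))   # homoglyph table first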
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/unidecoder/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
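# Editor's note: unidecoder/__init__.py inverts this table so that every
# confusable glyph points back at its ASCII key, roughly
#     _homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
# which is why, with homoglyphs=True, a Greek capital alpha listed under 'A'
# comes out as the ASCII letter 'A'.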
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/notebooks/conversationalai/client/speech_ai_demo/utils/tacotron2/unidecoder/homoglyphs.py |
from .entrypoints import nvidia_tacotron2, nvidia_tts_utils
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/__init__.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.utils.data
import tacotron2_common.layers as layers
from tacotron2_common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
from tacotron2.text import text_to_sequence
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio, text pairs
2) normalizes the text and converts it to sequences of one-hot vectors
3) computes mel-spectrograms from audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.text_cleaners = args.text_cleaners
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.load_mel_from_disk = args.load_mel_from_disk
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
def get_mel_text_pair(self, audiopath_and_text):
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
len_text = len(text)
text = self.get_text(text)
mel = self.get_mel(audiopath)
return (text, mel, len_text)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.load(filename)
assert melspec.size(0) == self.stft.n_mel_channels, (
'Mel dimension mismatch: given {}, expected {}'.format(
melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))
return text_norm
def __getitem__(self, index):
return self.get_mel_text_pair(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
# count number of items - characters in text
len_x = [x[2] for x in batch]
len_x = torch.Tensor(len_x)
return text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, len_x
def batch_to_gpu(batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths, len_x = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
x = (text_padded, input_lengths, mel_padded, max_len, output_lengths)
y = (mel_padded, gate_padded)
len_x = torch.sum(output_lengths)
return (x, y, len_x)
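# Editor's sketch: TextMelCollate can be exercised on synthetic (text, mel,
# text_length) triples without any audio on disk, which makes the padding and
# gate-target construction easy to inspect. The shapes below are arbitrary, and
# running this file directly assumes the repository root is on PYTHONPATH so
# the imports at the top resolve. In training, an instance of TextMelCollate is
# typically passed as collate_fn to torch.utils.data.DataLoader together with a
# TextMelLoader dataset.
if __name__ == '__main__':
    collate = TextMelCollate(n_frames_per_step=1)
    fake_batch = [
        (torch.arange(12, dtype=torch.int32), torch.randn(80, 137), 12),
        (torch.arange(7, dtype=torch.int32), torch.randn(80, 81), 7),
    ]
    text_padded, input_lengths, mel_padded, gate_padded, output_lengths, len_x = \
        collate(fake_batch)
    # Texts are padded to the longest text (12), mels to the longest mel (137).
    print(text_padded.shape, mel_padded.shape, gate_padded.shape)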
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/data_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from math import sqrt
import torch
from torch import nn
from torch.nn import functional as F
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/../'))
from tacotron2_common.layers import ConvNorm, LinearNorm
from tacotron2_common.utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim,
attention_dim, attention_location_n_filters,
attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(2)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
alignment = alignment.masked_fill(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
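# NOTE (editor): dropout is deliberately applied with training=True, so it
# stays active at inference time as well; the Tacotron 2 recipe uses this
# always-on prenet dropout to introduce variation in the decoder outputs.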
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolutions with 512 channels and kernel size 5
"""
def __init__(self, n_mel_channels, postnet_embedding_dim,
postnet_kernel_size, postnet_n_convolutions):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(n_mel_channels, postnet_embedding_dim,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(postnet_embedding_dim))
)
for i in range(1, postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(postnet_embedding_dim,
postnet_embedding_dim,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(postnet_embedding_dim, n_mel_channels,
kernel_size=postnet_kernel_size, stride=1,
padding=int((postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(n_mel_channels))
)
self.n_convs = len(self.convolutions)
def forward(self, x):
i = 0
for conv in self.convolutions:
if i < self.n_convs - 1:
x = F.dropout(torch.tanh(conv(x)), 0.5, training=self.training)
else:
x = F.dropout(conv(x), 0.5, training=self.training)
i += 1
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, encoder_n_convolutions,
encoder_embedding_dim, encoder_kernel_size):
super(Encoder, self).__init__()
convolutions = []
for _ in range(encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(encoder_embedding_dim,
encoder_embedding_dim,
kernel_size=encoder_kernel_size, stride=1,
padding=int((encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(encoder_embedding_dim,
int(encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
@torch.jit.ignore
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
# pytorch tensor are not reversible, hence the conversion
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
@torch.jit.export
def infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, self.training)
x = x.transpose(1, 2)
input_lengths = input_lengths.cpu()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
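# Editor's note: forward() above is excluded from TorchScript via
# @torch.jit.ignore and is only exercised in eager-mode training, while infer()
# is explicitly exported via @torch.jit.export so the scripted model takes this
# path at inference time.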
class Decoder(nn.Module):
def __init__(self, n_mel_channels, n_frames_per_step,
encoder_embedding_dim, attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
attention_rnn_dim, decoder_rnn_dim,
prenet_dim, max_decoder_steps, gate_threshold,
p_attention_dropout, p_decoder_dropout,
early_stopping):
super(Decoder, self).__init__()
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.encoder_embedding_dim = encoder_embedding_dim
self.attention_rnn_dim = attention_rnn_dim
self.decoder_rnn_dim = decoder_rnn_dim
self.prenet_dim = prenet_dim
self.max_decoder_steps = max_decoder_steps
self.gate_threshold = gate_threshold
self.p_attention_dropout = p_attention_dropout
self.p_decoder_dropout = p_decoder_dropout
self.early_stopping = early_stopping
self.prenet = Prenet(
n_mel_channels * n_frames_per_step,
[prenet_dim, prenet_dim])
self.attention_rnn = nn.LSTMCell(
prenet_dim + encoder_embedding_dim,
attention_rnn_dim)
self.attention_layer = Attention(
attention_rnn_dim, encoder_embedding_dim,
attention_dim, attention_location_n_filters,
attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
attention_rnn_dim + encoder_embedding_dim,
decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim,
n_mel_channels * n_frames_per_step)
self.gate_layer = LinearNorm(
decoder_rnn_dim + encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: encoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
dtype = memory.dtype
device = memory.device
decoder_input = torch.zeros(
B, self.n_mel_channels*self.n_frames_per_step,
dtype=dtype, device=device)
return decoder_input
def initialize_decoder_states(self, memory):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
dtype = memory.dtype
device = memory.device
attention_hidden = torch.zeros(
B, self.attention_rnn_dim, dtype=dtype, device=device)
attention_cell = torch.zeros(
B, self.attention_rnn_dim, dtype=dtype, device=device)
decoder_hidden = torch.zeros(
B, self.decoder_rnn_dim, dtype=dtype, device=device)
decoder_cell = torch.zeros(
B, self.decoder_rnn_dim, dtype=dtype, device=device)
attention_weights = torch.zeros(
B, MAX_TIME, dtype=dtype, device=device)
attention_weights_cum = torch.zeros(
B, MAX_TIME, dtype=dtype, device=device)
attention_context = torch.zeros(
B, self.encoder_embedding_dim, dtype=dtype, device=device)
processed_memory = self.attention_layer.memory_layer(memory)
return (attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, processed_memory)
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = alignments.transpose(0, 1).contiguous()
# (T_out, B) -> (B, T_out)
gate_outputs = gate_outputs.transpose(0, 1).contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = mel_outputs.transpose(0, 1).contiguous()
# decouple frames per step
shape = (mel_outputs.shape[0], -1, self.n_mel_channels)
mel_outputs = mel_outputs.view(*shape)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input, attention_hidden, attention_cell,
decoder_hidden, decoder_cell, attention_weights,
attention_weights_cum, attention_context, memory,
processed_memory, mask):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, attention_context), -1)
attention_hidden, attention_cell = self.attention_rnn(
cell_input, (attention_hidden, attention_cell))
attention_hidden = F.dropout(
attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(attention_weights.unsqueeze(1),
attention_weights_cum.unsqueeze(1)), dim=1)
attention_context, attention_weights = self.attention_layer(
attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
attention_weights_cum += attention_weights
decoder_input = torch.cat(
(attention_hidden, attention_context), -1)
decoder_hidden, decoder_cell = self.decoder_rnn(
decoder_input, (decoder_hidden, decoder_cell))
decoder_hidden = F.dropout(
decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(decoder_hidden, attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, attention_hidden,
attention_cell, decoder_hidden, decoder_cell, attention_weights,
attention_weights_cum, attention_context)
@torch.jit.ignore
def forward(self, memory, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = self.initialize_decoder_states(memory)
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
(mel_output,
gate_output,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context) = self.decode(decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze()]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
torch.stack(mel_outputs),
torch.stack(gate_outputs),
torch.stack(alignments))
return mel_outputs, gate_outputs, alignments
@torch.jit.export
def infer(self, memory, memory_lengths):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = self.initialize_decoder_states(memory)
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device=memory.device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device=memory.device)
mel_outputs, gate_outputs, alignments = (
torch.zeros(1), torch.zeros(1), torch.zeros(1))
first_iter = True
while True:
decoder_input = self.prenet(decoder_input)
(mel_output,
gate_output,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context) = self.decode(decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
if first_iter:
mel_outputs = mel_output.unsqueeze(0)
gate_outputs = gate_output
alignments = attention_weights
first_iter = False
else:
mel_outputs = torch.cat(
(mel_outputs, mel_output.unsqueeze(0)), dim=0)
gate_outputs = torch.cat((gate_outputs, gate_output), dim=0)
alignments = torch.cat((alignments, attention_weights), dim=0)
dec = torch.le(torch.sigmoid(gate_output),
self.gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if self.early_stopping and torch.sum(not_finished) == 0:
break
if len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments, mel_lengths
class Tacotron2(nn.Module):
def __init__(self, mask_padding, n_mel_channels,
n_symbols, symbols_embedding_dim, encoder_kernel_size,
encoder_n_convolutions, encoder_embedding_dim,
attention_rnn_dim, attention_dim, attention_location_n_filters,
attention_location_kernel_size, n_frames_per_step,
decoder_rnn_dim, prenet_dim, max_decoder_steps, gate_threshold,
p_attention_dropout, p_decoder_dropout,
postnet_embedding_dim, postnet_kernel_size,
postnet_n_convolutions, decoder_no_early_stopping):
super(Tacotron2, self).__init__()
self.mask_padding = mask_padding
self.n_mel_channels = n_mel_channels
self.n_frames_per_step = n_frames_per_step
self.embedding = nn.Embedding(n_symbols, symbols_embedding_dim)
std = sqrt(2.0 / (n_symbols + symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(encoder_n_convolutions,
encoder_embedding_dim,
encoder_kernel_size)
self.decoder = Decoder(n_mel_channels, n_frames_per_step,
encoder_embedding_dim, attention_dim,
attention_location_n_filters,
attention_location_kernel_size,
attention_rnn_dim, decoder_rnn_dim,
prenet_dim, max_decoder_steps,
gate_threshold, p_attention_dropout,
p_decoder_dropout,
not decoder_no_early_stopping)
self.postnet = Postnet(n_mel_channels, postnet_embedding_dim,
postnet_kernel_size,
postnet_n_convolutions)
def parse_batch(self, batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
return (
(text_padded, input_lengths, mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths):
# type: (List[Tensor], Tensor) -> List[Tensor]
if self.mask_padding and output_lengths is not None:
mask = get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].masked_fill_(mask, 0.0)
outputs[1].masked_fill_(mask, 0.0)
outputs[2].masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
inputs, input_lengths, targets, max_len, output_lengths = inputs
input_lengths, output_lengths = input_lengths.data, output_lengths.data
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, input_lengths)
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, targets, memory_lengths=input_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def infer(self, inputs, input_lengths):
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.infer(embedded_inputs, input_lengths)
mel_outputs, gate_outputs, alignments, mel_lengths = self.decoder.infer(
encoder_outputs, input_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
BS = mel_outputs_postnet.size(0)
alignments = alignments.unfold(1, BS, BS).transpose(0,2)
return mel_outputs_postnet, mel_lengths, alignments
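
# A minimal CPU usage sketch (not part of the original file): it builds the
# model with the default hyperparameters from tacotron2/arg_parser.py (only
# max_decoder_steps is shrunk to keep the run short) and feeds random symbol
# ids, so the generated mels are noise unless a trained checkpoint is loaded.
if __name__ == '__main__':
    model = Tacotron2(
        mask_padding=False, n_mel_channels=80, n_symbols=148,
        symbols_embedding_dim=512, encoder_kernel_size=5,
        encoder_n_convolutions=3, encoder_embedding_dim=512,
        attention_rnn_dim=1024, attention_dim=128,
        attention_location_n_filters=32, attention_location_kernel_size=31,
        n_frames_per_step=1, decoder_rnn_dim=1024, prenet_dim=256,
        max_decoder_steps=100, gate_threshold=0.5,
        p_attention_dropout=0.1, p_decoder_dropout=0.1,
        postnet_embedding_dim=512, postnet_kernel_size=5,
        postnet_n_convolutions=5, decoder_no_early_stopping=False)
    model.eval()
    # Two dummy "sentences" of symbol ids, sorted by decreasing length.
    texts = torch.randint(1, 148, (2, 20), dtype=torch.int64)
    lengths = torch.tensor([20, 15], dtype=torch.int64)
    with torch.no_grad():
        mels, mel_lengths, alignments = model.infer(texts, lengths)
    print(mels.shape, mel_lengths, alignments.shape)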
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/model.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
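
# A minimal, self-contained sketch (not part of the original module): it feeds
# random tensors shaped like one Tacotron 2 training step through the loss.
# `torch` is imported locally because the module itself only needs `nn`.
if __name__ == '__main__':
    import torch
    B, n_mels, T = 4, 80, 120
    mel_out = torch.randn(B, n_mels, T)
    mel_out_postnet = torch.randn(B, n_mels, T)
    gate_out = torch.randn(B, T)           # raw stop-token logits per frame
    alignments = torch.randn(B, T, 50)     # ignored by the loss
    mel_target = torch.randn(B, n_mels, T)
    gate_target = torch.zeros(B, T)
    gate_target[:, -1] = 1.0               # stop token fires on the last frame
    criterion = Tacotron2Loss()
    loss = criterion((mel_out, mel_out_postnet, gate_out, alignments),
                     (mel_target, gate_target))
    print(loss.item())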
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/loss_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
from tacotron2.text import symbols
def tacotron2_parser(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--mask-padding', default=False, type=bool,
help='Use mask padding')
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# symbols parameters
    # Keep the argument group under its own name instead of rebinding the
    # imported `symbols` list, so len(symbols) stays valid on repeated calls.
    len_symbols = len(symbols)
    symbols_arg_group = parser.add_argument_group('symbols parameters')
    symbols_arg_group.add_argument('--n-symbols', default=len_symbols, type=int,
                                   help='Number of symbols in dictionary')
    symbols_arg_group.add_argument('--symbols-embedding-dim', default=512,
                                   type=int,
                                   help='Input embedding dimension')
# encoder parameters
encoder = parser.add_argument_group('encoder parameters')
encoder.add_argument('--encoder-kernel-size', default=5, type=int,
help='Encoder kernel size')
encoder.add_argument('--encoder-n-convolutions', default=3, type=int,
help='Number of encoder convolutions')
encoder.add_argument('--encoder-embedding-dim', default=512, type=int,
help='Encoder embedding dimension')
# decoder parameters
decoder = parser.add_argument_group('decoder parameters')
decoder.add_argument('--n-frames-per-step', default=1,
type=int,
help='Number of frames processed per step') # currently only 1 is supported
decoder.add_argument('--decoder-rnn-dim', default=1024, type=int,
help='Number of units in decoder LSTM')
decoder.add_argument('--prenet-dim', default=256, type=int,
help='Number of ReLU units in prenet layers')
decoder.add_argument('--max-decoder-steps', default=2000, type=int,
help='Maximum number of output mel spectrograms')
decoder.add_argument('--gate-threshold', default=0.5, type=float,
help='Probability threshold for stop token')
decoder.add_argument('--p-attention-dropout', default=0.1, type=float,
help='Dropout probability for attention LSTM')
decoder.add_argument('--p-decoder-dropout', default=0.1, type=float,
help='Dropout probability for decoder LSTM')
    decoder.add_argument('--decoder-no-early-stopping', action='store_true',
                         help='Disable early stopping; keep decoding until '
                              'max-decoder-steps even after all samples '
                              'produce a stop token')
# attention parameters
attention = parser.add_argument_group('attention parameters')
attention.add_argument('--attention-rnn-dim', default=1024, type=int,
help='Number of units in attention LSTM')
attention.add_argument('--attention-dim', default=128, type=int,
help='Dimension of attention hidden representation')
# location layer parameters
location = parser.add_argument_group('location parameters')
location.add_argument(
'--attention-location-n-filters', default=32, type=int,
help='Number of filters for location-sensitive attention')
location.add_argument(
'--attention-location-kernel-size', default=31, type=int,
help='Kernel size for location-sensitive attention')
# Mel-post processing network parameters
postnet = parser.add_argument_group('postnet parameters')
postnet.add_argument('--postnet-embedding-dim', default=512, type=int,
help='Postnet embedding dimension')
postnet.add_argument('--postnet-kernel-size', default=5, type=int,
help='Postnet kernel size')
postnet.add_argument('--postnet-n-convolutions', default=5, type=int,
help='Number of postnet convolutions')
return parser
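
# A small usage sketch (not part of the original module): tacotron2_parser
# extends a parent parser, so model flags can be parsed from the command line
# or, as here, from an explicit argument list. It assumes the repository root
# is on PYTHONPATH so that `tacotron2.text` is importable.
if __name__ == '__main__':
    parent = argparse.ArgumentParser(add_help=False)
    parser = tacotron2_parser(parent, add_help=True)
    args = parser.parse_args(['--n-mel-channels', '80',
                              '--max-decoder-steps', '500'])
    print(args.n_mel_channels, args.max_decoder_steps, args.n_symbols)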
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/arg_parser.py |
# *****************************************************************************
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import urllib.request
import torch
import os
import sys
#from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_tacotron2(pretrained=True, **kwargs):
"""Constructs a Tacotron 2 model (nn.module with additional infer(input) method).
    For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args (type[, default value]):
pretrained (bool, True): If True, returns a model pretrained on LJ Speech dataset.
model_math (str, 'fp32'): returns a model in given precision ('fp32' or 'fp16')
        n_symbols (int, 148): Number of text symbols in the input vocabulary (fed to the embedding layer), see
https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/symbols.py
p_attention_dropout (float, 0.1): dropout probability on attention LSTM (1st LSTM layer in decoder)
p_decoder_dropout (float, 0.1): dropout probability on decoder LSTM (2nd LSTM layer in decoder)
max_decoder_steps (int, 1000): maximum number of generated mel spectrograms during inference
"""
from tacotron2 import model as tacotron2
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
if pretrained:
if fp16:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_pyt_ckpt_amp/versions/19.09.0/files/nvidia_tacotron2pyt_fp16_20190427'
else:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/tacotron2_pyt_ckpt_fp32/versions/19.09.0/files/nvidia_tacotron2pyt_fp32_20190427'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
state_dict = ckpt['state_dict']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
config = ckpt['config']
else:
config = {'mask_padding': False, 'n_mel_channels': 80, 'n_symbols': 148,
'symbols_embedding_dim': 512, 'encoder_kernel_size': 5,
'encoder_n_convolutions': 3, 'encoder_embedding_dim': 512,
'attention_rnn_dim': 1024, 'attention_dim': 128,
'attention_location_n_filters': 32,
'attention_location_kernel_size': 31, 'n_frames_per_step': 1,
'decoder_rnn_dim': 1024, 'prenet_dim': 256,
'max_decoder_steps': 1000, 'gate_threshold': 0.5,
'p_attention_dropout': 0.1, 'p_decoder_dropout': 0.1,
'postnet_embedding_dim': 512, 'postnet_kernel_size': 5,
'postnet_n_convolutions': 5, 'decoder_no_early_stopping': False}
for k,v in kwargs.items():
if k in config.keys():
config[k] = v
m = tacotron2.Tacotron2(**config)
if pretrained:
m.load_state_dict(state_dict)
return m
def nvidia_tts_utils():
class Processing:
from tacotron2.text import text_to_sequence
@staticmethod
def pad_sequences(batch):
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, :text.size(0)] = text
return text_padded, input_lengths
@staticmethod
def prepare_input_sequence(texts, cpu_run=False):
d = []
for i,text in enumerate(texts):
d.append(torch.IntTensor(
Processing.text_to_sequence(text, ['english_cleaners'])[:]))
text_padded, input_lengths = Processing.pad_sequences(d)
if not cpu_run:
text_padded = text_padded.cuda().long()
input_lengths = input_lengths.cuda().long()
else:
text_padded = text_padded.long()
input_lengths = input_lengths.long()
return text_padded, input_lengths
return Processing()
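
# A minimal end-to-end sketch (not part of the original module): it assumes
# the repository root is on PYTHONPATH (for `from tacotron2 import model`) and
# that the `inflect` package is installed. pretrained=False keeps it offline
# and untrained; with pretrained=True a checkpoint is downloaded from NGC.
if __name__ == '__main__':
    net = nvidia_tacotron2(pretrained=False, max_decoder_steps=50)
    net.eval()
    utils = nvidia_tts_utils()
    sequences, lengths = utils.prepare_input_sequence(['Hello world.'],
                                                      cpu_run=True)
    with torch.no_grad():
        mel, mel_lengths, _ = net.infer(sequences, lengths)
    print(mel.size(), mel_lengths)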
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/entrypoints.py |
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
            # cmudict separates the word from its pronunciation with two spaces
            parts = line.split('  ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
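
# A self-contained sketch (not part of the original module): a tiny in-memory
# dictionary stands in for the real cmudict file; each entry separates the
# word and its pronunciation with two spaces.
if __name__ == '__main__':
    import io
    toy = io.StringIO('HELLO  HH AH0 L OW1\nWORLD  W ER1 L D\n')
    d = CMUDict(toy)
    print(len(d), d.lookup('hello'))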
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/cmudict.py |
""" from https://github.com/keithito/tacotron """
import re
from tacotron2.text import cleaners
from tacotron2.text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
    return s in _symbol_to_id and s != '_' and s != '~'
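
# A round-trip sketch (not part of the original module): it assumes the
# `inflect` package is installed, since english_cleaners expands numbers.
if __name__ == '__main__':
    ids = text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.',
                           ['english_cleaners'])
    print(ids)
    print(sequence_to_text(ids))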
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/__init__.py |
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
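
# A short usage sketch (not part of the original module); it only needs the
# `inflect` package that the module already imports.
if __name__ == '__main__':
    print(normalize_numbers('I paid $3.50 for the 2nd ticket in 2019.'))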
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/numbers.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify _letters
(and the other character sets below). See TRAINING_DATA.md for details. '''
from tacotron2.text import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
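
# A quick sanity check (not part of the original module): the combined table
# has 148 entries, which is where the --n-symbols default and the n_symbols
# value used by the published checkpoints come from.
if __name__ == '__main__':
    # 148 = 1 pad + 1 special + 10 punctuation + 52 letters + 84 ARPAbet
    print(len(symbols))
    print(symbols[:15])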
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/symbols.py |
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .numbers import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecoder(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
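
# A usage sketch (not part of the original module): it runs the full English
# pipeline and assumes the `inflect` package is installed for number expansion.
if __name__ == '__main__':
    print(english_cleaners('Dr. Müller spent $100 on Dec. 25th, 2019!'))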
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/cleaners.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Sindre Sorhus <[email protected]> (https://sindresorhus.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/sindresorhus/transliterate/blob/main/replacements.js
#
replacements = [
# German umlauts
['ß', 'ss'],
['ẞ', 'Ss'],
['ä', 'ae'],
['Ä', 'Ae'],
['ö', 'oe'],
['Ö', 'Oe'],
['ü', 'ue'],
['Ü', 'Ue'],
# Latin
['À', 'A'],
['Á', 'A'],
['Â', 'A'],
['Ã', 'A'],
['Ä', 'Ae'],
['Å', 'A'],
['Æ', 'AE'],
['Ç', 'C'],
['È', 'E'],
['É', 'E'],
['Ê', 'E'],
['Ë', 'E'],
['Ì', 'I'],
['Í', 'I'],
['Î', 'I'],
['Ï', 'I'],
['Ð', 'D'],
['Ñ', 'N'],
['Ò', 'O'],
['Ó', 'O'],
['Ô', 'O'],
['Õ', 'O'],
['Ö', 'Oe'],
['Ő', 'O'],
['Ø', 'O'],
['Ù', 'U'],
['Ú', 'U'],
['Û', 'U'],
['Ü', 'Ue'],
['Ű', 'U'],
['Ý', 'Y'],
['Þ', 'TH'],
['ß', 'ss'],
['à', 'a'],
['á', 'a'],
['â', 'a'],
['ã', 'a'],
['ä', 'ae'],
['å', 'a'],
['æ', 'ae'],
['ç', 'c'],
['è', 'e'],
['é', 'e'],
['ê', 'e'],
['ë', 'e'],
['ì', 'i'],
['í', 'i'],
['î', 'i'],
['ï', 'i'],
['ð', 'd'],
['ñ', 'n'],
['ò', 'o'],
['ó', 'o'],
['ô', 'o'],
['õ', 'o'],
['ö', 'oe'],
['ő', 'o'],
['ø', 'o'],
['ù', 'u'],
['ú', 'u'],
['û', 'u'],
['ü', 'ue'],
['ű', 'u'],
['ý', 'y'],
['þ', 'th'],
['ÿ', 'y'],
['ẞ', 'SS'],
# Vietnamese
['à', 'a'],
['À', 'A'],
['á', 'a'],
['Á', 'A'],
['â', 'a'],
['Â', 'A'],
['ã', 'a'],
['Ã', 'A'],
['è', 'e'],
['È', 'E'],
['é', 'e'],
['É', 'E'],
['ê', 'e'],
['Ê', 'E'],
['ì', 'i'],
['Ì', 'I'],
['í', 'i'],
['Í', 'I'],
['ò', 'o'],
['Ò', 'O'],
['ó', 'o'],
['Ó', 'O'],
['ô', 'o'],
['Ô', 'O'],
['õ', 'o'],
['Õ', 'O'],
['ù', 'u'],
['Ù', 'U'],
['ú', 'u'],
['Ú', 'U'],
['ý', 'y'],
['Ý', 'Y'],
['ă', 'a'],
['Ă', 'A'],
['Đ', 'D'],
['đ', 'd'],
['ĩ', 'i'],
['Ĩ', 'I'],
['ũ', 'u'],
['Ũ', 'U'],
['ơ', 'o'],
['Ơ', 'O'],
['ư', 'u'],
['Ư', 'U'],
['ạ', 'a'],
['Ạ', 'A'],
['ả', 'a'],
['Ả', 'A'],
['ấ', 'a'],
['Ấ', 'A'],
['ầ', 'a'],
['Ầ', 'A'],
['ẩ', 'a'],
['Ẩ', 'A'],
['ẫ', 'a'],
['Ẫ', 'A'],
['ậ', 'a'],
['Ậ', 'A'],
['ắ', 'a'],
['Ắ', 'A'],
['ằ', 'a'],
['Ằ', 'A'],
['ẳ', 'a'],
['Ẳ', 'A'],
['ẵ', 'a'],
['Ẵ', 'A'],
['ặ', 'a'],
['Ặ', 'A'],
['ẹ', 'e'],
['Ẹ', 'E'],
['ẻ', 'e'],
['Ẻ', 'E'],
['ẽ', 'e'],
['Ẽ', 'E'],
['ế', 'e'],
['Ế', 'E'],
['ề', 'e'],
['Ề', 'E'],
['ể', 'e'],
['Ể', 'E'],
['ễ', 'e'],
['Ễ', 'E'],
['ệ', 'e'],
['Ệ', 'E'],
['ỉ', 'i'],
['Ỉ', 'I'],
['ị', 'i'],
['Ị', 'I'],
['ọ', 'o'],
['Ọ', 'O'],
['ỏ', 'o'],
['Ỏ', 'O'],
['ố', 'o'],
['Ố', 'O'],
['ồ', 'o'],
['Ồ', 'O'],
['ổ', 'o'],
['Ổ', 'O'],
['ỗ', 'o'],
['Ỗ', 'O'],
['ộ', 'o'],
['Ộ', 'O'],
['ớ', 'o'],
['Ớ', 'O'],
['ờ', 'o'],
['Ờ', 'O'],
['ở', 'o'],
['Ở', 'O'],
['ỡ', 'o'],
['Ỡ', 'O'],
['ợ', 'o'],
['Ợ', 'O'],
['ụ', 'u'],
['Ụ', 'U'],
['ủ', 'u'],
['Ủ', 'U'],
['ứ', 'u'],
['Ứ', 'U'],
['ừ', 'u'],
['Ừ', 'U'],
['ử', 'u'],
['Ử', 'U'],
['ữ', 'u'],
['Ữ', 'U'],
['ự', 'u'],
['Ự', 'U'],
['ỳ', 'y'],
['Ỳ', 'Y'],
['ỵ', 'y'],
['Ỵ', 'Y'],
['ỷ', 'y'],
['Ỷ', 'Y'],
['ỹ', 'y'],
['Ỹ', 'Y'],
# Arabic
['ء', 'e'],
['آ', 'a'],
['أ', 'a'],
['ؤ', 'w'],
['إ', 'i'],
['ئ', 'y'],
['ا', 'a'],
['ب', 'b'],
['ة', 't'],
['ت', 't'],
['ث', 'th'],
['ج', 'j'],
['ح', 'h'],
['خ', 'kh'],
['د', 'd'],
['ذ', 'dh'],
['ر', 'r'],
['ز', 'z'],
['س', 's'],
['ش', 'sh'],
['ص', 's'],
['ض', 'd'],
['ط', 't'],
['ظ', 'z'],
['ع', 'e'],
['غ', 'gh'],
['ـ', '_'],
['ف', 'f'],
['ق', 'q'],
['ك', 'k'],
['ل', 'l'],
['م', 'm'],
['ن', 'n'],
['ه', 'h'],
['و', 'w'],
['ى', 'a'],
['ي', 'y'],
['َ', 'a'],
['ُ', 'u'],
['ِ', 'i'],
['٠', '0'],
['١', '1'],
['٢', '2'],
['٣', '3'],
['٤', '4'],
['٥', '5'],
['٦', '6'],
['٧', '7'],
['٨', '8'],
['٩', '9'],
# Persian / Farsi
['چ', 'ch'],
['ک', 'k'],
['گ', 'g'],
['پ', 'p'],
['ژ', 'zh'],
['ی', 'y'],
['۰', '0'],
['۱', '1'],
['۲', '2'],
['۳', '3'],
['۴', '4'],
['۵', '5'],
['۶', '6'],
['۷', '7'],
['۸', '8'],
['۹', '9'],
# Pashto
['ټ', 'p'],
['ځ', 'z'],
['څ', 'c'],
['ډ', 'd'],
['ﺫ', 'd'],
['ﺭ', 'r'],
['ړ', 'r'],
['ﺯ', 'z'],
['ږ', 'g'],
['ښ', 'x'],
['ګ', 'g'],
['ڼ', 'n'],
['ۀ', 'e'],
['ې', 'e'],
['ۍ', 'ai'],
# Urdu
['ٹ', 't'],
['ڈ', 'd'],
['ڑ', 'r'],
['ں', 'n'],
['ہ', 'h'],
['ھ', 'h'],
['ے', 'e'],
# Russian
['А', 'A'],
['а', 'a'],
['Б', 'B'],
['б', 'b'],
['В', 'V'],
['в', 'v'],
['Г', 'G'],
['г', 'g'],
['Д', 'D'],
['д', 'd'],
['ъе', 'ye'],
['Ъе', 'Ye'],
['ъЕ', 'yE'],
['ЪЕ', 'YE'],
['Е', 'E'],
['е', 'e'],
['Ё', 'Yo'],
['ё', 'yo'],
['Ж', 'Zh'],
['ж', 'zh'],
['З', 'Z'],
['з', 'z'],
['И', 'I'],
['и', 'i'],
['ый', 'iy'],
['Ый', 'Iy'],
['ЫЙ', 'IY'],
['ыЙ', 'iY'],
['Й', 'Y'],
['й', 'y'],
['К', 'K'],
['к', 'k'],
['Л', 'L'],
['л', 'l'],
['М', 'M'],
['м', 'm'],
['Н', 'N'],
['н', 'n'],
['О', 'O'],
['о', 'o'],
['П', 'P'],
['п', 'p'],
['Р', 'R'],
['р', 'r'],
['С', 'S'],
['с', 's'],
['Т', 'T'],
['т', 't'],
['У', 'U'],
['у', 'u'],
['Ф', 'F'],
['ф', 'f'],
['Х', 'Kh'],
['х', 'kh'],
['Ц', 'Ts'],
['ц', 'ts'],
['Ч', 'Ch'],
['ч', 'ch'],
['Ш', 'Sh'],
['ш', 'sh'],
['Щ', 'Sch'],
['щ', 'sch'],
['Ъ', ''],
['ъ', ''],
['Ы', 'Y'],
['ы', 'y'],
['Ь', ''],
['ь', ''],
['Э', 'E'],
['э', 'e'],
['Ю', 'Yu'],
['ю', 'yu'],
['Я', 'Ya'],
['я', 'ya'],
# Romanian
['ă', 'a'],
['Ă', 'A'],
['ș', 's'],
['Ș', 'S'],
['ț', 't'],
['Ț', 'T'],
['ţ', 't'],
['Ţ', 'T'],
# Turkish
['ş', 's'],
['Ş', 'S'],
['ç', 'c'],
['Ç', 'C'],
['ğ', 'g'],
['Ğ', 'G'],
['ı', 'i'],
['İ', 'I'],
# Armenian
['ա', 'a'],
['Ա', 'A'],
['բ', 'b'],
['Բ', 'B'],
['գ', 'g'],
['Գ', 'G'],
['դ', 'd'],
['Դ', 'D'],
['ե', 'ye'],
['Ե', 'Ye'],
['զ', 'z'],
['Զ', 'Z'],
['է', 'e'],
['Է', 'E'],
['ը', 'y'],
['Ը', 'Y'],
['թ', 't'],
['Թ', 'T'],
['ժ', 'zh'],
['Ժ', 'Zh'],
['ի', 'i'],
['Ի', 'I'],
['լ', 'l'],
['Լ', 'L'],
['խ', 'kh'],
['Խ', 'Kh'],
['ծ', 'ts'],
['Ծ', 'Ts'],
['կ', 'k'],
['Կ', 'K'],
['հ', 'h'],
['Հ', 'H'],
['ձ', 'dz'],
['Ձ', 'Dz'],
['ղ', 'gh'],
['Ղ', 'Gh'],
['ճ', 'tch'],
['Ճ', 'Tch'],
['մ', 'm'],
['Մ', 'M'],
['յ', 'y'],
['Յ', 'Y'],
['ն', 'n'],
['Ն', 'N'],
['շ', 'sh'],
['Շ', 'Sh'],
['ո', 'vo'],
['Ո', 'Vo'],
['չ', 'ch'],
['Չ', 'Ch'],
['պ', 'p'],
['Պ', 'P'],
['ջ', 'j'],
['Ջ', 'J'],
['ռ', 'r'],
['Ռ', 'R'],
['ս', 's'],
['Ս', 'S'],
['վ', 'v'],
['Վ', 'V'],
['տ', 't'],
['Տ', 'T'],
['ր', 'r'],
['Ր', 'R'],
['ց', 'c'],
['Ց', 'C'],
['ու', 'u'],
['ՈՒ', 'U'],
['Ու', 'U'],
['փ', 'p'],
['Փ', 'P'],
['ք', 'q'],
['Ք', 'Q'],
['օ', 'o'],
['Օ', 'O'],
['ֆ', 'f'],
['Ֆ', 'F'],
['և', 'yev'],
# Georgian
['ა', 'a'],
['ბ', 'b'],
['გ', 'g'],
['დ', 'd'],
['ე', 'e'],
['ვ', 'v'],
['ზ', 'z'],
['თ', 't'],
['ი', 'i'],
['კ', 'k'],
['ლ', 'l'],
['მ', 'm'],
['ნ', 'n'],
['ო', 'o'],
['პ', 'p'],
['ჟ', 'zh'],
['რ', 'r'],
['ს', 's'],
['ტ', 't'],
['უ', 'u'],
['ფ', 'ph'],
['ქ', 'q'],
['ღ', 'gh'],
['ყ', 'k'],
['შ', 'sh'],
['ჩ', 'ch'],
['ც', 'ts'],
['ძ', 'dz'],
['წ', 'ts'],
['ჭ', 'tch'],
['ხ', 'kh'],
['ჯ', 'j'],
['ჰ', 'h'],
# Czech
['č', 'c'],
['ď', 'd'],
['ě', 'e'],
['ň', 'n'],
['ř', 'r'],
['š', 's'],
['ť', 't'],
['ů', 'u'],
['ž', 'z'],
['Č', 'C'],
['Ď', 'D'],
['Ě', 'E'],
['Ň', 'N'],
['Ř', 'R'],
['Š', 'S'],
['Ť', 'T'],
['Ů', 'U'],
['Ž', 'Z'],
# Dhivehi
['ހ', 'h'],
['ށ', 'sh'],
['ނ', 'n'],
['ރ', 'r'],
['ބ', 'b'],
['ޅ', 'lh'],
['ކ', 'k'],
['އ', 'a'],
['ވ', 'v'],
['މ', 'm'],
['ފ', 'f'],
['ދ', 'dh'],
['ތ', 'th'],
['ލ', 'l'],
['ގ', 'g'],
['ޏ', 'gn'],
['ސ', 's'],
['ޑ', 'd'],
['ޒ', 'z'],
['ޓ', 't'],
['ޔ', 'y'],
['ޕ', 'p'],
['ޖ', 'j'],
['ޗ', 'ch'],
['ޘ', 'tt'],
['ޙ', 'hh'],
['ޚ', 'kh'],
['ޛ', 'th'],
['ޜ', 'z'],
['ޝ', 'sh'],
['ޞ', 's'],
['ޟ', 'd'],
['ޠ', 't'],
['ޡ', 'z'],
['ޢ', 'a'],
['ޣ', 'gh'],
['ޤ', 'q'],
['ޥ', 'w'],
['ަ', 'a'],
['ާ', 'aa'],
['ި', 'i'],
['ީ', 'ee'],
['ު', 'u'],
['ޫ', 'oo'],
['ެ', 'e'],
['ޭ', 'ey'],
['ޮ', 'o'],
['ޯ', 'oa'],
['ް', ''],
# Greek
['α', 'a'],
['β', 'v'],
['γ', 'g'],
['δ', 'd'],
['ε', 'e'],
['ζ', 'z'],
['η', 'i'],
['θ', 'th'],
['ι', 'i'],
['κ', 'k'],
['λ', 'l'],
['μ', 'm'],
['ν', 'n'],
['ξ', 'ks'],
['ο', 'o'],
['π', 'p'],
['ρ', 'r'],
['σ', 's'],
['τ', 't'],
['υ', 'y'],
['φ', 'f'],
['χ', 'x'],
['ψ', 'ps'],
['ω', 'o'],
['ά', 'a'],
['έ', 'e'],
['ί', 'i'],
['ό', 'o'],
['ύ', 'y'],
['ή', 'i'],
['ώ', 'o'],
['ς', 's'],
['ϊ', 'i'],
['ΰ', 'y'],
['ϋ', 'y'],
['ΐ', 'i'],
['Α', 'A'],
['Β', 'B'],
['Γ', 'G'],
['Δ', 'D'],
['Ε', 'E'],
['Ζ', 'Z'],
['Η', 'I'],
['Θ', 'TH'],
['Ι', 'I'],
['Κ', 'K'],
['Λ', 'L'],
['Μ', 'M'],
['Ν', 'N'],
['Ξ', 'KS'],
['Ο', 'O'],
['Π', 'P'],
['Ρ', 'R'],
['Σ', 'S'],
['Τ', 'T'],
['Υ', 'Y'],
['Φ', 'F'],
['Χ', 'X'],
['Ψ', 'PS'],
['Ω', 'O'],
['Ά', 'A'],
['Έ', 'E'],
['Ί', 'I'],
['Ό', 'O'],
['Ύ', 'Y'],
['Ή', 'I'],
['Ώ', 'O'],
['Ϊ', 'I'],
['Ϋ', 'Y'],
# Disabled as it conflicts with German and Latin.
# Hungarian
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ö', 'o'],
# ['Ö', 'O'],
# ['ü', 'u'],
# ['Ü', 'U'],
# ['ű', 'u'],
# ['Ű', 'U'],
# Latvian
['ā', 'a'],
['ē', 'e'],
['ģ', 'g'],
['ī', 'i'],
['ķ', 'k'],
['ļ', 'l'],
['ņ', 'n'],
['ū', 'u'],
['Ā', 'A'],
['Ē', 'E'],
['Ģ', 'G'],
['Ī', 'I'],
['Ķ', 'K'],
['Ļ', 'L'],
['Ņ', 'N'],
['Ū', 'U'],
['č', 'c'],
['š', 's'],
['ž', 'z'],
['Č', 'C'],
['Š', 'S'],
['Ž', 'Z'],
# Lithuanian
['ą', 'a'],
['č', 'c'],
['ę', 'e'],
['ė', 'e'],
['į', 'i'],
['š', 's'],
['ų', 'u'],
['ū', 'u'],
['ž', 'z'],
['Ą', 'A'],
['Č', 'C'],
['Ę', 'E'],
['Ė', 'E'],
['Į', 'I'],
['Š', 'S'],
['Ų', 'U'],
['Ū', 'U'],
# Macedonian
['Ќ', 'Kj'],
['ќ', 'kj'],
['Љ', 'Lj'],
['љ', 'lj'],
['Њ', 'Nj'],
['њ', 'nj'],
['Тс', 'Ts'],
['тс', 'ts'],
# Polish
['ą', 'a'],
['ć', 'c'],
['ę', 'e'],
['ł', 'l'],
['ń', 'n'],
['ś', 's'],
['ź', 'z'],
['ż', 'z'],
['Ą', 'A'],
['Ć', 'C'],
['Ę', 'E'],
['Ł', 'L'],
['Ń', 'N'],
['Ś', 'S'],
['Ź', 'Z'],
['Ż', 'Z'],
# Disabled as it conflicts with Vietnamese.
# Serbian
# ['љ', 'lj'],
# ['њ', 'nj'],
# ['Љ', 'Lj'],
# ['Њ', 'Nj'],
# ['đ', 'dj'],
# ['Đ', 'Dj'],
# ['ђ', 'dj'],
# ['ј', 'j'],
# ['ћ', 'c'],
# ['џ', 'dz'],
# ['Ђ', 'Dj'],
# ['Ј', 'j'],
# ['Ћ', 'C'],
# ['Џ', 'Dz'],
# Disabled as it conflicts with German and Latin.
# Slovak
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ľ', 'l'],
# ['ĺ', 'l'],
# ['ŕ', 'r'],
# ['Ľ', 'L'],
# ['Ĺ', 'L'],
# ['Ŕ', 'R'],
# Disabled as it conflicts with German and Latin.
# Swedish
# ['å', 'o'],
# ['Å', 'o'],
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ë', 'e'],
# ['Ë', 'E'],
# ['ö', 'o'],
# ['Ö', 'O'],
# Ukrainian
['Є', 'Ye'],
['І', 'I'],
['Ї', 'Yi'],
['Ґ', 'G'],
['є', 'ye'],
['і', 'i'],
['ї', 'yi'],
['ґ', 'g'],
# Dutch
['IJ', 'IJ'],
['ij', 'ij'],
# Danish
# ['Æ', 'Ae'],
# ['Ø', 'Oe'],
# ['Å', 'Aa'],
# ['æ', 'ae'],
# ['ø', 'oe'],
# ['å', 'aa']
# Currencies
['¢', 'c'],
['¥', 'Y'],
['߿', 'b'],
['৳', 't'],
['૱', 'Bo'],
['฿', 'B'],
['₠', 'CE'],
['₡', 'C'],
['₢', 'Cr'],
['₣', 'F'],
['₥', 'm'],
['₦', 'N'],
['₧', 'Pt'],
['₨', 'Rs'],
['₩', 'W'],
['₫', 's'],
['€', 'E'],
['₭', 'K'],
['₮', 'T'],
['₯', 'Dp'],
['₰', 'S'],
['₱', 'P'],
['₲', 'G'],
['₳', 'A'],
['₴', 'S'],
['₵', 'C'],
['₶', 'tt'],
['₷', 'S'],
['₸', 'T'],
['₹', 'R'],
['₺', 'L'],
['₽', 'P'],
['₿', 'B'],
['﹩', '$'],
['¢', 'c'],
['¥', 'Y'],
['₩', 'W'],
# Latin
['𝐀', 'A'],
['𝐁', 'B'],
['𝐂', 'C'],
['𝐃', 'D'],
['𝐄', 'E'],
['𝐅', 'F'],
['𝐆', 'G'],
['𝐇', 'H'],
['𝐈', 'I'],
['𝐉', 'J'],
['𝐊', 'K'],
['𝐋', 'L'],
['𝐌', 'M'],
['𝐍', 'N'],
['𝐎', 'O'],
['𝐏', 'P'],
['𝐐', 'Q'],
['𝐑', 'R'],
['𝐒', 'S'],
['𝐓', 'T'],
['𝐔', 'U'],
['𝐕', 'V'],
['𝐖', 'W'],
['𝐗', 'X'],
['𝐘', 'Y'],
['𝐙', 'Z'],
['𝐚', 'a'],
['𝐛', 'b'],
['𝐜', 'c'],
['𝐝', 'd'],
['𝐞', 'e'],
['𝐟', 'f'],
['𝐠', 'g'],
['𝐡', 'h'],
['𝐢', 'i'],
['𝐣', 'j'],
['𝐤', 'k'],
['𝐥', 'l'],
['𝐦', 'm'],
['𝐧', 'n'],
['𝐨', 'o'],
['𝐩', 'p'],
['𝐪', 'q'],
['𝐫', 'r'],
['𝐬', 's'],
['𝐭', 't'],
['𝐮', 'u'],
['𝐯', 'v'],
['𝐰', 'w'],
['𝐱', 'x'],
['𝐲', 'y'],
['𝐳', 'z'],
['𝐴', 'A'],
['𝐵', 'B'],
['𝐶', 'C'],
['𝐷', 'D'],
['𝐸', 'E'],
['𝐹', 'F'],
['𝐺', 'G'],
['𝐻', 'H'],
['𝐼', 'I'],
['𝐽', 'J'],
['𝐾', 'K'],
['𝐿', 'L'],
['𝑀', 'M'],
['𝑁', 'N'],
['𝑂', 'O'],
['𝑃', 'P'],
['𝑄', 'Q'],
['𝑅', 'R'],
['𝑆', 'S'],
['𝑇', 'T'],
['𝑈', 'U'],
['𝑉', 'V'],
['𝑊', 'W'],
['𝑋', 'X'],
['𝑌', 'Y'],
['𝑍', 'Z'],
['𝑎', 'a'],
['𝑏', 'b'],
['𝑐', 'c'],
['𝑑', 'd'],
['𝑒', 'e'],
['𝑓', 'f'],
['𝑔', 'g'],
['𝑖', 'i'],
['𝑗', 'j'],
['𝑘', 'k'],
['𝑙', 'l'],
['𝑚', 'm'],
['𝑛', 'n'],
['𝑜', 'o'],
['𝑝', 'p'],
['𝑞', 'q'],
['𝑟', 'r'],
['𝑠', 's'],
['𝑡', 't'],
['𝑢', 'u'],
['𝑣', 'v'],
['𝑤', 'w'],
['𝑥', 'x'],
['𝑦', 'y'],
['𝑧', 'z'],
['𝑨', 'A'],
['𝑩', 'B'],
['𝑪', 'C'],
['𝑫', 'D'],
['𝑬', 'E'],
['𝑭', 'F'],
['𝑮', 'G'],
['𝑯', 'H'],
['𝑰', 'I'],
['𝑱', 'J'],
['𝑲', 'K'],
['𝑳', 'L'],
['𝑴', 'M'],
['𝑵', 'N'],
['𝑶', 'O'],
['𝑷', 'P'],
['𝑸', 'Q'],
['𝑹', 'R'],
['𝑺', 'S'],
['𝑻', 'T'],
['𝑼', 'U'],
['𝑽', 'V'],
['𝑾', 'W'],
['𝑿', 'X'],
['𝒀', 'Y'],
['𝒁', 'Z'],
['𝒂', 'a'],
['𝒃', 'b'],
['𝒄', 'c'],
['𝒅', 'd'],
['𝒆', 'e'],
['𝒇', 'f'],
['𝒈', 'g'],
['𝒉', 'h'],
['𝒊', 'i'],
['𝒋', 'j'],
['𝒌', 'k'],
['𝒍', 'l'],
['𝒎', 'm'],
['𝒏', 'n'],
['𝒐', 'o'],
['𝒑', 'p'],
['𝒒', 'q'],
['𝒓', 'r'],
['𝒔', 's'],
['𝒕', 't'],
['𝒖', 'u'],
['𝒗', 'v'],
['𝒘', 'w'],
['𝒙', 'x'],
['𝒚', 'y'],
['𝒛', 'z'],
['𝒜', 'A'],
['𝒞', 'C'],
['𝒟', 'D'],
['𝒢', 'g'],
['𝒥', 'J'],
['𝒦', 'K'],
['𝒩', 'N'],
['𝒪', 'O'],
['𝒫', 'P'],
['𝒬', 'Q'],
['𝒮', 'S'],
['𝒯', 'T'],
['𝒰', 'U'],
['𝒱', 'V'],
['𝒲', 'W'],
['𝒳', 'X'],
['𝒴', 'Y'],
['𝒵', 'Z'],
['𝒶', 'a'],
['𝒷', 'b'],
['𝒸', 'c'],
['𝒹', 'd'],
['𝒻', 'f'],
['𝒽', 'h'],
['𝒾', 'i'],
['𝒿', 'j'],
['𝓀', 'h'],
['𝓁', 'l'],
['𝓂', 'm'],
['𝓃', 'n'],
['𝓅', 'p'],
['𝓆', 'q'],
['𝓇', 'r'],
['𝓈', 's'],
['𝓉', 't'],
['𝓊', 'u'],
['𝓋', 'v'],
['𝓌', 'w'],
['𝓍', 'x'],
['𝓎', 'y'],
['𝓏', 'z'],
['𝓐', 'A'],
['𝓑', 'B'],
['𝓒', 'C'],
['𝓓', 'D'],
['𝓔', 'E'],
['𝓕', 'F'],
['𝓖', 'G'],
['𝓗', 'H'],
['𝓘', 'I'],
['𝓙', 'J'],
['𝓚', 'K'],
['𝓛', 'L'],
['𝓜', 'M'],
['𝓝', 'N'],
['𝓞', 'O'],
['𝓟', 'P'],
['𝓠', 'Q'],
['𝓡', 'R'],
['𝓢', 'S'],
['𝓣', 'T'],
['𝓤', 'U'],
['𝓥', 'V'],
['𝓦', 'W'],
['𝓧', 'X'],
['𝓨', 'Y'],
['𝓩', 'Z'],
['𝓪', 'a'],
['𝓫', 'b'],
['𝓬', 'c'],
['𝓭', 'd'],
['𝓮', 'e'],
['𝓯', 'f'],
['𝓰', 'g'],
['𝓱', 'h'],
['𝓲', 'i'],
['𝓳', 'j'],
['𝓴', 'k'],
['𝓵', 'l'],
['𝓶', 'm'],
['𝓷', 'n'],
['𝓸', 'o'],
['𝓹', 'p'],
['𝓺', 'q'],
['𝓻', 'r'],
['𝓼', 's'],
['𝓽', 't'],
['𝓾', 'u'],
['𝓿', 'v'],
['𝔀', 'w'],
['𝔁', 'x'],
['𝔂', 'y'],
['𝔃', 'z'],
['𝔄', 'A'],
['𝔅', 'B'],
['𝔇', 'D'],
['𝔈', 'E'],
['𝔉', 'F'],
['𝔊', 'G'],
['𝔍', 'J'],
['𝔎', 'K'],
['𝔏', 'L'],
['𝔐', 'M'],
['𝔑', 'N'],
['𝔒', 'O'],
['𝔓', 'P'],
['𝔔', 'Q'],
['𝔖', 'S'],
['𝔗', 'T'],
['𝔘', 'U'],
['𝔙', 'V'],
['𝔚', 'W'],
['𝔛', 'X'],
['𝔜', 'Y'],
['𝔞', 'a'],
['𝔟', 'b'],
['𝔠', 'c'],
['𝔡', 'd'],
['𝔢', 'e'],
['𝔣', 'f'],
['𝔤', 'g'],
['𝔥', 'h'],
['𝔦', 'i'],
['𝔧', 'j'],
['𝔨', 'k'],
['𝔩', 'l'],
['𝔪', 'm'],
['𝔫', 'n'],
['𝔬', 'o'],
['𝔭', 'p'],
['𝔮', 'q'],
['𝔯', 'r'],
['𝔰', 's'],
['𝔱', 't'],
['𝔲', 'u'],
['𝔳', 'v'],
['𝔴', 'w'],
['𝔵', 'x'],
['𝔶', 'y'],
['𝔷', 'z'],
['𝔸', 'A'],
['𝔹', 'B'],
['𝔻', 'D'],
['𝔼', 'E'],
['𝔽', 'F'],
['𝔾', 'G'],
['𝕀', 'I'],
['𝕁', 'J'],
['𝕂', 'K'],
['𝕃', 'L'],
['𝕄', 'M'],
['𝕆', 'N'],
['𝕊', 'S'],
['𝕋', 'T'],
['𝕌', 'U'],
['𝕍', 'V'],
['𝕎', 'W'],
['𝕏', 'X'],
['𝕐', 'Y'],
['𝕒', 'a'],
['𝕓', 'b'],
['𝕔', 'c'],
['𝕕', 'd'],
['𝕖', 'e'],
['𝕗', 'f'],
['𝕘', 'g'],
['𝕙', 'h'],
['𝕚', 'i'],
['𝕛', 'j'],
['𝕜', 'k'],
['𝕝', 'l'],
['𝕞', 'm'],
['𝕟', 'n'],
['𝕠', 'o'],
['𝕡', 'p'],
['𝕢', 'q'],
['𝕣', 'r'],
['𝕤', 's'],
['𝕥', 't'],
['𝕦', 'u'],
['𝕧', 'v'],
['𝕨', 'w'],
['𝕩', 'x'],
['𝕪', 'y'],
['𝕫', 'z'],
['𝕬', 'A'],
['𝕭', 'B'],
['𝕮', 'C'],
['𝕯', 'D'],
['𝕰', 'E'],
['𝕱', 'F'],
['𝕲', 'G'],
['𝕳', 'H'],
['𝕴', 'I'],
['𝕵', 'J'],
['𝕶', 'K'],
['𝕷', 'L'],
['𝕸', 'M'],
['𝕹', 'N'],
['𝕺', 'O'],
['𝕻', 'P'],
['𝕼', 'Q'],
['𝕽', 'R'],
['𝕾', 'S'],
['𝕿', 'T'],
['𝖀', 'U'],
['𝖁', 'V'],
['𝖂', 'W'],
['𝖃', 'X'],
['𝖄', 'Y'],
['𝖅', 'Z'],
['𝖆', 'a'],
['𝖇', 'b'],
['𝖈', 'c'],
['𝖉', 'd'],
['𝖊', 'e'],
['𝖋', 'f'],
['𝖌', 'g'],
['𝖍', 'h'],
['𝖎', 'i'],
['𝖏', 'j'],
['𝖐', 'k'],
['𝖑', 'l'],
['𝖒', 'm'],
['𝖓', 'n'],
['𝖔', 'o'],
['𝖕', 'p'],
['𝖖', 'q'],
['𝖗', 'r'],
['𝖘', 's'],
['𝖙', 't'],
['𝖚', 'u'],
['𝖛', 'v'],
['𝖜', 'w'],
['𝖝', 'x'],
['𝖞', 'y'],
['𝖟', 'z'],
['𝖠', 'A'],
['𝖡', 'B'],
['𝖢', 'C'],
['𝖣', 'D'],
['𝖤', 'E'],
['𝖥', 'F'],
['𝖦', 'G'],
['𝖧', 'H'],
['𝖨', 'I'],
['𝖩', 'J'],
['𝖪', 'K'],
['𝖫', 'L'],
['𝖬', 'M'],
['𝖭', 'N'],
['𝖮', 'O'],
['𝖯', 'P'],
['𝖰', 'Q'],
['𝖱', 'R'],
['𝖲', 'S'],
['𝖳', 'T'],
['𝖴', 'U'],
['𝖵', 'V'],
['𝖶', 'W'],
['𝖷', 'X'],
['𝖸', 'Y'],
['𝖹', 'Z'],
['𝖺', 'a'],
['𝖻', 'b'],
['𝖼', 'c'],
['𝖽', 'd'],
['𝖾', 'e'],
['𝖿', 'f'],
['𝗀', 'g'],
['𝗁', 'h'],
['𝗂', 'i'],
['𝗃', 'j'],
['𝗄', 'k'],
['𝗅', 'l'],
['𝗆', 'm'],
['𝗇', 'n'],
['𝗈', 'o'],
['𝗉', 'p'],
['𝗊', 'q'],
['𝗋', 'r'],
['𝗌', 's'],
['𝗍', 't'],
['𝗎', 'u'],
['𝗏', 'v'],
['𝗐', 'w'],
['𝗑', 'x'],
['𝗒', 'y'],
['𝗓', 'z'],
['𝗔', 'A'],
['𝗕', 'B'],
['𝗖', 'C'],
['𝗗', 'D'],
['𝗘', 'E'],
['𝗙', 'F'],
['𝗚', 'G'],
['𝗛', 'H'],
['𝗜', 'I'],
['𝗝', 'J'],
['𝗞', 'K'],
['𝗟', 'L'],
['𝗠', 'M'],
['𝗡', 'N'],
['𝗢', 'O'],
['𝗣', 'P'],
['𝗤', 'Q'],
['𝗥', 'R'],
['𝗦', 'S'],
['𝗧', 'T'],
['𝗨', 'U'],
['𝗩', 'V'],
['𝗪', 'W'],
['𝗫', 'X'],
['𝗬', 'Y'],
['𝗭', 'Z'],
['𝗮', 'a'],
['𝗯', 'b'],
['𝗰', 'c'],
['𝗱', 'd'],
['𝗲', 'e'],
['𝗳', 'f'],
['𝗴', 'g'],
['𝗵', 'h'],
['𝗶', 'i'],
['𝗷', 'j'],
['𝗸', 'k'],
['𝗹', 'l'],
['𝗺', 'm'],
['𝗻', 'n'],
['𝗼', 'o'],
['𝗽', 'p'],
['𝗾', 'q'],
['𝗿', 'r'],
['𝘀', 's'],
['𝘁', 't'],
['𝘂', 'u'],
['𝘃', 'v'],
['𝘄', 'w'],
['𝘅', 'x'],
['𝘆', 'y'],
['𝘇', 'z'],
['𝘈', 'A'],
['𝘉', 'B'],
['𝘊', 'C'],
['𝘋', 'D'],
['𝘌', 'E'],
['𝘍', 'F'],
['𝘎', 'G'],
['𝘏', 'H'],
['𝘐', 'I'],
['𝘑', 'J'],
['𝘒', 'K'],
['𝘓', 'L'],
['𝘔', 'M'],
['𝘕', 'N'],
['𝘖', 'O'],
['𝘗', 'P'],
['𝘘', 'Q'],
['𝘙', 'R'],
['𝘚', 'S'],
['𝘛', 'T'],
['𝘜', 'U'],
['𝘝', 'V'],
['𝘞', 'W'],
['𝘟', 'X'],
['𝘠', 'Y'],
['𝘡', 'Z'],
['𝘢', 'a'],
['𝘣', 'b'],
['𝘤', 'c'],
['𝘥', 'd'],
['𝘦', 'e'],
['𝘧', 'f'],
['𝘨', 'g'],
['𝘩', 'h'],
['𝘪', 'i'],
['𝘫', 'j'],
['𝘬', 'k'],
['𝘭', 'l'],
['𝘮', 'm'],
['𝘯', 'n'],
['𝘰', 'o'],
['𝘱', 'p'],
['𝘲', 'q'],
['𝘳', 'r'],
['𝘴', 's'],
['𝘵', 't'],
['𝘶', 'u'],
['𝘷', 'v'],
['𝘸', 'w'],
['𝘹', 'x'],
['𝘺', 'y'],
['𝘻', 'z'],
['𝘼', 'A'],
['𝘽', 'B'],
['𝘾', 'C'],
['𝘿', 'D'],
['𝙀', 'E'],
['𝙁', 'F'],
['𝙂', 'G'],
['𝙃', 'H'],
['𝙄', 'I'],
['𝙅', 'J'],
['𝙆', 'K'],
['𝙇', 'L'],
['𝙈', 'M'],
['𝙉', 'N'],
['𝙊', 'O'],
['𝙋', 'P'],
['𝙌', 'Q'],
['𝙍', 'R'],
['𝙎', 'S'],
['𝙏', 'T'],
['𝙐', 'U'],
['𝙑', 'V'],
['𝙒', 'W'],
['𝙓', 'X'],
['𝙔', 'Y'],
['𝙕', 'Z'],
['𝙖', 'a'],
['𝙗', 'b'],
['𝙘', 'c'],
['𝙙', 'd'],
['𝙚', 'e'],
['𝙛', 'f'],
['𝙜', 'g'],
['𝙝', 'h'],
['𝙞', 'i'],
['𝙟', 'j'],
['𝙠', 'k'],
['𝙡', 'l'],
['𝙢', 'm'],
['𝙣', 'n'],
['𝙤', 'o'],
['𝙥', 'p'],
['𝙦', 'q'],
['𝙧', 'r'],
['𝙨', 's'],
['𝙩', 't'],
['𝙪', 'u'],
['𝙫', 'v'],
['𝙬', 'w'],
['𝙭', 'x'],
['𝙮', 'y'],
['𝙯', 'z'],
['𝙰', 'A'],
['𝙱', 'B'],
['𝙲', 'C'],
['𝙳', 'D'],
['𝙴', 'E'],
['𝙵', 'F'],
['𝙶', 'G'],
['𝙷', 'H'],
['𝙸', 'I'],
['𝙹', 'J'],
['𝙺', 'K'],
['𝙻', 'L'],
['𝙼', 'M'],
['𝙽', 'N'],
['𝙾', 'O'],
['𝙿', 'P'],
['𝚀', 'Q'],
['𝚁', 'R'],
['𝚂', 'S'],
['𝚃', 'T'],
['𝚄', 'U'],
['𝚅', 'V'],
['𝚆', 'W'],
['𝚇', 'X'],
['𝚈', 'Y'],
['𝚉', 'Z'],
['𝚊', 'a'],
['𝚋', 'b'],
['𝚌', 'c'],
['𝚍', 'd'],
['𝚎', 'e'],
['𝚏', 'f'],
['𝚐', 'g'],
['𝚑', 'h'],
['𝚒', 'i'],
['𝚓', 'j'],
['𝚔', 'k'],
['𝚕', 'l'],
['𝚖', 'm'],
['𝚗', 'n'],
['𝚘', 'o'],
['𝚙', 'p'],
['𝚚', 'q'],
['𝚛', 'r'],
['𝚜', 's'],
['𝚝', 't'],
['𝚞', 'u'],
['𝚟', 'v'],
['𝚠', 'w'],
['𝚡', 'x'],
['𝚢', 'y'],
['𝚣', 'z'],
# Dotless letters
['𝚤', 'l'],
['𝚥', 'j'],
# Greek
['𝛢', 'A'],
['𝛣', 'B'],
['𝛤', 'G'],
['𝛥', 'D'],
['𝛦', 'E'],
['𝛧', 'Z'],
['𝛨', 'I'],
['𝛩', 'TH'],
['𝛪', 'I'],
['𝛫', 'K'],
['𝛬', 'L'],
['𝛭', 'M'],
['𝛮', 'N'],
['𝛯', 'KS'],
['𝛰', 'O'],
['𝛱', 'P'],
['𝛲', 'R'],
['𝛳', 'TH'],
['𝛴', 'S'],
['𝛵', 'T'],
['𝛶', 'Y'],
['𝛷', 'F'],
['𝛸', 'x'],
['𝛹', 'PS'],
['𝛺', 'O'],
['𝛻', 'D'],
['𝛼', 'a'],
['𝛽', 'b'],
['𝛾', 'g'],
['𝛿', 'd'],
['𝜀', 'e'],
['𝜁', 'z'],
['𝜂', 'i'],
['𝜃', 'th'],
['𝜄', 'i'],
['𝜅', 'k'],
['𝜆', 'l'],
['𝜇', 'm'],
['𝜈', 'n'],
['𝜉', 'ks'],
['𝜊', 'o'],
['𝜋', 'p'],
['𝜌', 'r'],
['𝜍', 's'],
['𝜎', 's'],
['𝜏', 't'],
['𝜐', 'y'],
['𝜑', 'f'],
['𝜒', 'x'],
['𝜓', 'ps'],
['𝜔', 'o'],
['𝜕', 'd'],
['𝜖', 'E'],
['𝜗', 'TH'],
['𝜘', 'K'],
['𝜙', 'f'],
['𝜚', 'r'],
['𝜛', 'p'],
['𝜜', 'A'],
['𝜝', 'V'],
['𝜞', 'G'],
['𝜟', 'D'],
['𝜠', 'E'],
['𝜡', 'Z'],
['𝜢', 'I'],
['𝜣', 'TH'],
['𝜤', 'I'],
['𝜥', 'K'],
['𝜦', 'L'],
['𝜧', 'M'],
['𝜨', 'N'],
['𝜩', 'KS'],
['𝜪', 'O'],
['𝜫', 'P'],
['𝜬', 'S'],
['𝜭', 'TH'],
['𝜮', 'S'],
['𝜯', 'T'],
['𝜰', 'Y'],
['𝜱', 'F'],
['𝜲', 'X'],
['𝜳', 'PS'],
['𝜴', 'O'],
['𝜵', 'D'],
['𝜶', 'a'],
['𝜷', 'v'],
['𝜸', 'g'],
['𝜹', 'd'],
['𝜺', 'e'],
['𝜻', 'z'],
['𝜼', 'i'],
['𝜽', 'th'],
['𝜾', 'i'],
['𝜿', 'k'],
['𝝀', 'l'],
['𝝁', 'm'],
['𝝂', 'n'],
['𝝃', 'ks'],
['𝝄', 'o'],
['𝝅', 'p'],
['𝝆', 'r'],
['𝝇', 's'],
['𝝈', 's'],
['𝝉', 't'],
['𝝊', 'y'],
['𝝋', 'f'],
['𝝌', 'x'],
['𝝍', 'ps'],
['𝝎', 'o'],
['𝝏', 'a'],
['𝝐', 'e'],
['𝝑', 'i'],
['𝝒', 'k'],
['𝝓', 'f'],
['𝝔', 'r'],
['𝝕', 'p'],
['𝝖', 'A'],
['𝝗', 'B'],
['𝝘', 'G'],
['𝝙', 'D'],
['𝝚', 'E'],
['𝝛', 'Z'],
['𝝜', 'I'],
['𝝝', 'TH'],
['𝝞', 'I'],
['𝝟', 'K'],
['𝝠', 'L'],
['𝝡', 'M'],
['𝝢', 'N'],
['𝝣', 'KS'],
['𝝤', 'O'],
['𝝥', 'P'],
['𝝦', 'R'],
['𝝧', 'TH'],
['𝝨', 'S'],
['𝝩', 'T'],
['𝝪', 'Y'],
['𝝫', 'F'],
['𝝬', 'X'],
['𝝭', 'PS'],
['𝝮', 'O'],
['𝝯', 'D'],
['𝝰', 'a'],
['𝝱', 'v'],
['𝝲', 'g'],
['𝝳', 'd'],
['𝝴', 'e'],
['𝝵', 'z'],
['𝝶', 'i'],
['𝝷', 'th'],
['𝝸', 'i'],
['𝝹', 'k'],
['𝝺', 'l'],
['𝝻', 'm'],
['𝝼', 'n'],
['𝝽', 'ks'],
['𝝾', 'o'],
['𝝿', 'p'],
['𝞀', 'r'],
['𝞁', 's'],
['𝞂', 's'],
['𝞃', 't'],
['𝞄', 'y'],
['𝞅', 'f'],
['𝞆', 'x'],
['𝞇', 'ps'],
['𝞈', 'o'],
['𝞉', 'a'],
['𝞊', 'e'],
['𝞋', 'i'],
['𝞌', 'k'],
['𝞍', 'f'],
['𝞎', 'r'],
['𝞏', 'p'],
['𝞐', 'A'],
['𝞑', 'V'],
['𝞒', 'G'],
['𝞓', 'D'],
['𝞔', 'E'],
['𝞕', 'Z'],
['𝞖', 'I'],
['𝞗', 'TH'],
['𝞘', 'I'],
['𝞙', 'K'],
['𝞚', 'L'],
['𝞛', 'M'],
['𝞜', 'N'],
['𝞝', 'KS'],
['𝞞', 'O'],
['𝞟', 'P'],
['𝞠', 'S'],
['𝞡', 'TH'],
['𝞢', 'S'],
['𝞣', 'T'],
['𝞤', 'Y'],
['𝞥', 'F'],
['𝞦', 'X'],
['𝞧', 'PS'],
['𝞨', 'O'],
['𝞩', 'D'],
['𝞪', 'av'],
['𝞫', 'g'],
['𝞬', 'd'],
['𝞭', 'e'],
['𝞮', 'z'],
['𝞯', 'i'],
['𝞰', 'i'],
['𝞱', 'th'],
['𝞲', 'i'],
['𝞳', 'k'],
['𝞴', 'l'],
['𝞵', 'm'],
['𝞶', 'n'],
['𝞷', 'ks'],
['𝞸', 'o'],
['𝞹', 'p'],
['𝞺', 'r'],
['𝞻', 's'],
['𝞼', 's'],
['𝞽', 't'],
['𝞾', 'y'],
['𝞿', 'f'],
['𝟀', 'x'],
['𝟁', 'ps'],
['𝟂', 'o'],
['𝟃', 'a'],
['𝟄', 'e'],
['𝟅', 'i'],
['𝟆', 'k'],
['𝟇', 'f'],
['𝟈', 'r'],
['𝟉', 'p'],
['𝟊', 'F'],
['𝟋', 'f'],
['⒜', '(a)'],
['⒝', '(b)'],
['⒞', '(c)'],
['⒟', '(d)'],
['⒠', '(e)'],
['⒡', '(f)'],
['⒢', '(g)'],
['⒣', '(h)'],
['⒤', '(i)'],
['⒥', '(j)'],
['⒦', '(k)'],
['⒧', '(l)'],
['⒨', '(m)'],
['⒩', '(n)'],
['⒪', '(o)'],
['⒫', '(p)'],
['⒬', '(q)'],
['⒭', '(r)'],
['⒮', '(s)'],
['⒯', '(t)'],
['⒰', '(u)'],
['⒱', '(v)'],
['⒲', '(w)'],
['⒳', '(x)'],
['⒴', '(y)'],
['⒵', '(z)'],
['Ⓐ', '(A)'],
['Ⓑ', '(B)'],
['Ⓒ', '(C)'],
['Ⓓ', '(D)'],
['Ⓔ', '(E)'],
['Ⓕ', '(F)'],
['Ⓖ', '(G)'],
['Ⓗ', '(H)'],
['Ⓘ', '(I)'],
['Ⓙ', '(J)'],
['Ⓚ', '(K)'],
['Ⓛ', '(L)'],
['Ⓝ', '(N)'],
['Ⓞ', '(O)'],
['Ⓟ', '(P)'],
['Ⓠ', '(Q)'],
['Ⓡ', '(R)'],
['Ⓢ', '(S)'],
['Ⓣ', '(T)'],
['Ⓤ', '(U)'],
['Ⓥ', '(V)'],
['Ⓦ', '(W)'],
['Ⓧ', '(X)'],
['Ⓨ', '(Y)'],
['Ⓩ', '(Z)'],
['ⓐ', '(a)'],
['ⓑ', '(b)'],
    ['ⓒ', '(c)'],
    ['ⓓ', '(d)'],
['ⓔ', '(e)'],
['ⓕ', '(f)'],
['ⓖ', '(g)'],
['ⓗ', '(h)'],
['ⓘ', '(i)'],
['ⓙ', '(j)'],
['ⓚ', '(k)'],
['ⓛ', '(l)'],
['ⓜ', '(m)'],
['ⓝ', '(n)'],
['ⓞ', '(o)'],
['ⓟ', '(p)'],
['ⓠ', '(q)'],
['ⓡ', '(r)'],
['ⓢ', '(s)'],
['ⓣ', '(t)'],
['ⓤ', '(u)'],
['ⓥ', '(v)'],
['ⓦ', '(w)'],
['ⓧ', '(x)'],
['ⓨ', '(y)'],
['ⓩ', '(z)'],
# Numbers
['𝟎', '0'],
['𝟏', '1'],
['𝟐', '2'],
['𝟑', '3'],
['𝟒', '4'],
['𝟓', '5'],
['𝟔', '6'],
['𝟕', '7'],
['𝟖', '8'],
['𝟗', '9'],
['𝟘', '0'],
['𝟙', '1'],
['𝟚', '2'],
['𝟛', '3'],
['𝟜', '4'],
['𝟝', '5'],
['𝟞', '6'],
['𝟟', '7'],
['𝟠', '8'],
['𝟡', '9'],
['𝟢', '0'],
['𝟣', '1'],
['𝟤', '2'],
['𝟥', '3'],
['𝟦', '4'],
['𝟧', '5'],
['𝟨', '6'],
['𝟩', '7'],
['𝟪', '8'],
['𝟫', '9'],
['𝟬', '0'],
['𝟭', '1'],
['𝟮', '2'],
['𝟯', '3'],
['𝟰', '4'],
['𝟱', '5'],
['𝟲', '6'],
['𝟳', '7'],
['𝟴', '8'],
['𝟵', '9'],
['𝟶', '0'],
['𝟷', '1'],
['𝟸', '2'],
['𝟹', '3'],
['𝟺', '4'],
['𝟻', '5'],
['𝟼', '6'],
['𝟽', '7'],
['𝟾', '8'],
['𝟿', '9'],
['①', '1'],
['②', '2'],
['③', '3'],
['④', '4'],
['⑤', '5'],
['⑥', '6'],
['⑦', '7'],
['⑧', '8'],
['⑨', '9'],
['⑩', '10'],
['⑪', '11'],
['⑫', '12'],
['⑬', '13'],
['⑭', '14'],
['⑮', '15'],
['⑯', '16'],
['⑰', '17'],
['⑱', '18'],
['⑲', '19'],
['⑳', '20'],
['⑴', '1'],
['⑵', '2'],
['⑶', '3'],
['⑷', '4'],
['⑸', '5'],
['⑹', '6'],
['⑺', '7'],
['⑻', '8'],
['⑼', '9'],
['⑽', '10'],
['⑾', '11'],
['⑿', '12'],
['⒀', '13'],
['⒁', '14'],
['⒂', '15'],
['⒃', '16'],
['⒄', '17'],
['⒅', '18'],
['⒆', '19'],
['⒇', '20'],
['⒈', '1.'],
['⒉', '2.'],
['⒊', '3.'],
['⒋', '4.'],
['⒌', '5.'],
['⒍', '6.'],
['⒎', '7.'],
['⒏', '8.'],
['⒐', '9.'],
['⒑', '10.'],
['⒒', '11.'],
['⒓', '12.'],
['⒔', '13.'],
['⒕', '14.'],
['⒖', '15.'],
['⒗', '16.'],
['⒘', '17.'],
['⒙', '18.'],
['⒚', '19.'],
['⒛', '20.'],
['⓪', '0'],
['⓫', '11'],
['⓬', '12'],
['⓭', '13'],
['⓮', '14'],
['⓯', '15'],
['⓰', '16'],
['⓱', '17'],
['⓲', '18'],
['⓳', '19'],
['⓴', '20'],
['⓵', '1'],
['⓶', '2'],
['⓷', '3'],
['⓸', '4'],
['⓹', '5'],
['⓺', '6'],
['⓻', '7'],
['⓼', '8'],
['⓽', '9'],
['⓾', '10'],
['⓿', '0'],
# Punctuation
['🙰', '&'],
['🙱', '&'],
['🙲', '&'],
['🙳', '&'],
['🙴', '&'],
['🙵', '&'],
['🙶', '"'],
['🙷', '"'],
['🙸', '"'],
['‽', '?!'],
['🙹', '?!'],
['🙺', '?!'],
['🙻', '?!'],
['🙼', '/'],
['🙽', '\\'],
# Alchemy
['🜇', 'AR'],
['🜈', 'V'],
['🜉', 'V'],
['🜆', 'VR'],
['🜅', 'VF'],
['🜩', '2'],
['🜪', '5'],
['🝡', 'f'],
['🝢', 'W'],
['🝣', 'U'],
['🝧', 'V'],
['🝨', 'T'],
['🝪', 'V'],
['🝫', 'MB'],
['🝬', 'VB'],
['🝲', '3B'],
['🝳', '3B'],
# Emojis
['💯', '100'],
['🔙', 'BACK'],
['🔚', 'END'],
['🔛', 'ON!'],
['🔜', 'SOON'],
['🔝', 'TOP'],
['🔞', '18'],
['🔤', 'abc'],
['🔠', 'ABCD'],
['🔡', 'abcd'],
['🔢', '1234'],
['🔣', 'T&@%'],
['#️⃣', '#'],
['*️⃣', '*'],
['0️⃣', '0'],
['1️⃣', '1'],
['2️⃣', '2'],
['3️⃣', '3'],
['4️⃣', '4'],
['5️⃣', '5'],
['6️⃣', '6'],
['7️⃣', '7'],
['8️⃣', '8'],
['9️⃣', '9'],
['🔟', '10'],
['🅰️', 'A'],
['🅱️', 'B'],
['🆎', 'AB'],
['🆑', 'CL'],
['🅾️', 'O'],
['🅿', 'P'],
['🆘', 'SOS'],
['🅲', 'C'],
['🅳', 'D'],
['🅴', 'E'],
['🅵', 'F'],
['🅶', 'G'],
['🅷', 'H'],
['🅸', 'I'],
['🅹', 'J'],
['🅺', 'K'],
['🅻', 'L'],
['🅼', 'M'],
['🅽', 'N'],
['🆀', 'Q'],
['🆁', 'R'],
['🆂', 'S'],
['🆃', 'T'],
['🆄', 'U'],
['🆅', 'V'],
['🆆', 'W'],
['🆇', 'X'],
['🆈', 'Y'],
['🆉', 'Z'],
]
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/unidecoder/replacements.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
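# Illustrative sketch of the two lookup tables built above (a few keys taken
# from the bundled tables; not exhaustive):
#   replacements = [['⒜', '(a)'], ['①', '1'], ...]  ->  _replacements = {'⒜': '(a)', '①': '1', ...}
#   homoglyphs   = {'!': ['ǃ', 'ⵑ', '!'], ...}        ->  _homoglyphs  = {'ǃ': '!', 'ⵑ': '!', ...}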
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
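# Example usage (illustrative; outputs follow from the tables above):
#   >>> unidecoder('⒜ ①')                 # replacement table is consulted first by default
#   '(a) 1'
#   >>> unidecoder('Α', homoglyphs=True)   # Greek capital alpha folded to its Latin homoglyph
#   'A'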
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/unidecoder/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2/text/unidecoder/homoglyphs.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import csv
import glob
import json
import logging
import os
import time
from collections import OrderedDict
from contextlib import suppress
import itertools
import dllogger
import torch
import torch.nn as nn
import torch.nn.parallel
from timm.data import (
RealLabelsImagenet,
create_dataset,
create_loader,
resolve_data_config,
)
from timm.models import (
apply_test_time_pool,
create_model,
is_model,
list_models,
load_checkpoint,
)
# GPUNet Integration
from timm.models.registry import register_model
from timm.utils import (
    AverageMeter,
    accuracy,
    natural_key,
    setup_default_logging,
)
try:
    # set_jit_fuser is only available in newer timm releases; guard the import
    # so --fuser degrades with a clear error instead of a NameError.
    from timm.utils import set_jit_fuser
except ImportError:
    set_jit_fuser = None
from configs.model_hub import get_configs, get_model_list
from models.gpunet_builder import GPUNet_Builder
@register_model
def gpunet_0(pretrained=False, **kwargs):
"""Constructs GPUNet-0."""
modelJSON, checkpoint_path = get_configs(batch=1, latency="0.65ms", gpuType="GV100")
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_0",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
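# Once registered, the architecture is available through timm's regular factory,
# e.g. (illustrative): model = create_model("gpunet_0", pretrained=True)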
@register_model
def gpunet_1(pretrained=False, **kwargs):
"""Constructs GPUNet-1."""
modelJSON, checkpoint_path = get_configs(batch=1, latency="0.85ms", gpuType="GV100")
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_1",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_2(pretrained=False, **kwargs):
"""Constructs GPUNet-2."""
modelJSON, checkpoint_path = get_configs(batch=1, latency="1.75ms", gpuType="GV100")
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_2",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_d1(pretrained=False, **kwargs):
"""Constructs GPUNet-D1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="1.25ms-D", gpuType="GV100"
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_d1",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_d2(pretrained=False, **kwargs):
"""Constructs GPUNet-D2."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="2.25ms-D", gpuType="GV100"
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_d2",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_p0(pretrained=False, **kwargs):
"""Constructs GPUNet-P0."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.5ms-D", gpuType="GV100"
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_p0",
"crop_pct": 0.875,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
model.load_state_dict(torch.load(checkpoint_path))
return model
@register_model
def gpunet_p1(pretrained=False, **kwargs):
"""Constructs GPUNet-P1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.8ms-D", gpuType="GV100"
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_p1",
"crop_pct": 0.875,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
model.load_state_dict(torch.load(checkpoint_path))
return model
has_apex = False
try:
from apex import amp
has_apex = True
except ImportError:
pass
has_native_amp = False
try:
if getattr(torch.cuda.amp, "autocast") is not None:
has_native_amp = True
except AttributeError:
pass
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger("validate")
parser = argparse.ArgumentParser(description="PyTorch ImageNet Validation")
parser.add_argument("data", metavar="DIR", help="path to dataset")
parser.add_argument(
"--dataset",
"-d",
metavar="NAME",
default="",
help="dataset type (default: ImageFolder/ImageTar if empty)",
)
parser.add_argument(
"--split",
metavar="NAME",
default="validation",
help="dataset split (default: validation)",
)
# DLlogger
parser.add_argument(
"--dllogger-name", default="/logs/log.json", type=str, help="name of dllogger file"
)
parser.add_argument(
"--dataset-download",
action="store_true",
default=False,
help="Allow download of dataset for torch/ and tfds/ datasets that support it.",
)
parser.add_argument(
"--model",
"-m",
metavar="NAME",
default="dpn92",
help="model architecture (default: dpn92)",
)
parser.add_argument(
"-j",
"--workers",
default=4,
type=int,
metavar="N",
    help="number of data loading workers (default: 4)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256)",
)
parser.add_argument(
"--img-size",
default=None,
type=int,
metavar="N",
help="Input image dimension, uses model default if empty",
)
parser.add_argument(
"--input-size",
default=None,
nargs=3,
type=int,
metavar="N N N",
help="Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty",
)
parser.add_argument(
"--crop-pct",
default=None,
type=float,
metavar="N",
help="Input image center crop pct",
)
parser.add_argument(
"--mean",
type=float,
nargs="+",
default=None,
metavar="MEAN",
help="Override mean pixel value of dataset",
)
parser.add_argument(
"--std",
type=float,
nargs="+",
default=None,
metavar="STD",
    help="Override std deviation of dataset",
)
parser.add_argument(
"--interpolation",
default="",
type=str,
metavar="NAME",
help="Image resize interpolation type (overrides model)",
)
parser.add_argument(
"--num-classes", type=int, default=None, help="Number classes in dataset"
)
parser.add_argument(
"--class-map",
default="",
type=str,
metavar="FILENAME",
help='path to class to idx mapping file (default: "")',
)
parser.add_argument(
"--gp",
default=None,
type=str,
metavar="POOL",
help="Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.",
)
parser.add_argument(
"--log-freq",
default=10,
type=int,
metavar="N",
help="batch logging frequency (default: 10)",
)
parser.add_argument(
"--checkpoint",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--pretrained", dest="pretrained", action="store_true", help="use pre-trained model"
)
parser.add_argument("--num-gpu", type=int, default=1, help="Number of GPUS to use")
parser.add_argument(
"--test-pool", dest="test_pool", action="store_true", help="enable test time pool"
)
parser.add_argument(
"--no-prefetcher",
action="store_true",
default=False,
help="disable fast prefetcher",
)
parser.add_argument(
"--pin-mem",
action="store_true",
default=False,
help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
)
parser.add_argument(
"--channels-last",
action="store_true",
default=False,
help="Use channels_last memory layout",
)
parser.add_argument(
"--amp",
action="store_true",
default=False,
    help="Use AMP mixed precision. Prefers native Torch AMP, falls back to Apex.",
)
parser.add_argument(
"--apex-amp",
action="store_true",
default=False,
help="Use NVIDIA Apex AMP mixed precision",
)
parser.add_argument(
"--native-amp",
action="store_true",
default=False,
help="Use Native Torch AMP mixed precision",
)
parser.add_argument(
"--tf-preprocessing",
action="store_true",
default=False,
    help="Use Tensorflow preprocessing pipeline (requires CPU TF installed)",
)
parser.add_argument(
"--use-ema",
dest="use_ema",
action="store_true",
help="use ema version of weights if present",
)
parser.add_argument(
"--torchscript",
dest="torchscript",
action="store_true",
help="convert model torchscript for inference",
)
parser.add_argument(
"--fuser",
default="",
type=str,
help="Select jit fuser. One of ('', 'te', 'old', 'nvfuser')",
)
parser.add_argument(
"--results-file",
default="",
type=str,
metavar="FILENAME",
help="Output csv file for validation results (summary)",
)
parser.add_argument(
"--real-labels",
default="",
type=str,
metavar="FILENAME",
help="Real labels JSON file for imagenet evaluation",
)
parser.add_argument(
"--valid-labels",
default="",
type=str,
metavar="FILENAME",
help="Valid label indices txt file for validation of partial label space",
)
def validate(args):
# might as well try to validate something
args.pretrained = args.pretrained or not args.checkpoint
args.prefetcher = not args.no_prefetcher
amp_autocast = suppress # do nothing
if args.amp:
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
else:
            _logger.warning("Neither APEX nor native Torch AMP is available.")
assert not args.apex_amp or not args.native_amp, "Only one AMP mode should be set."
if args.native_amp:
amp_autocast = torch.cuda.amp.autocast
_logger.info("Validating in mixed precision with native PyTorch AMP.")
elif args.apex_amp:
_logger.info("Validating in mixed precision with NVIDIA APEX AMP.")
else:
_logger.info("Validating in float32. AMP not enabled.")
    if args.fuser:
        assert set_jit_fuser is not None, "--fuser requires a timm version that provides set_jit_fuser"
        set_jit_fuser(args.fuser)
# create model
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
in_chans=3,
global_pool=args.gp,
scriptable=args.torchscript,
)
if args.num_classes is None:
assert hasattr(
model, "num_classes"
), "Model must have `num_classes` attr if not set on cmd line/config."
args.num_classes = model.num_classes
if args.checkpoint:
load_checkpoint(model, args.checkpoint, args.use_ema)
param_count = sum([m.numel() for m in model.parameters()])
_logger.info("Model %s created, param count: %d" % (args.model, param_count))
data_config = resolve_data_config(
vars(args), model=model, use_test_size=True, verbose=True
)
dllogger_dir = os.path.dirname(args.dllogger_name)
if dllogger_dir and not os.path.exists(dllogger_dir):
os.makedirs(dllogger_dir, exist_ok=True)
log_path = args.dllogger_name
original_log_path = log_path
if os.path.exists(log_path):
for i in itertools.count():
s_fname = original_log_path.split('.')
log_path = '.'.join(s_fname[:-1]) + f'_{i}.' + s_fname[-1]
if not os.path.exists(log_path):
break
dllogger.init(
backends=[
dllogger.JSONStreamBackend(verbosity=1, filename=log_path),
dllogger.StdOutBackend(verbosity=0),
]
)
dllogger.metadata("top1", {"unit": None})
dllogger.metadata("top5", {"unit": None})
dllogger.metadata("average_ips", {"unit": "images/s"})
test_time_pool = False
if args.test_pool:
model, test_time_pool = apply_test_time_pool(
model, data_config, use_test_size=True
)
if args.torchscript:
torch.jit.optimized_execution(True)
model = torch.jit.script(model)
model = model.cuda()
if args.apex_amp:
model = amp.initialize(model, opt_level="O1")
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
if args.num_gpu > 1:
model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu)))
criterion = nn.CrossEntropyLoss().cuda()
dataset = create_dataset(
root=args.data,
name=args.dataset,
split=args.split,
download=args.dataset_download,
load_bytes=args.tf_preprocessing,
class_map=args.class_map,
)
if args.valid_labels:
with open(args.valid_labels, "r") as f:
valid_labels = {int(line.rstrip()) for line in f}
valid_labels = [i in valid_labels for i in range(args.num_classes)]
else:
valid_labels = None
if args.real_labels:
real_labels = RealLabelsImagenet(
dataset.filenames(basename=True), real_json=args.real_labels
)
else:
real_labels = None
crop_pct = 1.0 if test_time_pool else data_config["crop_pct"]
loader = create_loader(
dataset,
input_size=data_config["input_size"],
batch_size=args.batch_size,
use_prefetcher=args.prefetcher,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
crop_pct=crop_pct,
pin_memory=args.pin_mem,
tf_preprocessing=args.tf_preprocessing,
)
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
model.eval()
with torch.no_grad():
# warmup, reduce variability of first batch time, especially for comparing torchscript vs non
input = torch.randn(
(args.batch_size,) + tuple(data_config["input_size"])
).cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
model(input)
end = time.time()
for batch_idx, (input, target) in enumerate(loader):
if args.no_prefetcher:
target = target.cuda()
input = input.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
# compute output
with amp_autocast():
output = model(input)
if valid_labels is not None:
output = output[:, valid_labels]
loss = criterion(output, target)
if real_labels is not None:
real_labels.add_result(output)
# measure accuracy and record loss
acc1, acc5 = accuracy(output.detach(), target, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(acc1.item(), input.size(0))
top5.update(acc5.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if batch_idx % args.log_freq == 0:
_logger.info(
"Test: [{0:>4d}/{1}] "
"Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) "
"Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) "
"Acc@1: {top1.val:>7.3f} ({top1.avg:>7.3f}) "
"Acc@5: {top5.val:>7.3f} ({top5.avg:>7.3f})".format(
batch_idx,
len(loader),
batch_time=batch_time,
rate_avg=input.size(0) / batch_time.avg,
loss=losses,
top1=top1,
top5=top5,
)
)
if real_labels is not None:
# real labels mode replaces topk values at the end
top1a, top5a = real_labels.get_accuracy(k=1), real_labels.get_accuracy(k=5)
else:
top1a, top5a = top1.avg, top5.avg
results = OrderedDict(
model=args.model,
top1=round(top1a, 4),
top1_err=round(100 - top1a, 4),
top5=round(top5a, 4),
top5_err=round(100 - top5a, 4),
param_count=round(param_count / 1e6, 2),
img_size=data_config["input_size"][-1],
        crop_pct=crop_pct,
        interpolation=data_config["interpolation"],
        average_ips=len(dataset) / batch_time.sum,
)
_logger.info(
" * Acc@1 {:.3f} ({:.3f}) Acc@5 {:.3f} ({:.3f})".format(
results["top1"], results["top1_err"], results["top5"], results["top5_err"]
)
)
return results
def _try_run(args, initial_batch_size):
batch_size = initial_batch_size
results = OrderedDict()
error_str = "Unknown"
while batch_size >= 1:
args.batch_size = batch_size
torch.cuda.empty_cache()
try:
results = validate(args)
return results
except RuntimeError as e:
error_str = str(e)
if "channels_last" in error_str:
break
            batch_size = batch_size // 2
            _logger.warning(
                f'"{error_str}" while running validation. Reducing batch size to {batch_size} for retry.'
            )
results["error"] = error_str
_logger.error(f"{args.model} failed to validate ({error_str}).")
return results
def main():
setup_default_logging()
args = parser.parse_args()
model_cfgs = []
model_names = []
if os.path.isdir(args.checkpoint):
# validate all checkpoints in a path with same model
checkpoints = glob.glob(args.checkpoint + "/*.pth.tar")
checkpoints += glob.glob(args.checkpoint + "/*.pth")
model_names = list_models(args.model)
model_cfgs = [(args.model, c) for c in sorted(checkpoints, key=natural_key)]
else:
if args.model == "all":
# validate all models in a list of names with pretrained checkpoints
args.pretrained = True
model_names = list_models(
pretrained=True, exclude_filters=["*_in21k", "*_in22k", "*_dino"]
)
model_cfgs = [(n, "") for n in model_names]
elif not is_model(args.model):
# model name doesn't exist, try as wildcard filter
model_names = list_models(args.model)
model_cfgs = [(n, "") for n in model_names]
if not model_cfgs and os.path.isfile(args.model):
with open(args.model) as f:
model_names = [line.rstrip() for line in f]
model_cfgs = [(n, None) for n in model_names if n]
if len(model_cfgs):
results_file = args.results_file or "./results-all.csv"
_logger.info(
"Running bulk validation on these pretrained models: {}".format(
", ".join(model_names)
)
)
results = []
try:
initial_batch_size = args.batch_size
for m, c in model_cfgs:
args.model = m
args.checkpoint = c
r = _try_run(args, initial_batch_size)
if "error" in r:
continue
if args.checkpoint:
r["checkpoint"] = args.checkpoint
results.append(r)
except KeyboardInterrupt as e:
pass
results = sorted(results, key=lambda x: x["top1"], reverse=True)
if len(results):
write_results(results_file, results)
else:
results = validate(args)
dllogger.log(step=tuple(), data={"average_ips": results["average_ips"], "top1": results["top1"], "top5": results["top5"]}, verbosity=1)
dllogger.flush()
# output results in JSON to stdout w/ delimiter for runner script
print(f"--result\n{json.dumps(results, indent=4)}")
def write_results(results_file, results):
with open(results_file, mode="w") as cf:
dw = csv.DictWriter(cf, fieldnames=results[0].keys())
dw.writeheader()
for r in results:
dw.writerow(r)
cf.flush()
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/validate.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019-2022 Ross Wightman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
import glob
import re
from pathlib import Path
import time
from collections import OrderedDict
from contextlib import suppress
from datetime import datetime
import dllogger
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.utils
import yaml
from timm.data import (
AugMixDataset,
FastCollateMixup,
Mixup,
create_dataset,
create_loader,
resolve_data_config,
)
from timm.loss import (
JsdCrossEntropy,
LabelSmoothingCrossEntropy,
SoftTargetCrossEntropy,
)
from timm.models import (
convert_splitbn_model,
create_model,
load_checkpoint,
model_parameters,
resume_checkpoint,
safe_model_name,
)
from timm.optim import create_optimizer_v2, optimizer_kwargs
from timm.scheduler import create_scheduler
from timm.utils import *
from timm.utils import ApexScaler, NativeScaler
from torch.nn.parallel import DistributedDataParallel as NativeDDP
def cross_entropy_loss_with_soft_target(pred, soft_target):
    # dim=1 avoids nn.LogSoftmax's deprecated implicit-dimension behaviour
    logsoftmax = nn.LogSoftmax(dim=1)
    return torch.mean(torch.sum(-soft_target * logsoftmax(pred), 1))
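# Minimal sketch of how the soft-target loss above is typically driven during
# distillation (`teacher`/`student` are illustrative names, not defined here):
#   with torch.no_grad():
#       soft_target = F.softmax(teacher(input), dim=1)
#   loss = cross_entropy_loss_with_soft_target(student(input), soft_target)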
try:
from apex import amp
from apex.parallel import DistributedDataParallel as ApexDDP
from apex.parallel import convert_syncbn_model
has_apex = True
except ImportError:
has_apex = False
has_native_amp = False
try:
if getattr(torch.cuda.amp, "autocast") is not None:
has_native_amp = True
except AttributeError:
pass
try:
import wandb
has_wandb = True
except ImportError:
has_wandb = False
torch.backends.cudnn.benchmark = True
_logger = logging.getLogger("train")
# to enable Boolean in add_argument
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
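# Example: with `type=str2bool, nargs="?", const=True` (as used for --enable-distill
# below), both a bare `--enable-distill` and `--enable-distill true` parse to True.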
# The first arg parser parses out only the --config argument; it is used to
# load a YAML file containing key-values that override the defaults for the main parser below.
config_parser = parser = argparse.ArgumentParser(
description="Training Config", add_help=False
)
parser.add_argument(
"-c",
"--config",
default="",
type=str,
metavar="FILE",
help="YAML config file specifying default arguments",
)
parser = argparse.ArgumentParser(description="PyTorch ImageNet Training")
# DLlogger
parser.add_argument(
"--dllogger-name", default="/logs/log.json", type=str, help="name of dllogger file"
)
# Dataset / Model parameters
parser.add_argument("data_dir", metavar="DIR", help="path to dataset")
parser.add_argument(
"--dataset",
"-d",
metavar="NAME",
default="",
help="dataset type (default: ImageFolder/ImageTar if empty)",
)
parser.add_argument(
"--train-split",
metavar="NAME",
default="train",
help="dataset train split (default: train)",
)
parser.add_argument(
"--val-split",
metavar="NAME",
default="validation",
help="dataset validation split (default: validation)",
)
parser.add_argument(
"--model",
default="resnet101",
type=str,
metavar="MODEL",
    help='Name of model to train (default: "resnet101")',
)
parser.add_argument(
"--pretrained",
action="store_true",
default=False,
help="Start with pretrained version of specified network (if avail)",
)
parser.add_argument(
"--initial-checkpoint",
default="",
type=str,
metavar="PATH",
help="Initialize model from this checkpoint (default: none)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="Resume full model and optimizer state from checkpoint (default: none)",
)
parser.add_argument(
"--no-resume-opt",
action="store_true",
default=False,
help="prevent resume of optimizer state when resuming model",
)
parser.add_argument(
"--num-classes",
type=int,
default=None,
metavar="N",
help="number of label classes (Model default if None)",
)
parser.add_argument(
"--gp",
default=None,
type=str,
metavar="POOL",
help="Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.",
)
parser.add_argument(
"--img-size",
type=int,
default=None,
metavar="N",
help="Image patch size (default: None => model default)",
)
parser.add_argument(
"--input-size",
default=None,
nargs=3,
type=int,
metavar="N N N",
help="Input all image dimensions (d h w, e.g. --input-size 3 224 224), uses model default if empty",
)
parser.add_argument(
"--crop-pct",
default=None,
type=float,
metavar="N",
help="Input image center crop percent (for validation only)",
)
parser.add_argument(
"--mean",
type=float,
nargs="+",
default=None,
metavar="MEAN",
help="Override mean pixel value of dataset",
)
parser.add_argument(
"--std",
type=float,
nargs="+",
default=None,
metavar="STD",
    help="Override std deviation of dataset",
)
parser.add_argument(
"--interpolation",
default="",
type=str,
metavar="NAME",
help="Image resize interpolation type (overrides model)",
)
parser.add_argument(
"-b",
"--batch-size",
type=int,
default=32,
metavar="N",
help="input batch size for training (default: 32)",
)
parser.add_argument(
"-vb",
"--validation-batch-size-multiplier",
type=int,
default=1,
metavar="N",
help="ratio of validation batch size to training batch size (default: 1)",
)
# Optimizer parameters
parser.add_argument(
"--opt",
default="sgd",
type=str,
metavar="OPTIMIZER",
    help='Optimizer (default: "sgd")',
)
parser.add_argument(
"--opt-eps",
default=None,
type=float,
metavar="EPSILON",
help="Optimizer Epsilon (default: None, use opt default)",
)
parser.add_argument(
"--opt-betas",
default=None,
type=float,
nargs="+",
metavar="BETA",
help="Optimizer Betas (default: None, use opt default)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="Optimizer momentum (default: 0.9)",
)
parser.add_argument(
"--weight-decay", type=float, default=0.0001, help="weight decay (default: 0.0001)"
)
parser.add_argument(
"--clip-grad",
type=float,
default=None,
metavar="NORM",
help="Clip gradient norm (default: None, no clipping)",
)
parser.add_argument(
"--clip-mode",
type=str,
default="norm",
help='Gradient clipping mode. One of ("norm", "value", "agc")',
)
# Learning rate schedule parameters
parser.add_argument(
"--sched",
default="step",
type=str,
metavar="SCHEDULER",
    help='LR scheduler (default: "step")',
)
parser.add_argument(
"--lr", type=float, default=0.01, metavar="LR", help="learning rate (default: 0.01)"
)
parser.add_argument(
"--lr-noise",
type=float,
nargs="+",
default=None,
metavar="pct, pct",
help="learning rate noise on/off epoch percentages",
)
parser.add_argument(
"--lr-noise-pct",
type=float,
default=0.67,
metavar="PERCENT",
help="learning rate noise limit percent (default: 0.67)",
)
parser.add_argument(
"--lr-noise-std",
type=float,
default=1.0,
metavar="STDDEV",
help="learning rate noise std-dev (default: 1.0)",
)
parser.add_argument(
"--lr-cycle-mul",
type=float,
default=1.0,
metavar="MULT",
help="learning rate cycle len multiplier (default: 1.0)",
)
parser.add_argument(
"--lr-cycle-limit",
type=int,
default=1,
metavar="N",
help="learning rate cycle limit",
)
parser.add_argument(
"--warmup-lr",
type=float,
default=0.0001,
metavar="LR",
help="warmup learning rate (default: 0.0001)",
)
parser.add_argument(
"--min-lr",
type=float,
default=1e-5,
metavar="LR",
help="lower lr bound for cyclic schedulers that hit 0 (1e-5)",
)
parser.add_argument(
"--epochs",
type=int,
default=200,
metavar="N",
    help="number of epochs to train (default: 200)",
)
parser.add_argument(
"--epoch-repeats",
type=float,
default=0.0,
metavar="N",
help="epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).",
)
parser.add_argument(
"--start-epoch",
default=None,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"--benchmark-steps",
default=None,
type=int,
metavar="N",
help="For benchmarking, run this number of steps per epoch instead of all.",
)
parser.add_argument(
"--decay-epochs",
type=float,
default=30,
metavar="N",
help="epoch interval to decay LR",
)
parser.add_argument(
"--warmup-epochs",
type=int,
default=3,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
parser.add_argument(
"--cooldown-epochs",
type=int,
default=10,
metavar="N",
help="epochs to cooldown LR at min_lr, after cyclic schedule ends",
)
parser.add_argument(
"--patience-epochs",
type=int,
default=10,
metavar="N",
    help="patience epochs for Plateau LR scheduler (default: 10)",
)
parser.add_argument(
"--decay-rate",
"--dr",
type=float,
default=0.1,
metavar="RATE",
help="LR decay rate (default: 0.1)",
)
# Augmentation & regularization parameters
parser.add_argument(
"--no-aug",
action="store_true",
default=False,
help="Disable all training augmentation, override other train aug args",
)
parser.add_argument(
"--scale",
type=float,
nargs="+",
default=[0.08, 1.0],
metavar="PCT",
help="Random resize scale (default: 0.08 1.0)",
)
parser.add_argument(
"--ratio",
type=float,
nargs="+",
default=[3.0 / 4.0, 4.0 / 3.0],
metavar="RATIO",
help="Random resize aspect ratio (default: 0.75 1.33)",
)
parser.add_argument(
"--hflip", type=float, default=0.5, help="Horizontal flip training aug probability"
)
parser.add_argument(
"--vflip", type=float, default=0.0, help="Vertical flip training aug probability"
)
parser.add_argument(
"--color-jitter",
type=float,
default=0.4,
metavar="PCT",
help="Color jitter factor (default: 0.4)",
)
parser.add_argument(
"--aa",
type=str,
default=None,
metavar="NAME",
help='Use AutoAugment policy. "v0" or "original". (default: None)',
)
parser.add_argument(
"--aug-splits",
type=int,
default=0,
help="Number of augmentation splits (default: 0, valid: 0 or >=2)",
)
parser.add_argument(
"--jsd",
action="store_true",
default=False,
help="Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.",
)
parser.add_argument(
"--reprob",
type=float,
default=0.0,
metavar="PCT",
help="Random erase prob (default: 0.)",
)
parser.add_argument(
"--remode", type=str, default="const", help='Random erase mode (default: "const")'
)
parser.add_argument(
"--recount", type=int, default=1, help="Random erase count (default: 1)"
)
parser.add_argument(
"--resplit",
action="store_true",
default=False,
help="Do not random erase first (clean) augmentation split",
)
parser.add_argument(
"--mixup",
type=float,
default=0.0,
help="mixup alpha, mixup enabled if > 0. (default: 0.)",
)
parser.add_argument(
"--cutmix",
type=float,
default=0.0,
help="cutmix alpha, cutmix enabled if > 0. (default: 0.)",
)
parser.add_argument(
"--cutmix-minmax",
type=float,
nargs="+",
default=None,
help="cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)",
)
parser.add_argument(
"--mixup-prob",
type=float,
default=1.0,
help="Probability of performing mixup or cutmix when either/both is enabled",
)
parser.add_argument(
"--mixup-switch-prob",
type=float,
default=0.5,
help="Probability of switching to cutmix when both mixup and cutmix enabled",
)
parser.add_argument(
"--mixup-mode",
type=str,
default="batch",
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"',
)
parser.add_argument(
"--mixup-off-epoch",
default=0,
type=int,
metavar="N",
help="Turn off mixup after this epoch, disabled if 0 (default: 0)",
)
parser.add_argument(
"--smoothing", type=float, default=0.1, help="Label smoothing (default: 0.1)"
)
parser.add_argument(
"--train-interpolation",
type=str,
default="random",
help='Training interpolation (random, bilinear, bicubic default: "random")',
)
parser.add_argument(
"--drop", type=float, default=0.0, metavar="PCT", help="Dropout rate (default: 0.)"
)
parser.add_argument(
"--drop-connect",
type=float,
default=None,
metavar="PCT",
help="Drop connect rate, DEPRECATED, use drop-path (default: None)",
)
parser.add_argument(
"--drop-path",
type=float,
default=None,
metavar="PCT",
help="Drop path rate (default: None)",
)
parser.add_argument(
"--drop-block",
type=float,
default=None,
metavar="PCT",
help="Drop block rate (default: None)",
)
# Batch norm parameters (only works with gen_efficientnet based models currently)
parser.add_argument(
"--bn-tf",
action="store_true",
default=False,
help="Use Tensorflow BatchNorm defaults for models that support it (default: False)",
)
parser.add_argument(
"--bn-momentum",
type=float,
default=None,
help="BatchNorm momentum override (if not None)",
)
parser.add_argument(
"--bn-eps",
type=float,
default=None,
help="BatchNorm epsilon override (if not None)",
)
parser.add_argument(
"--sync-bn",
action="store_true",
help="Enable NVIDIA Apex or Torch synchronized BatchNorm.",
)
parser.add_argument(
"--dist-bn",
type=str,
default="",
help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")',
)
parser.add_argument(
"--split-bn",
action="store_true",
help="Enable separate BN layers per augmentation split.",
)
# Model Exponential Moving Average
parser.add_argument(
"--model-ema",
action="store_true",
default=False,
help="Enable tracking moving average of model weights",
)
parser.add_argument(
"--model-ema-force-cpu",
action="store_true",
default=False,
help="Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.",
)
parser.add_argument(
"--model-ema-decay",
type=float,
default=0.9998,
help="decay factor for model weights moving average (default: 0.9998)",
)
# Misc
parser.add_argument(
"--seed", type=int, default=42, metavar="S", help="random seed (default: 42)"
)
parser.add_argument(
"--log-interval",
type=int,
default=50,
metavar="N",
help="how many batches to wait before logging training status",
)
parser.add_argument(
"--recovery-interval",
type=int,
default=0,
metavar="N",
help="how many batches to wait before writing recovery checkpoint",
)
parser.add_argument(
"--checkpoint-hist",
type=int,
default=10,
metavar="N",
help="number of checkpoints to keep (default: 10)",
)
parser.add_argument(
"-j",
"--workers",
type=int,
default=2,
metavar="N",
    help="how many training processes to use (default: 2)",
)
parser.add_argument(
"--save-images",
action="store_true",
default=False,
    help="save images of input batches every log interval for debugging",
)
parser.add_argument(
"--amp",
action="store_true",
default=False,
help="use NVIDIA Apex AMP or Native AMP for mixed precision training",
)
parser.add_argument(
"--apex-amp",
action="store_true",
default=False,
help="Use NVIDIA Apex AMP mixed precision",
)
parser.add_argument(
"--native-amp",
action="store_true",
default=False,
help="Use Native Torch AMP mixed precision",
)
parser.add_argument(
"--channels-last",
action="store_true",
default=False,
help="Use channels_last memory layout",
)
parser.add_argument(
"--pin-mem",
action="store_true",
default=False,
help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
)
parser.add_argument(
"--no-prefetcher",
action="store_true",
default=False,
help="disable fast prefetcher",
)
parser.add_argument(
"--output",
default="",
type=str,
metavar="PATH",
help="path to output folder (default: none, current dir)",
)
parser.add_argument(
"--experiment",
default="",
type=str,
metavar="NAME",
help="name of train experiment, name of sub-folder for output",
)
parser.add_argument(
"--eval-metric",
default="top1",
type=str,
metavar="EVAL_METRIC",
    help='Best metric (default: "top1")',
)
parser.add_argument(
"--tta",
type=int,
default=0,
metavar="N",
help="Test/inference time augmentation (oversampling) factor. 0=None (default: 0)",
)
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument(
"--use-multi-epochs-loader",
action="store_true",
default=False,
help="use the multi-epochs-loader to save time at the beginning of every epoch",
)
parser.add_argument(
"--torchscript",
dest="torchscript",
action="store_true",
help="convert model torchscript for inference",
)
parser.add_argument(
"--log-wandb",
action="store_true",
default=False,
help="log training and validation metrics to wandb",
)
# Distillation
parser.add_argument(
"--enable-distill",
type=str2bool,
nargs="?",
const=True,
default=False,
metavar="Boolean",
help="to use distillation",
)
parser.add_argument(
"--test-teacher",
type=str2bool,
nargs="?",
const=True,
default=False,
metavar="Boolean",
help="to test the teacher before training",
)
parser.add_argument(
"--teacher", default="", type=str, metavar="MODEL", help="Name of teacher model"
)
parser.add_argument(
"--teacher-checkpoint",
default="",
type=str,
metavar="CHECKPOINT PATH",
help="The checkpoint to the teacher model",
)
parser.add_argument(
"--teacher-img-size",
default=224,
type=int,
metavar="INT",
help="image resolution for teacher",
)
from timm.models.registry import register_model
from configs.model_hub import get_configs
from models.gpunet_builder import GPUNet_Builder
@register_model
def gpunet_2(pretrained=False, **kwargs):
"""Constructs GPUNet-2."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="1.75ms", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_2",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_1(pretrained=False, **kwargs):
"""Constructs GPUNet-1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.85ms", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_1",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_0(pretrained=False, **kwargs):
"""Constructs GPUNet-0."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.65ms", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_0",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_d1(pretrained=False, **kwargs):
"""Constructs GPUNet-D1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="1.25ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_d1",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_d2(pretrained=False, **kwargs):
"""Constructs GPUNet-D2."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="2.25ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_d2",
"crop_pct": 1.0,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
load_checkpoint(model, checkpoint_path, use_ema=True)
return model
@register_model
def gpunet_p0(pretrained=False, **kwargs):
"""Constructs GPUNet-P0."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.5ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_p0",
"crop_pct": 0.875,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
model.load_state_dict(torch.load(checkpoint_path))
return model
@register_model
def gpunet_p1(pretrained=False, **kwargs):
"""Constructs GPUNet-P1."""
modelJSON, checkpoint_path = get_configs(
batch=1, latency="0.8ms-D", gpuType="GV100", download=False
)
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
model.default_cfg = {
"architecture": "gpunet_p1",
"crop_pct": 0.875,
"interpolation": "bicubic",
"input_size": (3, model.imgRes, model.imgRes),
"num_classes": 1000,
"mean": (0.485, 0.456, 0.406),
"std": (0.229, 0.224, 0.225),
}
print("model CFG:", model.default_cfg)
for key in model.default_cfg:
setattr(model, key, model.default_cfg[key])
if pretrained:
model.load_state_dict(torch.load(checkpoint_path))
return model
def _parse_args():
# Do we have a config file to parse?
args_config, remaining = config_parser.parse_known_args()
if args_config.config:
with open(args_config.config, "r") as f:
cfg = yaml.safe_load(f)
parser.set_defaults(**cfg)
# The main arg parser parses the rest of the args, the usual
# defaults will have been overridden if config file specified.
args = parser.parse_args(remaining)
# Cache the args as a text string to save them in the output dir later
args_text = yaml.safe_dump(args.__dict__, default_flow_style=False)
return args, args_text
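# Example (illustrative): a YAML file passed via `-c cfg.yaml`, e.g.
#   model: gpunet_0
#   batch_size: 128
# is loaded with yaml.safe_load and applied through parser.set_defaults, so its
# keys override the argparse defaults before the remaining CLI args are parsed.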
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
    log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
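# e.g. if "/logs/log.json.1" already exists, unique_log_fpath("/logs/log.json")
# returns "/logs/log.json.2"; with no numbered copies present it returns
# "/logs/log.json.1".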
def main():
setup_default_logging()
args, args_text = _parse_args()
if args.log_wandb:
if has_wandb:
wandb.init(project=args.experiment, config=args)
else:
_logger.warning(
"You've requested to log metrics to wandb but package not found. "
"Metrics not being logged to wandb, try `pip install wandb`"
)
args.prefetcher = not args.no_prefetcher
args.distributed = False
if "WORLD_SIZE" in os.environ:
args.distributed = int(os.environ["WORLD_SIZE"]) > 1
args.device = "cuda:0"
args.world_size = 1
args.rank = 0 # global rank
if args.distributed:
args.local_rank = int(os.environ.get("LOCAL_RANK", args.local_rank))
args.device = "cuda:%d" % args.local_rank
torch.cuda.set_device(args.local_rank)
print("->setting device:", args.local_rank)
torch.distributed.init_process_group(backend="nccl", init_method="env://")
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
_logger.info(
"Training in distributed mode with multiple processes, 1 GPU per process. Process %d, total %d."
% (args.rank, args.world_size)
)
else:
_logger.info("Training with a single process on 1 GPUs.")
assert args.rank >= 0
# resolve AMP arguments based on PyTorch / Apex availability
use_amp = None
if args.amp:
# `--amp` chooses native amp before apex (APEX ver not actively maintained)
if has_native_amp:
args.native_amp = True
elif has_apex:
args.apex_amp = True
if args.apex_amp and has_apex:
use_amp = "apex"
elif args.native_amp and has_native_amp:
use_amp = "native"
elif args.apex_amp or args.native_amp:
_logger.warning(
"Neither APEX or native Torch AMP is available, using float32. "
"Install NVIDA apex or upgrade to PyTorch 1.6"
)
random_seed(args.seed, args.rank)
model = create_model(
args.model,
pretrained=args.pretrained,
num_classes=args.num_classes,
drop_rate=args.drop,
drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path
drop_path_rate=args.drop_path,
drop_block_rate=args.drop_block,
global_pool=args.gp,
bn_tf=args.bn_tf,
bn_momentum=args.bn_momentum,
bn_eps=args.bn_eps,
scriptable=args.torchscript,
checkpoint_path=args.initial_checkpoint,
)
if args.num_classes is None:
assert hasattr(
model, "num_classes"
), "Model must have `num_classes` attr if not set on cmd line/config."
args.num_classes = (
model.num_classes
)
if args.distributed:
torch.distributed.barrier()
if args.local_rank == 0:
_logger.info(
f"Model {safe_model_name(args.model)} created, param count:{sum([m.numel() for m in model.parameters()])}"
)
print(model)
dllogger_dir = os.path.dirname(args.dllogger_name)
if dllogger_dir and not os.path.exists(dllogger_dir):
os.makedirs(dllogger_dir, exist_ok=True)
log_path = args.dllogger_name
dllogger.init(
backends=[
dllogger.JSONStreamBackend(verbosity=1, filename=log_path, append=True),
dllogger.JSONStreamBackend(verbosity=1, filename=unique_log_fpath(log_path)),
dllogger.StdOutBackend(verbosity=0),
]
)
else:
dllogger.init(backends=[])
dllogger.metadata("train_loss", {"unit": None})
dllogger.metadata("items_sec", {"unit": "images/s"})
dllogger.metadata("val_loss", {"unit": None})
dllogger.metadata("val_top1", {"unit": None})
dllogger.metadata("val_top5", {"unit": None})
dllogger.metadata("top1", {"unit": None})
dllogger.metadata("top5", {"unit": None})
dllogger.metadata("average_ips", {"unit": "images/s"})
data_config = resolve_data_config(
vars(args), model=model, verbose=args.local_rank == 0
)
# setup augmentation batch splits for contrastive loss or split bn
num_aug_splits = 0
if args.aug_splits > 0:
assert args.aug_splits > 1, "A split of 1 makes no sense"
num_aug_splits = args.aug_splits
# enable split bn (separate bn stats per batch-portion)
if args.split_bn:
assert num_aug_splits > 1 or args.resplit
model = convert_splitbn_model(model, max(num_aug_splits, 2))
# move model to GPU, enable channels last layout if set
model.cuda()
if args.channels_last:
model = model.to(memory_format=torch.channels_last)
# setup synchronized BatchNorm for distributed training
if args.distributed and args.sync_bn:
assert not args.split_bn
if has_apex and use_amp != "native":
# Apex SyncBN preferred unless native amp is activated
model = convert_syncbn_model(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if args.local_rank == 0:
_logger.info(
"Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using "
"zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled."
)
if args.torchscript:
assert not use_amp == "apex", "Cannot use APEX AMP with torchscripted model"
assert not args.sync_bn, "Cannot use SyncBatchNorm with torchscripted model"
model = torch.jit.script(model)
optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args))
# setup automatic mixed-precision (AMP) loss scaling and op casting
amp_autocast = suppress # do nothing
loss_scaler = None
if use_amp == "apex":
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
loss_scaler = ApexScaler()
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX AMP. Training in mixed precision.")
elif use_amp == "native":
amp_autocast = torch.cuda.amp.autocast
loss_scaler = NativeScaler()
if args.local_rank == 0:
_logger.info("Using native Torch AMP. Training in mixed precision.")
else:
if args.local_rank == 0:
_logger.info("AMP not enabled. Training in float32.")
# optionally resume from a checkpoint
resume_epoch = None
if args.resume and os.path.isfile(args.resume):
resume_epoch = resume_checkpoint(
model,
args.resume,
optimizer=None if args.no_resume_opt else optimizer,
loss_scaler=None if args.no_resume_opt else loss_scaler,
log_info=args.local_rank == 0,
)
elif args.resume and not os.path.isfile(args.resume):
print("Warning, resume indicated, but file not found, starting training over")
# setup exponential moving average of model weights, SWA could be used here too
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEmaV2(
model,
decay=args.model_ema_decay,
device="cpu" if args.model_ema_force_cpu else None,
)
if args.resume and os.path.isfile(args.resume):
load_checkpoint(model_ema.module, args.resume, use_ema=True)
# setup distributed training
if args.distributed:
if has_apex and use_amp != "native":
# Apex DDP preferred unless native amp is activated
if args.local_rank == 0:
_logger.info("Using NVIDIA APEX DistributedDataParallel.")
model = ApexDDP(model, delay_allreduce=True)
else:
if args.local_rank == 0:
_logger.info("Using native Torch DistributedDataParallel.")
model = NativeDDP(
model, device_ids=[args.local_rank]
) # can use device str in Torch >= 1.1
# NOTE: EMA model does not need to be wrapped by DDP
# setup learning rate schedule and starting epoch
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
start_epoch = 0
if args.start_epoch is not None:
# a specified start_epoch will always override the resume epoch
start_epoch = args.start_epoch
elif resume_epoch is not None:
start_epoch = resume_epoch
if lr_scheduler is not None and start_epoch > 0:
lr_scheduler.step(start_epoch)
if args.local_rank == 0:
_logger.info("Scheduled epochs: {}".format(num_epochs))
# create the train and eval datasets
dataset_train = create_dataset(
args.dataset,
root=args.data_dir,
split=args.train_split,
is_training=True,
batch_size=args.batch_size,
repeats=args.epoch_repeats,
)
dataset_eval = create_dataset(
args.dataset,
root=args.data_dir,
split=args.val_split,
is_training=False,
batch_size=args.batch_size,
)
# setup mixup / cutmix
collate_fn = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0.0 or args.cutmix_minmax is not None
if mixup_active:
mixup_args = dict(
mixup_alpha=args.mixup,
cutmix_alpha=args.cutmix,
cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob,
switch_prob=args.mixup_switch_prob,
mode=args.mixup_mode,
label_smoothing=args.smoothing,
num_classes=args.num_classes,
)
if args.prefetcher:
assert (
not num_aug_splits
) # collate conflict (need to support deinterleaving in collate mixup)
collate_fn = FastCollateMixup(**mixup_args)
else:
mixup_fn = Mixup(**mixup_args)
# wrap dataset in AugMix helper
if num_aug_splits > 1:
dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits)
# create data loaders w/ augmentation pipeline
train_interpolation = args.train_interpolation
if args.no_aug or not train_interpolation:
train_interpolation = data_config["interpolation"]
_logger.info("Before creating loader from GPU: %s", args.local_rank)
student_res = data_config["input_size"]
useTwoRes = False
if student_res != data_config["input_size"]:
useTwoRes = True
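# Note: as written, student_res mirrors data_config["input_size"] here, so
# useTwoRes remains False; train_one_epoch only resizes the batch for the
# student model when useTwoRes is set.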
loader_train = create_loader(
dataset_train,
input_size=data_config["input_size"],
batch_size=args.batch_size,
is_training=True,
use_prefetcher=args.prefetcher,
no_aug=args.no_aug,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
re_split=args.resplit,
scale=args.scale,
ratio=args.ratio,
hflip=args.hflip,
vflip=args.vflip,
color_jitter=args.color_jitter,
auto_augment=args.aa,
num_aug_splits=num_aug_splits,
interpolation=train_interpolation,
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
collate_fn=collate_fn,
pin_memory=args.pin_mem,
use_multi_epochs_loader=args.use_multi_epochs_loader,
)
teacher_res = (3, args.teacher_img_size, args.teacher_img_size)
student_res = (3, args.img_size, args.img_size)
print(
"teacher eval resolution: ",
teacher_res,
" student resolution:",
student_res,
" train resolution:",
data_config["input_size"],
)
# setup loss function
if args.jsd:
assert num_aug_splits > 1 # JSD only valid with aug splits set
train_loss_fn = JsdCrossEntropy(
num_splits=num_aug_splits, smoothing=args.smoothing
).cuda()
elif mixup_active:
# smoothing is handled with mixup target transform
train_loss_fn = SoftTargetCrossEntropy().cuda()
elif args.smoothing:
train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing).cuda()
else:
train_loss_fn = nn.CrossEntropyLoss().cuda()
validate_loss_fn = nn.CrossEntropyLoss().cuda()
# setup the distillation
teacher_model = None
if args.enable_distill:
loader_teacher_eval = create_loader(
dataset_eval,
input_size=teacher_res,
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config["crop_pct"],
pin_memory=args.pin_mem,
)
if args.local_rank == 0:
_logger.info("#" * 10)
_logger.info("create distillation")
_logger.info("model: %s", args.teacher)
_logger.info("checkpoint: %s", args.teacher_checkpoint)
_logger.info("teacher image size: %s", args.teacher_img_size)
_logger.info("#" * 10)
assert args.teacher != ""
_logger.info("#####GPU: %s, reached the barrier", args.local_rank)
if args.distributed:
torch.distributed.barrier()
teacher_model = create_model(
args.teacher, pretrained=True, num_classes=args.num_classes, in_chans=3
)
teacher_model.cuda()
teacher_model.eval()
if args.test_teacher:
print("==start testing the teacher==")
if args.local_rank == 0 and args.test_teacher:
eval_metrics = validate(
teacher_model, loader_teacher_eval, validate_loss_fn, args
)
print(
"teacher evaluation results:",
" loss:",
eval_metrics["loss"],
" top1:",
eval_metrics["top1"],
" top5:",
eval_metrics["top5"],
)
if args.distributed:
torch.distributed.barrier()
loader_eval = create_loader(
dataset_eval,
input_size=student_res,
batch_size=args.validation_batch_size_multiplier * args.batch_size,
is_training=False,
use_prefetcher=args.prefetcher,
interpolation=data_config["interpolation"],
mean=data_config["mean"],
std=data_config["std"],
num_workers=args.workers,
distributed=args.distributed,
crop_pct=data_config["crop_pct"],
pin_memory=args.pin_mem,
)
# setup checkpoint saver and eval metric tracking
eval_metric = args.eval_metric
best_metric = None
best_epoch = None
saver = None
output_dir = None
epoch_throughput = []
if args.local_rank == 0:
if args.experiment:
exp_name = args.experiment
else:
exp_name = "-".join(
[
datetime.now().strftime("%Y%m%d-%H%M%S"),
safe_model_name(args.model),
str(data_config["input_size"][-1]),
]
)
exp_name = "checkpoints"
output_dir = get_outdir(
args.output if args.output else "./output/train", exp_name
)
decreasing = True if eval_metric == "loss" else False
saver = CheckpointSaver(
model=model,
optimizer=optimizer,
args=args,
model_ema=model_ema,
amp_scaler=loss_scaler,
checkpoint_dir=output_dir,
recovery_dir=output_dir,
decreasing=decreasing,
max_history=args.checkpoint_hist,
)
with open(os.path.join(output_dir, "args.yaml"), "w") as f:
f.write(args_text)
try:
for epoch in range(start_epoch, num_epochs):
if args.distributed and hasattr(loader_train.sampler, "set_epoch"):
loader_train.sampler.set_epoch(epoch)
train_metrics = train_one_epoch(
epoch,
model,
loader_train,
optimizer,
train_loss_fn,
args,
lr_scheduler=lr_scheduler,
saver=saver,
output_dir=output_dir,
amp_autocast=amp_autocast,
loss_scaler=loss_scaler,
model_ema=model_ema,
mixup_fn=mixup_fn,
teacher_model=teacher_model,
student_res=student_res,
useTwoRes=useTwoRes,
benchmark_steps=args.benchmark_steps,
)
epoch_throughput.append(train_metrics["items_sec"])
dllogger.log(step=epoch, data={"train_loss": train_metrics["loss"], "items_sec": train_metrics["items_sec"]}, verbosity=1)
dllogger.log(step=(), data={"train_loss": train_metrics["loss"], "items_sec": train_metrics["items_sec"]}, verbosity=1)
if args.distributed and args.dist_bn in ("broadcast", "reduce"):
if args.local_rank == 0:
_logger.info("Distributing BatchNorm running means and vars")
distribute_bn(model, args.world_size, args.dist_bn == "reduce")
eval_metrics = validate(
model, loader_eval, validate_loss_fn, args, amp_autocast=amp_autocast
)
if model_ema is not None and not args.model_ema_force_cpu:
if args.distributed and args.dist_bn in ("broadcast", "reduce"):
distribute_bn(model_ema, args.world_size, args.dist_bn == "reduce")
ema_eval_metrics = validate(
model_ema.module,
loader_eval,
validate_loss_fn,
args,
amp_autocast=amp_autocast,
log_suffix=" (EMA)",
)
eval_metrics = ema_eval_metrics
dllogger.log(step=epoch, data={"val_loss": eval_metrics["loss"], "val_top1": eval_metrics["top1"], "val_top5": eval_metrics["top5"]}, verbosity=1)
dllogger.log(step=(), data={"val_loss": eval_metrics["loss"], "val_top1": eval_metrics["top1"], "val_top5": eval_metrics["top5"]}, verbosity=1)
dllogger.flush()
if lr_scheduler is not None:
# step LR for next epoch
lr_scheduler.step(epoch + 1, eval_metrics[eval_metric])
if output_dir is not None:
update_summary(
epoch,
train_metrics,
eval_metrics,
os.path.join(output_dir, "summary.csv"),
write_header=best_metric is None,
log_wandb=args.log_wandb and has_wandb,
)
if saver is not None:
# save proper checkpoint with eval metric
save_metric = eval_metrics[eval_metric]
best_metric, best_epoch = saver.save_checkpoint(
epoch, metric=save_metric
)
if len(epoch_throughput) > 0:
mean_train_throughput = sum(epoch_throughput) / len(epoch_throughput)
else:
mean_train_throughput = 0
log_metrics = dict(eval_metrics)
log_metrics["average_ips"] = mean_train_throughput
dllogger.log(step=tuple(), data=log_metrics, verbosity=0)
dllogger.flush()
except KeyboardInterrupt:
pass
if best_metric is not None:
_logger.info("*** Best metric: {0} (epoch {1})".format(best_metric, best_epoch))
def train_one_epoch(
epoch,
model,
loader,
optimizer,
loss_fn,
args,
lr_scheduler=None,
saver=None,
output_dir=None,
amp_autocast=suppress,
loss_scaler=None,
model_ema=None,
mixup_fn=None,
teacher_model=None,
student_res=None,
useTwoRes=False,
benchmark_steps=None,
):
if teacher_model is not None:
assert student_res is not None
if args.mixup_off_epoch and epoch >= args.mixup_off_epoch:
if args.prefetcher and loader.mixup_enabled:
loader.mixup_enabled = False
elif mixup_fn is not None:
mixup_fn.mixup_enabled = False
second_order = hasattr(optimizer, "is_second_order") and optimizer.is_second_order
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
losses_m = AverageMeter()
model.train()
if teacher_model is not None:
teacher_model.eval()
end = time.time()
last_idx = len(loader) - 1
num_updates = epoch * len(loader)
rate_avg = 0
for batch_idx, (input, target) in enumerate(loader):
last_batch = (batch_idx == last_idx) or (batch_idx == benchmark_steps)
data_time_m.update(time.time() - end)
if not args.prefetcher:
input, target = input.cuda(), target.cuda()
if mixup_fn is not None:
input, target = mixup_fn(input, target)
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
if teacher_model is not None and useTwoRes:
student_input = F.interpolate(
input, size=(student_res[1], student_res[2]), mode="bicubic"
)
with amp_autocast():
if teacher_model is not None and useTwoRes:
output = model(student_input)
else:
output = model(input)
loss = loss_fn(output, target)
if teacher_model is not None:
with torch.no_grad():
soft_logits = teacher_model(input).detach()
soft_label = F.softmax(soft_logits, dim=1)
kd_loss = cross_entropy_loss_with_soft_target(output, soft_label)
loss = kd_loss + loss
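# Knowledge distillation: the teacher's softmax output serves as a soft target,
# and its cross entropy with the student logits is added (unweighted) to the
# hard-label loss computed above.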
if not args.distributed:
losses_m.update(loss.item(), input.size(0))
optimizer.zero_grad()
if loss_scaler is not None:
loss_scaler(
loss,
optimizer,
clip_grad=args.clip_grad,
clip_mode=args.clip_mode,
parameters=model_parameters(
model, exclude_head="agc" in args.clip_mode
),
create_graph=second_order,
)
else:
loss.backward(create_graph=second_order)
if args.clip_grad is not None:
dispatch_clip_grad(
model_parameters(model, exclude_head="agc" in args.clip_mode),
value=args.clip_grad,
mode=args.clip_mode,
)
optimizer.step()
if model_ema is not None:
model_ema.update(model)
torch.cuda.synchronize()
num_updates += 1
batch_time_m.update(time.time() - end)
if last_batch or batch_idx % args.log_interval == 0:
lrl = [param_group["lr"] for param_group in optimizer.param_groups]
lr = sum(lrl) / len(lrl)
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
losses_m.update(reduced_loss.item(), input.size(0))
rate_avg = input.size(0) * args.world_size / batch_time_m.avg
if args.local_rank == 0:
_logger.info(
"{} Train: {} [{:>4d}/{} ({:>3.0f}%)] "
"Loss: {loss.val:>9.6f} ({loss.avg:>6.4f}) "
"Time: {batch_time.val:.3f}s, {rate:>7.2f}/s "
"({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) "
"LR: {lr:.3e} "
"Data: {data_time.val:.3f} ({data_time.avg:.3f})".format(
datetime.now().strftime("%d.%b %Y %H:%M:%S"),
epoch,
batch_idx,
len(loader),
100.0 * batch_idx / last_idx,
loss=losses_m,
batch_time=batch_time_m,
rate=input.size(0) * args.world_size / batch_time_m.val,
rate_avg=input.size(0) * args.world_size / batch_time_m.avg,
lr=lr,
data_time=data_time_m,
)
)
if args.save_images and output_dir:
torchvision.utils.save_image(
input,
os.path.join(output_dir, "train-batch-%d.jpg" % batch_idx),
padding=0,
normalize=True,
)
if (
saver is not None
and args.recovery_interval
and (last_batch or (batch_idx + 1) % args.recovery_interval == 0)
):
saver.save_recovery(epoch, batch_idx=batch_idx)
if lr_scheduler is not None:
lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg)
end = time.time()
# end for
if (batch_idx == benchmark_steps):
break
if hasattr(optimizer, "sync_lookahead"):
optimizer.sync_lookahead()
return OrderedDict([("loss", losses_m.avg), ("items_sec", rate_avg)])
def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=""):
batch_time_m = AverageMeter()
losses_m = AverageMeter()
top1_m = AverageMeter()
top5_m = AverageMeter()
model.eval()
end = time.time()
last_idx = len(loader) - 1
with torch.no_grad():
for batch_idx, (input, target) in enumerate(loader):
last_batch = batch_idx == last_idx
if not args.prefetcher:
input = input.cuda()
target = target.cuda()
if args.channels_last:
input = input.contiguous(memory_format=torch.channels_last)
with amp_autocast():
output = model(input)
if isinstance(output, (tuple, list)):
output = output[0]
# augmentation reduction
reduce_factor = args.tta
if reduce_factor > 1:
output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2)
target = target[0 : target.size(0) : reduce_factor]
loss = loss_fn(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data, args.world_size)
acc1 = reduce_tensor(acc1, args.world_size)
acc5 = reduce_tensor(acc5, args.world_size)
else:
reduced_loss = loss.data
torch.cuda.synchronize()
losses_m.update(reduced_loss.item(), input.size(0))
top1_m.update(acc1.item(), output.size(0))
top5_m.update(acc5.item(), output.size(0))
batch_time_m.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (
last_batch or batch_idx % args.log_interval == 0
):
log_name = "Test" + log_suffix
_logger.info(
"{0}: [{1:>4d}/{2}] "
"Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) "
"Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) "
"Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) "
"Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})".format(
log_name,
batch_idx,
last_idx,
batch_time=batch_time_m,
loss=losses_m,
top1=top1_m,
top5=top5_m,
)
)
metrics = OrderedDict(
[("loss", losses_m.avg), ("top1", top1_m.avg), ("top5", top5_m.avg)]
)
return metrics
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/train.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Evaluating the latency and accuracy of GPUNet
--------Configurations of GPUNet--------
## Without distillation
# GPUNet-2
modelJSON, cpkPath = get_configs(batch=1, latency="1.75ms", gpuType="GV100")
# GPUNet-1
modelJSON, cpkPath = get_configs(batch=1, latency="0.85ms", gpuType="GV100")
# GPUNet-0
modelJSON, cpkPath = get_configs(batch=1, latency="0.65ms", gpuType="GV100")
## With distillation
# GPUNet-D2
modelJSON, cpkPath = get_configs(batch=1, latency="2.25ms-D", gpuType="GV100")
# GPUNet-D1
modelJSON, cpkPath = get_configs(batch=1, latency="1.25ms-D", gpuType="GV100")
# GPUNet-P0
modelJSON, cpkPath = get_configs(batch=1, latency="0.5ms-D", gpuType="GV100")
# GPUNet-P1
modelJSON, cpkPath = get_configs(batch=1, latency="0.8ms-D", gpuType="GV100")
----------------------------------------
What can you do?
1. Test GPUNet accuracy.
2. Benchmarking the latency:
Export GPUNet to ONNX, then 'trtexec --onnx=gpunet.onnx --fp16'.
We reported the median GPU compute time. Here is an example,
GPU Compute Time: ..., median = 0.752686 ms, ...
"""
from configs.model_hub import get_configs, get_model_list
from models.gpunet_builder import GPUNet_Builder
modelJSON, cpkPath = get_configs(batch=1, latency="0.65ms", gpuType="GV100")
print(get_model_list(1))
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
builder.export_onnx(model)
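# export_onnx writes an ONNX copy of the network (presumably gpunet.onnx,
# matching the trtexec note in the docstring above) that can be benchmarked
# with 'trtexec --onnx=gpunet.onnx --fp16'.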
print(model, model.imgRes)
builder.test_model(
model,
testBatch=200,
checkpoint=cpkPath,
imgRes=(3, model.imgRes, model.imgRes),
dtype="fp16",
crop_pct=1,
val_path="/root/data/imagenet/val",
)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/eval.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseSaver,
ExportFormat,
ModelInputType,
TorchJit,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, savers # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("export_model")
INPUT_MODEL_TYPES = [
ModelInputType.TF_ESTIMATOR,
ModelInputType.TF_KERAS,
ModelInputType.PYT,
]
OUTPUT_MODEL_TYPES = [
ExportFormat.TF_SAVEDMODEL,
ExportFormat.TORCHSCRIPT,
ExportFormat.ONNX,
]
TORCH_JIT_TYPES = [
TorchJit.NONE,
TorchJit.TRACE,
TorchJit.SCRIPT,
]
def _get_args():
parser = argparse.ArgumentParser(
description="Script for exporting models from supported frameworks.", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input python module", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument(
"--torch-jit",
help="Torch Jit",
choices=[f.value for f in TORCH_JIT_TYPES],
required=False,
default=None,
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value:
saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
dataloader_fn = None
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
print(args.input_path)
print(os.path.isfile(args.input_path))
print(args.output_type)
model = loader.load(
args.input_path,
dataloader_fn=dataloader_fn,
output_type=args.output_type,
torch_jit=args.torch_jit,
)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if args.input_type == ModelInputType.PYT.value and args.output_type == ExportFormat.ONNX.value:
saver_type = f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}"
else:
saver_type = args.output_type
Saver: BaseSaver = savers.get(saver_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path, dataloader_fn)
if __name__ == "__main__":
main()
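# A typical invocation looks like the sketch below; the flag values are
# illustrative, since the accepted choices come from the ModelInputType and
# ExportFormat enums and from the dataloader's own arguments:
#
#   python -m triton.export_model \
#       --input-path triton/model.py --input-type pyt \
#       --output-path gpunet.onnx --output-type onnx \
#       --dataloader triton/dataloader.py ...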
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/export_model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import pickle
import torch
from triton.deployment_toolkit.core import BaseMetricsCalculator
from timm.utils import accuracy, AverageMeter
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self):
self.top1 = AverageMeter()
self.top5 = AverageMeter()
@property
def metrics(self):
return {'top1': self.top1.avg, 'top5': self.top5.avg}
def update(
self,
ids,
y_pred,
x,
y_real,
):
output = torch.from_numpy(y_pred["OUTPUT__0"]).float()
label = torch.from_numpy(y_real['OUTPUT__0'][:,0]).long()
acc1, acc5 = accuracy(output.detach(), label, topk=(1, 5))
self.top1.update(acc1.item(), output.shape[0])
self.top5.update(acc5.item(), output.shape[0])
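# Minimal usage sketch (array names are illustrative; shapes follow the dump
# files produced by the inference scripts: logits of shape [N, 1000] and a
# tiled label tensor whose first column carries the class index):
#
#   calc = MetricsCalculator()
#   calc.update(ids=None, y_pred={"OUTPUT__0": logits}, x=None,
#               y_real={"OUTPUT__0": tiled_labels})
#   print(calc.metrics)  # {'top1': ..., 'top5': ...}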
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics using the defined `MetricsCalculator` class.
Data provided to `MetricsCalculator` are obtained from dump files
stored in the directory pointed to by the `--dump-dir` argument.
These files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
Output data is stored in the CSV file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import JsonDumpReader
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help="Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
reader = JsonDumpReader(args.dump_dir)
for ids, x, y_true, y_pred in reader.iterate_over(["ids", "inputs", "labels", "outputs"]):
ids = list(ids["ids"]) if ids is not None else None
metrics_calculator.update(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = metrics_calculator.metrics
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/calculate_metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends requests with data obtained from the pointed data loader and dumps the received data into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Currently, the client communicates with the Triton server asynchronously using the gRPC protocol.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import logging
import time
import traceback
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import JsonDumpWriter
from .deployment_toolkit.triton_inference_runner import TritonInferenceRunner
LOGGER = logging.getLogger("run_inference_on_triton")
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=True)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument(
"--response-wait-time", required=False, help="Maximal time to wait for response", default=120, type=float
)
parser.add_argument(
"--max-unresponded-requests",
required=False,
help="Maximal number of unresponded requests",
default=128,
type=int,
)
parser.add_argument(
"--synchronous", help="Enable synchronous calls to Triton Server", action="store_true", default=False
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
try:
runner = TritonInferenceRunner(
server_url=args.server_url,
model_name=args.model_name,
model_version=args.model_version,
dataloader_fn=dataloader_fn,
verbose=False,
response_wait_time=args.response_wait_time,
max_unresponded_requests=args.max_unresponded_requests,
synchronous=args.synchronous,
)
except Exception as e:
message = traceback.format_exc()
LOGGER.error(f"Encountered exception \n{message}")
raise e
with JsonDumpWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
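# Each batch ends up in the dump directory with the model responses under
# "outputs", the request ids under "ids", and, when the corresponding flags are
# set, the raw "inputs" and reference "labels"; calculate_metrics.py later reads
# these back with JsonDumpReader.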
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/run_inference_on_triton.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from timm.models.helpers import load_checkpoint
import os
import json
from models.gpunet_builder import GPUNet_Builder
def update_argparser(parser):
parser.add_argument(
"--config", type=str, required=True, help="Network to deploy")
parser.add_argument(
"--checkpoint", type=str, help="The checkpoint of the model. ")
parser.add_argument("--precision", type=str, default="fp32",
choices=["fp32", "fp16"], help="Inference precision")
parser.add_argument(
"--is-prunet", type=bool, required=True, help="Bool on whether network is a prunet")
def get_model(**model_args):
dtype = model_args['precision']
checkpoint = model_args['checkpoint']
configPath = model_args['config']
with open(configPath) as configFile:
modelJSON = json.load(configFile)
configFile.close()
builder = GPUNet_Builder()
model = builder.get_model(modelJSON)
if dtype == 'fp16':
dtype = torch.float16
elif dtype == 'fp32':
dtype = torch.float32
else:
raise NotImplementedError
if model_args['is_prunet'] == "True":
model.load_state_dict(torch.load(checkpoint))
else:
load_checkpoint(model, checkpoint, use_ema=True)
model = model.to('cuda', dtype)
model.eval()
tensor_names = {"inputs": ["INPUT__0"],
"outputs": ["OUTPUT__0"]}
return model, tensor_names
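# Sketch of how the exporter consumes this entry point (argument values are
# placeholders; they normally arrive via the --config/--checkpoint/--precision/
# --is-prunet command line flags):
#
#   model, tensor_names = get_model(config="<path to model JSON>",
#                                   checkpoint="<path to checkpoint>",
#                                   precision="fp16", is_prunet="False")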
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/model.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To infer the model on the framework runtime, you can use the `run_inference_on_fw.py` script.
It runs inference locally on data obtained from the pointed data loader and saves the received data into dump files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.dump import JsonDumpWriter # noqa: E402 module level import not at top of file
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, JsonDumpWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info("Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info("Inference finished")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/run_inference_on_fw.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To measure the inference performance of the model on the framework runtime, you can use the `run_performance_on_fw.py` script.
It runs inference locally on data obtained from the pointed data loader and calculates throughput and latency.
The results are stored in the path pointed to by `--results-path` as a CSV file.
Example call:
```shell script
python ./triton/run_performance_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-sizes 32 \
--results-path results.csv
```
"""
import argparse
import csv
import logging
import os
from pathlib import Path
from typing import List
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from .deployment_toolkit.args import ArgParserGenerator # noqa: E402 module level import not at top of file
from .deployment_toolkit.core import ( # noqa: E402 module level import not at top of file
DATALOADER_FN_NAME,
BaseLoader,
BaseRunner,
load_from_file,
)
from .deployment_toolkit.extensions import loaders, runners # noqa: E402 module level import not at top of file
LOGGER = logging.getLogger("run_performance_on_fw")
def _save_result(results_path: str, results: List):
LOGGER.info(f"Storing results to {results_path}")
item = results[0]
with open(results_path, "w") as f:
writer = csv.DictWriter(f, fieldnames=list(item.keys()))
writer.writeheader()
for result in results:
writer.writerow(result)
LOGGER.info("Done")
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(
description="Measure inference performance of given model in framework container", allow_abbrev=False
)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument(
"--batch-sizes",
type=int,
default=[1],
help="List of batch sizes to test.",
nargs="*",
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of performance iterations per batch size.",
)
parser.add_argument(
"--results-path",
help="Path to results file where performance result will be stored",
required=True,
)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
if args.iterations < 10:
raise ValueError("The minimal number of iterations for performance measurement is 10")
if not args.results_path.endswith(".csv"):
raise ValueError("Results path for results is invalid. Please, provide the CSV file name. Example: results.csv")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
results = []
with runner.init_inference(model=model) as runner_session:
for batch_size in args.batch_sizes:
LOGGER.info(f"Running performance measurement for batch size {batch_size}.")
# WAR - override batch size for dataloader
args.batch_size = batch_size
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.debug("Data loader initialized.")
for _, x, _ in dataloader_fn():
input = x
break
runner_session.start_measurement()
LOGGER.info("Running measurement")
for idx in range(args.iterations):
LOGGER.debug(f"Iteration {idx}")
runner_session(input)
throughput, latency = runner_session.stop_measurement(batch_size=batch_size)
LOGGER.info("Done")
LOGGER.info(f"Throughput: {throughput:.2f} [infer/s]")
LOGGER.info(f"Latency: {latency:.2f} [ms]")
data = {
"Batch": batch_size,
"Throughput (infer/sec)": f"{throughput:.2f}",
"Latency (ms)": f"{latency:.2f}",
}
results.append(data)
if not results:
raise RuntimeError("No valid measurement performed.")
_save_result(args.results_path, results)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/run_performance_on_fw.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import json
from timm.data import create_dataset, create_loader
import torch
def update_argparser(parser):
parser.add_argument(
"--config", type=str, required=True, help="Network to deploy")
parser.add_argument("--val-path", type=str, help="Path to dataset to be used", required=True)
parser.add_argument("--batch-size", type=int, help="Batch size to use", default=10)
parser.add_argument("--precision", type=str, default="fp32",
choices=["fp32", "fp16"], help="Inference precision")
parser.add_argument(
"--is-prunet", type=bool, required=True, help="Bool on whether network is a prunet")
def get_dataloader_fn(config, val_path, batch_size, precision, is_prunet):
imagenet_val_path = val_path
dataset = create_dataset( root=imagenet_val_path, name='', split='validation', load_bytes=False, class_map='')
with open(config) as configFile:
modelJSON = json.load(configFile)
configFile.close()
config = modelJSON
assert len(config) > 0
dataLayer = config[0]
assert dataLayer['layer_type'] == 'data'
assert dataLayer['img_resolution'] > 0
imgRes = dataLayer['img_resolution']
crop_pct = 1.0
if is_prunet == "True":
crop_pct = 0.875
data_config = {'input_size': (3, imgRes, imgRes), 'interpolation': 'bicubic', 'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225), 'crop_pct': crop_pct}
batch_size = int(batch_size)
loader = create_loader(
dataset,
input_size=data_config['input_size'],
batch_size=batch_size,
use_prefetcher=True,
interpolation=data_config['interpolation'],
mean=data_config['mean'],
std=data_config['std'],
num_workers=1,
crop_pct=data_config['crop_pct'],
pin_memory=False,
tf_preprocessing=False)
dtype = precision
if dtype == 'fp16':
dtype = torch.float16
elif dtype == 'fp32':
dtype = torch.float32
else:
raise NotImplementedError
def _get_dataloader():
for batch_idx, (input, target) in enumerate(loader):
x = {"INPUT__0": input.to(dtype).cpu().numpy()}
y_real = {"OUTPUT__0": np.tile(target.to(dtype).cpu().numpy()[:, np.newaxis], (1, 1000))}
ids = np.tile(batch_idx, target.shape[0])
yield (ids, x, y_real)
return _get_dataloader | DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/dataloader.py |
#!/usr/bin/env python3
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import pathlib
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .deployment_toolkit.core import EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .deployment_toolkit.triton_performance_runner import TritonPerformanceRunner
LOGGER = logging.getLogger("run_performance_on_triton")
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model-name",
type=str,
required=True,
help="Name of the model to test",
)
parser.add_argument(
"--result-path",
type=pathlib.Path,
required=True,
help="Path where results files is stored.",
)
parser.add_argument(
"--server-url",
type=str,
default="http://127.0.0.1:8000",
help="Url to Triton server",
)
parser.add_argument(
"--model-version",
type=str,
default=1,
help="Version of model",
)
parser.add_argument(
"--input-data",
type=str,
default="random",
help="Input data to perform profiling.",
)
parser.add_argument(
"--input-shapes",
action="append",
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument(
"--batch-sizes",
type=int,
default=[1],
help="List of batch sizes to tests.",
nargs="*",
)
parser.add_argument(
"--concurrency",
type=int,
default=[1],
help="List of concurrency modes.",
nargs="*",
)
parser.add_argument(
"--measurement-mode",
choices=[item.value for item in MeasurementMode],
default=MeasurementMode.COUNT_WINDOWS.value,
type=str,
help="Select measurement mode "
"'time_windows' stabilize performance on measurement window. "
"'count_windows' stabilize performance on number of samples.",
)
parser.add_argument(
"--measurement-interval",
help="Time window perf_analyzer will wait to stabilize the measurement",
default=5000,
type=int,
)
parser.add_argument(
"--measurement-request-count",
help="Number of samples on which perf_analyzer will stabilize the measurement",
default=50,
type=int,
)
parser.add_argument(
"--evaluation-mode",
choices=[item.value for item in EvaluationMode],
default=EvaluationMode.OFFLINE.value,
type=str,
help="Select evaluation mode "
"'offline' run offline analysis and use GPU memory to pass tensors. "
"'online' run online analysis and use HTTP protocol.",
)
parser.add_argument(
"--offline-mode",
choices=[item.value for item in OfflineMode],
default=OfflineMode.SYSTEM.value,
type=str,
help="Select offline mode "
"'system' pass tensors through CPU RAM memory. "
"'cuda' pass tensors through GPU RAM memory.",
)
parser.add_argument(
"--output-shared-memory-size",
default=102400,
type=int,
help="Size of memory buffer allocated for output with dynamic shapes in bytes. "
"Has to be equal to maximal size of output tensor.",
)
parser.add_argument(
"--performance-tool",
choices=[item.value for item in PerformanceTool],
default=PerformanceTool.MODEL_ANALYZER.value,
type=str,
help="Select performance tool for measurement mode "
"'model_analyzer' use Model Analyzer "
"'perf_analyzer' use Perf Analyzer",
)
parser.add_argument(
"--model-repository",
default=None,
type=str,
help="Path to model repository. Valid when using Model Analyzer",
)
parser.add_argument(
"--warmup",
help="Enable model warmup before performance test",
action="store_true",
default=False,
)
parser.add_argument(
"--timeout",
help="Timeout for performance analysis",
type=int,
default=None,
required=False,
)
parser.add_argument(
"-v",
"--verbose",
help="Verbose logs",
action="store_true",
default=False,
)
args = parser.parse_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
runner = TritonPerformanceRunner(
server_url=args.server_url,
model_name=args.model_name,
input_data=args.input_data,
input_shapes=args.input_shapes or [],
batch_sizes=args.batch_sizes,
measurement_mode=MeasurementMode(args.measurement_mode),
measurement_interval=args.measurement_interval,
measurement_request_count=args.measurement_request_count,
concurrency=args.concurrency,
evaluation_mode=EvaluationMode(args.evaluation_mode),
offline_mode=OfflineMode(args.offline_mode),
output_shared_memory_size=args.output_shared_memory_size,
performance_tool=PerformanceTool(args.performance_tool),
model_repository=args.model_repository,
result_path=args.result_path,
warmup=args.warmup,
timeout=args.timeout,
verbose=args.verbose,
)
runner.run()
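# Example invocation (a sketch; the model name and output path are hypothetical):
#   python triton/run_performance_on_triton.py \
#       --model-name GPUNet --result-path /tmp/triton_performance.csv \
#       --batch-sizes 1 2 4 --concurrency 1 --performance-tool perf_analyzer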
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/run_performance_on_triton.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.pipeline import Pipeline
pipeline = Pipeline()
pipeline.model_export(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
python3 triton/export_model.py \
--input-path triton/model.py \
--input-type pyt \
--output-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-type ${EXPORT_FORMAT} \
--ignore-unknown-parameters \
--onnx-opset 13 \
--torch-jit ${TORCH_JIT} \
\
--config /workspace/gpunet/configs/batch1/GV100/0.5ms-D.json \
--checkpoint ${CHECKPOINT_DIR}/0.5ms-D.pth.tar \
--precision ${EXPORT_PRECISION} \
\
--dataloader triton/dataloader.py \
--is-prunet True \
--val-path ${DATASETS_DIR}/ \
--batch-size 1
""",
)
)
pipeline.model_conversion(
commands=(
r"""
if [[ "${EXPORT_FORMAT}" == "torchscript" ]]; then
export FORMAT_SUFFIX="pt"
else
export FORMAT_SUFFIX="${EXPORT_FORMAT}"
fi
model-navigator convert \
--model-name ${MODEL_NAME} \
--model-path ${SHARED_DIR}/exported_model.${FORMAT_SUFFIX} \
--output-path ${SHARED_DIR}/converted_model \
--target-formats ${FORMAT} \
--target-precisions ${PRECISION} \
--launch-mode local \
--override-workspace \
--verbose \
\
--onnx-opsets 13 \
--max-batch-size ${MAX_BATCH_SIZE} \
--container-version 21.12 \
--max-workspace-size 10000000000 \
--atol OUTPUT__0=100 \
--rtol OUTPUT__0=100
""",
)
)
pipeline.model_deploy(
commands=(
r"""
model-navigator triton-config-model \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--model-version 1 \
--model-path ${SHARED_DIR}/converted_model \
--model-format ${FORMAT} \
--model-control-mode explicit \
--load-model \
--load-model-timeout-s 100 \
--verbose \
\
--backend-accelerator ${BACKEND_ACCELERATOR} \
--tensorrt-precision ${PRECISION} \
--tensorrt-capture-cuda-graph \
--tensorrt-max-workspace-size 10000000000 \
--max-batch-size ${MAX_BATCH_SIZE} \
--batching ${MODEL_BATCHING} \
--preferred-batch-sizes ${MAX_BATCH_SIZE} \
--engine-count-per-device gpu=${NUMBER_OF_MODEL_INSTANCES}
""",
)
)
pipeline.triton_performance_offline_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 2 4 8 16 32 64 \
--concurrency 1 \
--evaluation-mode offline \
--measurement-request-count 10 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_offline.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_offline.csv",
)
pipeline.triton_performance_online_tests(
commands=(
r"""
python triton/run_performance_on_triton.py \
--model-repository ${MODEL_REPOSITORY_PATH} \
--model-name ${MODEL_NAME} \
--input-data random \
--batch-sizes 1 \
--concurrency 8 16 24 32 40 48 56 64 72 80 88 96 104 112 120 128 136 144 152 160 168 176 184 192 200 208 216 224 232 240 248 256 \
--evaluation-mode online \
--measurement-request-count 500 \
--warmup \
--performance-tool perf_analyzer \
--result-path ${SHARED_DIR}/triton_performance_online.csv
""",
),
result_path="${SHARED_DIR}/triton_performance_online.csv",
) | DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/05ms-D/runner/pipeline_impl.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import pathlib
from typing import List
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...runner.config import Config
from ...runner.executor import Executor
from ...runner.finalizer import ExperimentFinalizer
from ...runner.maintainer import DockerMaintainer
from ...runner.preparer import ExperimentPreparer
from ...runner.runner_proxy import RunnerProxy
from .pipeline_impl import pipeline
class ExperimentRunner(RunnerProxy):
"""
Experiment Runner proxy for runner wrapper
"""
maintainer_cls = DockerMaintainer
executor_cls = Executor
preparer_cls = ExperimentPreparer
finalizer_cls = ExperimentFinalizer
def execute(config_path: str, devices: List[str]):
if len(devices) == 0:
devices = ["0"]
config = Config.from_file(config_path)
runner = ExperimentRunner(config=config, pipeline=pipeline, devices=devices)
runner.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--config-path", type=str, required=True, help="Path to configuration file with details.")
parser.add_argument(
"--devices", type=str, nargs="*", required=False, help="Path to configuration file with details."
)
args = parser.parse_args()
config_path = args.config_path
devices = args.devices
execute(config_path, devices) | DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/05ms-D/runner/__main__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
import time
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
def __str__(self):
return self.value
class BackendAccelerator(Parameter):
NONE = "none"
AMP = "amp"
TRT = "trt"
class ExportPrecision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
class Precision(Parameter):
INT8 = "int8"
FP16 = "fp16"
FP32 = "fp32"
class DeviceKind(Parameter):
CPU = "cpu"
GPU = "gpu"
class ModelInputType(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
PYT = "pyt"
class Format(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
ONNX = "onnx"
TORCHSCRIPT = "torchscript"
TRT = "trt"
FASTERTRANSFORMER = "fastertransformer"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class ExportFormat(Parameter):
TF_SAVEDMODEL = "tf-savedmodel"
TORCHSCRIPT = "torchscript"
ONNX = "onnx"
# deprecated, backward compatibility only
TS_TRACE = "ts-trace"
TS_SCRIPT = "ts-script"
class TorchJit(Parameter):
NONE = "none"
TRACE = "trace"
SCRIPT = "script"
class Model(NamedTuple):
handle: object
# TODO: precision should be removed
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
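# Usage sketch (the path and label below are hypothetical):
#   get_dataloader_fn = load_from_file("triton/dataloader.py", label="dataloader", target=DATALOADER_FN_NAME)
# load_from_file returns None when the module does not define the requested symbol.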
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
        Loads and processes the model from a file based on the given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
self._evaluations = []
self._measurement = False
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def start_measurement(self):
self._measurement = True
self._evaluations = []
def stop_measurement(self, batch_size: int = 1):
LOGGER.info("Removing worst and best results")
evaluations = sorted(self._evaluations)[2:-2]
LOGGER.debug(f"Filtered: {evaluations}")
average_latency_ms = sum(evaluations) / len(evaluations)
LOGGER.debug(f"Average latency: {average_latency_ms:.2f} [ms]")
throughput = (1000.0 / average_latency_ms) * batch_size
LOGGER.debug(f"Throughput: {throughput:.2f} [infer/sec]")
self._measurement = False
return throughput, average_latency_ms
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class TimeMeasurement:
def __init__(self, session: BaseRunnerSession):
self._session = session
self._start = 0
self._end = 0
def __enter__(self):
self._start = time.time()
return self
def __exit__(self, exc_type, exc_value, traceback):
if not self._session._measurement:
return
self._end = time.time()
diff = (self._end - self._start) * 1000.0
LOGGER.debug(f"Iteration time {diff:.2f} [ms]")
self._session._evaluations.append(diff)
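# Minimal sketch of how TimeMeasurement records latencies (assuming `session` is a concrete BaseRunnerSession):
#   session.start_measurement()
#   for _ in range(10):
#       with TimeMeasurement(session):
#           y_pred = session(x)  # timed only while measurement is active
#   throughput, latency_ms = session.stop_measurement(batch_size=1)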
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
y_real: input ground truth as dict where key is output name and value is output value
Returns:
dictionary where key is metric name and value is its value
"""
pass
@abc.abstractmethod
def update(
self,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
):
pass
@property
@abc.abstractmethod
def metrics(self) -> Dict[str, Any]:
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
class MeasurementMode(Enum):
"""
Available measurement stabilization modes
"""
COUNT_WINDOWS = "count_windows"
TIME_WINDOWS = "time_windows"
class PerformanceTool(Enum):
"""
Available performance evaluation tools
"""
MODEL_ANALYZER = "model_analyzer"
PERF_ANALYZER = "perf_analyzer"
class EvaluationMode(Enum):
"""
Available evaluation modes
"""
OFFLINE = "offline"
ONLINE = "online"
class OfflineMode(Enum):
"""
Available offline mode for memory
"""
SYSTEM = "system"
CUDA = "cuda"
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/core.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import json
import pickle
import threading
from pathlib import Path
from typing import Dict, Iterator, List, Union
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def _validate_batch(name: str, value: Union[list, np.ndarray]):
if not isinstance(value, (list, np.ndarray)):
raise ValueError(f"Values shall be lists or np.ndarrays; current type {type(value)}")
def _validate_prefix_data(prefix_data: Dict[str, List[np.ndarray]]):
batch_sizes_per_io_name = {name: [len(batch) for batch in batches] for name, batches in prefix_data.items()}
names = list(batch_sizes_per_io_name)
for io_name in names:
for batch_idx, batch_size in enumerate(batch_sizes_per_io_name[io_name]):
if not all([batch_sizes_per_io_name[other_name][batch_idx] == batch_size for other_name in names]):
non_equal_batch_sizes = {
other_name: batch_sizes_per_io_name[other_name][batch_idx] for other_name in names
}
non_equal_batch_sizes_str = ", ".join(
[f"{name}={batch_size}" for name, batch_size in non_equal_batch_sizes.items()]
)
raise ValueError(
"All inputs/outputs should have same number of batches with equal batch_size. "
f"At batch_idx={batch_idx} there are batch_sizes: {non_equal_batch_sizes_str}"
)
    # ensure each io has the same number of batches with equal batch sizes
def _get_nitems_and_batches(prefix_data: Dict[str, List[np.ndarray]]):
nitems = 0
nbatches = 0
if prefix_data:
nitems_per_io_name = {name: sum(len(batch) for batch in batches) for name, batches in prefix_data.items()}
nbatches_per_io_name = {name: len(batches) for name, batches in prefix_data.items()}
nitems = list(nitems_per_io_name.values())[0]
nbatches = list(nbatches_per_io_name.values())[0]
return nitems, nbatches
class BaseDumpWriter(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, output_dir: Union[str, Path]):
self._output_dir = Path(output_dir)
# outer dict key is prefix (i.e. input/output/labels/...), inner dict key is input/output name
# list is list of batches
self._items_cache: Dict[str, Dict[str, List[np.ndarray]]] = {}
# key is prefix
self._items_counters: Dict[str, int] = {}
self._cache_lock = threading.RLock()
self._flush_threshold_b = FLUSH_THRESHOLD_B
@property
def cache_size(self):
def _get_bytes_size(name, batch):
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
                batch = np.array(batch)
return batch.nbytes
with self._cache_lock:
return {
prefix: sum(_get_bytes_size(name, batch) for name, batches in data.items() for batch in batches)
for prefix, data in self._items_cache.items()
}
def _append_to_cache(self, prefix, prefix_data):
if prefix_data is None:
return
if not isinstance(prefix_data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
with self._cache_lock:
cached_prefix_data = self._items_cache.setdefault(prefix, {})
for name, batch in prefix_data.items():
_validate_batch(name, batch)
if not isinstance(batch, np.ndarray):
batch = np.array(batch)
cached_batches = cached_prefix_data.setdefault(name, [])
cached_batches += [batch]
def write(self, **kwargs):
with self._cache_lock:
for prefix, prefix_data in kwargs.items():
self._append_to_cache(prefix, prefix_data)
biggest_prefix_data_size = max(self.cache_size.values())
if biggest_prefix_data_size > self._flush_threshold_b:
self.flush()
def flush(self):
with self._cache_lock:
for prefix, prefix_data in self._items_cache.items():
_validate_prefix_data(prefix_data)
output_path = self._output_dir / self._get_filename(prefix)
self._dump(prefix_data, output_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
self._items_counters[prefix] += nitems
self._items_cache = {}
def _get_filename(self, prefix):
idx = self._items_counters.setdefault(prefix, 0)
return f"{prefix}-{idx:012d}{self.FILE_SUFFIX}"
@abc.abstractmethod
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
pass
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
class PickleDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".pkl"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("wb") as pickle_file:
pickle.dump(prefix_data, pickle_file)
class JsonDumpWriter(BaseDumpWriter):
FILE_SUFFIX = ".json"
def _dump(self, prefix_data: Dict[str, List[np.ndarray]], output_path: Path):
repacked_prefix_data = self._format_data(prefix_data)
output_path.parent.mkdir(parents=True, exist_ok=True)
with output_path.open("w") as json_file:
json.dump(repacked_prefix_data, json_file)
def _format_data(self, prefix_data: Dict[str, List[np.ndarray]]) -> Dict:
def _format_batch_for_perf_analyzer_json_format(batch: np.ndarray):
return {
"content": batch.flatten().tolist(),
"shape": list(batch.shape),
"dtype": str(batch.dtype),
}
_, nbatches = _get_nitems_and_batches(prefix_data)
batches = [{} for _ in range(nbatches)]
for io_name, batches_per_io in prefix_data.items():
for batch_idx, batch in enumerate(batches_per_io):
batches[batch_idx][io_name] = _format_batch_for_perf_analyzer_json_format(batch)
return {"data": batches}
class BaseDumpReader(abc.ABC):
FILE_SUFFIX = ".abstract"
def __init__(self, dump_dir: Union[Path, str]):
self._dump_dir = Path(dump_dir)
def get(self, prefix: str) -> Iterator[Dict[str, np.ndarray]]:
dump_files_paths = sorted(self._dump_dir.glob(f"{prefix}*{self.FILE_SUFFIX}"))
for dump_file_path in dump_files_paths:
prefix_data = self._load_file(dump_file_path)
nitems, nbatches = _get_nitems_and_batches(prefix_data)
for batch_idx in range(nbatches):
yield {io_name: prefix_data[io_name][batch_idx] for io_name in prefix_data}
@abc.abstractmethod
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
pass
def iterate_over(self, prefix_list: List[str]) -> Iterator:
iterators = [self.get(prefix) for prefix in prefix_list]
empty_iterators = [False] * len(iterators)
while not all(empty_iterators):
values = [None] * len(iterators)
for idx, iterator in enumerate(iterators):
if empty_iterators[idx]:
continue
try:
values[idx] = next(iterator)
except StopIteration:
empty_iterators[idx] = True
if all(empty_iterators):
break
if not all(empty_iterators):
yield values
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class PickleDumpReader(BaseDumpReader):
FILE_SUFFIX = ".pkl"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as pickle_file:
return pickle.load(pickle_file)
class JsonDumpReader(BaseDumpReader):
FILE_SUFFIX = ".json"
def _load_file(self, dump_file_path: Path) -> Dict[str, List[np.ndarray]]:
with dump_file_path.open("rb") as json_file:
data = json.load(json_file)
return self._repack_data(data)
def _repack_data(self, data: Dict) -> Dict[str, List[np.ndarray]]:
result: Dict[str, List[np.ndarray]] = {}
batches = data["data"]
for batch in batches:
for io_name, batch_as_dict in batch.items():
io_batches = result.setdefault(io_name, [])
flat_array = batch_as_dict["content"]
shape = batch_as_dict["shape"]
dtype = batch_as_dict["dtype"]
batch_as_array = np.array(flat_array).reshape(shape).astype(dtype)
io_batches.append(batch_as_array)
return result
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/dump.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
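# Registration sketch (MyRunner is a hypothetical extension class):
#   class MyRunner(BaseRunner):
#       ...
#   runners.register_extension("my-format", MyRunner)
#   runner_cls = runners.get("my-format")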
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/extensions.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from enum import Enum
from typing import Any, Dict, Tuple
LOGGER = logging.getLogger(__name__)
class TritonClientProtocol(Enum):
"""Describe protocol with which client communicates with Triton"""
GRPC = "grpc"
HTTP = "http"
def parse_server_url(server_url: str) -> Tuple[TritonClientProtocol, str, int]:
DEFAULT_PORTS = {
TritonClientProtocol.HTTP: 8000,
TritonClientProtocol.GRPC: 8001,
}
# extract protocol
server_url_items = server_url.split("://")
if len(server_url_items) != 2:
raise ValueError("Prefix server_url with protocol ex.: grpc://127.0.0.1:8001")
requested_protocol, server_url = server_url_items
requested_protocol = TritonClientProtocol(requested_protocol.lower())
if requested_protocol not in DEFAULT_PORTS:
raise ValueError(f"Unsupported protocol: {requested_protocol}")
# extract host and port
default_port = DEFAULT_PORTS[requested_protocol]
server_url_items = server_url.split(":")
if len(server_url_items) == 1:
host, port = server_url, default_port
elif len(server_url_items) == 2:
host, port = server_url_items
port = int(port)
if port != default_port:
LOGGER.warning(
f"Current server URL is {server_url} while default {requested_protocol} port is {default_port}"
)
else:
raise ValueError(f"Could not parse {server_url}. Example of correct server URL: grpc://127.0.0.1:8001")
return requested_protocol, host, port
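# Illustrative results (addresses are hypothetical):
#   parse_server_url("grpc://127.0.0.1:8001") -> (TritonClientProtocol.GRPC, "127.0.0.1", 8001)
#   parse_server_url("http://localhost")      -> (TritonClientProtocol.HTTP, "localhost", 8000)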
def log_dict(title: str, dict_: Dict[str, Any]):
LOGGER.info(title)
for key, value in dict_.items():
LOGGER.info(f"\t{key} = {value}")
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Callable, Dict, Optional, Union
from model_navigator.utils.cli import is_dict_generic, is_list_generic, is_optional_generic
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
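# Examples (sketch): str2bool("yes") -> True, str2bool("0") -> False;
# any other string raises argparse.ArgumentTypeError.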
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
is_optional = is_optional_generic(parameter.annotation)
if is_optional:
annotation = parameter.annotation.__args__[0] # Optional[cls] will be changed into Union[cls, None]
else:
annotation = parameter.annotation
is_list = is_list_generic(annotation)
is_dict = is_dict_generic(annotation)
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif is_list:
argument_kwargs["type"] = annotation.__args__[0] # List[cls] -> cls
elif is_dict:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
else:
argument_kwargs["type"] = annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
init_method_name = "__init__"
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, init_method_name, None)
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
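# Usage sketch (get_dataloader_fn and the module path are hypothetical):
#   parser = argparse.ArgumentParser()
#   generator = ArgParserGenerator(get_dataloader_fn, module_path="triton/dataloader.py")
#   generator.update_argparser(parser)
#   dataloader_fn = generator.from_args(parser.parse_args())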
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/args.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
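# Example (sketch): format_key("avg_latency ms") -> "Avg Latency Ms"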
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/report.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import queue
import threading
from pathlib import Path
from typing import Optional
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
except ImportError:
import tritonclientutils as client_utils # noqa: F401
try:
import tritonclient.grpc as grpc_client
except ImportError:
import tritongrpcclient as grpc_client
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .base import BaseRunner
LOGGER = logging.getLogger("triton_inference_runner.grpc")
class SyncInferenceRunner(BaseRunner):
def __iter__(self):
LOGGER.debug(f"Connecting to {self._server_url}")
client = grpc_client.InferenceServerClient(url=self._server_url, verbose=self._verbose)
error = self._verify_triton_state(client)
if error:
raise RuntimeError(f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [grpc_client.InferRequestedOutput(name) for name in outputs]
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name].datatype
infer_input = grpc_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
timeout=self._response_wait_t,
)
y_pred = {name: results.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
class AsyncInferenceRunner(BaseRunner):
DEFAULT_MAX_UNRESP_REQS = 128
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: Optional[int] = None,
):
super().__init__(
server_url,
model_name,
model_version,
dataloader=dataloader,
verbose=verbose,
response_wait_time=response_wait_time,
)
self._max_unresp_reqs = (
self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_requests is None else max_unresponded_requests
)
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
request_id = str(ids[0])
NOT_MATCHING_REQUEST_ID_MSG = (
"Error during processing result - request_id doesn't match. This shouldn't have happened."
)
if error:
response_id = error.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
self._errors.append(error)
else:
response_id = result.get_response().id
if response_id != request_id:
raise RuntimeError(NOT_MATCHING_REQUEST_ID_MSG)
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
LOGGER.debug(f"Connecting to {self._server_url}")
client = grpc_client.InferenceServerClient(url=self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name].datatype
infer_input = grpc_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
outputs_req = [grpc_client.InferRequestedOutput(name) for name in outputs]
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
self._sync.notify_all()
break
request_id = str(ids[0])
callback = functools.partial(AsyncInferenceRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
request_id=request_id,
)
self._num_waiting_for += 1
self._sync.notify_all()
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self._max_wait_time)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
self._sync.notify_all()
LOGGER.debug("Finished request thread")
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner/grpc.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from ..utils import TritonClientProtocol, parse_server_url
from .grpc import AsyncInferenceRunner as AsyncGRPCRunner
from .grpc import SyncInferenceRunner as SyncGRPCRunner
from .http import AsyncInferenceRunner as AsyncHTPPRunner
from .http import SyncInferenceRunner as SyncHTTPRunner
class TritonInferenceRunner:
async_runners = {
TritonClientProtocol.GRPC: AsyncGRPCRunner,
TritonClientProtocol.HTTP: AsyncHTPPRunner,
}
sync_runners = {
TritonClientProtocol.GRPC: SyncGRPCRunner,
TritonClientProtocol.HTTP: SyncHTTPRunner,
}
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
dataloader_fn,
verbose: bool = False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: int = 128,
synchronous: bool = False,
):
protocol, host, port = parse_server_url(server_url)
server_url = f"{host}:{port}"
if synchronous:
sync_runner_cls = TritonInferenceRunner.sync_runners[protocol]
self._runner = sync_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
)
else:
async_runner_cls = TritonInferenceRunner.async_runners[protocol]
self._runner = async_runner_cls(
server_url,
model_name,
model_version,
dataloader=dataloader_fn(),
verbose=verbose,
response_wait_time=response_wait_time,
max_unresponded_requests=max_unresponded_requests,
)
def __iter__(self):
return self._runner.__iter__()
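# Usage sketch (server address, model name and dataloader are hypothetical):
#   runner = TritonInferenceRunner("grpc://127.0.0.1:8001", "GPUNet", "1", dataloader_fn=get_dataloader_fn)
#   for ids, x, y_pred, y_real in runner:
#       ...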
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner/runner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonInferenceRunner # noqa: F401
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from pathlib import Path
from typing import Optional
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
except ImportError:
import tritonclientutils as client_utils # noqa: F401
try:
import tritonclient.http as http_client
except (ImportError, RuntimeError):
import tritonhttpclient as http_client
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .base import BaseRunner
LOGGER = logging.getLogger("triton_inference_runner.http")
class HTTPInferenceRunner(BaseRunner):
def _parse_content(self, response):
return json.dumps(response, indent=4)
class SyncInferenceRunner(HTTPInferenceRunner):
def __iter__(self):
LOGGER.debug(f"Connecting to {self._server_url}")
client = http_client.InferenceServerClient(
url=self._server_url,
verbose=self._verbose,
connection_timeout=self._response_wait_t,
network_timeout=self._response_wait_t,
)
error = self._verify_triton_state(client)
if error:
raise RuntimeError(f"Could not communicate to Triton Server: {error}")
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {self._parse_content(model_config)}")
LOGGER.info(f"Model metadata {self._parse_content(model_metadata)}")
inputs = {tm["name"]: tm for tm in model_metadata["inputs"]}
outputs = {tm["name"]: tm for tm in model_metadata["outputs"]}
output_names = list(outputs)
outputs_req = [http_client.InferRequestedOutput(name) for name in outputs]
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name]["datatype"]
infer_input = http_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
results = client.infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
timeout=self._response_wait_t_ms,
)
y_pred = {name: results.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
class AsyncInferenceRunner(HTTPInferenceRunner):
DEFAULT_MAX_UNRESP_REQS = 128
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
max_unresponded_requests: Optional[int] = None,
):
super().__init__(
server_url,
model_name,
model_version,
dataloader=dataloader,
verbose=verbose,
response_wait_time=response_wait_time,
)
self._max_unresp_reqs = (
self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_requests is None else max_unresponded_requests
)
def __iter__(self):
client = http_client.InferenceServerClient(
url=self._server_url,
verbose=self._verbose,
concurrency=self._max_unresp_reqs,
connection_timeout=self._response_wait_t,
network_timeout=self._response_wait_t,
)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {self._parse_content(model_config)}")
LOGGER.info(f"Model metadata {self._parse_content(model_metadata)}")
inputs = {tm["name"]: tm for tm in model_metadata["inputs"]}
outputs = {tm["name"]: tm for tm in model_metadata["outputs"]}
output_names = list(outputs)
async_requests = []
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
datatype = inputs[name]["datatype"]
infer_input = http_client.InferInput(name, data.shape, datatype)
target_np_dtype = client_utils.triton_to_np_dtype(datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
outputs_req = [http_client.InferRequestedOutput(name) for name in outputs]
request_id = str(ids[0])
async_request = client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
request_id=request_id,
timeout=self._response_wait_t_ms,
)
async_requests.append((ids, x, y_real, async_request))
if len(async_requests) > self._max_unresp_reqs:
yield from self._yield_response(async_requests, output_names)
async_requests = []
yield from self._yield_response(async_requests, output_names)
LOGGER.debug("Finished request thread")
def _yield_response(self, async_requests, output_names):
for ids, x, y_real, async_response in async_requests:
result = async_response.get_result()
y_pred = {name: result.as_numpy(name) for name in output_names}
yield ids, x, y_pred, y_real
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner/http.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Optional
LOGGER = logging.getLogger("triton_inference_runner.base")
class BaseRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
response_wait_time: Optional[float] = None,
):
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = int(self.DEFAULT_MAX_RESP_WAIT_S if response_wait_time is None else response_wait_time)
        self._response_wait_t_ms = self._response_wait_t * 1000 * 1000  # seconds converted to microseconds, despite the "_ms" suffix
self._max_wait_time = max(self._response_wait_t, self.DEFAULT_MAX_FINISH_WAIT_S)
self._server_url = server_url
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_inference_runner/base.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
from ..core import (
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
TensorSpec,
TimeMeasurement,
)
from ..extensions import loaders, runners, savers
from .utils import infer_precision
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple(_get_dim(d) for d in shape.dim)
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
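# Example (illustrative): a float32 input named "INPUT__0" with a dynamic batch dimension maps to
#   TensorSpec(name="INPUT__0", dtype="float32", shape=(None, 3, 224, 224))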
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
        # TODO: modifying the ONNX model inputs/outputs probably causes an error in the optimizer
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
with TimeMeasurement(self):
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
# def __call__(self, x: Dict[str, object]):
# io_binding = self._session.io_binding()
#
# for input_name in self._input_names:
# input = x[input_name]
# ortinput = onnxruntime.OrtValue.ortvalue_from_numpy(input, "cuda", 0)
# io_binding.bind_input(input_name, "cuda", 0, input.dtype, input.shape, ortinput.data_ptr())
#
# for output_name in self._output_names:
# io_binding.bind_output(output_name)
#
# with TimeMeasurement(self):
# self._session.run_with_iobinding(io_binding)
# y_pred = io_binding.copy_outputs_to_cpu()
#
# y_pred = dict(zip(self._output_names, y_pred))
#
# return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/library/onnx.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/library/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List, Optional
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
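    # heuristic: ignore integer/bool tensors and return the most common of the remaining node dtypes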
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
def _mark_batch_axis(shape, batch_axis: int):
shape = list(shape)
shape[batch_axis] = -1
return tuple(shape)
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
if batch_size_dim is not None:
input_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in input_shapes.items()}
output_shapes = {name: _mark_batch_axis(shape, batch_size_dim) for name, shape in output_shapes.items()}
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim: Optional[int] = None):
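    # axes marked -1 by get_shapes_with_dynamic_axes become named dynamic axes;
    # the batch axis is additionally marked dynamic for every tensor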
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
for k, shape in all_shapes.items():
for idx, s in enumerate(shape):
if s == -1:
dynamic_axes[k] = {idx: k + "_" + str(idx)}
for k in all_shapes:
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
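    # track per-input min/max shapes and the most common (opt) shape across the dataloader
    # to build TensorRT-style ShapeSpec entries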
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple(min(a, b) for a, b in zip(min_shapes[k], shape))
max_shapes[k] = tuple(max(a, b) for a, b in zip(max_shapes[k], shape))
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/library/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except Exception as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, Format, Model, TensorSpec, TimeMeasurement
from ..extensions import loaders, runners
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
# documentation:
# https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
_NP_DTYPE2TRT_DTYPE = {
np.dtype("float32"): trt.DataType.FLOAT,
np.dtype("float16"): trt.DataType.HALF,
np.dtype("int8"): trt.DataType.INT8,
np.dtype("int32"): trt.DataType.INT32,
np.dtype("bool"): trt.DataType.BOOL,
}
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
LOGGER.debug("Unable to load engine without plugins. Loading plugins.")
trt.init_libnvinfer_plugins(logger=TRT_LOGGER, namespace="")
LOGGER.debug(f"Loading TensorRT engine with plugins from {model_path}")
engine = self._load_engine(model_path)
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = np.dtype(trt.nptype(engine.get_binding_dtype(binding_idx))).name
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
def _load_engine(self, model_path: Path):
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
return engine
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
        # TODO: are CUDA buffers deallocated automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
with TimeMeasurement(self):
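            # copy inputs host->device, execute the engine, then copy outputs device->host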
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
            # TODO: are CUDA buffers deallocated automatically?
self._set_dynamic_input_shapes(x_host)
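            # (re)allocate host/device buffers to match the new binding shapes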
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
binding_idx: int = self._engine[name]
dtype_from_trt_binding = np.dtype(trt.nptype(self._engine.get_binding_dtype(binding_idx)))
dtype_from_model_spec = np.dtype(self._model.outputs[name].dtype)
assert dtype_from_model_spec == dtype_from_trt_binding
y_pred_host[name] = np.zeros(shape, dtype=dtype_from_model_spec)
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
# cast host input into binding dtype
def _cast_input(name, data):
binding_idx: int = self._engine[name]
np_dtype = trt.nptype(self._engine.get_binding_dtype(binding_idx))
return data.astype(np_dtype)
x_host = {name: _cast_input(name, host_input) for name, host_input in x_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/library/tensorrt.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import typing
from collections import Counter
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
import torch # pytype: disable=import-error
import yaml
from model_navigator.model import ModelSignatureConfig
from model_navigator.tensor import TensorSpec
from model_navigator.utils.config import YamlConfigFile
from ..core import (
GET_MODEL_FN_NAME,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
ExportFormat,
Format,
Model,
ModelInputType,
Precision,
TimeMeasurement,
TorchJit,
load_from_file,
)
from ..extensions import loaders, runners, savers
from .utils import get_dynamic_axes, get_shapes_with_dynamic_axes
LOGGER = logging.getLogger(__name__)
def get_sample_input(dataloader, device):
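    # use the inputs (x) of the first batch as a representative sample for tracing/export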
for batch in dataloader:
_, x, _ = batch
break
if isinstance(x, dict):
sample_input = list(x.values())
elif isinstance(x, list):
sample_input = x
else:
raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict")
for idx, s in enumerate(sample_input):
sample_input[idx] = torch.from_numpy(s).to(device)
return tuple(sample_input)
def get_model_device(torch_model):
if next(torch_model.parameters()).is_cuda:
return "cuda"
else:
return "cpu"
def infer_model_precision(model):
counter = Counter()
for param in model.parameters():
counter[param.dtype] += 1
if counter[torch.float16] > 0:
return Precision.FP16
else:
return Precision.FP32
def _get_tensor_dtypes(dataloader, precision):
def _get_dtypes(t):
def _get_dtype(v):
dtype = str(v.dtype)
if dtype == "float64":
dtype = "float32"
if precision == Precision.FP16 and dtype == "float32":
dtype = "float16"
return np.dtype(dtype)
return {k: _get_dtype(v) for k, v in t.items()}
batch = next(dataloader)
_, x, y = batch
input_dtypes = _get_dtypes(x)
output_dtypes = _get_dtypes(y)
return input_dtypes, output_dtypes
### TODO assumption: floating-point inputs
### have the same precision as the model
def _get_model_signature(
inputs_names: typing.List[str],
outputs_names: typing.List[str],
precision,
dataloader_fn,
batch_size_dim: typing.Optional[int] = None,
):
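    # infer tensor dtypes and shapes (with dynamic axes) by sampling batches from the dataloader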
dataloader = dataloader_fn()
input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision)
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim=batch_size_dim)
inputs = {
name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in inputs_names
}
outputs = {
name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name]))
for name in outputs_names
}
return ModelSignatureConfig(inputs, outputs)
class PyTorchModelLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
model, io_names_dict = get_model(**self._model_args)
dataloader_fn = kwargs.get("dataloader_fn", None)
output_type = kwargs.get("output_type", None)
torch_jit = kwargs.get("torch_jit", None)
precision = infer_model_precision(model)
batch_axis = getattr(model, "batch_axis", 0) # by default models supports batching; batch_axis=0
model_signature = _get_model_signature(
inputs_names=io_names_dict["inputs"],
outputs_names=io_names_dict["outputs"],
precision=precision,
dataloader_fn=dataloader_fn,
batch_size_dim=batch_axis,
)
model = Model(handle=model, precision=precision, inputs=model_signature.inputs, outputs=model_signature.outputs)
if output_type == ExportFormat.TORCHSCRIPT.value:
if torch_jit == TorchJit.TRACE.value:
return self._trace(model, dataloader_fn)
elif torch_jit == TorchJit.SCRIPT.value:
return self._script(model)
raise ValueError(f"Not supported PyTorch Jit operation type: {torch_jit}")
elif output_type == ExportFormat.ONNX.value:
return model
else:
raise ValueError(f"Not supported PyTorch format: {output_type}")
def _trace(self, model: Model, dataloader_fn) -> Model:
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
traced_model = torch.jit.trace_module(model.handle, {"forward": dummy_input})
return Model(traced_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
def _script(self, model: Model) -> Model:
scripted_model = torch.jit.script(model.handle)
return Model(scripted_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs)
class TorchScriptLoader(BaseLoader):
def __init__(self, tensor_names_path: str = None, **kwargs):
self._model_args = kwargs
self._io_spec = None
if tensor_names_path is not None:
with Path(tensor_names_path).open("r") as fh:
tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader)
self._io_spec = ModelSignatureConfig(tensor_infos["inputs"], tensor_infos["outputs"])
def load(self, model_path: Union[str, Path], **_) -> Model:
if not isinstance(model_path, Path):
model_path = Path(model_path)
model = torch.jit.load(model_path.as_posix())
precision = infer_model_precision(model)
io_spec = self._io_spec
if not io_spec:
yaml_path = model_path.parent / f"{model_path.name}.yaml"
if not yaml_path.is_file():
raise ValueError(
f"If `--tensor-names-path is not provided, "
f"TorchScript model loader expects file {yaml_path} with tensor information."
)
with yaml_path.open("r") as fh:
tensor_info = yaml.load(fh, Loader=yaml.SafeLoader)
io_spec = ModelSignatureConfig(tensor_info["inputs"], tensor_info["outputs"])
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class PYT2ONNXSaver(BaseSaver):
def __init__(self, onnx_opset: int = None):
self._onnx_opset = onnx_opset
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted."
dynamic_axes = get_dynamic_axes(dataloader_fn(), batch_size_dim=0)
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
with torch.no_grad():
torch.onnx.export(
model.handle,
dummy_input,
model_path,
do_constant_folding=True,
input_names=list(model.inputs),
output_names=list(model.outputs),
dynamic_axes=dynamic_axes,
opset_version=self._onnx_opset,
enable_onnx_checker=True,
)
class TorchScriptSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path], dataloader_fn) -> None:
if not isinstance(model_path, Path):
model_path = Path(model_path)
if isinstance(model.handle, torch.jit.ScriptModule):
torch.jit.save(model.handle, model_path.as_posix())
else:
raise RuntimeError("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.")
signature_config = ModelSignatureConfig(inputs=model.inputs, outputs=model.outputs)
annotation_path = model_path.parent / f"{model_path.name}.yaml"
with YamlConfigFile(annotation_path) as config_file:
config_file.save_config(signature_config)
class PyTorchRunner(BaseRunner):
def __init__(self):
pass
def init_inference(
self,
model: Model,
):
return PyTorchRunnerSession(model=model)
class PyTorchRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted."
self._model = model
self._output_names = None
def __enter__(self):
self._output_names = list(self._model.outputs)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._output_names = None
self._model = None
def __call__(self, x: Dict[str, object]):
with torch.no_grad():
feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()]
with TimeMeasurement(self):
y_pred = self._model.handle(*feed_list)
if isinstance(y_pred, torch.Tensor):
y_pred = (y_pred,)
y_pred = [t.cpu().numpy() for t in y_pred]
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(ModelInputType.PYT.value, PyTorchModelLoader)
loaders.register_extension(ExportFormat.TORCHSCRIPT.value, TorchScriptLoader)
loaders.register_extension(Format.TORCHSCRIPT.value, TorchScriptLoader)
savers.register_extension(ExportFormat.TORCHSCRIPT.value, TorchScriptSaver)
savers.register_extension(f"{ModelInputType.PYT.value}--{ExportFormat.ONNX.value}", PYT2ONNXSaver)
runners.register_extension(Format.TORCHSCRIPT.value, PyTorchRunner)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/library/pyt.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# method from PEP-366 to support relative import in executed modules
import logging
import pathlib
from typing import List, Optional
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..core import EvaluationMode, MeasurementMode, OfflineMode, PerformanceTool
from .model_analyzer import ModelAnalyzerRunner
from .perf_analyzer import PerfAnalyzerRunner, PerfAnalyzerWarmupRunner
LOGGER = logging.getLogger("triton_performance_runner")
class TritonPerformanceRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int,
performance_tool: PerformanceTool,
model_repository: str,
result_path: pathlib.Path,
warmup: bool,
timeout: Optional[int],
verbose: bool,
):
self._warmup_runner = None
if warmup:
LOGGER.info("Running warmup before the main test")
self._warmup_runner = PerfAnalyzerWarmupRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
timeout=timeout,
)
if performance_tool == PerformanceTool.MODEL_ANALYZER:
LOGGER.info("Using Model Analyzer for performance evaluation")
self._runner = ModelAnalyzerRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
model_repository=model_repository,
result_path=result_path,
timeout=timeout,
verbose=verbose,
)
elif performance_tool == PerformanceTool.PERF_ANALYZER:
LOGGER.info("Using Perf Analyzer for performance evaluation")
self._runner = PerfAnalyzerRunner(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
concurrency=concurrency,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
output_shared_memory_size=output_shared_memory_size,
result_path=result_path,
timeout=timeout,
verbose=verbose,
)
else:
raise ValueError(f"Unsupported performance tool {performance_tool}")
def run(self):
if self._warmup_runner:
self._warmup_runner.run()
self._runner.run()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/runner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import TritonPerformanceRunner # noqa: F401
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import shutil
import sys
from distutils.version import LooseVersion
from typing import List, Optional
import yaml
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import log_dict, parse_server_url
from .model_analyzer import ModelAnalyzer, ModelAnalyzerMode
from .model_analyzer_config import ModelAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(version("triton-model-analyzer"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
TRITON_MODEL_ANALYZER_VERSION = LooseVersion(pkg_resources.get_distribution("triton-model-analyzer").version)
LOGGER = logging.getLogger("triton_performance_runner.model_analyzer")
class ModelAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"model_repository": model_repository,
"result_path": result_path,
"verbose": verbose,
},
)
if result_path.suffix:
raise ValueError(
"Results path for Model Analyzer is invalid. Please, provide the directory name. Example: results"
)
self._checkpoints = pathlib.Path("./checkpoints")
self._result_path = result_path
self._verbose = verbose
self._filename_model_inference = "metrics-model-inference.csv"
self._filename_model_gpu = "metrics-model-gpu.csv"
self._profile_config = self._prepare_profile_config(
server_url=server_url,
model_name=model_name,
input_data=input_data,
input_shapes=input_shapes,
batch_sizes=batch_sizes,
concurrency=concurrency,
measurement_mode=measurement_mode,
measurement_interval=measurement_interval,
measurement_request_count=measurement_request_count,
evaluation_mode=evaluation_mode,
offline_mode=offline_mode,
model_repository=model_repository,
output_shared_memory_size=output_shared_memory_size,
checkpoints=self._checkpoints,
verbose=verbose,
)
self._analyze_config = self._prepare_analyze_config(
model_name=model_name,
result_path=result_path,
verbose=verbose,
filename_model_inference=self._filename_model_inference,
filename_model_gpu=self._filename_model_gpu,
)
def run(self):
self._result_path.mkdir(parents=True, exist_ok=True)
if self._checkpoints.is_dir():
shutil.rmtree(self._checkpoints.as_posix())
self._checkpoints.mkdir(parents=True, exist_ok=True)
model_analyzer = ModelAnalyzer(config=self._profile_config)
model_analyzer.run(mode=ModelAnalyzerMode.PROFILE, verbose=self._verbose)
for file in self._checkpoints.iterdir():
if not file.is_file() or file.suffix != ".ckpt":
continue
LOGGER.info(f"Moving checkpoint {file.name} to {self._result_path}")
shutil.move(file, self._result_path / file.name)
model_analyzer = ModelAnalyzer(config=self._analyze_config)
model_analyzer.run(mode=ModelAnalyzerMode.ANALYZE, verbose=self._verbose)
inference_metrics_file = pathlib.Path("/tmp") / "results" / self._filename_model_inference
gpu_metrics_file = pathlib.Path("/tmp") / "results" / self._filename_model_gpu
for file in [inference_metrics_file, gpu_metrics_file]:
LOGGER.info(f"Moving metrics {file.name} to {self._result_path}")
shutil.move(file, self._result_path / file.name)
def _prepare_profile_config(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
model_repository: str,
checkpoints: pathlib.Path,
output_shared_memory_size: int = 102400,
verbose: bool = False,
):
protocol, host, port = parse_server_url(server_url)
perf_analyzer_config = self._perf_analyzer_config(
input_data,
input_shapes,
measurement_mode,
measurement_interval,
measurement_request_count,
evaluation_mode,
offline_mode,
output_shared_memory_size,
)
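        # "remote" launch mode: Model Analyzer connects to an already running Triton server
        # instead of starting its own instance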
config = {
"model_repository": model_repository,
"triton_launch_mode": "remote",
"run_config_search_disable": True,
"perf_analyzer_flags": perf_analyzer_config,
"perf_analyzer_timeout": 3600, # Workaround for Perf Analyzer timeout - use 1h
"profile_models": [model_name],
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"verbose": verbose,
"checkpoint_directory": checkpoints.as_posix(),
"override_output_model_repository": True,
"client_protocol": protocol.value,
f"triton_{protocol.value}_endpoint": f"{host}:{port}",
}
if verbose:
log_dict("Model Analyzer profiling configuration", config)
with open("config_profile.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
config["config-file"] = "config_profile.yaml"
return config
def _prepare_analyze_config(
self,
model_name: str,
result_path: pathlib.Path,
filename_model_inference: str,
filename_model_gpu: str,
verbose: bool,
):
inference_output_fields = [
"batch_size",
"concurrency",
"perf_throughput",
"perf_latency",
"perf_client_send_recv",
"perf_client_response_wait",
"perf_server_queue",
"perf_server_compute_input",
"perf_server_compute_infer",
"perf_server_compute_output",
]
gpu_output_fields = [
"gpu_uuid",
"batch_size",
"concurrency",
"gpu_used_memory",
"gpu_free_memory",
"gpu_utilization",
"gpu_power_usage",
]
config = {
"analysis_models": model_name,
"checkpoint_directory": result_path.as_posix(),
"export_path": "/tmp",
"inference_output_fields": inference_output_fields,
"gpu_output_fields": gpu_output_fields,
"filename_model_inference": filename_model_inference,
"filename_model_gpu": filename_model_gpu,
"summarize": False,
}
if verbose:
log_dict("Model Analyzer analysis configuration", config)
with open("config_analyze.yaml", "w") as file:
yaml.safe_dump(config, file)
config = ModelAnalyzerConfig()
config["config-file"] = "config_analyze.yaml"
return config
def _perf_analyzer_config(
self,
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
output_shared_memory_size: int = 102400,
):
perf_analyzer_config = {
"measurement-interval": measurement_interval,
}
if TRITON_MODEL_ANALYZER_VERSION >= LooseVersion("1.8.0"):
perf_analyzer_config["input-data"] = [input_data]
else:
perf_analyzer_config["input-data"] = input_data
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
perf_analyzer_config["measurement-mode"] = measurement_mode.value
perf_analyzer_config["measurement-request-count"] = measurement_request_count
if evaluation_mode == EvaluationMode.OFFLINE:
perf_analyzer_config["shared-memory"] = offline_mode.value
perf_analyzer_config["output-shared-memory-size"] = output_shared_memory_size
if input_shapes:
if TRITON_MODEL_ANALYZER_VERSION > LooseVersion("1.8.0"):
perf_analyzer_config["shape"] = input_shapes
else:
perf_analyzer_config["shape"] = input_shapes[0]
LOGGER.warning("Model Analyzer <= 1.8.0 support only single shape param for Perf Analyzer.")
return perf_analyzer_config
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/model_analyzer/runner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import ModelAnalyzerRunner # noqa: F401
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/model_analyzer/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .exceptions import ModelAnalyzerException
class ModelAnalyzerConfig:
"""
A config class to set arguments to the Model Analyzer.
An argument set to None will use the default.
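    Example (as used by the profiling runner):
        config = ModelAnalyzerConfig()
        config["config-file"] = "config_profile.yaml"
        cli_args = config.to_cli_string()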
"""
model_analyzer_args = [
"config-file",
]
input_to_options = [
"config-file",
]
def __init__(self):
# Args will be a dict with the string representation as key
self._args = {k: None for k in self.model_analyzer_args}
self._options = {
"-f": "config.yaml",
}
self._input_to_options = {
"config-file": "-f",
}
def to_cli_string(self):
"""
Utility function to convert a config into a
        string of CLI arguments to the Model Analyzer.
Returns
-------
str
the command consisting of all set arguments to
the model analyzer.
e.g. '--model-repository=/models --verbose=True'
"""
        # single dashed options first, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
return " ".join(args)
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into model_analyzer
"""
return list(cls.model_analyzer_args) + list(cls.input_to_options)
def __getitem__(self, key):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the model analyzer
Returns
-------
The value that the argument is set to in this config
"""
if key in self._args:
return self._args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
else:
raise ModelAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key, value):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the model analyzer
value : (any)
The value to which the argument is being set
Raises
------
        ModelAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
else:
raise ModelAnalyzerException(f"The argument '{key}' to the Model Analyzer is not supported.")
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/model_analyzer/model_analyzer_config.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ModelAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/model_analyzer/exceptions.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import subprocess
from subprocess import CalledProcessError
from .exceptions import ModelAnalyzerException
SERVER_OUTPUT_TIMEOUT_SECS = 5
LOGGER = logging.getLogger(__name__)
class ModelAnalyzerMode:
PROFILE = "profile"
ANALYZE = "analyze"
REPORT = "report"
class ModelAnalyzerReportMode:
OFFLINE = "offline"
ONLINE = "online"
class ModelAnalyzer:
"""
    Concrete implementation of the Model Analyzer interface that runs
    the analyzer locally as a subprocess.
"""
_analyzer_path = "model-analyzer"
def __init__(self, config, timeout: int = None):
"""
Parameters
----------
config : AnalyzerConfig
the config object containing arguments for this server instance
"""
self._analyzer_process = None
self._analyzer_config = config
self._log = None
self._timeout = timeout
def run(self, mode: str, verbose: bool = False, quiet: bool = False, report_mode: str = None):
"""
Starts the model analyzer locally
"""
if self._analyzer_path:
cmd = []
if self._timeout:
cmd = ["timeout", str(self._timeout)]
cmd += [self._analyzer_path]
if verbose:
cmd += ["--verbose"]
if quiet:
cmd += ["--quiet"]
if report_mode:
cmd += ["-m"]
cmd += [report_mode]
cmd += [mode]
cmd += self._analyzer_config.to_cli_string().split()
LOGGER.debug(f"Model Analyze command: {cmd}")
try:
subprocess.run(cmd, check=True, start_new_session=True)
except CalledProcessError as e:
raise ModelAnalyzerException(
f"Running {self._analyzer_path} with {e.cmd} failed with"
f" exit status {e.returncode} : {e.output}"
)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/model_analyzer/model_analyzer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import logging
import os
import pathlib
import sys
from distutils.version import LooseVersion
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...report import save_results, show_results, sort_results
from ...utils import log_dict, parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
if LooseVersion(sys.version) >= LooseVersion("3.8.0"):
from importlib.metadata import version
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
else:
import pkg_resources
TRITON_CLIENT_VERSION = LooseVersion(pkg_resources.get_distribution("tritonclient").version)
LOGGER = logging.getLogger("triton_performance_runner.perf_analyzer")
class PerfAnalyzerRunner:
def __init__(
self,
server_url: str,
model_name: str,
input_data: str,
input_shapes: List[str],
batch_sizes: List[int],
concurrency: List[int],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
evaluation_mode: EvaluationMode,
offline_mode: OfflineMode,
result_path: pathlib.Path,
output_shared_memory_size: int = 102400,
timeout: Optional[int] = None,
verbose: bool = False,
):
log_dict(
"Selected configuration",
{
"server_url": server_url,
"model_name": model_name,
"input_data": input_data,
"input_shapes": input_shapes,
"batch_sizes": batch_sizes,
"concurrency": concurrency,
"measurement_mode": measurement_mode,
"measurement_interval": measurement_interval,
"measurement_request_count": measurement_request_count,
"evaluation_mode": evaluation_mode,
"offline_mode": offline_mode,
"output_shared_memory_size": output_shared_memory_size,
"result_path": result_path,
"timeout": timeout,
"verbose": verbose,
},
)
if result_path.suffix != ".csv":
raise ValueError(
"Results path for Perf Analyzer is invalid. Please, provide the CSV file name. Example: results.csv"
)
self._server_url = server_url
self._model_name = model_name
self._input_data = input_data
self._input_shapes = input_shapes
self._batch_sizes = batch_sizes
self._concurrency = concurrency
self._measurement_mode = measurement_mode
self._measurement_interval = measurement_interval
self._measurement_request_count = measurement_request_count
self._evaluation_mode = evaluation_mode
self._offline_mode = offline_mode
self._result_path = result_path
self._output_shared_memory_size = output_shared_memory_size
self._timeout = timeout
self._verbose = verbose
self._protocol, self._host, self._port = parse_server_url(server_url)
def run(self):
results: List[Dict] = []
for batch_size in self._batch_sizes:
for concurrency in self._concurrency:
performance_partial_file = (
f"{self._evaluation_mode.value.lower()}_partial_{batch_size}_{concurrency}.csv"
)
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": self._input_data,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"latency-report-file": performance_partial_file,
}
if self._verbose:
params["extra-verbose"] = True
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
if self._verbose:
log_dict(
f"Perf Analyzer config for batch_size: {batch_size} and concurrency: {concurrency}", params
)
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in self._input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
self._update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=self._result_path.as_posix(), data=results)
show_results(results=results)
def _calculate_average_latency(self, r):
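        # sum the client- and server-side latency components reported by Perf Analyzer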
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum(int(r.get(f, 0)) for f in avg_sum_fields)
return avg_latency
def _update_performance_data(self, results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"Batch": batch_size}
with open(performance_partial_file) as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = self._calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/runner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .runner import PerfAnalyzerRunner # noqa: F401
from .warmup import PerfAnalyzerWarmupRunner # noqa: F401
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from distutils.version import LooseVersion
from importlib.metadata import version
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...core import EvaluationMode, MeasurementMode, OfflineMode
from ...utils import parse_server_url
from .perf_analyzer import PerfAnalyzer
from .perf_config import PerfAnalyzerConfig
LOGGER = logging.getLogger("warmup")
TRITON_CLIENT_VERSION = LooseVersion(version("tritonclient"))
class PerfAnalyzerWarmupRunner:
def __init__(
self,
server_url: str,
model_name: str,
batch_sizes: List[int],
concurrency: List[int],
input_data: str,
input_shapes: List[str],
measurement_mode: MeasurementMode,
measurement_interval: int,
measurement_request_count: int,
offline_mode: OfflineMode,
evaluation_mode: EvaluationMode,
output_shared_memory_size: int,
timeout: Optional[int],
):
self._model_name = model_name
self._input_data = input_data
self._input_shapes = input_shapes
self._measurement_mode = measurement_mode
self._offline_mode = offline_mode
self._evaluation_mode = evaluation_mode
self._output_shared_memory_size = output_shared_memory_size
self._protocol, self._host, self._port = parse_server_url(server_url)
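        # warmup uses a single configuration: the smallest batch size with the highest
        # concurrency and doubled measurement window / request count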
self._measurement_interval = 2 * measurement_interval
self._measurement_request_count = 2 * measurement_request_count
self._batch_sizes = [min(batch_sizes)]
self._concurrency = [max(concurrency)]
self._timeout = timeout
def run(self):
for batch_size in self._batch_sizes:
for concurrency in self._concurrency:
params = {
"model-name": self._model_name,
"model-version": 1,
"batch-size": batch_size,
"url": f"{self._host}:{self._port}",
"protocol": self._protocol.value,
"input-data": self._input_data,
"measurement-interval": self._measurement_interval,
"concurrency-range": f"{concurrency}:{concurrency}:1",
"verbose": True,
}
if TRITON_CLIENT_VERSION >= LooseVersion("2.11.0"):
params["measurement-mode"] = self._measurement_mode.value
params["measurement-request-count"] = self._measurement_request_count
if self._evaluation_mode == EvaluationMode.OFFLINE:
params["shared-memory"] = self._offline_mode.value
params["output-shared-memory-size"] = self._output_shared_memory_size
config = PerfAnalyzerConfig()
for param, value in params.items():
config[param] = value
for shape in self._input_shapes:
config["shape"] = shape
perf_analyzer = PerfAnalyzer(config=config, timeout=self._timeout)
perf_analyzer.run()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/warmup.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
from .exceptions import PerfAnalyzerException
class PerfAnalyzerConfig:
"""
A config class to set arguments to the perf_analyzer.
An argument set to None will use the perf_analyzer's default.
"""
perf_analyzer_args = [
"async",
"sync",
"measurement-interval",
"measurement-mode",
"measurement-request-count",
"concurrency-range",
"request-rate-range",
"request-distribution",
"request-intervals",
"binary-search",
"num-of-sequence",
"latency-threshold",
"max-threads",
"stability-percentage",
"max-trials",
"percentile",
"input-data",
"shared-memory",
"output-shared-memory-size",
"sequence-length",
"string-length",
"string-data",
]
perf_analyzer_multiple_args = [
"shape",
]
input_to_options = [
"model-name",
"model-version",
"batch-size",
"url",
"protocol",
"latency-report-file",
"streaming",
]
input_to_verbose = ["verbose", "extra-verbose"]
def __init__(self):
"""
Construct a PerfAnalyzerConfig
"""
self._args = {k: None for k in self.perf_analyzer_args}
self._multiple_args = {k: [] for k in self.perf_analyzer_multiple_args}
self._options = {
"-m": None,
"-x": None,
"-b": None,
"-u": None,
"-i": None,
"-f": None,
"-H": None,
"-c": None,
"-t": None,
}
self._verbose = {"-v": None, "-v -v": None}
self._input_to_options = {
"model-name": "-m",
"model-version": "-x",
"batch-size": "-b",
"url": "-u",
"protocol": "-i",
"latency-report-file": "-f",
"streaming": "-H",
"concurrency": "-c",
"threads": "-t",
}
self._input_to_verbose = {"verbose": "-v", "extra-verbose": "-v -v"}
@classmethod
def allowed_keys(cls):
"""
Returns
-------
list of str
The keys that are allowed to be
passed into perf_analyzer
"""
return (
list(cls.perf_analyzer_args)
+ list(cls.perf_analyzer_multiple_args)
+ list(cls.input_to_options)
+ list(cls.input_to_verbose)
)
def update_config(self, params=None):
"""
Allows setting values from a
params dict
Parameters
----------
params: dict
keys are allowed args to perf_analyzer
"""
if params:
for key in params:
self[key] = params[key]
def to_cli_string(self):
"""
Utility function to convert a config into a
string of arguments to the perf_analyzer with CLI.
Returns
-------
str
cli command string consisting of all arguments
to the perf_analyzer set in the config, without
the executable name.
"""
# single dashed options, then verbose flags, then main args
args = [f"{k} {v}" for k, v in self._options.items() if v]
args += [k for k, v in self._verbose.items() if v]
args += [f"--{k}={v}" for k, v in self._args.items() if v]
for k, v in self._multiple_args.items():
for item in v:
args.append(f"--{k}={item}")
return " ".join(args)
def __getitem__(self, key: str):
"""
Gets an arguments value in config
Parameters
----------
key : str
The name of the argument to the perf_analyzer
Returns
-------
The value that the argument is set to in this config
Raises
------
        PerfAnalyzerException
If argument not found in the config
"""
if key in self._args:
return self._args[key]
elif key in self._multiple_args:
return self._multiple_args[key]
elif key in self._input_to_options:
return self._options[self._input_to_options[key]]
elif key in self._input_to_verbose:
return self._verbose[self._input_to_verbose[key]]
else:
raise PerfAnalyzerException(f"'{key}' Key not found in config")
def __setitem__(self, key: str, value: Any):
"""
Sets an arguments value in config
after checking if defined/supported.
Parameters
----------
key : str
The name of the argument to the perf_analyzer
value : (any)
The value to which the argument is being set
Raises
------
        PerfAnalyzerException
If key is unsupported or undefined in the
config class
"""
if key in self._args:
self._args[key] = value
elif key in self._multiple_args:
self._multiple_args[key].append(value)
elif key in self._input_to_options:
self._options[self._input_to_options[key]] = value
elif key in self._input_to_verbose:
self._verbose[self._input_to_verbose[key]] = value
else:
raise PerfAnalyzerException(
f"The argument '{key}' to the perf_analyzer " "is not supported by the model analyzer."
)
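# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal example of filling a PerfAnalyzerConfig and rendering it as a CLI string.
# The model name, URL and shape below are placeholder values, not repository defaults.
if __name__ == "__main__":
    example_config = PerfAnalyzerConfig()
    example_config.update_config(
        {
            "model-name": "example_model",  # hypothetical model name
            "batch-size": 8,
            "url": "localhost:8001",
            "protocol": "grpc",
            "measurement-interval": 5000,
            "concurrency-range": "1:1:1",
            "verbose": True,
        }
    )
    example_config["shape"] = "INPUT__0:3,224,224"  # repeatable argument, one entry per call
    print(example_config.to_cli_string())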
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/perf_config.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class PerfAnalyzerException(Exception):
def __init__(self, message: str):
self._message = message
def __str__(self):
"""
Get the exception string representation.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
@property
def message(self):
"""
Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/exceptions.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
from subprocess import PIPE, CalledProcessError, Popen
# method from PEP-366 to support relative import in executed modules
from typing import List, Optional
if __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .exceptions import PerfAnalyzerException
MAX_INTERVAL_CHANGES = 10
COUNT_INTERVAL_DELTA = 50
TIME_INTERVAL_DELTA = 2000
LOGGER = logging.getLogger(__name__)
class PerfAnalyzer:
"""
This class provides an interface for running workloads
with perf_analyzer.
"""
def __init__(self, config, timeout: Optional[int]):
"""
Parameters
----------
config : PerfAnalyzerConfig
keys are names of arguments to perf_analyzer,
values are their values.
"""
self.bin_path = "perf_analyzer"
self._config = config
self._output = ""
self._timeout = timeout
def run(self):
"""
Runs the perf analyzer with the
initialized configuration
Returns
-------
        None
            The raw perf_analyzer output is captured and is available via output()
Raises
------
PerfAnalyzerException
If subprocess throws CalledProcessError
"""
self._output = ""
for _ in range(MAX_INTERVAL_CHANGES):
command = [self.bin_path]
command += self._config.to_cli_string().replace("=", " ").split()
LOGGER.debug(f"Perf Analyze command: {command}")
if not self._timeout:
LOGGER.debug("Perf Analyze command timeout not set")
else:
LOGGER.debug(f"Perf Analyze command timeout: {self._timeout} [s]")
try:
self._run_with_stream(command=command)
return
except CalledProcessError as e:
                if self._failed_with_measurement_interval(e.output):
if self._config["measurement-mode"] is None or self._config["measurement-mode"] == "count_windows":
self._increase_request_count()
else:
self._increase_time_interval()
else:
raise PerfAnalyzerException(
f"Running perf_analyzer with {e.cmd} failed with" f" exit status {e.returncode} : {e.output}"
)
raise PerfAnalyzerException(f"Ran perf_analyzer {MAX_INTERVAL_CHANGES} times, but no valid requests recorded.")
def output(self):
"""
Returns
-------
The stdout output of the
last perf_analyzer run
"""
if self._output:
return self._output
raise PerfAnalyzerException("Attempted to get perf_analyzer output" "without calling run first.")
def _run_with_stream(self, command: List[str]):
commands_lst = []
if self._timeout:
commands_lst = ["timeout", str(self._timeout)]
commands_lst.extend(command)
LOGGER.debug(f"Run with stream: {commands_lst}")
process = Popen(commands_lst, start_new_session=True, stdout=PIPE, encoding="utf-8")
streamed_output = ""
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
streamed_output += output
print(output.rstrip())
self._output += streamed_output
result = process.poll()
LOGGER.debug(f"Perf Analyzer process exited with result: {result}")
# WAR for Perf Analyzer exit code 0 when stabilization failed
        if result == 0 and self._failed_with_measurement_interval(streamed_output):
LOGGER.debug("Perf Analyzer finished with exit status 0, however measurement stabilization failed.")
result = 1
if result != 0:
raise CalledProcessError(returncode=result, cmd=commands_lst, output=streamed_output)
    def _failed_with_measurement_interval(self, output: str):
checks = [
output.find("Failed to obtain stable measurement"),
output.find("Please use a larger time window"),
]
result = any([status != -1 for status in checks])
LOGGER.debug(f"Measurement stability message validation: {checks}. Result: {result}.")
return result
def _increase_request_count(self):
self._config["measurement-request-count"] += COUNT_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement request count is too small, "
f"increased to {self._config['measurement-request-count']}."
)
def _increase_time_interval(self):
self._config["measurement-interval"] += TIME_INTERVAL_DELTA
LOGGER.debug(
"perf_analyzer's measurement window is too small, "
f"increased to {self._config['measurement-interval']} ms."
)
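# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# A minimal sketch of driving perf_analyzer through this wrapper. It assumes the
# perf_analyzer binary is on PATH, that the package is importable (see the PEP-366
# shim above), and that a Triton server already serves the hypothetical model named
# below; all configuration values are placeholders.
if __name__ == "__main__":
    from .perf_config import PerfAnalyzerConfig  # sibling module in this package

    example_config = PerfAnalyzerConfig()
    example_config.update_config(
        {
            "model-name": "example_model",  # hypothetical model name
            "batch-size": 1,
            "url": "localhost:8001",
            "protocol": "grpc",
            "measurement-interval": 5000,
        }
    )
    analyzer = PerfAnalyzer(config=example_config, timeout=600)
    analyzer.run()
    print(analyzer.output())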
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/deployment_toolkit/triton_performance_runner/perf_analyzer/perf_analyzer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .task import DataObject
class Configuration(DataObject):
"""
Configuration object - handle single experiment data
"""
def __init__(
self,
parameters: Dict,
checkpoint: Optional[str],
):
"""
Args:
parameters: Configuration parameters
checkpoint: Checkpoint used for experiment
"""
self.parameters = parameters
self.checkpoint = checkpoint
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/configuration.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from datetime import datetime
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .configuration import Configuration
from .downloader import download
from .experiment import Experiment, Stage
from .logger import LOGGER
from .maintainer import Maintainer
from .pipeline import Pipeline
from .stages import ResultsType, TritonPerformanceOfflineStage, TritonPerformanceOnlineStage
from .task import Checkpoint, Dataset, SystemInfo, Task
from .triton import Triton
from .utils import clean_directory
class Preparer(abc.ABC):
"""
Runner preparer object.
"""
@abc.abstractmethod
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
pass
class ExperimentPreparer(Preparer):
"""
Experiment runner preparer object.
"""
def exec(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
maintainer: Maintainer,
triton: Triton,
logs_dir: pathlib.Path,
):
LOGGER.info("Preparing Triton container image")
triton_container_image = self._prepare_triton_container_image(config, maintainer, triton)
LOGGER.info("Initialize task")
task = self._initialize_task(
workspace=workspace,
config=config,
pipeline=pipeline,
triton_container_image=triton_container_image,
logs_dir=logs_dir,
)
LOGGER.info("Preparing directories")
self._create_dirs(workspace, task)
LOGGER.info("Clean previous run artifacts directories")
self._clean_previous_run_artifacts(workspace, task)
LOGGER.info("Downloading checkpoints")
self._download_checkpoints(task)
return task
def _create_dirs(self, workspace: pathlib.Path, task: Task) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
for directory in [task.results_dir, task.logs_dir, task.checkpoints_dir]:
directory_path = workspace / directory
directory_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory} created.")
def _clean_previous_run_artifacts(self, workspace: pathlib.Path, task: Task) -> None:
"""
Clean logs from previous run
Returns:
None
"""
for directory in [
task.logs_dir,
task.results_dir,
]:
directory_path = workspace / directory
clean_directory(directory_path)
LOGGER.info(f"Location {directory} cleaned.")
def _prepare_triton_container_image(self, config: Config, maintainer: Maintainer, triton: Triton) -> str:
"""
Prepare Triton Container Image based on provided configuration
Returns:
Name of container image to use in process
"""
if not config.triton_dockerfile:
image_name = triton.container_image(config.container_version)
LOGGER.info(f"Using official Triton container image: {image_name}.")
return image_name
if config.triton_container_image:
LOGGER.info(f"Using provided Triton Container Image: {config.triton_container_image}")
return config.triton_container_image
normalized_model_name = config.model_name.lower().replace("_", "-")
image_name = f"tritonserver-{normalized_model_name}:latest"
LOGGER.info(f"Building Triton Container Image: {image_name}")
maintainer.build_image(
image_name=image_name,
image_file_path=pathlib.Path(config.triton_dockerfile),
build_args={"FROM_IMAGE": triton.container_image(container_version=config.container_version)},
)
return image_name
def _download_checkpoints(self, task: Task) -> None:
"""
Download checkpoints
"""
for variant, checkpoint in task.checkpoints.items():
checkpoint_url = checkpoint.url
download_path = checkpoint.path
if download_path.is_dir():
LOGGER.info(f"Checkpoint {download_path.name} already downloaded.")
continue
if not checkpoint_url:
LOGGER.warning(
f"Checkpoint {variant} url is not provided."
"\nIf you want to use that checkpoint please train the model locally"
f"\nand copy to {download_path} directory"
)
continue
download(checkpoint_url, download_path)
def _initialize_task(
self,
workspace: pathlib.Path,
config: Config,
pipeline: Pipeline,
triton_container_image: str,
logs_dir: pathlib.Path,
) -> Task:
"""
Initialize task object
Args:
workspace: Path to workspace where artifacts are stored
config: Config object
pipeline: Pipeline object
triton_container_image: Triton Inference Server container image used for tests
Returns:
Task object
"""
datasets = {}
for dataset in config.datasets:
datasets[dataset.name] = Dataset(name=dataset.name)
checkpoints = {}
for checkpoint in config.checkpoints:
download_path = workspace / Task.checkpoints_dir / checkpoint.name
checkpoints[checkpoint.name] = Checkpoint(name=checkpoint.name, url=checkpoint.url, path=download_path)
results_types = self._task_results_types(pipeline=pipeline)
stages = {}
for stage in pipeline.stages():
stages[stage.label] = {"result_path": stage.result_path, "result_type": stage.result_type}
experiments = []
for idx, configuration in enumerate(config.configurations, start=1):
experiment = self._prepare_experiment(
idx=idx,
configuration=configuration,
results_types=results_types,
stages=stages,
)
experiments.append(experiment)
system_info = SystemInfo.from_host()
task = Task(
model_name=config.model_name,
ensemble_model_name=config.ensemble_model_name,
framework=config.framework,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config.datasets_dir,
experiments=experiments,
container_version=config.container_version,
system_info=system_info,
triton_container_image=triton_container_image,
triton_custom_operations=config.triton_custom_operations,
triton_load_model_method=config.triton_load_model_method,
started_at=int(datetime.utcnow().timestamp()),
batching=config.batching,
measurement_steps_offline=config.measurement_steps_offline,
measurement_steps_online=config.measurement_steps_online,
)
return task
def _task_results_types(self, pipeline: Pipeline) -> List[str]:
"""
Types of results generated as part of task
Returns:
List of result types
"""
results = []
for stage in pipeline.stages():
if TritonPerformanceOfflineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_OFFLINE)
continue
if TritonPerformanceOnlineStage.label == stage.label:
results.append(ResultsType.TRITON_PERFORMANCE_ONLINE)
continue
return results
def _prepare_experiment(
self,
idx: int,
configuration: Configuration,
results_types: List[str],
stages: Dict,
) -> Experiment:
"""
Prepare experiments data
Args:
idx: Experiment index
configuration: Configuration object
results_types: Results types stored in experiment
stages: Stages executed as part of experiment
Returns:
Experiment object
"""
results_mapped = {}
for result_type in results_types:
results_mapped[result_type] = result_type
stages_mapped = {}
for name, stage_data in stages.items():
stages_mapped[name] = Stage(name=name, **stage_data)
experiment = Experiment(
experiment_id=idx,
parameters=configuration.parameters,
stages=stages_mapped,
results=results_mapped,
checkpoint=configuration.checkpoint,
)
return experiment
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/preparer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import platform
import subprocess
from datetime import datetime
from typing import Dict, List, Optional, Union
import cpuinfo
import psutil
import yaml
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import PerformanceTool
from .core import CustomDumper, DataObject
from .experiment import Experiment
from .triton import Triton
class GPU(DataObject):
"""
GPU information data object
"""
name: str
driver_version: str
cuda_version: str
memory: str
tdp: str
def __init__(self, name: str, driver_version: str, cuda_version: str, memory: str, tdp: str):
"""
Args:
name: name of GPU
driver_version: version of driver
cuda_version: version of CUDA
memory: size of memory available on GPU [MB]
tdp: Max TDP of GPU unit
"""
self.name = name
self.driver_version = driver_version
self.cuda_version = cuda_version
self.memory = memory
self.tdp = tdp
@staticmethod
def from_dict(data: Dict):
"""
Create GPU object from dictionary
Args:
data: dictionary with GPU data
Returns:
GPU object
"""
return GPU(
name=data["name"],
driver_version=data["driver_version"],
cuda_version=data["cuda_version"],
memory=data["memory"],
tdp=data["tdp"],
)
@staticmethod
def from_host():
"""
Create GPU object from host data
Returns:
GPU object
"""
data = subprocess.check_output(
["nvidia-smi", "--query-gpu=name,driver_version,memory.total,power.max_limit", "--format=csv"]
).decode()
lines = data.split(sep="\n")
device_details = lines[1].split(",")
name = device_details[0].strip()
driver_version = device_details[1].strip()
memory = device_details[2].strip()
tdp = device_details[3].strip()
cuda_version = None
data = subprocess.check_output(["nvidia-smi", "--query"]).decode()
lines = data.split(sep="\n")
for line in lines:
if line.startswith("CUDA Version"):
cuda_version = line.split(":")[1].strip()
break
return GPU(
name=name,
driver_version=driver_version,
cuda_version=cuda_version,
memory=memory,
tdp=tdp,
)
class CPU(DataObject):
"""
CPU details
"""
name: str
physical_cores: int
logical_cores: int
min_frequency: float
max_frequency: float
def __init__(self, name: str, physical_cores: int, logical_cores: int, min_frequency: float, max_frequency: float):
"""
Args:
name: name of CPU unit
physical_cores: number of physical cores available on CPU
logical_cores: number of logical cores available on CPU
min_frequency: minimal clock frequency
max_frequency: maximal clock frequency
"""
self.name = name
self.physical_cores = physical_cores
self.logical_cores = logical_cores
self.min_frequency = min_frequency
self.max_frequency = max_frequency
@staticmethod
def from_host():
"""
Create CPU object from host data
Returns:
CPU object
"""
return CPU(
name=cpuinfo.get_cpu_info()["brand_raw"],
physical_cores=psutil.cpu_count(logical=False),
logical_cores=psutil.cpu_count(logical=True),
min_frequency=psutil.cpu_freq().min,
max_frequency=psutil.cpu_freq().max,
)
class Memory(DataObject):
"""
Memory data object
"""
size: float
def __init__(self, size: float):
"""
Args:
            size: RAM memory size in bytes (as reported by psutil)
"""
self.size = size
@staticmethod
def from_host():
"""
Create Memory object from host data
Returns:
Memory object
"""
svm = psutil.virtual_memory()
return Memory(size=svm.total)
class SystemInfo(DataObject):
"""
System Information data object
"""
system: str
cpu: CPU
memory: Memory
gpu: GPU
def __init__(self, system: str, cpu: CPU, memory: Memory, gpu: GPU):
"""
Args:
system: name of operating system
cpu: CPU info
memory: Memory info
gpu: GPU info
"""
self.system = system
self.cpu = cpu
self.memory = memory
self.gpu = gpu
@staticmethod
def from_host():
"""
Create SystemInfo object from host data
Returns:
SystemInfo object
"""
system = platform.platform()
gpu = GPU.from_host()
memory = Memory.from_host()
cpu = CPU.from_host()
return SystemInfo(system=system, cpu=cpu, gpu=gpu, memory=memory)
class Checkpoint(DataObject):
"""
Checkpoint data object
"""
def __init__(self, name: str, url: str, path: Union[str, pathlib.Path]):
"""
Args:
            name: Name of checkpoint
            url: URL from which the checkpoint can be downloaded
            path: Location of checkpoint on local hardware
"""
self.name = name
self.url = url
self.path = pathlib.Path(path)
class Dataset(DataObject):
"""
Dataset data object
"""
def __init__(self, name: str):
"""
Args:
name: Name of dataset
"""
self.name = name
class Task(DataObject):
"""
Task data object to store build information
"""
model_name: str
framework: str
batching: str
started_at: int
ended_at: Optional[int]
container_version: str
checkpoints: Dict[str, Checkpoint]
datasets: Dict[str, Dataset]
datasets_dir: Optional[Union[str, pathlib.Path]]
experiments: List[Experiment]
system_info: SystemInfo
triton_container_image: Optional[str]
triton_custom_operations: Optional[str]
performance_tool: PerformanceTool
filename: str = "task.yaml"
results_dir: str = "results"
checkpoints_dir: str = "checkpoints"
def __init__(
self,
model_name: str,
ensemble_model_name: Optional[str],
framework: str,
batching: str,
container_version: str,
checkpoints: Dict,
datasets: Dict,
experiments: List,
system_info: SystemInfo,
started_at: int,
datasets_dir: Optional[Union[str, pathlib.Path]] = None,
ended_at: Optional[int] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: str = Triton.LOAD_MODE.EXPLICIT,
measurement_steps_offline: int = 8,
measurement_steps_online: int = 32,
performance_tool: PerformanceTool = PerformanceTool.MODEL_ANALYZER,
):
"""
Args:
            model_name: Name of model
            ensemble_model_name: Optional name of the ensemble model
            batching: Batching mode supported by the model
framework: Model framework
container_version: Container version used in task
checkpoints: List of checkpoints
datasets: List of datasets
            datasets_dir: Directory where datasets are stored
experiments: List of experiments run as part of task
system_info: information about node on which experiment was executed
started_at: Time when task has started
ended_at: Time when task has ended
triton_container_image: Custom Triton Container Image used for task
triton_custom_operations: Custom operation library path
triton_load_model_method: Method how models are loaded on Triton
measurement_steps_offline: Number of measurement steps in offline performance stage
measurement_steps_online: Number of measurement steps in online performance stage
performance_tool: Performance Tool used for generating results
"""
self.started_at = started_at
self.ended_at = ended_at
self.model_name = model_name
self.ensemble_model_name = ensemble_model_name
self.framework = framework
self.container_version = container_version
self.checkpoints = checkpoints
self.datasets = datasets
self.datasets_dir = pathlib.Path(datasets_dir)
self.experiments = experiments
self.system_info = system_info
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.triton_load_model_method = triton_load_model_method
self.measurement_steps_offline = measurement_steps_offline
self.measurement_steps_online = measurement_steps_online
self.logs_dir = pathlib.Path("/var/logs")
self.batching = batching
self.performance_tool = performance_tool
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.ended_at = int(datetime.utcnow().timestamp())
def to_file(self, file_path: Union[pathlib.Path, str]):
"""
Store task data to YAML file
Args:
file_path: path to file where task data has to be saved
Returns:
None
"""
task_data = self.to_dict()
with open(file_path, "w") as f:
yaml.dump(task_data, f, Dumper=CustomDumper, width=240, sort_keys=False)
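# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Collects host information the same way the runner does when it builds a Task.
# GPU.from_host shells out to nvidia-smi, so this only works on a host with an
# NVIDIA driver installed.
if __name__ == "__main__":
    system_info = SystemInfo.from_host()
    print(yaml.dump(system_info.to_dict(), Dumper=CustomDumper, sort_keys=False))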
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/task.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import signal
import sys
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .exceptions import RunnerException
from .executor import Executor
from .finalizer import Finalizer
from .logger import LOGGER, log_format
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .triton import Triton
class Runner:
"""
Runner class. Main entrypoint to performing task and experiments
"""
WORKSPACE = pathlib.Path.cwd()
EXECUTOR_WORKSPACE = WORKSPACE / "runner_workspace"
def __init__(
self,
pipeline: Pipeline,
config: Config,
executor_cls: Type[Executor],
maintainer_cls: Type[Maintainer],
preparer_cls: Type[Preparer],
finalizer_cls: Type[Finalizer],
devices: List[str] = None,
log_level: int = logging.INFO,
):
self._pipeline = pipeline
self._config = config
self._preparer = preparer_cls()
self._finalizer = finalizer_cls()
self._devices = devices or ["0"]
self._log_level = log_level
self._logs_dir = self.EXECUTOR_WORKSPACE / "logs"
self._log_file_path = self._logs_dir / "runner.log"
self._maintainer = maintainer_cls()
self._executor = executor_cls(
workspace=self.EXECUTOR_WORKSPACE,
maintainer=self._maintainer,
pipeline=pipeline,
devices=devices,
)
signal.signal(signal.SIGINT, self._catch)
self._logs_dir.mkdir(parents=True, exist_ok=True)
def start(self) -> None:
"""
Start runner
Returns:
None
"""
self._setup_logger()
task = self._preparer.exec(
workspace=self.EXECUTOR_WORKSPACE,
config=self._config,
pipeline=self._pipeline,
logs_dir=self._logs_dir,
maintainer=self._maintainer,
triton=Triton(),
)
results = []
try:
for result in self._executor.start(task):
results.append(result)
except RunnerException as e:
LOGGER.error(f"Error running task: {str(e)}")
finally:
self._executor.stop()
self._finalizer.exec(workspace=self.EXECUTOR_WORKSPACE, task=task, results=results)
def _catch(self, signum, frame):
"""
        SIGINT catcher. Stops the executor when an interrupt signal is received.
Args:
signum: signal id
frame: signal frame
"""
self._executor.stop()
sys.exit(0)
def _setup_logger(self) -> None:
"""
Add file handle for logger
Returns:
None
"""
file = logging.FileHandler(self._log_file_path)
formatter = logging.Formatter(log_format)
file.setFormatter(formatter)
LOGGER.addHandler(file)
LOGGER.setLevel(level=self._log_level)
LOGGER.initialize(file_path=self._log_file_path)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/runner.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Framework, Paths
class Triton:
"""
Triton Inference Server helper class
"""
image = "nvcr.io/nvidia/tritonserver"
tag = "py3"
class LOAD_MODE:
"""
Loading mode available in Triton
"""
POLL = "poll"
EXPLICIT = "explicit"
@staticmethod
def container_image(container_version: str):
"""
Container image based on version
Args:
container_version: Version of container to be used
Returns:
Image name with tag
"""
return f"{Triton.image}:{container_version}-{Triton.tag}"
@staticmethod
def command(
framework: str,
repository_path: str,
strict_mode: bool = False,
poll_model: bool = False,
metrics: bool = False,
verbose: bool = False,
):
"""
Command to run Triton Inference Server inside container
Args:
framework: Framework used for model
repository_path: Path to model repository
strict_mode: Flag to use strict model config
poll_model: Poll model
metrics: Enable GPU metrics (disable for MIG)
verbose: Use verbose mode logging
        Returns:
            Command string used to start Triton Inference Server inside the container
"""
triton_command = f"tritonserver --model-store={repository_path}"
if poll_model:
triton_command += " --model-control-mode=poll --repository-poll-secs 5"
else:
triton_command += " --model-control-mode=explicit"
if not strict_mode:
triton_command += " --strict-model-config=false"
if not metrics:
triton_command += " --allow-metrics=false --allow-gpu-metrics=false"
if verbose:
triton_command += " --log-verbose 1"
if framework in (Framework.TensorFlow1, Framework.TensorFlow2):
version = 1 if framework == Framework.TensorFlow1 else 2
triton_command += f" --backend-config=tensorflow,version={version}"
return triton_command
@staticmethod
def library_path(framework: str):
"""
Obtain custom library path for framework
Args:
framework: Framework used for model
Returns:
Path to additional libraries needed by framework
"""
paths = {
Framework.PyTorch.name: "/opt/tritonserver/backends/pytorch",
Framework.TensorFlow1.name: "/opt/tritonserver/backends/tensorflow1",
Framework.TensorFlow2.name: "/opt/tritonserver/backends/tensorflow2",
}
return paths[framework]
@staticmethod
def custom_library_path_remote() -> str:
"""
Path to custom library mounted in Triton container
Returns:
Path to shared library with custom operations
"""
return f"{Paths.LIBRARIES_PATH}/libcustomops.so"
@staticmethod
def custom_library_path_local(libs_dir: pathlib.Path) -> pathlib.Path:
"""
Path to custom library in local path
Args:
libs_dir: path to libraries directory
Returns:
Path to shared library with custom operations
"""
return libs_dir / "libcustomops.so"
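# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Shows how the runner derives the Triton image name and the server start command.
# The container version below is a placeholder; the repository path reuses the
# constant that is mounted into the Triton container.
if __name__ == "__main__":
    image = Triton.container_image(container_version="21.12")  # placeholder version
    command = Triton.command(
        framework=Framework.PyTorch,  # command() compares against the Framework enum members
        repository_path=Paths.MODEL_REPOSITORY_PATH,
        verbose=True,
    )
    print(image)
    print(command)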
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/triton.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, List, Optional, Union
import yaml
from ..deployment_toolkit.core import PerformanceTool
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .configuration import Configuration
from .core import DataObject
from .triton import Triton
class Checkpoint(DataObject):
"""
Checkpoint data placeholder
"""
name: str
url: str
def __init__(self, name: str, url: str):
self.name = name
self.url = url
class Dataset(DataObject):
"""
Dataset data placeholder
"""
name: str
def __init__(self, name: str):
self.name = name
class Config(DataObject):
"""
Configuration object for runner experiments
"""
def __init__(
self,
model_name: str,
framework: str,
container_version: str,
batching: str,
configurations: List[Configuration],
ensemble_model_name: str = None,
datasets_dir: str = "datasets",
datasets: List[Dataset] = None,
checkpoints: List[Checkpoint] = None,
triton_dockerfile: Optional[str] = None,
triton_container_image: Optional[str] = None,
triton_custom_operations: Optional[str] = None,
triton_load_model_method: Optional[str] = Triton.LOAD_MODE.EXPLICIT,
measurement_steps_offline: int = 8,
measurement_steps_online: int = 32,
performance_tool: PerformanceTool = PerformanceTool.MODEL_ANALYZER,
):
"""
Args:
            model_name: Name of model
            ensemble_model_name: Optional name of the ensemble model
framework: Framework used to create model
container_version: Version of Triton Inference Server container used for evaluation
batching: Mark if model support batching
configurations: List of experiments configurations
datasets_dir: Directory where datasets are stored
datasets: Datasets used for conversion/export
checkpoints: Checkpoints with trained model
triton_load_model_method: Triton Inference Server model loading mode
triton_dockerfile: Dockerfile for Triton to build custom image
            triton_container_image: Custom image used for Triton Server; leave empty to use the default image or the one built from the Dockerfile
triton_custom_operations: Path where custom operation library is stored
measurement_steps_offline: Number of measurement steps in offline performance stage
measurement_steps_online: Number of measurement steps in online performance stage
performance_tool: Performance Tool used for generating results
"""
self.model_name = model_name
self.ensemble_model_name = ensemble_model_name
self.framework = framework
self.container_version = container_version
self.batching = batching
self.configurations = configurations
self.datasets_dir = datasets_dir
self.datasets = datasets
self.checkpoints = checkpoints
self.triton_load_model_method = triton_load_model_method
self.triton_dockerfile = triton_dockerfile
self.triton_container_image = triton_container_image
self.triton_custom_operations = triton_custom_operations
self.measurement_steps_offline = measurement_steps_offline
self.measurement_steps_online = measurement_steps_online
self.performance_tool = performance_tool
def to_file(self, file_path: Union[pathlib.Path, str]) -> None:
"""
Save config data to file
Args:
            file_path: path to file where config data should be stored
Returns:
None
"""
data = self.to_dict()
with open(file_path, "w") as f:
yaml.safe_dump(data, f)
@staticmethod
def from_dict(config_data: Dict):
"""
Create configuration object from data stored in dictionary
Args:
config_data: dictionary with config data
Returns:
Config object
"""
configurations = []
for configuration_data in config_data["configurations"]:
configuration = Configuration(**configuration_data)
configurations.append(configuration)
checkpoints = []
for checkpoint_data in config_data.get("checkpoints", []):
checkpoint = Checkpoint(
name=checkpoint_data["name"],
url=checkpoint_data["url"],
)
checkpoints.append(checkpoint)
datasets = []
for dataset_data in config_data.get("datasets", []):
dataset = Dataset(name=dataset_data["name"])
datasets.append(dataset)
return Config(
model_name=config_data["model_name"],
framework=config_data["framework"],
container_version=config_data["container_version"],
batching=config_data["batching"],
configurations=configurations,
checkpoints=checkpoints,
datasets=datasets,
datasets_dir=config_data.get("datasets_dir"),
triton_load_model_method=config_data["triton_load_model_method"],
triton_dockerfile=config_data.get("triton_dockerfile"),
triton_custom_operations=config_data.get("triton_custom_operations"),
measurement_steps_offline=config_data["measurement_steps_offline"],
measurement_steps_online=config_data["measurement_steps_online"],
performance_tool=PerformanceTool(config_data["performance_tool"]),
)
@staticmethod
def from_file(file_path: Union[pathlib.Path, str]):
"""
Load experiment data from file
Args:
file_path: path to file where experiment data is stored
Returns:
Experiment object
"""
with open(file_path) as f:
config_data = yaml.safe_load(f)
return Config.from_dict(config_data)
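# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Builds a minimal Config with a single experiment configuration and round-trips it
# through a YAML file. Model name, framework, container version and parameter names
# are placeholder values; checkpoints and datasets are left empty on purpose.
if __name__ == "__main__":
    example_config = Config(
        model_name="ExampleModel",  # hypothetical model name
        framework="PyTorch",
        container_version="21.12",  # placeholder container version
        batching="static",
        configurations=[
            Configuration(parameters={"backend_accelerator": "trt", "max_batch_size": 16}, checkpoint=None),
        ],
        checkpoints=[],
        datasets=[],
    )
    example_config.to_file("example_config.yaml")
    restored_config = Config.from_file("example_config.yaml")
    print(restored_config.model_name, len(restored_config.configurations))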
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/config.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
import shutil
import urllib.request
from typing import Any, Callable
from zipfile import ZipFile
from retrying import retry
from tqdm.auto import tqdm
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .logger import LOGGER
from .exceptions import RunnerException
def unzip(checkpoint_path: pathlib.Path, archive_path: pathlib.Path) -> None:
"""
    Unzip archive to the provided path
Args:
checkpoint_path: Path where archive has to be unpacked
        archive_path: Path to the archive file to unpack
Returns:
None
"""
LOGGER.info(f"Creating directory for checkpoint: {checkpoint_path.name}")
checkpoint_path.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Unpacking checkpoint files {checkpoint_path}")
with ZipFile(archive_path, "r") as zf:
zf.extractall(path=checkpoint_path)
LOGGER.info("done")
LOGGER.info(f"Removing zip file: {archive_path}")
archive_path.unlink()
LOGGER.info("done")
def download_progress(t: Any) -> Callable:
"""
Progress bar
Args:
t: progress
Returns:
Callable
"""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, tsize: int = None):
if tsize not in (None, -1):
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to
@retry(stop_max_attempt_number=3)
def download(checkpoint_url: str, checkpoint_path: pathlib.Path) -> None:
"""
Download checkpoint from given url to provided path
Args:
checkpoint_url: Url from which checkpoint has to be downloaded
checkpoint_path: Path where checkpoint has to be stored
Returns:
None
"""
LOGGER.info(f"Downloading checkpoint from {checkpoint_url}")
with tqdm(unit="B") as t:
reporthook = download_progress(t)
result = urllib.request.urlretrieve(checkpoint_url, reporthook=reporthook)
filename = result[0]
LOGGER.info(f"Checkpoint saved in {filename}")
file_path = pathlib.Path(filename)
if not file_path.is_file() and not file_path.is_dir():
raise RunnerException(f"Checkpoint {filename} does not exist")
LOGGER.info(f"Moving checkpoint to {checkpoint_path.parent}")
shutil.move(file_path, checkpoint_path.parent / file_path.name)
LOGGER.info("done")
archive_path = checkpoint_path.parent / file_path.name
unzip(checkpoint_path, archive_path)
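# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Downloads and unpacks a checkpoint archive. Both the URL and the target directory
# below are placeholders; download() expects the URL to point at a zip archive.
if __name__ == "__main__":
    example_url = "https://example.com/checkpoint.zip"  # placeholder URL
    example_path = pathlib.Path("checkpoints/example_checkpoint")  # placeholder directory
    example_path.parent.mkdir(parents=True, exist_ok=True)  # download() moves the archive next to this path
    download(example_url, example_path)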
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/downloader.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Dict, List
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .experiment import ExperimentResult
from .logger import LOGGER
from .stages import ResultsType
from .summary import load_results, save_summary
from .task import Task
class Finalizer(abc.ABC):
@abc.abstractmethod
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
pass
class ExperimentFinalizer(Finalizer):
"""
Public runner finalizer object.
"""
def exec(self, workspace: pathlib.Path, task: Task, results: List[ExperimentResult]):
results_path = workspace / task.results_dir
self._generate_summary(results_path, results)
self._finalize_task(results_path, task)
def _finalize_task(self, results_path: pathlib.Path, task: Task) -> None:
"""
Finalize task information
Args:
task: Task object
Returns:
None
"""
task.end()
file_path = results_path / task.filename
LOGGER.debug(f"Saving task details to file {file_path}")
task.to_file(file_path)
LOGGER.debug("Done")
LOGGER.info(f"Task details and results stored in {results_path}")
def _generate_summary(self, results_path: pathlib.Path, experiment_results: List[ExperimentResult]):
"""
Generate summary for results collected in all experiments
Args:
results_path: Path where results should be stored
experiment_results: Results collected from experiments
Returns:
"""
performance_offline_results = list()
performance_online_results = list()
results_mapping = {
ResultsType.TRITON_PERFORMANCE_OFFLINE: performance_offline_results,
ResultsType.TRITON_PERFORMANCE_ONLINE: performance_online_results,
}
self._collect_summary_results(experiment_results, results_mapping)
self._prepare_final_results(results_path, results_mapping)
def _collect_summary_results(self, experiment_results: List[ExperimentResult], results_mapping: Dict):
for experiment_result in experiment_results:
experiment = experiment_result.experiment
for result_type, result_path in experiment_result.results.items():
if not result_path.is_file() and not result_path.is_dir():
raise FileNotFoundError(f"Expected file {result_path} not found")
LOGGER.debug(f"Found {result_type} in {result_path} file.")
if result_type not in results_mapping:
LOGGER.debug(f"Results {result_type} for {experiment.experiment_id} are ignored in final summary.")
                    continue  # skip result types that are not part of the final summary
LOGGER.debug(f"Collecting {result_type} results from {result_path} for summary")
result = load_results(
results_path=result_path,
parameters=experiment.parameters,
result_type=result_type,
)
results_mapping[result_type].extend(result)
LOGGER.debug("Done.")
def _prepare_final_results(self, results_path: pathlib.Path, results_mapping: Dict) -> None:
"""
Prepare summary files for offline and online performance
Args:
results_path: Path where results should be stored
results_mapping: Mapping with results type and collected results for given stage
Returns:
None
"""
for results_type, results in results_mapping.items():
save_summary(
result_type=results_type,
results=results,
summary_dir=results_path,
)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/finalizer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .stages import Stage
class CommandsExporter:
"""
Command exported to BASH scripts
"""
def __init__(self, scripts_dir: pathlib.Path):
"""
Args:
            scripts_dir: Path where scripts should be stored
"""
self._scripts_dir = scripts_dir
def export(self, stage: Stage) -> Command:
"""
Export stage commands to script and return new command to execute
Args:
stage: Stage object with commands
Returns:
Command object with script execution command
"""
filename = self._get_filename(stage.label)
file_path = self._scripts_dir / filename
with open(file_path, "w+") as stagefile:
stagefile.write("set -x\n")
stagefile.write("set -e\n")
stagefile.write("export PYTHONUNBUFFERED=1\n")
stagefile.write("export PYTHONPATH=`pwd`\n")
for command in stage.commands:
for line in str(command).split("\n"):
stagefile.write(str(line.rstrip()))
stagefile.write("\n")
stagefile.write("\n")
result = os.system(f'ex +"set syn=sh" +"norm gg=G" -cwq {file_path}')
if result != 0:
raise RunnerException(f"Failed running {filename} script formatting. Exit code {result}")
command = Command(f"bash -xe {file_path.as_posix()}")
return command
def _get_filename(self, label: str):
"""
Generate filename for script based on label
Args:
label: String with stage label
Returns:
String with script filename
"""
filename = label.replace(" ", "_").lower()
filename = f"{filename}.sh"
return filename
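# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Exports a tiny stage to a bash script and prints the command that would execute it.
# The export step re-indents the script with the `ex` editor, so it assumes `ex` is
# installed; the stage label and command below are placeholders.
if __name__ == "__main__":
    class EchoStage(Stage):
        label = "Echo Example"

    scripts_dir = pathlib.Path("runner_scripts")  # placeholder output directory
    scripts_dir.mkdir(parents=True, exist_ok=True)
    exporter = CommandsExporter(scripts_dir=scripts_dir)
    command = exporter.export(EchoStage(commands=["echo 'hello from the runner'"]))
    print(command)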
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/exporter.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from enum import Enum
from typing import Any, Dict, List
import yaml
class CustomDumper(yaml.Dumper):
"""
    Custom YAML dumper to avoid creating aliases
"""
def ignore_aliases(self, data: Dict) -> bool:
return True
class Paths:
"""
Paths mapping inside Triton Container
"""
MODEL_REPOSITORY_PATH = "/mnt/triton-models"
LIBRARIES_PATH = "/mnt/libs"
class Framework(Enum):
"""
Supported frameworks
"""
TensorFlow1 = "TensorFlow1"
TensorFlow2 = "TensorFlow2"
PyTorch = "PyTorch"
class Command:
"""Represents wrapper of raw string command"""
def __init__(self, data: str):
"""
Store command data
Args:
data: string with bash commands to execute
"""
self._data = data
def __str__(self) -> str:
"""
String object representation
Returns:
String
"""
return self._data
@dataclasses.dataclass
class Measurement:
offline_batch_sizes: List[int]
offline_concurrency: List[int]
online_batch_sizes: List[int]
online_concurrency: List[int]
min_shapes_batch: int
max_shapes_batch: int
opt_shapes_batch: int
class DataObject:
"""
Data object representation handling recursive transformation from object to dict
"""
READ_ONLY = set()
def to_dict(self) -> Dict:
"""
Represent object as dictionary
Returns:
Dict
"""
data = {}
filtered_data = {key: value for key, value in self.__dict__.items() if key not in self.READ_ONLY}
for key, value in filtered_data.items():
data[key] = self._convert_value(value)
return data
def _convert_value(self, value: Any) -> Any:
"""
Convert value based on its type
Args:
value: variable to convert
Returns:
Converted object
"""
if isinstance(value, DataObject):
value = value.to_dict()
elif isinstance(value, dict):
value = self._from_dict(value)
elif isinstance(value, list):
value = self._from_list(value)
elif isinstance(value, Enum):
value = value.value
elif isinstance(value, pathlib.Path):
value = value.as_posix()
return value
def _from_dict(self, values: Dict) -> Any:
"""
Convert dictionary values
Args:
values: dictionary with values
Returns:
Any
"""
data = {}
for key, value in values.items():
data[key] = self._convert_value(value)
return data
def _from_list(self, values: List) -> Any:
"""
Convert list of values
Args:
values: list with values
Returns:
Any
"""
items = []
for value in values:
item = self._convert_value(value)
items.append(item)
return items
AVAILABLE_FRAMEWORKS = [f.value for f in Framework]
class Batching(Enum):
DISABLED = "disabled"
STATIC = "static"
DYNAMIC = "dynamic"
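# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# Demonstrates how DataObject.to_dict recursively converts nested data objects,
# enums and paths into plain values. The Example* classes exist only for this demo.
if __name__ == "__main__":
    class ExampleArtifact(DataObject):
        def __init__(self, path: pathlib.Path):
            self.path = path

    class ExampleRecord(DataObject):
        def __init__(self, name: str, batching: Batching, artifacts: List[ExampleArtifact]):
            self.name = name
            self.batching = batching
            self.artifacts = artifacts

    record = ExampleRecord(
        name="example",
        batching=Batching.DYNAMIC,
        artifacts=[ExampleArtifact(path=pathlib.Path("/tmp/example_model.onnx"))],
    )
    print(yaml.dump(record.to_dict(), Dumper=CustomDumper))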
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/core.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pathlib
import coloredlogs
class Logger(logging.Logger):
def __init__(self, name, level=logging.NOTSET):
super().__init__(name, level=level)
self._file_path = None
def initialize(self, file_path: pathlib.Path):
self._file_path = file_path
def write(self, log: str):
if not self._file_path:
return
with open(self._file_path, "+a") as file:
file.write(log)
LOGGER = Logger("runner")
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(format=log_format)
coloredlogs.install(
level=logging.INFO,
fmt=log_format,
logger=LOGGER,
field_styles={
"asctime": {"color": "green"},
"hostname": {"color": "magenta"},
"levelname": {"bold": True, "color": "blue"},
"name": {"color": "blue"},
"programname": {"color": "cyan"},
"username": {"color": "yellow"},
},
reconfigure=True,
)
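# --- Illustrative usage sketch (editor addition, not part of the original module) ---
# The runner points the logger at a file and then mirrors selected raw output into
# that file through LOGGER.write. The log file name below is a placeholder.
if __name__ == "__main__":
    example_log_file = pathlib.Path("runner_example.log")  # placeholder file name
    LOGGER.initialize(file_path=example_log_file)
    LOGGER.info("Runner logger configured")
    LOGGER.write("raw line captured from a subprocess\n")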
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/logger.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Type
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .config import Config
from .executor import Executor
from .finalizer import Finalizer
from .maintainer import Maintainer
from .pipeline import Pipeline
from .preparer import Preparer
from .runner import Runner
class RunnerProxy:
"""
Runner proxy to configure original runner
"""
maintainer_cls: Type[Maintainer] = None
executor_cls: Type[Executor] = None
preparer_cls: Type[Preparer] = None
finalizer_cls: Type[Finalizer] = None
def __init__(self, config: Config, pipeline: Pipeline, devices: List[str]):
"""
RunnerProxy constructor
Args:
config: Config object
pipeline: Pipeline to evaluate
devices: List of devices to use for tests
"""
self._runner = Runner(
config=config,
pipeline=pipeline,
devices=devices,
maintainer_cls=self.maintainer_cls,
executor_cls=self.executor_cls,
preparer_cls=self.preparer_cls,
finalizer_cls=self.finalizer_cls,
)
def start(self) -> None:
"""
Runner interface
"""
self._runner.start()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/runner_proxy.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import List, Optional, Tuple, Union
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
class ResultsType:
"""
Results types generated by runner
"""
TRITON_PERFORMANCE_OFFLINE = "triton_performance_offline"
TRITON_PERFORMANCE_ONLINE = "triton_performance_online"
class Stage:
"""
Stage definition
"""
label: str
commands: List[Command]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
commands: Union[Tuple[str, ...], List[str]],
result_path: Optional[str] = None,
result_type: Optional[str] = None,
):
"""
Args:
commands: List or Tuple of commands provided as raw string
result_path: Path to results file generated by stage
result_type: Type of results generated by stage
"""
if type(commands) not in [tuple, list]:
raise ValueError("""Incorrect type of commands list. Please, provide list of commands as tuple.""")
self.commands = list(map(lambda command: Command(data=command), commands))
self.result_path = result_path
self.result_type = result_type
class ExportStage(Stage):
label = "Export Model"
class ConversionStage(Stage):
label = "Convert Model"
class DeployStage(Stage):
label = "Deploy Model"
class CorrectnessStage(Stage):
label = "Model Correctness"
class TritonPreparePerformanceProfilingDataStage(Stage):
label = "Prepare Triton Profiling Data"
class TritonPerformanceOfflineStage(Stage):
label = "Triton Performance Offline"
class TritonPerformanceOnlineStage(Stage):
label = "Triton Performance Online"
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/stages.py |
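# Illustrative sketch (not repository code): a stage is built from raw shell command
# strings plus an optional results file. The script name, flags and paths below are
# made up for the example; real commands are defined by the pipeline configuration.
from triton.runner.stages import ResultsType, TritonPerformanceOfflineStage

stage = TritonPerformanceOfflineStage(
    commands=(
        r"python3 run_offline_perf.py --result-path ${SHARED_DIR}/triton_performance_offline.csv",
    ),
    result_path="${SHARED_DIR}/triton_performance_offline.csv",
    result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
)

print(stage.label)          # "Triton Performance Offline"
print(len(stage.commands))  # raw strings are wrapped into Command objects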
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dataclasses
import pathlib
from datetime import datetime
from typing import Any, Dict, Optional
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import DataObject
class ExperimentStatus(object):
"""
Experiment status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class StageStatus:
"""
Stages status flags object
"""
SUCCEED = "Succeed"
FAILED = "Failed"
class Stage(DataObject):
"""
Stage data object
"""
name: str
status: str
started_at: Optional[int]
ended_at: Optional[int]
result_path: Optional[str]
result_type: Optional[str]
def __init__(
self,
name: str,
result_path: Optional[str],
result_type: Optional[str],
status: str = StageStatus.FAILED,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
):
"""
Args:
name: name of stage
result_path: path where results file is stored
result_type: type of results
status: success/fail status
started_at: time when stage has started
ended_at: time when stage has ended
"""
self.name = name
self.status = status
self.started_at = started_at
self.ended_at = ended_at
self.result_path = result_path
self.result_type = result_type
def start(self) -> None:
"""
Update stage execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update stage execution info at end
Returns:
None
"""
self.status = StageStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
class Experiment(DataObject):
"""
Experiment data object
"""
experiment_id: int
parameters: Dict
stages: Dict[str, Stage]
results: Dict[str, str]
status: str
checkpoint_variant: str
started_at: Optional[int]
ended_at: Optional[int]
def __init__(
self,
experiment_id: int,
parameters: Dict,
stages: Dict[str, Stage],
results: Dict[str, str],
checkpoint: str,
started_at: Optional[int] = None,
ended_at: Optional[int] = None,
status: str = ExperimentStatus.FAILED,
):
"""
Args:
experiment_id: experiment identifier
parameters: dictionary with experiment configuration
stages: dictionary with stages run in experiment
            results: mapping between result types and the locations where they are stored
started_at: time when experiment has started
ended_at: time when experiment has ended
status: experiment success/fail information
checkpoint: Checkpoint used for experiment
"""
self.experiment_id = experiment_id
self.started_at = started_at
self.ended_at = ended_at
self.parameters = parameters
self.stages = stages
self.status = status
self.checkpoint = checkpoint
self.results = results
self.results_dir = f"experiment_{experiment_id}"
def start(self) -> None:
"""
Update experiment execution info at start
Returns:
None
"""
self.started_at = int(datetime.utcnow().timestamp())
def end(self) -> None:
"""
Update experiment execution info at end
Returns:
None
"""
self.status = ExperimentStatus.SUCCEED
self.ended_at = int(datetime.utcnow().timestamp())
@dataclasses.dataclass
class Status:
state: ExperimentStatus
message: str
@dataclasses.dataclass
class ExperimentResult:
"""
Experiment result object
"""
status: Status
experiment: Experiment
results: Dict[str, pathlib.Path]
payload: Dict[str, Any] = dataclasses.field(default_factory=dict)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/experiment.py |
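# Illustrative sketch (not repository code): how the Experiment/Stage data objects
# above track execution state. Parameter values and the checkpoint name are made up.
from triton.runner.experiment import Experiment, ExperimentStatus, Stage, StageStatus

stages = {
    "Convert Model": Stage(name="Convert Model", result_path=None, result_type=None),
}
experiment = Experiment(
    experiment_id=1,
    parameters={"precision": "fp16", "max_batch_size": 64},
    stages=stages,
    results={},
    checkpoint="0.65ms",  # hypothetical checkpoint variant
)

experiment.start()                    # records started_at as a UTC timestamp
stages["Convert Model"].start()
stages["Convert Model"].end()         # flips the stage status to Succeed
experiment.end()                      # flips the experiment status to Succeed

assert stages["Convert Model"].status == StageStatus.SUCCEED
assert experiment.status == ExperimentStatus.SUCCEED
assert experiment.results_dir == "experiment_1"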
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import json
import pathlib
from typing import Dict, List, Union
# method from PEP-366 to support relative import in executed modules
import yaml
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.report import save_results, sort_results
from .logger import LOGGER
def save_summary(result_type: str, results: List, summary_dir: pathlib.Path) -> None:
"""
Create file with summary for results of given type
Args:
result_type: Type of results to dump
results: Results data
summary_dir: Path where results should be stored
Returns:
None
"""
if len(results) == 0:
LOGGER.warning(f"No {result_type} results found.")
return
results = sort_results(results=results)
kind_file = summary_dir / f"{result_type}_summary.csv"
save_results(filename=kind_file.as_posix(), data=results, formatted=True)
LOGGER.info(f"Summary for {result_type} stored in {kind_file}")
def load_results(*, results_path: Union[pathlib.Path, str], result_type: str, parameters: Dict) -> List:
"""
    Load results
Args:
results_path: Path to file or directory from which data should be read
result_type: type of results
parameters: Parameters used in experiment which generated results
Returns:
List of result rows
"""
LOGGER.debug(f"Loading {result_type} from {results_path} for summary")
results_path = pathlib.Path(results_path)
if results_path.is_file():
files = [results_path]
elif results_path.is_dir():
files = list(results_path.iterdir())
else:
LOGGER.debug(f"Unable to load file: {results_path}. Generating empty rows.")
data = [{}]
return data
if any([file.name.endswith(".ckpt") for file in files]):
model_analyzer_metrics = results_path / "metrics-model-inference.csv"
files = [model_analyzer_metrics]
else:
files = [file for file in files if file.name.endswith(".csv")]
results = list()
parameters_cpy = {key: value for key, value in parameters.items() if key != "batch"}
for file in files:
if file.suffix == ".csv":
data = _generate_data_from_csv(file=file)
elif file.suffix == ".json":
data = _generate_data_from_json(file=file)
elif file.suffix == ".yaml":
data = _generate_data_from_yaml(file=file)
else:
raise ValueError(f"Unsupported file extension: {file.suffix}")
for item in data:
result = {**parameters_cpy, **item}
results.append(result)
LOGGER.debug(f"Loading done. Collected {len(results)} results.")
return results
def _normalize_key(*, key: str) -> str:
"""
Normalize key
Args:
key: Key to normalize
Returns:
Normalized string
"""
key = "_".join(key.split(sep=" "))
key = key.lower()
return key
def _normalize_keys(*, data: Dict) -> Dict:
"""
Normalize keys in dictionary
Args:
data: Dictionary to normalize
Returns:
Normalized dictionary
"""
keys = {_normalize_key(key=key): value for key, value in data.items()}
return keys
def _generate_data_from_csv(*, file: Union[pathlib.Path, str]) -> List[Dict]:
"""
Generate result rows from CSV file
Args:
file: CSV file path
Returns:
List of rows
"""
LOGGER.debug(f"Reading data from {file}")
filtered_rows: List[Dict] = []
with open(file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.debug("done")
return filtered_rows
def _generate_data_from_json(file: pathlib.Path) -> List[Dict]:
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as json_file:
file_data = json.load(json_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
def _generate_data_from_yaml(file: pathlib.Path) -> List[Dict]:
LOGGER.info(f"Reading data from {file}")
filtered_rows: List[Dict] = list()
with open(file, "r") as yaml_file:
file_data = yaml.safe_load(yaml_file)
if not isinstance(file_data, list):
file_data = [file_data]
for r in file_data:
r = _normalize_keys(data=r)
filtered_row = {k: v for k, v in r.items()}
filtered_rows.append(filtered_row)
LOGGER.info("done")
return filtered_rows
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/summary.py |
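# Illustrative sketch (not repository code): stage results are merged with the
# experiment parameters and written to a per-type summary CSV. The paths below are
# made up; load_results() falls back to a single empty row if the file is missing.
import pathlib

from triton.runner.stages import ResultsType
from triton.runner.summary import load_results, save_summary

rows = load_results(
    results_path="runner_workspace/experiment_1/triton_performance_offline.csv",  # hypothetical
    result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
    parameters={"precision": "fp16", "max_batch_size": 64},
)

summary_dir = pathlib.Path("runner_workspace/summary")
summary_dir.mkdir(parents=True, exist_ok=True)

# Writes triton_performance_offline_summary.csv inside summary_dir.
save_summary(
    result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
    results=rows,
    summary_dir=summary_dir,
)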
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import shutil
import subprocess
from enum import Enum
from typing import Any
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .core import Command
from .exceptions import RunnerException
from .logger import LOGGER
def format_env_key(s: str):
"""
    Format environment variable key
Args:
s: String to format
Returns:
Upper cased string
"""
return s.upper()
def format_env_value(value: Any) -> str:
"""
Format environment variable value
Args:
value: value to be formatted
Returns:
Formatted value as a string
"""
value = value if not isinstance(value, Enum) else value.value
value = value if type(value) not in [list, tuple] else ",".join(map(str, value))
value = str(value)
return value
def get_result_path(result_path: str) -> str:
"""
    Resolve the result path when it is passed in different variants, e.g. with an environment variable in the path
Args:
result_path: Path to result file
Returns:
str
"""
for env_var, val in os.environ.items():
result_path = result_path.replace(f"${{{env_var}}}", val)
if result_path.startswith("/"):
return result_path
if result_path.startswith("./"):
result_path = result_path[2:]
return result_path
def clean_directory(directory: pathlib.Path) -> None:
"""
Remove all files and directories from directory
Args:
directory: Path to directory which should be cleaned
Returns:
None
"""
LOGGER.debug(f"Cleaning {directory.as_posix()}")
if not directory.is_dir():
LOGGER.warning(f"{directory.name} is not a directory.")
return
for item in os.listdir(directory):
item_path = directory / item
if item_path.is_dir():
LOGGER.debug(f"Remove dir {item_path.as_posix()}")
shutil.rmtree(item_path.as_posix())
elif item_path.is_file():
LOGGER.debug(f"Remove file: {item_path.as_posix()}")
item_path.unlink()
else:
LOGGER.warning(f"Cannot remove item {item_path.name}. Not a file or directory.")
def exec_command(command: Command) -> None:
"""
Execute command
Args:
command: Command to run
"""
try:
process = subprocess.Popen(
[str(command)],
shell=True,
start_new_session=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
encoding="utf-8",
)
while True:
output = process.stdout.readline()
if output == "" and process.poll() is not None:
break
if output:
print(output.rstrip())
LOGGER.write(output)
result = process.poll()
if result != 0:
raise RunnerException(f"Command {command} failed with exit status: {result}")
except subprocess.CalledProcessError as e:
raise RunnerException(f"Running command {e.cmd} failed with exit status {e.returncode} : {e.output}")
def measurement_env_params(measurement):
params = {}
for key, value in measurement.__dict__.items():
param = f"{measurement.__class__.__name__.upper()}_{key.upper()}"
params[param] = " ".join(list(map(lambda val: str(val), value))) if isinstance(value, list) else int(value)
return params
def offline_performance_configuration(steps, max_batch_size):
step = int(max_batch_size) // steps
batch_sizes = [step * idx for idx in range(1, steps + 1)]
concurrency = [1]
return batch_sizes, concurrency
def online_performance_configuration(steps, max_batch_size, number_of_model_instances):
max_total_requests = 2 * int(max_batch_size) * int(number_of_model_instances)
max_concurrency = min(128, max_total_requests)
step = max(1, max_concurrency // steps)
min_concurrency = step
batch_sizes = [max(1, max_total_requests // max_concurrency)]
concurrency = list(range(min_concurrency, max_concurrency + 1, step))
return batch_sizes, concurrency
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/utils.py |
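# Illustrative worked example (not repository code) of the two configuration helpers
# defined above, with made-up numbers.
from triton.runner.utils import offline_performance_configuration, online_performance_configuration

# Offline: the batch size is swept in `steps` equal increments up to max_batch_size,
# always with a single concurrent client.
batch_sizes, concurrency = offline_performance_configuration(steps=4, max_batch_size=64)
assert batch_sizes == [16, 32, 48, 64]
assert concurrency == [1]

# Online: the batch size is fixed and the number of concurrent clients is swept.
# With max_batch_size=64 and 2 model instances, max_total_requests = 256 and the
# concurrency sweep is capped at 128, in steps of 32.
batch_sizes, concurrency = online_performance_configuration(
    steps=4, max_batch_size=64, number_of_model_instances=2
)
assert batch_sizes == [2]
assert concurrency == [32, 64, 96, 128]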
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Dict, Tuple
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .stages import (
ConversionStage,
DeployStage,
ExportStage,
ResultsType,
TritonPerformanceOfflineStage,
TritonPerformanceOnlineStage,
TritonPreparePerformanceProfilingDataStage,
)
class Pipeline:
"""
    Definition of stages that have to be executed before and during experiments
"""
# Stages to execute as part of single experiment
_experiment_stages = [
ExportStage.label,
ConversionStage.label,
DeployStage.label,
TritonPreparePerformanceProfilingDataStage.label,
TritonPerformanceOfflineStage.label,
TritonPerformanceOnlineStage.label,
]
def __init__(self):
"""
Initialize pipeline
"""
self._stages: Dict = dict()
def model_export(self, commands: Tuple[str, ...]) -> None:
"""
Model export stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ExportStage(commands=commands)
self._stages[stage.label] = stage
def model_conversion(self, commands: Tuple[str, ...]) -> None:
"""
Model conversion stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = ConversionStage(commands=commands)
self._stages[stage.label] = stage
def model_deploy(self, commands: Tuple[str, ...]) -> None:
"""
Model deployment stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = DeployStage(commands=commands)
self._stages[stage.label] = stage
def triton_prepare_performance_profiling_data(self, commands: Tuple[str, ...]) -> None:
"""
Model profiling data creation stage
Args:
commands: Commands to be executed as part of stage
Returns:
None
"""
stage = TritonPreparePerformanceProfilingDataStage(commands=commands)
self._stages[stage.label] = stage
def triton_performance_offline_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance offline test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOfflineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_OFFLINE,
)
self._stages[stage.label] = stage
def triton_performance_online_tests(self, commands: Tuple[str, ...], result_path: str) -> None:
"""
Model performance online test stage
Args:
commands: Commands to be executed as part of stage
result_path: Path where results file is stored
Returns:
None
"""
stage = TritonPerformanceOnlineStage(
commands=commands,
result_path=result_path,
result_type=ResultsType.TRITON_PERFORMANCE_ONLINE,
)
self._stages[stage.label] = stage
def stages(self):
"""
Generate stages which should be run per experiment
Returns:
            Generator with stage objects
"""
for stage_name in self._experiment_stages:
stage = self._stages.get(stage_name)
if not stage:
continue
yield stage
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/pipeline.py |
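# Illustrative sketch (not repository code): a Pipeline is populated with per-stage
# command tuples and later iterated by the executor in the fixed experiment order.
# The command strings and result path below are made-up placeholders.
from triton.runner.pipeline import Pipeline

pipeline = Pipeline()
pipeline.model_export(commands=(r"python3 export_model.py --output ${SHARED_DIR}/exported_model.onnx",))
pipeline.model_conversion(commands=(r"python3 convert_model.py --output-path ${SHARED_DIR}/converted_model",))
pipeline.model_deploy(commands=(r"python3 deploy_model.py --model-repository ${MODEL_REPOSITORY_PATH}",))
pipeline.triton_performance_offline_tests(
    commands=(r"python3 run_offline_perf.py --result-path ${SHARED_DIR}/offline.csv",),
    result_path="${SHARED_DIR}/offline.csv",
)

# stages() yields only the registered stages, in _experiment_stages order:
# Export Model, Convert Model, Deploy Model, Triton Performance Offline.
for stage in pipeline.stages():
    print(stage.label)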
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class RunnerException(Exception):
"""
Runner Exception
"""
def __init__(self, message: str):
self._message = message
def __str__(self):
return self._message
@property
def message(self):
"""Get the exception message.
Returns
-------
str
The message associated with this exception, or None if no message.
"""
return self._message
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/exceptions.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import pathlib
import shutil
import traceback
from typing import Dict, List, Optional
from colorama import Fore
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..deployment_toolkit.core import BackendAccelerator, Precision
from .core import Batching, Measurement, Paths
from .exceptions import RunnerException
from .experiment import ExperimentResult, ExperimentStatus, Status
from .exporter import CommandsExporter
from .logger import LOGGER
from .maintainer import Container, Maintainer
from .pipeline import Pipeline
from .stages import Stage
from .task import Experiment, Task
from .triton import Triton
from .utils import (
clean_directory,
exec_command,
format_env_key,
format_env_value,
get_result_path,
measurement_env_params,
offline_performance_configuration,
online_performance_configuration,
)
class Executor:
"""
Experiments executor
"""
def __init__(
self,
workspace: pathlib.Path,
maintainer: Maintainer,
pipeline: Pipeline,
devices: List[str] = None,
):
"""
Initialize experiments executor
Args:
workspace: Path to workspace to store artifacts
maintainer: maintainer for running commands
pipeline: pipeline definition
devices: List of devices on which Triton Inference Server will be executed
"""
self._maintainer = maintainer
self._pipeline = pipeline
self._devices = devices or ["0"]
self._workspace = workspace
self._executor_workspace = workspace / "executor"
self._shared_dir = self._executor_workspace / "shared"
self._triton_models_repository_dir = self._executor_workspace / "triton_models"
self._scripts_dir = self._executor_workspace / "scripts"
self._libraries_dir = self._executor_workspace / "libs"
self._exporter = CommandsExporter(self._scripts_dir)
self._triton_container: Optional[Container] = None
def start(self, task: Task):
"""
Process the task and execute experiments.
"""
self._create_dirs()
total_experiment = len(task.experiments)
LOGGER.info(f"Total experiments to verify: {total_experiment}")
for idx, experiment in enumerate(task.experiments, start=1):
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Started ================{Fore.RESET}" # noqa: B950
)
results = {}
environment = self._prepare_environment(task, experiment)
LOGGER.info("Experiment details")
LOGGER.info(json.dumps(environment, indent=4))
self._clean_experiment_artifacts(idx, total_experiment)
self._create_experiment_results_dir(task, experiment)
experiment.start()
LOGGER.info("Running Triton Servers:")
log_file = self._workspace / task.logs_dir / f"triton-server-experiment-{idx}.log"
self._triton_container = self._triton_server_container(
triton_container_image=task.triton_container_image,
framework=task.framework,
accelerator=experiment.parameters.get("backend_accelerator")
or experiment.parameters.get("accelerator"),
precision=experiment.parameters["precision"],
custom_library=bool(task.triton_custom_operations is not None),
load_model_method=task.triton_load_model_method,
log_file=log_file,
)
try:
self._triton_container.start()
for stage in self._pipeline.stages():
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Started ================{Fore.RESET}" # noqa: B950
)
experiment_stage = experiment.stages[stage.label]
experiment_stage.start()
is_ok = self._run_stage(stage=stage)
if not is_ok:
LOGGER.error(f"Stage {stage.label} failed.")
break
self._save_results(task, experiment, stage.label, results)
experiment_stage.end()
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total_experiment}] ================ Stage {stage.label} Finished ================{Fore.RESET}" # noqa: B950
)
except Exception:
message = traceback.format_exc()
LOGGER.error(f"Error running experiment: {message}")
yield ExperimentResult(
status=Status(state=ExperimentStatus.FAILED, message=message),
experiment=experiment,
results=results,
)
finally:
self._triton_container.stop()
experiment.end()
LOGGER.info(
f"{Fore.CYAN}================ Experiment: {idx}/{total_experiment} Finished ================{Fore.RESET}" # noqa: B950
)
yield ExperimentResult(
status=Status(state=ExperimentStatus.SUCCEED, message="Experiment Succeed"),
experiment=experiment,
results=results,
)
def stop(self) -> None:
"""
Stop executor
Returns:
None
"""
if self._triton_container:
self._triton_container.stop()
def _prepare_environment(self, task: Task, experiment: Experiment) -> Dict:
"""
Prepare environment data and export it
Args:
            task: Task object with global execution configuration
            experiment: Experiment data
Returns:
Dictionary with environment data
"""
environment = {
"MODEL_NAME": task.model_name,
"ENSEMBLE_MODEL_NAME": task.ensemble_model_name,
"FRAMEWORK": task.framework,
"SHARED_DIR": self._shared_dir.as_posix(),
"MODEL_REPOSITORY_PATH": self._triton_models_repository_dir.as_posix(),
"TRITON_SERVER_URL": "localhost",
"TRITON_LOAD_MODEL_METHOD": task.triton_load_model_method,
"PERFORMANCE_TOOL": task.performance_tool.value,
"MODEL_BATCHING": task.batching,
}
measurement_params = self._measurement_params(
max_batch_size=experiment.parameters["max_batch_size"],
number_of_model_instances=experiment.parameters["number_of_model_instances"],
batching=task.batching,
steps_online=task.measurement_steps_online,
steps_offline=task.measurement_steps_offline,
)
environment = {
**environment,
**measurement_params,
}
if experiment.checkpoint:
environment["CHECKPOINT_DIR"] = task.checkpoints[experiment.checkpoint].path.as_posix()
if task.datasets_dir:
environment["DATASETS_DIR"] = task.datasets_dir.as_posix()
for key, value in experiment.parameters.items():
key = format_env_key(key)
value = format_env_value(value)
environment[key] = value
for key, value in environment.items():
os.environ[key] = str(value)
return environment
def _triton_server_container(
self,
triton_container_image: str,
framework: str,
load_model_method: str,
accelerator: str,
precision: str,
log_file: pathlib.Path,
custom_library: bool,
) -> Container:
"""
Create Triton Inference Server container for experiment
Args:
triton_container_image: Triton Inference Server container image
framework: Framework used to run model
accelerator: Accelerator used for experiment
precision: Precision used for experiment
load_model_method: Configure how Triton will load model
log_file: File where Triton logs are stored
Returns:
Container object
"""
volumes = {
self._triton_models_repository_dir: {"bind": Paths.MODEL_REPOSITORY_PATH, "mode": "rw"},
self._libraries_dir: {"bind": Paths.LIBRARIES_PATH, "mode": "rw"},
}
environment = {
"MODEL_REPOSITORY_PATH": Paths.MODEL_REPOSITORY_PATH,
"LIBRARIES_PATH": Paths.LIBRARIES_PATH,
"TRITON_LOAD_MODEL_METHOD": load_model_method,
}
if custom_library:
library_path = Triton.library_path(framework=framework)
environment["LD_LIBRARY_PATH"] = f"{library_path}:${{LD_LIBRARY_PATH}}"
environment["LD_PRELOAD"] = Triton.custom_library_path_remote()
if accelerator == BackendAccelerator.TRT.value and precision == Precision.FP16.value:
environment["ORT_TENSORRT_FP16_ENABLE"] = 1
strict_mode = False
command = Triton.command(
framework=framework,
repository_path=Paths.MODEL_REPOSITORY_PATH,
strict_mode=strict_mode,
)
command = f' bash -c "{command}"'
container = self._maintainer.triton_container(
command=command,
image=triton_container_image,
devices=self._devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
return container
def _save_results(self, task: Task, experiment: Experiment, stage_name: str, results: Dict) -> None:
"""
Update results for stage
Args:
task: Task object
experiment: Experiment for which stage has to be updated
stage_name: Name of stage
results: Results path mapping
Returns:
None
"""
stage = experiment.stages[stage_name]
if not stage.result_path:
LOGGER.debug(f"No results file to copy for {stage.name}")
return
if not stage.result_type:
LOGGER.debug(f"No results type provided for {stage.name}")
return
os.environ["SHARED_DIR"] = self._shared_dir.as_posix()
result_path = get_result_path(result_path=stage.result_path)
result_path = pathlib.Path(result_path)
if not result_path.is_file() and not result_path.is_dir():
raise RunnerException(f"Results file {result_path} not found.")
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
LOGGER.info(f"Saving {stage.result_type} to {experiment_dir}")
if result_path.is_dir():
dst_path = experiment_dir / stage.result_type
shutil.copytree(result_path, dst_path)
elif result_path.is_file():
suffix = result_path.suffix
dst_path = experiment_dir / f"{stage.result_type}{suffix}"
shutil.copy(result_path, dst_path)
else:
raise RunnerException(f"Result not found {result_path}")
LOGGER.info("Done")
results[stage.result_type] = dst_path
def _create_dirs(self) -> None:
"""
Create directories used to store artifacts and final results
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Started ================{Fore.RESET}"
) # noqa: B950
if self._executor_workspace.is_dir():
LOGGER.info(f"Removing previous executor workspace: {self._executor_workspace}")
shutil.rmtree(self._executor_workspace)
for directory in [
self._libraries_dir,
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
directory.mkdir(parents=True, exist_ok=True)
LOGGER.info(f"Directory {directory.name} created.")
LOGGER.info(
f"{Fore.GREEN}================ Creating Artifacts Directories Finished ================{Fore.RESET}"
)
def _clean_experiment_artifacts(self, idx: int, total: int) -> None:
"""
Clean artifacts stored between experiments
Returns:
None
"""
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Started ================{Fore.RESET}" # noqa: B950
)
for directory in [
self._shared_dir,
self._scripts_dir,
self._triton_models_repository_dir,
]:
clean_directory(directory)
LOGGER.info(f"Location {directory} cleaned.")
LOGGER.info(
f"{Fore.GREEN}[Experiment: {idx}/{total}] ================ Cleanup Experiment Data Finished ================{Fore.RESET}" # noqa: B950
)
def _create_experiment_results_dir(self, task: Task, experiment: Experiment):
"""
Create result directory for experiment
Returns:
"""
experiment_dir = self._workspace / task.results_dir / experiment.results_dir
experiment_dir.mkdir(parents=True, exist_ok=True)
def _prepare_triton_custom_operations(self, task: Task) -> None:
"""
Prepare Triton Server custom operations library
Returns:
None
"""
if task.triton_custom_operations:
target_library_path = Triton.custom_library_path_local(self._libraries_dir)
target_library_path_dir = target_library_path.parent
target_library_path_dir.mkdir(parents=True, exist_ok=True)
shutil.copy(task.triton_custom_operations, target_library_path)
def _run_stage(self, stage: Stage) -> bool:
"""
Run single stage commands
Args:
stage: Stage object with defined commands
Returns:
True on success, False otherwise
"""
try:
command = self._exporter.export(stage=stage)
exec_command(command)
except RunnerException:
return False
return True
def _measurement_params(
self,
max_batch_size: int,
number_of_model_instances: int,
steps_offline: int,
steps_online: int,
batching: str,
):
max_batch_size = int(max_batch_size)
if batching == Batching.DISABLED.value:
LOGGER.debug("Model does not support batching.")
measurement = Measurement(
offline_batch_sizes=[1],
offline_concurrency=[1],
online_batch_sizes=[1],
online_concurrency=[1],
min_shapes_batch=max_batch_size,
opt_shapes_batch=max_batch_size,
max_shapes_batch=max_batch_size,
)
return measurement
offline_batch_sizes, offline_concurrency = offline_performance_configuration(
steps=steps_offline,
max_batch_size=max_batch_size,
)
if batching == Batching.DYNAMIC.value:
online_batch_sizes, online_concurrency = online_performance_configuration(
steps=steps_online,
max_batch_size=max_batch_size,
number_of_model_instances=number_of_model_instances,
)
else:
online_batch_sizes, online_concurrency = offline_batch_sizes, offline_concurrency
min_batch_size = min(min(offline_batch_sizes), min(online_batch_sizes))
measurement = Measurement(
offline_batch_sizes=offline_batch_sizes,
offline_concurrency=offline_concurrency,
online_batch_sizes=online_batch_sizes,
online_concurrency=online_concurrency,
min_shapes_batch=min_batch_size,
opt_shapes_batch=max_batch_size,
max_shapes_batch=max_batch_size,
)
return measurement_env_params(measurement)
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/executor.py |
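# Illustrative sketch (not repository code): wiring the Executor defined above.
# It assumes a fully populated Task object (model name, experiments, checkpoints)
# has been prepared elsewhere; the workspace path and device list are made up.
import pathlib

from triton.runner.executor import Executor
from triton.runner.experiment import ExperimentStatus
from triton.runner.maintainer import DockerMaintainer
from triton.runner.pipeline import Pipeline


def run_experiments(task, pipeline: Pipeline) -> None:
    """Run every experiment described by `task` and report failures."""
    executor = Executor(
        workspace=pathlib.Path("runner_workspace"),  # hypothetical workspace directory
        maintainer=DockerMaintainer(),
        pipeline=pipeline,
        devices=["0"],                               # GPU ids exposed to the Triton container
    )
    try:
        # start() is a generator: it yields one ExperimentResult per experiment.
        for result in executor.start(task):
            if result.status.state == ExperimentStatus.FAILED:
                print(f"Experiment {result.experiment.experiment_id} failed: {result.status.message}")
    finally:
        executor.stop()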
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .docker.maintainer import DockerMaintainer
class MaintainerFactory:
@staticmethod
def create_docker_maintainer():
return DockerMaintainer()
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/maintainer_factory.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .container import Container # noqa: F401
from .docker.maintainer import DockerMaintainer # noqa: F401
from .maintainer import Maintainer # noqa: F401
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import Any
class Container(abc.ABC):
def __init__(self, name: str):
self.name = name
self._container = None
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> Any:
"""
Run command inside container
Args:
command: command to execute
Returns:
Any
"""
pass
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/container.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
from typing import Any, Dict, List, Optional, Union
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from .container import Container
class Maintainer(abc.ABC):
@abc.abstractmethod
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> Container:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
            volumes: Volumes mapping
            environment: Environment variables set in the container
            log_file: File path where server logs are saved
Returns:
Container object
"""
pass
@abc.abstractmethod
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
pass
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/maintainer.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class ContainerNotStarted(Exception):
pass
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/exceptions.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/docker/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import pathlib
import docker
from docker.models.containers import ExecResult
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ..container import Container
class DockerContainer(Container):
def __init__(self, name: str):
super().__init__(name)
self._container = None
self._docker_client = docker.from_env()
self._docker_api_client = docker.APIClient()
@abc.abstractmethod
def start(self):
"""
Start container
"""
pass
@abc.abstractmethod
def stop(self):
"""
Stop container
"""
@abc.abstractmethod
def run(self, command: str) -> ExecResult:
"""
Run command inside container
Args:
command: command to execute
Returns:
ExecResult
"""
pass
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/docker/container.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from typing import Any, Dict, List, Optional, Union
import docker
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ...logger import LOGGER
from ..maintainer import Maintainer
from .container import DockerContainer
from .containers import TritonServerContainer
class DockerMaintainer(Maintainer):
def triton_container(
self, command: str, image: str, devices: List, volumes: Dict, environment: Dict, log_file: Union[pathlib.Path, str]
) -> DockerContainer:
"""
Return triton container
Args:
command: Triton Server command that has to be executed
image: Container image
            devices: List of device ids which have to be available in the container
            volumes: Volumes mapping
            environment: Environment variables set in the container
            log_file: File path where server logs are saved
Returns:
DockerContainer object
"""
return TritonServerContainer(
name="triton-server",
command=command,
image=image,
devices=devices,
volumes=volumes,
environment=environment,
log_file=log_file,
)
def build_image(
self,
*,
image_file_path: pathlib.Path,
image_name: str,
workdir_path: Optional[pathlib.Path] = None,
build_args: Optional[Dict[str, Any]] = None,
) -> None:
workdir_path = workdir_path or image_file_path.parent
build_args = build_args or {}
LOGGER.info(f"Building {image_name} docker image.")
LOGGER.debug(f" Using workdir: {workdir_path}")
LOGGER.debug(f" Dockerfile: {image_file_path}")
LOGGER.debug(f" Build args: {build_args}")
build_logs = list()
try:
docker_client = docker.from_env()
_, build_logs = docker_client.images.build(
path=workdir_path.resolve().as_posix(),
dockerfile=image_file_path.resolve().as_posix(),
tag=image_name,
buildargs=build_args,
network_mode="host",
rm=True,
)
except docker.errors.BuildError as e:
build_logs = e.build_log
raise e
finally:
for chunk in build_logs:
log = chunk.get("stream")
if log:
LOGGER.debug(log.rstrip())
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/docker/maintainer.py |
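# Illustrative sketch (not repository code): building a server image through the
# DockerMaintainer above. The Dockerfile path, image tag and build argument are
# assumptions for the example.
import pathlib

from triton.runner.maintainer import DockerMaintainer

maintainer = DockerMaintainer()
maintainer.build_image(
    image_file_path=pathlib.Path("triton/Dockerfile"),   # hypothetical Dockerfile location
    image_name="gpunet-triton:latest",                   # hypothetical tag
    build_args={"FROM_IMAGE_NAME": "nvcr.io/nvidia/tritonserver:22.02-py3"},  # assumed base image
)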
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .triton_server_container import TritonServerContainer # noqa: F401
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/docker/containers/__init__.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import pathlib
from threading import Thread
from typing import Dict, Generator, Union
from docker.models.containers import ExecResult
from docker.types import DeviceRequest, Ulimit
if __name__ == "__main__" and __package__ is None:
__package__ = pathlib.Path(__file__).parent.name
from ....logger import LOGGER
from ...exceptions import ContainerNotStarted
from ..container import DockerContainer
class TritonServerContainer(DockerContainer):
def __init__(
self,
name: str,
command: str,
image: str,
volumes: Dict,
devices: Union[list, int],
environment: Dict,
log_file: Union[pathlib.Path, str],
network: str = "host",
shm_size: str = "1G",
):
"""
Initialize Triton Server Container
Args:
name: Container name
command: Triton Server command to exec on container start
image: Docker Image
volumes: Volumes to mount inside container
            devices: Devices which have to be visible in the container
environment: Environment variables
log_file: Path where logs should be saved
network: Network mode
shm_size: Shared memory size
"""
super().__init__(name)
self._image = image
self._command = command
self._volumes = volumes
self._devices = devices
self._environment = environment
self._network = network
self._shm_size = shm_size
self._triton_exec = None
self._logging_thread = None
self._log_file_path = pathlib.Path(log_file)
def start(self) -> None:
"""
Start Triton Server Container
"""
devices = [
DeviceRequest(capabilities=[["gpu"]], device_ids=self._devices),
]
LOGGER.info(f"Triton environment: {json.dumps(self._environment, indent=4)}")
LOGGER.info(f"Starting Triton container {self.name}.")
self._container = self._docker_client.containers.run(
image=self._image,
name=self.name,
device_requests=devices,
detach=True,
tty=True,
shm_size=self._shm_size,
ulimits=[
Ulimit(name="memlock", soft=-1, hard=-1),
Ulimit(name="stack", soft=67108864, hard=67108864),
],
volumes=self._volumes,
environment=self._environment,
network_mode=self._network,
auto_remove=True,
ipc_mode="host",
)
LOGGER.info("Triton command:")
LOGGER.info(f" {self._command}")
LOGGER.info(f"Starting Triton Server {self.name}.")
self._triton_exec = self._docker_api_client.exec_create(
container=self._container.id,
cmd=self._command,
)
stream_generator = self._docker_api_client.exec_start(exec_id=self._triton_exec["Id"], stream=True)
self._logging_thread = Thread(target=TritonServerContainer._logging, args=(self, stream_generator), daemon=True)
self._logging_thread.start()
def stop(self) -> None:
"""
Stop Triton Server Container and save logs to file
"""
if self._container is not None:
triton_result = self._docker_api_client.exec_inspect(self._triton_exec["Id"])
if triton_result.get("ExitCode") not in (0, None):
LOGGER.info(
f"Triton Inference Server instance {self.name} failed. Exit code: {triton_result.get('ExitCode')}"
)
LOGGER.info(f"Stopping triton server {self.name}.")
self._container.stop()
self._container = None
self._docker_client.close()
self._docker_api_client.close()
def run(self, command: str) -> ExecResult:
"""
Run command in container
Args:
command: Command to execute
Returns:
ExecResult
"""
if not self._container:
raise ContainerNotStarted("Triton Server Container is not running. Use .start() first.")
return self._container.exec_run(command)
def _logging(self, generator: Generator) -> None:
"""Triton logging thread for Triton Inference Server
Args:
generator (string generator): Triton log stream.
"""
with open(self._log_file_path, mode="w") as file:
try:
while True:
log = next(generator)
txt = log.decode("utf-8")
file.write(txt)
except StopIteration:
LOGGER.info(f"Saving Triton Inference Server {self.name} logs in {self._log_file_path}.")
| DeepLearningExamples-master | PyTorch/Classification/GPUNet/triton/runner/maintainer/docker/containers/triton_server_container.py |
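# Illustrative sketch (not repository code): driving the TritonServerContainer wrapper
# directly. The image tag, host paths and the tritonserver command line are assumptions
# for the example; normally the Executor builds these values from the Task.
import pathlib

from triton.runner.maintainer.docker.containers import TritonServerContainer

container = TritonServerContainer(
    name="triton-server",
    command='bash -c "tritonserver --model-repository=/mnt/triton-models"',
    image="nvcr.io/nvidia/tritonserver:22.02-py3",        # assumed image tag
    volumes={"/home/user/triton_models": {"bind": "/mnt/triton-models", "mode": "rw"}},
    devices=["0"],
    environment={"MODEL_REPOSITORY_PATH": "/mnt/triton-models"},
    log_file=pathlib.Path("triton-server.log"),
)

container.start()                                    # launches the container and the server exec
exit_code, output = container.run("nvidia-smi -L")   # run an arbitrary command inside
container.stop()                                     # stops the container and closes the clients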