python_code | repo_name | file_path |
---|---|---|
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ast import literal_eval
def parse_hifigan_args(parent, add_help=False):
"""
Parse model specific commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
hfg = parser.add_argument_group('HiFi-GAN generator parameters')
hfg.add_argument('--upsample_rates', default=[8, 8, 2, 2],
type=literal_eval_arg,
help='Upsample rates')
hfg.add_argument('--upsample_kernel_sizes', default=[16, 16, 4, 4],
type=literal_eval_arg,
help='Upsample kernel sizes')
hfg.add_argument('--upsample_initial_channel', default=512, type=int,
help='Upsample initial channel')
hfg.add_argument('--resblock', default='1', type=str,
help='Resblock module version')
hfg.add_argument('--resblock_kernel_sizes', default=[3, 7, 11],
type=literal_eval_arg,
help='Resblock kernel sizes')
hfg.add_argument('--resblock_dilation_sizes', type=literal_eval_arg,
default=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
help='Resblock dilation sizes')
hfg = parser.add_argument_group('HiFi-GAN discriminator parameters')
hfg.add_argument('--mpd_periods', default=[2, 3, 5, 7, 11],
type=literal_eval_arg,
help='Periods of MultiPeriodDiscriminator')
hfg.add_argument('--concat_fwd', action='store_true',
help='Faster Discriminators (requires more GPU memory)')
hfg.add_argument('--hifigan-config', type=str, default=None, required=False,
help='Path to a HiFi-GAN config .json'
' (if provided, overrides model architecture flags)')
return parser
def literal_eval_arg(val):
try:
return literal_eval(val)
except SyntaxError as e: # Argparse does not handle SyntaxError
raise ValueError(str(e)) from e
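# Illustrative usage sketch, not part of the original file: list-valued flags rely on
# literal_eval_arg to turn strings such as "[8, 8, 2, 2]" into Python lists.
if __name__ == '__main__':
    _parent = argparse.ArgumentParser(add_help=False)
    _args = parse_hifigan_args(_parent).parse_args(['--upsample_rates', '[8, 8, 2, 2]'])
    assert _args.upsample_rates == [8, 8, 2, 2]
    assert _args.resblock_kernel_sizes == [3, 7, 11]  # non-string defaults pass through unchanged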
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/arg_parser.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, List, NamedTuple, Optional
import numpy as np
from deployment_toolkit.core import BaseMetricsCalculator
class MetricsCalculator(BaseMetricsCalculator):
def __init__(self, output_used_for_metrics: str):
self._output_used_for_metrics = output_used_for_metrics
self._MEL_MIN = -15.0
self._MEL_MAX = 3.0
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
y_pred = y_pred[self._output_used_for_metrics]
value_range_correct = np.ones(y_pred.shape[0]).astype(np.int32)
for idx, mel in enumerate(y_pred):
mel = mel[~np.isnan(mel)]
if mel.min() < self._MEL_MIN or mel.max() > self._MEL_MAX:
value_range_correct[idx] = 0
return {
"accuracy": np.mean(value_range_correct)
}
# from LJSpeech:
# min(mins) # Out[27]: -11.512925148010254
# max(maxs) # Out[28]: 2.0584452152252197
# min(sizes) # Out[29]: 96
# max(sizes) # Out[30]: 870
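# Illustrative usage sketch, not part of the original file: the metric is the fraction of
# predicted mels whose finite values stay within [_MEL_MIN, _MEL_MAX].
if __name__ == "__main__":
    calc = MetricsCalculator(output_used_for_metrics="OUTPUT__0")
    fake_pred = {"OUTPUT__0": np.stack([np.zeros((80, 10)),           # within range
                                        np.full((80, 10), 100.0)])}   # out of range
    print(calc.calc(ids=[0, 1], y_pred=fake_pred, x=None, y_real=None))  # {'accuracy': 0.5}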
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
Using the `calculate_metrics.py` script, you can obtain model accuracy/error metrics with a user-defined `MetricsCalculator` class.
The data provided to `MetricsCalculator` is read from the npz dump files
stored in the directory pointed to by the `--dump-dir` argument.
These files are prepared by the `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts.
The output is stored in the csv file pointed to by the `--csv` argument.
Example call:
```shell script
python ./triton/calculate_metrics.py \
--dump-dir /results/dump_triton \
--csv /results/accuracy_results.csv \
--metrics metrics.py \
--metric-class-param1 value
```
"""
import argparse
import csv
import logging
import string
from pathlib import Path
import numpy as np
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file
from .deployment_toolkit.dump import pad_except_batch_axis
LOGGER = logging.getLogger("calculate_metrics")
TOTAL_COLUMN_NAME = "_total_"
def get_data(dump_dir, prefix):
"""Loads and concatenates dump files for given prefix (ex. inputs, outputs, labels, ids)"""
dump_dir = Path(dump_dir)
npz_files = sorted(dump_dir.glob(f"{prefix}*.npz"))
data = None
if npz_files:
# assume that all npz files with given prefix contain same set of names
names = list(np.load(npz_files[0].as_posix()).keys())
# calculate target shape
target_shape = {
name: tuple(np.max([np.load(npz_file.as_posix())[name].shape for npz_file in npz_files], axis=0))
for name in names
}
# pad and concatenate data
data = {
name: np.concatenate(
[pad_except_batch_axis(np.load(npz_file.as_posix())[name], target_shape[name]) for npz_file in npz_files]
)
for name in names
}
return data
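# Worked example (illustrative): if "outputs_0.npz" holds OUTPUT__0 with shape (8, 80, 100)
# and "outputs_1.npz" holds OUTPUT__0 with shape (8, 80, 120), the target shape is the
# element-wise maximum (8, 80, 120); each array is padded on its non-batch axes to that shape
# and the results are concatenated along the batch axis into a single (16, 80, 120) array.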
def main():
logging.basicConfig(level=logging.INFO)
parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False)
parser.add_argument("--metrics", help=f"Path to python module containing metrics calculator", required=True)
parser.add_argument("--csv", help="Path to csv file", required=True)
parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True)
args, *_ = parser.parse_known_args()
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
ArgParserGenerator(MetricsCalculator).update_argparser(parser)
args = parser.parse_args()
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator")
metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args)
ids = get_data(args.dump_dir, "ids")["ids"]
x = get_data(args.dump_dir, "inputs")
y_true = get_data(args.dump_dir, "labels")
y_pred = get_data(args.dump_dir, "outputs")
common_keys = list({k for k in (y_true or [])} & {k for k in (y_pred or [])})
for key in common_keys:
if y_true[key].shape != y_pred[key].shape:
LOGGER.warning(
f"Model predictions and labels shall have equal shapes. "
f"y_pred[{key}].shape={y_pred[key].shape} != "
f"y_true[{key}].shape={y_true[key].shape}"
)
metrics = metrics_calculator.calc(ids=ids, x=x, y_pred=y_pred, y_real=y_true)
metrics = {TOTAL_COLUMN_NAME: len(ids), **metrics}
metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])]
if metric_names_with_space:
raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}")
csv_path = Path(args.csv)
csv_path.parent.mkdir(parents=True, exist_ok=True)
with csv_path.open("w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys()))
writer.writeheader()
writer.writerow(metrics)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/calculate_metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To run inference on a model deployed on Triton, you can use the `run_inference_on_triton.py` script.
It sends requests with data obtained from the specified data loader and dumps the received data into npz files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Currently, the client communicates with the Triton server asynchronously over the gRPC protocol.
Example call:
```shell script
python ./triton/run_inference_on_triton.py \
--server-url localhost:8001 \
--model-name ResNet50 \
--model-version 1 \
--dump-labels \
--output-dir /results/dump_triton
```
"""
import argparse
import functools
import logging
import queue
import threading
import time
from pathlib import Path
from typing import Optional
from tqdm import tqdm
# pytype: disable=import-error
try:
from tritonclient import utils as client_utils # noqa: F401
from tritonclient.grpc import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
except ImportError:
import tritonclientutils as client_utils  # noqa: F401  (legacy Triton client packaging)
from tritongrpcclient import (
InferenceServerClient,
InferInput,
InferRequestedOutput,
)
# pytype: enable=import-error
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file
from .deployment_toolkit.dump import NpzWriter
LOGGER = logging.getLogger("run_inference_on_triton")
class AsyncGRPCTritonRunner:
DEFAULT_MAX_RESP_WAIT_S = 120
DEFAULT_MAX_UNRESP_REQS = 128
DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min
def __init__(
self,
server_url: str,
model_name: str,
model_version: str,
*,
dataloader,
verbose=False,
resp_wait_s: Optional[float] = None,
max_unresponded_reqs: Optional[int] = None,
):
self._server_url = server_url
self._model_name = model_name
self._model_version = model_version
self._dataloader = dataloader
self._verbose = verbose
self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s
self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs
self._results = queue.Queue()
self._processed_all = False
self._errors = []
self._num_waiting_for = 0
self._sync = threading.Condition()
self._req_thread = threading.Thread(target=self.req_loop, daemon=True)
def __iter__(self):
self._req_thread.start()
timeout_s = 0.050 # check flags processed_all and error flags every 50ms
while True:
try:
ids, x, y_pred, y_real = self._results.get(timeout=timeout_s)
yield ids, x, y_pred, y_real
except queue.Empty:
shall_stop = self._processed_all or self._errors
if shall_stop:
break
LOGGER.debug("Waiting for request thread to stop")
self._req_thread.join()
if self._errors:
error_msg = "\n".join(map(str, self._errors))
raise RuntimeError(error_msg)
def _on_result(self, ids, x, y_real, output_names, result, error):
with self._sync:
if error:
self._errors.append(error)
else:
y_pred = {name: result.as_numpy(name) for name in output_names}
self._results.put((ids, x, y_pred, y_real))
self._num_waiting_for -= 1
self._sync.notify_all()
def req_loop(self):
client = InferenceServerClient(self._server_url, verbose=self._verbose)
self._errors = self._verify_triton_state(client)
if self._errors:
return
LOGGER.debug(
f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!"
)
model_config = client.get_model_config(self._model_name, self._model_version)
model_metadata = client.get_model_metadata(self._model_name, self._model_version)
LOGGER.info(f"Model config {model_config}")
LOGGER.info(f"Model metadata {model_metadata}")
inputs = {tm.name: tm for tm in model_metadata.inputs}
outputs = {tm.name: tm for tm in model_metadata.outputs}
output_names = list(outputs)
outputs_req = [InferRequestedOutput(name) for name in outputs]
self._num_waiting_for = 0
for ids, x, y_real in self._dataloader:
infer_inputs = []
for name in inputs:
data = x[name]
infer_input = InferInput(name, data.shape, inputs[name].datatype)
target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype)
data = data.astype(target_np_dtype)
infer_input.set_data_from_numpy(data)
infer_inputs.append(infer_input)
with self._sync:
def _check_can_send():
return self._num_waiting_for < self._max_unresp_reqs
can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t)
if not can_send:
error_msg = f"Runner could not send new requests for {self._response_wait_t}s"
self._errors.append(error_msg)
break
callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names)
client.async_infer(
model_name=self._model_name,
model_version=self._model_version,
inputs=infer_inputs,
outputs=outputs_req,
callback=callback,
)
self._num_waiting_for += 1
# wait till receive all requested data
with self._sync:
def _all_processed():
LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs")
return self._num_waiting_for == 0
self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S)
if not self._processed_all:
error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server"
self._errors.append(error_msg)
LOGGER.debug("Finished request thread")
def _verify_triton_state(self, triton_client):
errors = []
if not triton_client.is_server_live():
errors.append(f"Triton server {self._server_url} is not live")
elif not triton_client.is_server_ready():
errors.append(f"Triton server {self._server_url} is not ready")
elif not triton_client.is_model_ready(self._model_name, self._model_version):
errors.append(f"Model {self._model_name}:{self._model_version} is not ready")
return errors
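# Illustrative usage sketch, not part of the original file: the runner is consumed as an
# iterable - a background thread keeps at most max_unresponded_reqs async gRPC requests in
# flight, and __iter__ yields (ids, x, y_pred, y_real) tuples as callbacks complete, e.g.:
#     runner = AsyncGRPCTritonRunner("localhost:8001", "FastPitch", "1", dataloader=dataloader_fn())
#     for ids, x, y_pred, y_real in runner:
#         ...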
def _parse_args():
parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False)
parser.add_argument(
"--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)"
)
parser.add_argument("--model-name", help="The name of the model used for inference.", required=True)
parser.add_argument("--model-version", help="The version of the model used for inference.", required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved")
parser.add_argument("--response-wait-time", required=False, help="Maximal time to wait for response", type=int, default=120)
parser.add_argument(
"--max-unresponded-requests", required=False, help="Maximal number of unresponded requests", type=int, default=128
)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args()
return args
def main():
args = _parse_args()
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
log_level = logging.INFO if not args.verbose else logging.DEBUG
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
runner = AsyncGRPCTritonRunner(
args.server_url,
args.model_name,
args.model_version,
dataloader=dataloader_fn(),
verbose=False,
resp_wait_s=args.response_wait_time,
max_unresponded_reqs=args.max_unresponded_requests,
)
with NpzWriter(output_dir=args.output_dir) as writer:
start = time.time()
for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10):
data = _verify_and_format_dump(args, ids, x, y_pred, y_real)
writer.write(**data)
stop = time.time()
LOGGER.info(f"\nThe inference took {stop - start:0.3f}s")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/run_inference_on_triton.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import importlib
import numpy as np
import os
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dataloader', type=str, required=True,
help='Path to file containing get_dataloader function')
parser.add_argument('--input-data-dir', type=str, required=True,
help='Path to directory where input data for perf client will be saved')
parser.add_argument('--dataset-path', required=False, help='Path to the dataset')
parser.add_argument('--precision', type=str, default="fp16",
help='Precision for the generated input data')
parser.add_argument('--length', type=int, required=True,
help='Length of the generated input data')
args = parser.parse_args()
args.batch_size = 1
return args
def main():
args = parse_args()
spec = importlib.util.spec_from_file_location('dataloader', args.dataloader)
dm = importlib.util.module_from_spec(spec)
spec.loader.exec_module(dm)
dataloader = dm.get_dataloader_fn(dataset_path=args.dataset_path,
batch_size=1,
precision=args.precision)
_, x, _ = next(dataloader())
for name, t in x.items():
if name == 'INPUT__0':
# trim or tile along the last (time/length) axis so the dumped tensor has exactly args.length frames
if t.shape[-1] > args.length:
    t = t[..., :args.length]
elif t.shape[-1] < args.length:
    num_tiles = int(np.ceil(1.0 * args.length / t.shape[-1]))
    t = np.tile(t, [1] * (t.ndim - 1) + [num_tiles])
    t = t[..., :args.length]
t.tofile(os.path.join(args.input_data_dir, name))
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/prepare_input_data.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ],
use `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import Dict, List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
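# Illustrative example, not part of the original file: for a perf_client CSV row such as
# {"Client Send": "20", "Server Queue": "30", "Server Compute": "400", "Client Recv": "15"},
# calculate_average_latency returns 20 + 30 + 400 + 15 = 465 (values are in microseconds);
# columns missing from the row default to 0.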
def update_performance_data(results: List, batch_size: int, performance_partial_file: str):
row: Dict = {"batch_size": batch_size}
with open(performance_partial_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for r in reader:
avg_latency = calculate_average_latency(r)
row = {**row, **r, "avg latency": avg_latency}
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def offline_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Static batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
results: List[Dict] = list()
for batch_size in batch_sizes:
print(f"Running performance tests for batch size: {batch_size}")
performance_partial_file = f"triton_performance_partial_{batch_size}.csv"
exec_args = f"""-max-threads {triton_instances} \
-m {model_name} \
-x 1 \
-c {triton_instances} \
-t {triton_instances} \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_partial_file} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
update_performance_data(results, batch_size, performance_partial_file)
os.remove(performance_partial_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
print("Performance results for static batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
offline_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/run_offline_performance_test_on_triton.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
from os.path import abspath, dirname
sys.path.append(abspath(dirname(__file__)+'/../'))
from common.text import symbols
from inference import load_model_from_ckpt
import models
from torch.utils.data import DataLoader
import torch
import numpy as np
def update_argparser(parser):
### copy-paste from ./fastpitch/arg_parser.py
io = parser.add_argument_group('io parameters')
io.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
symbols = parser.add_argument_group('symbols parameters')
symbols.add_argument('--n-symbols', default=148, type=int,
help='Number of symbols in dictionary')
symbols.add_argument('--padding-idx', default=0, type=int,
help='Index of padding symbol in dictionary')
symbols.add_argument('--symbols-embedding-dim', default=384, type=int,
help='Input embedding dimension')
text_processing = parser.add_argument_group('Text processing parameters')
text_processing.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
in_fft = parser.add_argument_group('input FFT parameters')
in_fft.add_argument('--in-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
in_fft.add_argument('--in-fft-n-heads', default=1, type=int,
help='Number of attention heads')
in_fft.add_argument('--in-fft-d-head', default=64, type=int,
help='Dim of attention heads')
in_fft.add_argument('--in-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
in_fft.add_argument('--in-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
in_fft.add_argument('--in-fft-output-size', default=384, type=int,
help='Output dim')
in_fft.add_argument('--p-in-fft-dropout', default=0.1, type=float,
help='Dropout probability')
in_fft.add_argument('--p-in-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
in_fft.add_argument('--p-in-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
out_fft = parser.add_argument_group('output FFT parameters')
out_fft.add_argument('--out-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
out_fft.add_argument('--out-fft-n-heads', default=1, type=int,
help='Number of attention heads')
out_fft.add_argument('--out-fft-d-head', default=64, type=int,
help='Dim of attention head')
out_fft.add_argument('--out-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
out_fft.add_argument('--out-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
out_fft.add_argument('--out-fft-output-size', default=384, type=int,
help='Output dim')
out_fft.add_argument('--p-out-fft-dropout', default=0.1, type=float,
help='Dropout probability for out_fft')
out_fft.add_argument('--p-out-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
out_fft.add_argument('--p-out-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
dur_pred = parser.add_argument_group('duration predictor parameters')
dur_pred.add_argument('--dur-predictor-kernel-size', default=3, type=int,
help='Duration predictor conv-1D kernel size')
dur_pred.add_argument('--dur-predictor-filter-size', default=256, type=int,
help='Duration predictor conv-1D filter size')
dur_pred.add_argument('--p-dur-predictor-dropout', default=0.1, type=float,
help='Dropout probability for duration predictor')
dur_pred.add_argument('--dur-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
pitch_pred = parser.add_argument_group('pitch predictor parameters')
pitch_pred.add_argument('--pitch-predictor-kernel-size', default=3, type=int,
help='Pitch predictor conv-1D kernel size')
pitch_pred.add_argument('--pitch-predictor-filter-size', default=256, type=int,
help='Pitch predictor conv-1D filter size')
pitch_pred.add_argument('--p-pitch-predictor-dropout', default=0.1, type=float,
help='Dropout probability for pitch predictor')
pitch_pred.add_argument('--pitch-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
energy_pred = parser.add_argument_group('energy predictor parameters')
energy_pred.add_argument('--energy-conditioning', type=bool, default=True)
energy_pred.add_argument('--energy-predictor-kernel-size', default=3, type=int,
help='Energy predictor conv-1D kernel size')
energy_pred.add_argument('--energy-predictor-filter-size', default=256, type=int,
help='Energy predictor conv-1D filter size')
energy_pred.add_argument('--p-energy-predictor-dropout', default=0.1, type=float,
help='Dropout probability for energy predictor')
energy_pred.add_argument('--energy-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
###~copy-paste from ./fastpitch/arg_parser.py
parser.add_argument('--checkpoint', type=str,
help='Full path to the FastPitch checkpoint file')
parser.add_argument('--torchscript', action='store_true',
help='Apply TorchScript')
parser.add_argument('--ema', action='store_true',
help='Use EMA averaged model \
(if saved in checkpoints)')
cond = parser.add_argument_group('conditioning parameters')
cond.add_argument('--pitch-embedding-kernel-size', default=3, type=int,
help='Pitch embedding conv-1D kernel size')
cond.add_argument('--energy-embedding-kernel-size', default=3, type=int,
help='Energy embedding conv-1D kernel size')
cond.add_argument('--speaker-emb-weight', type=float, default=1.0,
help='Scale speaker embedding')
cond.add_argument('--n-speakers', type=int, default=1,
help='Number of speakers in the model.')
cond.add_argument('--pitch-conditioning-formants', default=1, type=int,
help='Number of speech formants to condition on.')
parser.add_argument("--precision", type=str, default="fp32",
choices=["fp32", "fp16"],
help="PyTorch model precision")
parser.add_argument("--output-format", type=str, required=True,
help="Output format")
def get_model(**model_args):
import argparse
args = argparse.Namespace(**model_args)
model_config = models.get_model_config(model_name="FastPitch",
args=args)
jittable = True if 'ts-' in args.output_format else False
model = models.get_model(model_name="FastPitch",
model_config=model_config,
device='cuda',
forward_is_infer=True,
jitable=jittable)
model = load_model_from_ckpt(args.checkpoint, args.ema, model)
if args.precision == "fp16":
model = model.half()
model.eval()
tensor_names = {"inputs": ["INPUT__0"],
"outputs" : ["OUTPUT__0", "OUTPUT__1",
"OUTPUT__2", "OUTPUT__3", "OUTPUT__4"]}
return model, tensor_names
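# Illustrative note, not part of the original file: convert_model.py consumes this module as
# its `pyt` input (via update_argparser/get_model above); the hypothetical call below only
# sketches the shape of the interface:
#     model, tensor_names = get_model(checkpoint='fastpitch_checkpoint.pt', ema=True,
#                                     precision='fp16', output_format='ts-trace',
#                                     ...)  # plus every architecture flag defined above
# tensor_names maps the single text input to INPUT__0 and the five outputs
# (mel, mel lengths, durations, pitch, energy) to OUTPUT__0..OUTPUT__4, matching dataloader.py.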
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/model.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To run inference on the framework runtime, you can use the `run_inference_on_fw.py` script.
It runs inference locally on data obtained from the specified data loader and saves the results into npz files.
Those files are stored in the directory pointed to by the `--output-dir` argument.
Example call:
```shell script
python ./triton/run_inference_on_fw.py \
--input-path /models/exported/model.onnx \
--input-type onnx \
--dataloader triton/dataloader.py \
--data-dir /data/imagenet \
--batch-size 32 \
--output-dir /results/dump_local \
--dump-labels
```
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0"
from tqdm import tqdm
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import DATALOADER_FN_NAME, BaseLoader, BaseRunner, Format, load_from_file
from .deployment_toolkit.dump import NpzWriter
from .deployment_toolkit.extensions import loaders, runners
LOGGER = logging.getLogger("run_inference_on_fw")
def _verify_and_format_dump(args, ids, x, y_pred, y_real):
data = {"outputs": y_pred, "ids": {"ids": ids}}
if args.dump_inputs:
data["inputs"] = x
if args.dump_labels:
if not y_real:
raise ValueError(
"Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
)
data["labels"] = y_real
return data
def _parse_and_validate_args():
supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions)
parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model", required=True)
parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True)
parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True)
parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True)
parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False)
parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False)
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
args, *_ = parser.parse_known_args()
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
Runner: BaseRunner = runners.get(args.input_type)
ArgParserGenerator(Runner).update_argparser(parser)
args = parser.parse_args()
types_requiring_io_params = []
if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]):
parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters")
return args
def main():
args = _parse_and_validate_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
LOGGER.info(f"args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
Loader: BaseLoader = loaders.get(args.input_type)
Runner: BaseRunner = runners.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
runner = ArgParserGenerator(Runner).from_args(args)
LOGGER.info(f"Loading {args.input_path}")
model = loader.load(args.input_path)
with runner.init_inference(model=model) as runner_session, NpzWriter(args.output_dir) as writer:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
LOGGER.info(f"Data loader initialized; Running inference")
for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10):
y_pred = runner_session(x)
data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
writer.write(**data)
LOGGER.info(f"Inference finished")
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/run_inference_on_fw.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import abspath, dirname
sys.path.append(abspath(dirname(__file__)+'/../'))
from fastpitch.data_function import TTSCollate, TTSDataset
from torch.utils.data import DataLoader
import numpy as np
import inspect
import torch
from typing import List
from common.text import cmudict
def get_dataloader_fn(batch_size: int = 8,
precision: str = "fp16",
heteronyms_path: str = 'cmudict/heteronyms',
cmudict_path: str = 'cmudict/cmudict-0.7b',
dataset_path: str = './LJSpeech_1.1',
filelist: str ="filelists/ljs_audio_pitch_text_test.txt",
text_cleaners: List = ['english_cleaners_v2'],
n_mel_channels: int = 80,
symbol_set: str ='english_basic',
p_arpabet: float = 1.0,
n_speakers: int = 1,
load_mel_from_disk: bool = False,
load_pitch_from_disk: bool = True,
pitch_mean: float = 214.72203, # LJSpeech defaults
pitch_std: float = 65.72038,
max_wav_value: float = 32768.0,
sampling_rate: int = 22050,
filter_length: int = 1024,
hop_length: int = 256,
win_length: int = 1024,
mel_fmin: float = 0.0,
mel_fmax: float = 8000.0):
if p_arpabet > 0.0:
cmudict.initialize(cmudict_path, heteronyms_path)
dataset = TTSDataset(dataset_path=dataset_path,
audiopaths_and_text=filelist,
text_cleaners=text_cleaners,
n_mel_channels=n_mel_channels,
symbol_set=symbol_set,
p_arpabet=p_arpabet,
n_speakers=n_speakers,
load_mel_from_disk=load_mel_from_disk,
load_pitch_from_disk=load_pitch_from_disk,
pitch_mean=pitch_mean,
pitch_std=pitch_std,
max_wav_value=max_wav_value,
sampling_rate=sampling_rate,
filter_length=filter_length,
hop_length=hop_length,
win_length=win_length,
mel_fmin=mel_fmin,
mel_fmax=mel_fmax)
collate_fn = TTSCollate()
dataloader = DataLoader(dataset, num_workers=8, shuffle=False,
sampler=None,
batch_size=batch_size, pin_memory=False,
collate_fn=collate_fn)
def _get_dataloader():
for idx, batch in enumerate(dataloader):
text_padded, _, mel_padded, output_lengths, _, \
pitch_padded, energy_padded, *_ = batch
pitch_padded = pitch_padded.float()
energy_padded = energy_padded.float()
dur_padded = torch.zeros_like(pitch_padded)
if precision == "fp16":
pitch_padded = pitch_padded.half()
dur_padded = dur_padded.half()
mel_padded = mel_padded.half()
energy_padded = energy_padded.half()
ids = np.arange(idx * batch_size, idx * batch_size + text_padded.size(0))  # the final batch may be smaller than batch_size
x = {"INPUT__0": text_padded.cpu().numpy()}
y_real = {"OUTPUT__0": mel_padded.cpu().numpy(),
"OUTPUT__1": output_lengths.cpu().numpy(),
"OUTPUT__2": dur_padded.cpu().numpy(),
"OUTPUT__3": pitch_padded.cpu().numpy(),
"OUTPUT__4": energy_padded.cpu().numpy()}
yield (ids, x, y_real)
return _get_dataloader
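# Illustrative usage sketch, not part of the original file (assumes LJSpeech-1.1 and the
# pitch/text filelists are available at the default paths):
#     dataloader_fn = get_dataloader_fn(batch_size=1, precision="fp16", dataset_path="./LJSpeech_1.1")
#     for ids, x, y_real in dataloader_fn():
#         ...  # x["INPUT__0"] holds padded text ids; y_real maps OUTPUT__0..OUTPUT__4 to
#              # mel, output lengths, (zeroed) durations, pitch and energy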
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/dataloader.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
To configure a model on Triton, you can use the `config_model_on_triton.py` script.
It prepares the layout of the Model Repository, including the Model Configuration.
```shell script
python ./triton/config_model_on_triton.py \
--model-repository /model_repository \
--model-path /models/exported/model.onnx \
--model-format onnx \
--model-name ResNet50 \
--model-version 1 \
--max-batch-size 32 \
--precision fp16 \
--backend-accelerator trt \
--load-model explicit \
--timeout 120 \
--verbose
```
If the Triton server for which you prepare the model repository is running in **explicit model control mode**,
use the `--load-model` argument to send a load_model request to the Triton Inference Server.
If the server is listening on a non-default address or port, use the `--server-url` argument to point to the server control endpoint.
If the HTTP protocol is required to communicate with the Triton server, use the `--http` argument.
To improve inference throughput, you can use
[dynamic batching](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#dynamic-batcher)
for your model by providing the `--preferred-batch-sizes` and `--max-queue-delay-us` parameters.
For models that do not support batching, set `--max-batch-size` to 0.
By default, Triton will [automatically obtain input and output definitions](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#auto-generated-model-configuration),
but for TorchScript and TF GraphDef models the script uses a file with I/O specs. This file is generated automatically
when the model is converted to a ScriptModule (either traced or scripted).
If you need to pass a non-default path to the I/O spec file, use the `--io-spec` CLI argument.
The I/O spec file is a YAML file with the following structure:
```yaml
- inputs:
- name: input
dtype: float32 # np.dtype name
shape: [None, 224, 224, 3]
- outputs:
- name: probabilities
dtype: float32
shape: [None, 1001]
- name: classes
dtype: int32
shape: [None, 1]
```
"""
import argparse
import logging
import time
from model_navigator.triton.config import BackendAccelerator as Accelerator
from model_navigator.triton.config import TensorRTOptPrecision as Precision
from model_navigator.model import Format
from model_navigator.log import set_logger, log_dict
from model_navigator.triton import ModelConfig, TritonClient, TritonModelStore
LOGGER = logging.getLogger("config_model")
def _available_enum_values(my_enum):
return [item.value for item in my_enum]
def main():
parser = argparse.ArgumentParser(
description="Create Triton model repository and model configuration", allow_abbrev=False
)
parser.add_argument("--model-repository", required=True, help="Path to Triton model repository.")
parser.add_argument("--model-path", required=True, help="Path to model to configure")
# TODO: automation
parser.add_argument(
"--model-format",
required=True,
choices=_available_enum_values(Format),
help="Format of model to deploy",
)
parser.add_argument("--model-name", required=True, help="Model name")
parser.add_argument("--model-version", default="1", help="Version of model (default 1)")
parser.add_argument(
"--max-batch-size",
type=int,
default=32,
help="Maximum batch size allowed for inference. "
"A max_batch_size value of 0 indicates that batching is not allowed for the model",
)
# TODO: automation
parser.add_argument(
"--precision",
type=str,
default=Precision.FP16.value,
choices=_available_enum_values(Precision),
help="Model precision (parameter used only by Tensorflow backend with TensorRT optimization)",
)
# Triton Inference Server endpoint
parser.add_argument(
"--server-url",
type=str,
default="grpc://localhost:8001",
help="Inference server URL in format protocol://host[:port] (default grpc://localhost:8001)",
)
parser.add_argument(
"--load-model",
choices=["none", "poll", "explicit"],
help="Loading model while Triton Server is in given model control mode",
)
parser.add_argument(
"--timeout", default=120, help="Timeout in seconds to wait till model load (default=120)", type=int
)
# optimization related
parser.add_argument(
"--backend-accelerator",
type=str,
choices=_available_enum_values(Accelerator),
default=Accelerator.TRT.value,
help="Select Backend Accelerator used to serve model",
)
parser.add_argument("--number-of-model-instances", type=int, default=1, help="Number of model instances per GPU")
parser.add_argument(
"--preferred-batch-sizes",
type=int,
nargs="*",
help="Batch sizes that the dynamic batcher should attempt to create. "
"In case --max-queue-delay-us is set and this parameter is not, default value will be --max-batch-size",
)
parser.add_argument(
"--max-queue-delay-us",
type=int,
default=0,
help="Max delay time which dynamic batcher shall wait to form a batch (default 0)",
)
parser.add_argument(
"--capture-cuda-graph",
type=int,
default=0,
help="Use cuda capture graph (used only by TensorRT platform)",
)
parser.add_argument("-v", "--verbose", help="Provide verbose logs", action='store_true')
args = parser.parse_args()
set_logger(verbose=args.verbose)
log_dict("args", vars(args))
config = ModelConfig.create(
model_path=args.model_path,
# model definition
model_name=args.model_name,
model_version=args.model_version,
model_format=args.model_format,
precision=args.precision,
max_batch_size=args.max_batch_size,
# optimization
accelerator=args.backend_accelerator,
gpu_engine_count=args.number_of_model_instances,
preferred_batch_sizes=args.preferred_batch_sizes or [],
max_queue_delay_us=args.max_queue_delay_us,
capture_cuda_graph=args.capture_cuda_graph,
)
model_store = TritonModelStore(args.model_repository)
model_store.deploy_model(model_config=config, model_path=args.model_path)
if args.load_model != "none":
client = TritonClient(server_url=args.server_url, verbose=args.verbose)
client.wait_for_server_ready(timeout=args.timeout)
if args.load_model == "explicit":
client.load_model(model_name=args.model_name)
if args.load_model == "poll":
time.sleep(15)
client.wait_for_model(model_name=args.model_name, model_version=args.model_version, timeout_s=args.timeout)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/config_model_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows
what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ],
where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ],
use `--shape IMAGE:3,224,224`.
"""
import argparse
import csv
import os
import sys
from pathlib import Path
from typing import List, Optional
# method from PEP-366 to support relative import in executed modules
if __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.report import save_results, show_results, sort_results
from .deployment_toolkit.warmup import warmup
def calculate_average_latency(r):
avg_sum_fields = [
"Client Send",
"Network+Server Send/Recv",
"Server Queue",
"Server Compute",
"Server Compute Input",
"Server Compute Infer",
"Server Compute Output",
"Client Recv",
]
avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields])
return avg_latency
def update_performance_data(results: List, performance_file: str):
with open(performance_file, "r") as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
row["avg latency"] = calculate_average_latency(row)
results.append(row)
def _parse_batch_sizes(batch_sizes: str):
batches = batch_sizes.split(sep=",")
return list(map(lambda x: int(x.strip()), batches))
def online_performance(
model_name: str,
batch_sizes: List[int],
result_path: str,
input_shapes: Optional[List[str]] = None,
profiling_data: str = "random",
triton_instances: int = 1,
triton_gpu_engine_count: int = 1,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Dynamic batching analysis start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
print(f"Running performance tests for dynamic batching")
performance_file = f"triton_performance_dynamic_partial.csv"
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 32)
min_concurrency = step
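# Worked example (illustrative): with --batch-sizes "1,2,4,8", one Triton instance and one
# model instance per GPU: max_batch_size=8, max_total_requests=2*8*1*1=16,
# max_concurrency=min(256, 16)=16, batch_size=max(1, 16//256)=1, step=max(1, 16//32)=1,
# so perf_client sweeps concurrency 1:16:1 at batch size 1.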
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
-f {performance_file} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
results = list()
update_performance_data(results=results, performance_file=performance_file)
results = sort_results(results=results)
save_results(filename=result_path, data=results)
show_results(results=results)
os.remove(performance_file)
print("Performance results for dynamic batching stored in: {0}".format(result_path))
print("\n")
print(f"==== Analysis done ====")
print("\n")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test")
parser.add_argument(
"--input-data", type=str, required=False, default="random", help="Input data to perform profiling."
)
parser.add_argument(
"--input-shape",
action="append",
required=False,
help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.",
)
parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.")
parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances")
parser.add_argument(
"--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server"
)
parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.")
parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server")
parser.add_argument(
"--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000
)
parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true",
default=False)
args = parser.parse_args()
warmup(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
online_performance(
server_url=args.server_url,
model_name=args.model_name,
batch_sizes=_parse_batch_sizes(args.batch_sizes),
triton_instances=args.triton_instances,
triton_gpu_engine_count=args.number_of_model_instances,
profiling_data=args.input_data,
input_shapes=args.input_shape,
result_path=args.result_path,
measurement_window=args.measurement_window,
shared_memory=args.shared_memory
)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/run_online_performance_test_on_triton.py |
#!/usr/bin/env python3
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""
The `convert_model.py` script converts between model formats, applying additional model optimizations
for faster inference.
It converts the model obtained from the result of the `get_model` function.
Currently supported input and output formats are:
- inputs
- `tf-estimator` - `get_model` function returning Tensorflow Estimator
- `tf-keras` - `get_model` function returning Tensorflow Keras Model
- `tf-savedmodel` - Tensorflow SavedModel binary
- `pyt` - `get_model` function returning PyTorch Module
- output
- `tf-savedmodel` - Tensorflow saved model
- `tf-trt` - TF-TRT saved model
- `ts-trace` - PyTorch traced ScriptModule
- `ts-script` - PyTorch scripted ScriptModule
- `onnx` - ONNX
- `trt` - TensorRT plan file
For tf-keras input you can use:
- the --large-model flag - helps load models which exceed the maximum protobuf size of 2GB
- the --tf-allow-growth flag - controls the GPU memory growth limiting feature
(https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). It is disabled by default.
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1"
# method from PEP-366 to support relative import in executed modules
if __name__ == "__main__" and __package__ is None:
__package__ = Path(__file__).parent.name
from .deployment_toolkit.args import ArgParserGenerator
from .deployment_toolkit.core import (
DATALOADER_FN_NAME,
BaseConverter,
BaseLoader,
BaseSaver,
Format,
Precision,
load_from_file,
)
from .deployment_toolkit.extensions import converters, loaders, savers
LOGGER = logging.getLogger("convert_model")
INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT]
OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT]
def _get_args():
parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False)
parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True)
parser.add_argument(
"--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True
)
parser.add_argument("--output-path", help="Path to output model file", required=True)
parser.add_argument(
"--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True
)
parser.add_argument("--dataloader", help="Path to python module containing data loader")
parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False)
parser.add_argument(
"--ignore-unknown-parameters",
help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)",
action="store_true",
default=False,
)
args, unparsed_args = parser.parse_known_args()
Loader: BaseLoader = loaders.get(args.input_type)
ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser)
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter is not None:
ArgParserGenerator(Converter).update_argparser(parser)
Saver: BaseSaver = savers.get(args.output_type)
ArgParserGenerator(Saver).update_argparser(parser)
if args.dataloader is not None:
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
if args.ignore_unknown_parameters:
args, unknown_args = parser.parse_known_args()
LOGGER.warning(f"Got additional args {unknown_args}")
else:
args = parser.parse_args()
return args
def main():
args = _get_args()
log_level = logging.INFO if not args.verbose else logging.DEBUG
log_format = "%(asctime)s %(levelname)s %(name)s %(message)s"
logging.basicConfig(level=log_level, format=log_format)
    LOGGER.info("args:")
for key, value in vars(args).items():
LOGGER.info(f" {key} = {value}")
requested_model_precision = Precision(args.precision)
dataloader_fn = None
# if conversion is required, temporary change model load precision to that required by converter
# it is for TensorRT converters which require fp32 models for all requested precisions
converter_name = f"{args.input_type}--{args.output_type}"
Converter: BaseConverter = converters.get(converter_name)
if Converter:
args.precision = Converter.required_source_model_precision(requested_model_precision).value
Loader: BaseLoader = loaders.get(args.input_type)
loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args)
model = loader.load(args.input_path)
LOGGER.info("inputs: %s", model.inputs)
LOGGER.info("outputs: %s", model.outputs)
if Converter: # if conversion is needed
        # dataloader must match source model precision - so not recovering it yet
if args.dataloader is not None:
if args.p_arpabet > 0.0:
from common.text import cmudict
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME)
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)
# recover precision to that requested by user
args.precision = requested_model_precision.value
if Converter:
converter = ArgParserGenerator(Converter).from_args(args)
model = converter.convert(model, dataloader_fn=dataloader_fn)
Saver: BaseSaver = savers.get(args.output_type)
saver = ArgParserGenerator(Saver).from_args(args)
saver.save(model, args.output_path)
return 0
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/convert_model.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import importlib
import logging
import os
from enum import Enum
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
LOGGER = logging.getLogger(__name__)
DATALOADER_FN_NAME = "get_dataloader_fn"
GET_MODEL_FN_NAME = "get_model"
GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn"
GET_ARGPARSER_FN_NAME = "update_argparser"
class TensorSpec(NamedTuple):
name: str
dtype: str
shape: Tuple
class Parameter(Enum):
def __lt__(self, other: "Parameter") -> bool:
return self.value < other.value
class Accelerator(Parameter):
AMP = "amp"
CUDA = "cuda"
TRT = "trt"
class Precision(Parameter):
FP16 = "fp16"
FP32 = "fp32"
TF32 = "tf32" # Deprecated
class Format(Parameter):
TF_GRAPHDEF = "tf-graphdef"
TF_SAVEDMODEL = "tf-savedmodel"
TF_TRT = "tf-trt"
TF_ESTIMATOR = "tf-estimator"
TF_KERAS = "tf-keras"
ONNX = "onnx"
TRT = "trt"
TS_SCRIPT = "ts-script"
TS_TRACE = "ts-trace"
PYT = "pyt"
class Model(NamedTuple):
handle: object
precision: Optional[Precision]
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def load_from_file(file_path, label, target):
spec = importlib.util.spec_from_file_location(name=label, location=file_path)
my_module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
return getattr(my_module, target, None)
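# Example (illustrative): load_from_file("dataloader.py", label="dataloader", target=DATALOADER_FN_NAME)
# returns that module's `get_dataloader_fn` callable, or None if the module does not define it.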
class BaseLoader(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def load(self, model_path: Union[str, Path], **kwargs) -> Model:
"""
Loads and process model from file based on given set of args
"""
pass
class BaseSaver(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def save(self, model: Model, model_path: Union[str, Path]) -> None:
"""
Save model to file
"""
pass
class BaseRunner(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def init_inference(self, model: Model):
raise NotImplementedError
class BaseRunnerSession(abc.ABC):
def __init__(self, model: Model):
self._model = model
@abc.abstractmethod
def __enter__(self):
raise NotImplementedError()
@abc.abstractmethod
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError()
@abc.abstractmethod
def __call__(self, x: Dict[str, object]):
raise NotImplementedError()
def _set_env_variables(self) -> Dict[str, object]:
"""this method not remove values; fix it if needed"""
to_set = {}
old_values = {k: os.environ.pop(k, None) for k in to_set}
os.environ.update(to_set)
return old_values
def _recover_env_variables(self, old_envs: Dict[str, object]):
for name, value in old_envs.items():
if value is None:
del os.environ[name]
else:
os.environ[name] = str(value)
class BaseConverter(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def convert(self, model: Model, dataloader_fn) -> Model:
raise NotImplementedError()
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
return requested_model_precision
class BaseMetricsCalculator(abc.ABC):
required_fn_name_for_signature_parsing: Optional[str] = None
@abc.abstractmethod
def calc(
self,
*,
ids: List[Any],
y_pred: Dict[str, np.ndarray],
x: Optional[Dict[str, np.ndarray]],
y_real: Optional[Dict[str, np.ndarray]],
) -> Dict[str, float]:
"""
Calculates error/accuracy metrics
Args:
ids: List of ids identifying each sample in the batch
y_pred: model output as dict where key is output name and value is output value
x: model input as dict where key is input name and value is input value
            y_real: ground truth as dict where key is output name and value is the ground-truth value
Returns:
dictionary where key is metric name and value is its value
"""
pass
class ShapeSpec(NamedTuple):
min: Tuple
opt: Tuple
max: Tuple
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/core.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Dict, Iterable
import numpy as np
MB2B = 2 ** 20
B2MB = 1 / MB2B
FLUSH_THRESHOLD_B = 256 * MB2B
def pad_except_batch_axis(data: np.ndarray, target_shape_with_batch_axis: Iterable[int]):
assert all(
[current_size <= target_size for target_size, current_size in zip(target_shape_with_batch_axis, data.shape)]
), "target_shape should have equal or greater all dimensions comparing to data.shape"
padding = [(0, 0)] + [ # (0, 0) - do not pad on batch_axis (with index 0)
(0, target_size - current_size)
for target_size, current_size in zip(target_shape_with_batch_axis[1:], data.shape[1:])
]
return np.pad(data, padding, "constant", constant_values=np.nan)
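# Example (illustrative): pad_except_batch_axis(np.ones((2, 3)), (2, 5)) leaves the batch axis
# (axis 0) untouched and right-pads the last axis with np.nan, returning an array of shape (2, 5).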
class NpzWriter:
"""
Dumps dicts of numpy arrays into npz files
    It can/shall be used as a context manager:
    ```
    with NpzWriter('mydir') as writer:
        writer.write(outputs={'classes': np.zeros(8), 'probs': np.zeros((8, 4))},
                     labels={'classes': np.zeros(8)},
                     inputs={'input': np.zeros((8, 240, 240, 3))})
    ```
    ## Variable size data
    Only a dynamic size of the last axis is handled. Data is padded with the np.nan value.
    Each generated file may also have a different size along the dynamic axis.
"""
def __init__(self, output_dir, compress=False):
self._output_dir = Path(output_dir)
self._items_cache: Dict[str, Dict[str, np.ndarray]] = {}
self._items_counters: Dict[str, int] = {}
self._flush_threshold_b = FLUSH_THRESHOLD_B
self._compress = compress
@property
def cache_size(self):
return {name: sum([a.nbytes for a in data.values()]) for name, data in self._items_cache.items()}
def _append_to_cache(self, prefix, data):
if data is None:
return
if not isinstance(data, dict):
raise ValueError(f"{prefix} data to store shall be dict")
cached_data = self._items_cache.get(prefix, {})
for name, value in data.items():
assert isinstance(
value, (list, np.ndarray)
), f"Values shall be lists or np.ndarrays; current type {type(value)}"
if not isinstance(value, np.ndarray):
value = np.array(value)
assert value.dtype.kind in ["S", "U"] or not np.any(
np.isnan(value)
), f"Values with np.nan is not supported; {name}={value}"
cached_value = cached_data.get(name, None)
if cached_value is not None:
target_shape = np.max([cached_value.shape, value.shape], axis=0)
cached_value = pad_except_batch_axis(cached_value, target_shape)
value = pad_except_batch_axis(value, target_shape)
value = np.concatenate((cached_value, value))
cached_data[name] = value
self._items_cache[prefix] = cached_data
def write(self, **kwargs):
"""
Writes named list of dictionaries of np.ndarrays.
        The keyword names will become prefixes of the npz files in which those dictionaries are stored.
ex. writer.write(inputs={'input': np.zeros((2, 10))},
outputs={'classes': np.zeros((2,)), 'probabilities': np.zeros((2, 32))},
labels={'classes': np.zeros((2,))})
Args:
**kwargs: named list of dictionaries of np.ndarrays to store
"""
for prefix, data in kwargs.items():
self._append_to_cache(prefix, data)
biggest_item_size = max(self.cache_size.values())
if biggest_item_size > self._flush_threshold_b:
self.flush()
def flush(self):
for prefix, data in self._items_cache.items():
self._dump(prefix, data)
self._items_cache = {}
def _dump(self, prefix, data):
idx = self._items_counters.setdefault(prefix, 0)
filename = f"{prefix}-{idx:012d}.npz"
output_path = self._output_dir / filename
if self._compress:
np.savez_compressed(output_path, **data)
else:
np.savez(output_path, **data)
nitems = len(list(data.values())[0])
msg_for_labels = (
"If these are correct shapes - consider moving loading of them into metrics.py."
if prefix == "labels"
else ""
)
shapes = {name: value.shape if isinstance(value, np.ndarray) else (len(value),) for name, value in data.items()}
assert all(len(v) == nitems for v in data.values()), (
f'All items in "{prefix}" shall have same size on 0 axis equal to batch size. {msg_for_labels}'
f'{", ".join(f"{name}: {shape}" for name, shape in shapes.items())}'
)
self._items_counters[prefix] += nitems
def __enter__(self):
if self._output_dir.exists() and len(list(self._output_dir.iterdir())):
raise ValueError(f"{self._output_dir.as_posix()} is not empty")
self._output_dir.mkdir(parents=True, exist_ok=True)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.flush()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/dump.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import logging
import os
import re
from pathlib import Path
from typing import List
LOGGER = logging.getLogger(__name__)
class ExtensionManager:
def __init__(self, name: str):
self._name = name
self._registry = {}
def register_extension(self, extension: str, clazz):
already_registered_class = self._registry.get(extension, None)
if already_registered_class and already_registered_class.__module__ != clazz.__module__:
raise RuntimeError(
f"Conflicting extension {self._name}/{extension}; "
f"{already_registered_class.__module__}.{already_registered_class.__name} "
f"and "
f"{clazz.__module__}.{clazz.__name__}"
)
elif already_registered_class is None:
clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None"
LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}")
self._registry[extension] = clazz
def get(self, extension):
if extension not in self._registry:
raise RuntimeError(f"Missing extension {self._name}/{extension}")
return self._registry[extension]
@property
def supported_extensions(self):
return list(self._registry)
@staticmethod
def scan_for_extensions(extension_dirs: List[Path]):
register_pattern = r".*\.register_extension\(.*"
for extension_dir in extension_dirs:
for python_path in extension_dir.rglob("*.py"):
if not python_path.is_file():
continue
payload = python_path.read_text()
if re.findall(register_pattern, payload):
import_path = python_path.relative_to(toolkit_root_dir.parent)
package = import_path.parent.as_posix().replace(os.sep, ".")
package_with_module = f"{package}.{import_path.stem}"
spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path)
my_module = importlib.util.module_from_spec(spec)
my_module.__package__ = package
try:
spec.loader.exec_module(my_module) # pytype: disable=attribute-error
except ModuleNotFoundError as e:
LOGGER.error(
f"Could not load extensions from {import_path} due to missing python packages; {e}"
)
runners = ExtensionManager("runners")
loaders = ExtensionManager("loaders")
savers = ExtensionManager("savers")
converters = ExtensionManager("converters")
toolkit_root_dir = (Path(__file__).parent / "..").resolve()
ExtensionManager.scan_for_extensions([toolkit_root_dir])
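# Extension modules discovered by the scan above typically register themselves like this
# (illustrative names):
#   from ..extensions import loaders
#   loaders.register_extension("my-format", MyLoader)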
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/extensions.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from typing import List, Optional
def warmup(
model_name: str,
batch_sizes: List[int],
triton_gpu_engine_count: int = 1,
triton_instances: int = 1,
profiling_data: str = "random",
input_shapes: Optional[List[str]] = None,
server_url: str = "localhost",
measurement_window: int = 10000,
shared_memory: bool = False
):
print("\n")
print(f"==== Warmup start ====")
print("\n")
input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
measurement_window = 6 * measurement_window
max_batch_size = max(batch_sizes)
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
max_concurrency = min(256, max_total_requests)
batch_size = max(1, max_total_requests // 256)
step = max(1, max_concurrency // 2)
min_concurrency = step
exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""
if shared_memory:
exec_args += " --shared-memory=cuda"
result = os.system(f"perf_client {exec_args}")
if result != 0:
print(f"Failed running performance tests. Perf client failed with exit code {result}")
sys.exit(1)
print("\n")
print(f"==== Warmup done ====")
print("\n")
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/warmup.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import logging
from typing import Any, Callable, Dict, Optional, Union
from .core import GET_ARGPARSER_FN_NAME, load_from_file
LOGGER = logging.getLogger(__name__)
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict:
signature = inspect.signature(fn)
parameters_names = list(signature.parameters)
if isinstance(args, argparse.Namespace):
args = vars(args)
args = {k: v for k, v in args.items() if k in parameters_names}
return args
def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser:
parser.conflict_handler = "resolve"
signature = inspect.signature(fn)
for parameter in signature.parameters.values():
if parameter.name in ["self", "args", "kwargs"]:
continue
argument_kwargs = {}
if parameter.annotation != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["type"] = str2bool
argument_kwargs["choices"] = [0, 1]
elif type(parameter.annotation) == type(Union): # isinstance(parameter.annotation, type(Optional[Any])):
types = [type_ for type_ in parameter.annotation.__args__ if not isinstance(None, type_)]
if len(types) != 1:
raise RuntimeError(
f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}"
)
argument_kwargs["type"] = types[0]
else:
argument_kwargs["type"] = parameter.annotation
if parameter.default != inspect.Parameter.empty:
if parameter.annotation == bool:
argument_kwargs["default"] = str2bool(parameter.default)
else:
argument_kwargs["default"] = parameter.default
else:
argument_kwargs["required"] = True
name = parameter.name.replace("_", "-")
LOGGER.debug(f"Adding argument {name} with {argument_kwargs}")
parser.add_argument(f"--{name}", **argument_kwargs)
return parser
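# Example (illustrative): for a hypothetical `def get_model(checkpoint_path: str, batch_size: int = 8,
# amp: bool = False)` this adds --checkpoint-path (str, required), --batch-size (int, default 8)
# and --amp (str2bool, default False) to the parser group.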
class ArgParserGenerator:
def __init__(self, cls_or_fn, module_path: Optional[str] = None):
self._cls_or_fn = cls_or_fn
self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, "__init__")
input_is_python_file = module_path and module_path.endswith(".py")
self._input_path = module_path if input_is_python_file else None
self._required_fn_name_for_signature_parsing = getattr(
cls_or_fn, "required_fn_name_for_signature_parsing", None
)
def update_argparser(self, parser):
name = self._handle.__name__
group_parser = parser.add_argument_group(name)
add_args_for_fn_signature(group_parser, fn=self._handle)
self._update_argparser(group_parser)
def get_args(self, args: argparse.Namespace):
filtered_args = filter_fn_args(args, fn=self._handle)
tmp_parser = argparse.ArgumentParser(allow_abbrev=False)
self._update_argparser(tmp_parser)
custom_names = [
p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction)
]
custom_params = {n: getattr(args, n) for n in custom_names}
filtered_args = {**filtered_args, **custom_params}
return filtered_args
def from_args(self, args: Union[argparse.Namespace, Dict]):
args = self.get_args(args)
LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})")
return self._cls_or_fn(**args)
def _update_argparser(self, parser):
label = "argparser_update"
if self._input_path:
update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME)
if update_argparser_handle:
update_argparser_handle(parser)
elif self._required_fn_name_for_signature_parsing:
fn_handle = load_from_file(
self._input_path, label=label, target=self._required_fn_name_for_signature_parsing
)
if fn_handle:
add_args_for_fn_signature(parser, fn_handle)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/args.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import re
from typing import Dict, List
from natsort import natsorted
from tabulate import tabulate
def sort_results(results: List):
results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
return results
def save_results(filename: str, data: List, formatted: bool = False):
data = format_data(data=data) if formatted else data
with open(filename, "a") as csvfile:
fieldnames = data[0].keys()
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in data:
writer.writerow(row)
def format_data(data: List[Dict]) -> List[Dict]:
formatted_data = list()
for item in data:
formatted_item = format_keys(data=item)
formatted_data.append(formatted_item)
return formatted_data
def format_keys(data: Dict) -> Dict:
keys = {format_key(key=key): value for key, value in data.items()}
return keys
def format_key(key: str) -> str:
key = " ".join([k.capitalize() for k in re.split("_| ", key)])
return key
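# Example (illustrative): format_key("max_batch_size") returns "Max Batch Size".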
def show_results(results: List[Dict]):
headers = list(results[0].keys())
summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
print(tabulate(summary, headers=headers))
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/report.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Dict, Iterable, Optional
# pytype: disable=import-error
import onnx
import tensorrt as trt
from ..core import BaseConverter, Format, Model, Precision, ShapeSpec
from ..extensions import converters
from .utils import get_input_shapes
# pytype: enable=import-error
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
class Onnx2TRTConverter(BaseConverter):
def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
model.handle,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return model._replace(handle=cuda_engine)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
def onnx2trt(
onnx_model: onnx.ModelProto,
*,
shapes: Dict[str, ShapeSpec],
max_workspace_size: int,
max_batch_size: int,
model_precision: str,
) -> "trt.ICudaEngine":
"""
Converts onnx model to TensorRT ICudaEngine
Args:
onnx_model: onnx.Model to convert
shapes: dictionary containing min shape, max shape, opt shape for each input name
max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time.
max_batch_size: The maximum batch size which can be used at execution time,
and also the batch size for which the CudaEngine will be optimized.
model_precision: precision of kernels (possible values: fp16, fp32)
Returns: TensorRT ICudaEngine
"""
# Whether or not 16-bit kernels are permitted.
# During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled.
fp16_mode = "16" in model_precision
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16_mode
builder.max_batch_size = max_batch_size
builder.max_workspace_size = max_workspace_size
# In TensorRT 7.0, the ONNX parser only supports full-dimensions mode,
# meaning that your network definition must be created with the explicitBatch flag set.
# For more information, see
# https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes
flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flags)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
# onnx model parsing
if not parser.parse(onnx_model.SerializeToString()):
for i in range(parser.num_errors):
LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}")
raise RuntimeError("Error during parsing ONNX model (see logs for details)")
# optimization
config = builder.create_builder_config()
config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16)
config.max_workspace_size = max_workspace_size
profile = builder.create_optimization_profile()
for name, spec in shapes.items():
profile.set_shape(name, **spec._asdict())
config.add_optimization_profile(profile)
engine = builder.build_engine(network, config=config)
return engine
converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda/onnx2trt_conv.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pathlib import Path
from typing import Dict, Optional, Union
import numpy as np
# pytype: disable=import-error
import onnx
import onnx.optimizer
import onnx.shape_inference
import onnxruntime
from google.protobuf import text_format
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
# pytype: enable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
from .utils import infer_precision
LOGGER = logging.getLogger(__name__)
def _value_info2tensor_spec(value_info: onnx.ValueInfoProto):
onnx_data_type_map = {"float": "float32", "double": "float64"}
elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower()
dtype = onnx_data_type_map.get(elem_type_name, elem_type_name)
def _get_dim(dim):
which = dim.WhichOneof("value")
if which is not None: # which is None when dim is None
dim = getattr(dim, which)
return None if isinstance(dim, (str, bytes)) else dim
shape = value_info.type.tensor_type.shape
shape = tuple([_get_dim(d) for d in shape.dim])
return TensorSpec(value_info.name, dtype=dtype, shape=shape)
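# Example (illustrative): a float32 value_info with dims [N, 80] (N being a symbolic dim_param)
# maps to TensorSpec(name, dtype="float32", shape=(None, 80)).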
def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]:
import networkx as nx
# build directed graph
nx_graph = nx.DiGraph()
def _get_dtype(vi):
t = vi.type
if hasattr(t, "tensor_type"):
type_id = t.tensor_type.elem_type
else:
raise NotImplementedError("Not implemented yet")
return TENSOR_TYPE_TO_NP_TYPE[type_id]
node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info}
node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output}
node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input}
for node in onnx_graph.node:
node_dtype = node_output2type.get("+".join(node.output), None)
nx_graph.add_node(
node.name,
op=node.op_type,
attr={a.name: a for a in node.attribute},
dtype=node_dtype,
)
for input_name in node.input:
prev_node = node_outputs2node.get(input_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, node.name)
for input_node in onnx_graph.input:
input_name = input_node.name
nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node))
next_node = node_inputs2node.get(input_name, None)
if next_node:
nx_graph.add_edge(input_name, next_node.name)
for output in onnx_graph.output:
output_name = output.name
nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output))
prev_node = node_outputs2node.get(output_name, None)
if prev_node:
nx_graph.add_edge(prev_node.name, output_name)
else:
LOGGER.warning(f"Could not find previous node for {output_name}")
input_names = [n.name for n in onnx_graph.input]
output_names = [n.name for n in onnx_graph.output]
most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None))
if most_common_dtype is not None:
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype]
else:
precision = None
return precision
class OnnxLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
model = onnx.load(model_path)
onnx.checker.check_model(model)
onnx.helper.strip_doc_string(model)
model = onnx.shape_inference.infer_shapes(model)
        # TODO: modification of the ONNX model inputs/outputs probably causes an error on optimize
# from onnx.utils import polish_model
# model = polish_model(model) # run checker, docs strip, optimizer and shape inference
inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input}
outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output}
precision = _infer_graph_precision(model.graph)
return Model(model, precision, inputs, outputs)
class OnnxSaver(BaseSaver):
def __init__(self, as_text: bool = False):
self._as_text = as_text
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
onnx_model: onnx.ModelProto = model.handle
if self._as_text:
with model_path.open("w") as f:
f.write(text_format.MessageToString(onnx_model))
else:
with model_path.open("wb") as f:
f.write(onnx_model.SerializeToString())
"""
ExecutionProviders on onnxruntime 1.4.0
['TensorrtExecutionProvider',
'CUDAExecutionProvider',
'MIGraphXExecutionProvider',
'NGRAPHExecutionProvider',
'OpenVINOExecutionProvider',
'DnnlExecutionProvider',
'NupharExecutionProvider',
'VitisAIExecutionProvider',
'ArmNNExecutionProvider',
'ACLExecutionProvider',
'CPUExecutionProvider']
"""
def _check_providers(providers):
providers = providers or []
if not isinstance(providers, (list, tuple)):
providers = [providers]
available_providers = onnxruntime.get_available_providers()
unavailable = set(providers) - set(available_providers)
if unavailable:
raise RuntimeError(f"Unavailable providers {unavailable}")
return providers
class OnnxRunner(BaseRunner):
def __init__(self, verbose_runtime_logs: bool = False):
self._providers = None
self._verbose_runtime_logs = verbose_runtime_logs
def init_inference(self, model: Model):
assert isinstance(model.handle, onnx.ModelProto)
return OnnxRunnerSession(
model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs
)
class OnnxRunnerSession(BaseRunnerSession):
def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False):
super().__init__(model)
self._input_names = None
self._output_names = None
self._session = None
self._providers = providers
self._verbose_runtime_logs = verbose_runtime_logs
self._old_env_values = {}
def __enter__(self):
self._old_env_values = self._set_env_variables()
sess_options = onnxruntime.SessionOptions() # default session options
if self._verbose_runtime_logs:
sess_options.log_severity_level = 0
sess_options.log_verbosity_level = 1
LOGGER.info(
f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}"
)
self._input_names = list(self._model.inputs)
self._output_names = list(self._model.outputs)
model_payload = self._model.handle.SerializeToString()
self._session = onnxruntime.InferenceSession(
model_payload, providers=self._providers, sess_options=sess_options
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._input_names = None
self._output_names = None
self._session = None
self._recover_env_variables(self._old_env_values)
def __call__(self, x: Dict[str, object]):
feed_dict = {k: x[k] for k in self._input_names}
y_pred = self._session.run(self._output_names, feed_dict)
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.ONNX.value, OnnxLoader)
runners.register_extension(Format.ONNX.value, OnnxRunner)
savers.register_extension(Format.ONNX.value, OnnxSaver)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda/onnx.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. | DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import Counter
from typing import Callable, Dict, List
import networkx as nx
from ..core import ShapeSpec
def infer_precision(
nx_graph: nx.Graph,
input_names: List[str],
output_names: List[str],
get_node_dtype_fn: Callable,
):
node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes]
node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]]
dtypes_counter = Counter(node_dtypes)
return dtypes_counter.most_common()[0][0]
def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0):
def _set_dynamic_shapes(t, shapes):
for k, v in t.items():
shape = list(v.shape)
for dim, s in enumerate(shape):
if shapes[k][dim] != -1 and shapes[k][dim] != s:
shapes[k][dim] = -1
## get all shapes from input and output tensors
input_shapes = {}
output_shapes = {}
for batch in dataloader:
_, x, y = batch
for k, v in x.items():
input_shapes[k] = list(v.shape)
for k, v in y.items():
output_shapes[k] = list(v.shape)
break
# based on max <max_num_iters> iterations, check which
# dimensions differ to determine dynamic_axes
max_num_iters = 100
for idx, batch in enumerate(dataloader):
if idx >= max_num_iters:
break
_, x, y = batch
_set_dynamic_shapes(x, input_shapes)
_set_dynamic_shapes(y, output_shapes)
return input_shapes, output_shapes
def get_dynamic_axes(dataloader, batch_size_dim=0):
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim)
all_shapes = {**input_shapes, **output_shapes}
dynamic_axes = {}
    for k, shape in all_shapes.items():
        for idx, s in enumerate(shape):
            if s == -1:
                # collect every dynamic axis instead of keeping only the last one found
                dynamic_axes.setdefault(k, {})[idx] = k + "_" + str(idx)
for k, v in all_shapes.items():
if k in dynamic_axes:
dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)})
else:
dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)}
return dynamic_axes
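# Example (illustrative): if tensor "mel" varies only along axis 2 across batches,
# get_dynamic_axes returns {"mel": {2: "mel_2", 0: "batch_size_0"}}.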
def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]:
def init_counters_and_shapes(x, counters, min_shapes, max_shapes):
for k, v in x.items():
counters[k] = Counter()
min_shapes[k] = [float("inf")] * v.ndim
max_shapes[k] = [float("-inf")] * v.ndim
counters = {}
min_shapes: Dict[str, tuple] = {}
max_shapes: Dict[str, tuple] = {}
for idx, batch in enumerate(dataloader):
ids, x, y = batch
if idx == 0:
init_counters_and_shapes(x, counters, min_shapes, max_shapes)
for k, v in x.items():
shape = v.shape
counters[k][shape] += 1
min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)])
max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)])
opt_shapes: Dict[str, tuple] = {}
for k, v in counters.items():
opt_shapes[k] = v.most_common(1)[0][0]
shapes = {}
for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes
shapes[k] = ShapeSpec(
min=(1,) + min_shapes[k][1:],
max=(max_batch_size,) + max_shapes[k][1:],
opt=(max_batch_size,) + opt_shapes[k][1:],
)
return shapes
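# Example (illustrative): if input "mel" is observed with shapes (1, 80, 200) and (1, 80, 250)
# and max_batch_size=8, the returned spec is roughly
# ShapeSpec(min=(1, 80, 200), opt=(8, 80, <most common trailing shape>), max=(8, 80, 250)).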
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda/utils.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from pathlib import Path
from typing import Dict, NamedTuple, Optional, Union
import numpy as np
# pytype: disable=import-error
try:
import pycuda.autoinit
import pycuda.driver as cuda
except (ImportError, Exception) as e:
logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}")
# pytype: enable=import-error
import tensorrt as trt # pytype: disable=import-error
from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec
from ..extensions import loaders, runners, savers
LOGGER = logging.getLogger(__name__)
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
"""
documentation:
https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html
https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section
"""
class TensorRTLoader(BaseLoader):
def load(self, model_path: Union[str, Path], **_) -> Model:
model_path = Path(model_path)
LOGGER.debug(f"Loading TensorRT engine from {model_path}")
with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime:
engine = runtime.deserialize_cuda_engine(fh.read())
if engine is None:
raise RuntimeError(f"Could not load ICudaEngine from {model_path}")
inputs = {}
outputs = {}
for binding_idx in range(engine.num_bindings):
name = engine.get_binding_name(binding_idx)
is_input = engine.binding_is_input(binding_idx)
dtype = engine.get_binding_dtype(binding_idx)
shape = engine.get_binding_shape(binding_idx)
if is_input:
inputs[name] = TensorSpec(name, dtype, shape)
else:
outputs[name] = TensorSpec(name, dtype, shape)
return Model(engine, None, inputs, outputs)
class TensorRTSaver(BaseSaver):
def __init__(self):
pass
def save(self, model: Model, model_path: Union[str, Path]) -> None:
model_path = Path(model_path)
LOGGER.debug(f"Saving TensorRT engine to {model_path.as_posix()}")
model_path.parent.mkdir(parents=True, exist_ok=True)
engine: "trt.ICudaEngine" = model.handle
with model_path.open("wb") as fh:
fh.write(engine.serialize())
class TRTBuffers(NamedTuple):
x_host: Optional[Dict[str, object]]
x_dev: Dict[str, object]
y_pred_host: Dict[str, object]
y_pred_dev: Dict[str, object]
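# The TRTBuffers tuple above groups, per tensor name, the host-side numpy arrays and the matching
# device allocations used to copy a batch host->device, execute the engine, and copy results back.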
class TensorRTRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return TensorRTRunnerSession(model=model)
class TensorRTRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, trt.ICudaEngine)
self._model = model
self._has_dynamic_shapes = None
self._context = None
self._engine: trt.ICudaEngine = self._model.handle
self._cuda_context = pycuda.autoinit.context
self._input_names = None
self._output_names = None
self._buffers = None
def __enter__(self):
self._context = self._engine.create_execution_context()
self._context.__enter__()
self._input_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx)
]
self._output_names = [
self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx)
]
# all_binding_shapes_specified is True for models without dynamic shapes
# so initially this variable is False for models with dynamic shapes
self._has_dynamic_shapes = not self._context.all_binding_shapes_specified
return self
def __exit__(self, exc_type, exc_value, traceback):
self._context.__exit__(exc_type, exc_value, traceback)
self._input_names = None
self._output_names = None
# TODO: are cuda buffers dealloc automatically?
self._buffers = None
def __call__(self, x):
buffers = self._prepare_buffers_if_needed(x)
bindings = self._update_bindings(buffers)
for name in self._input_names:
cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name])
self._cuda_context.push()
self._context.execute_v2(bindings=bindings)
self._cuda_context.pop()
for name in self._output_names:
cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name])
return buffers.y_pred_host
def _update_bindings(self, buffers: TRTBuffers):
bindings = [None] * self._engine.num_bindings
for name in buffers.y_pred_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.y_pred_dev[name]
for name in buffers.x_dev:
binding_idx: int = self._engine[name]
bindings[binding_idx] = buffers.x_dev[name]
return bindings
def _set_dynamic_input_shapes(self, x_host):
def _is_shape_dynamic(input_shape):
return any([dim is None or dim == -1 for dim in input_shape])
for name in self._input_names:
bindings_idx = self._engine[name]
data_shape = x_host[name].shape # pytype: disable=attribute-error
if self._engine.is_shape_binding(bindings_idx):
input_shape = self._context.get_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_shape_input(bindings_idx, data_shape)
else:
input_shape = self._engine.get_binding_shape(bindings_idx)
if _is_shape_dynamic(input_shape):
self._context.set_binding_shape(bindings_idx, data_shape)
assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified
def _prepare_buffers_if_needed(self, x_host: Dict[str, object]):
# pytype: disable=attribute-error
new_batch_size = list(x_host.values())[0].shape[0]
current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0
# pytype: enable=attribute-error
if self._has_dynamic_shapes or new_batch_size != current_batch_size:
# TODO: are CUDA buffers dealloc automatically?
self._set_dynamic_input_shapes(x_host)
y_pred_host = {}
for name in self._output_names:
shape = self._context.get_binding_shape(self._engine[name])
y_pred_host[name] = np.zeros(shape, dtype=trt.nptype(self._model.outputs[name].dtype))
y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()}
x_dev = {
name: cuda.mem_alloc(host_input.nbytes)
for name, host_input in x_host.items()
if name in self._input_names # pytype: disable=attribute-error
}
self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev)
return self._buffers._replace(x_host=x_host)
if "pycuda.driver" in sys.modules:
loaders.register_extension(Format.TRT.value, TensorRTLoader)
runners.register_extension(Format.TRT.value, TensorRTRunner)
savers.register_extension(Format.TRT.value, TensorRTSaver)
else:
LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda/tensorrt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from collections import Counter
from pathlib import Path
from typing import Dict, Iterable, NamedTuple, Optional, Union
import torch # pytype: disable=import-error
import yaml
from ..core import (
GET_MODEL_FN_NAME,
BaseConverter,
BaseLoader,
BaseRunner,
BaseRunnerSession,
BaseSaver,
Format,
Model,
Precision,
TensorSpec,
load_from_file,
)
from ..extensions import converters, loaders, runners, savers
from .utils import get_dynamic_axes, get_input_shapes, get_shapes_with_dynamic_axes
LOGGER = logging.getLogger(__name__)
class InputOutputSpec(NamedTuple):
inputs: Dict[str, TensorSpec]
outputs: Dict[str, TensorSpec]
def get_sample_input(dataloader, device):
for batch in dataloader:
_, x, _ = batch
break
if isinstance(x, dict):
sample_input = list(x.values())
elif isinstance(x, list):
sample_input = x
else:
raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict")
for idx, s in enumerate(sample_input):
sample_input[idx] = torch.from_numpy(s).to(device)
return tuple(sample_input)
def get_model_device(torch_model):
if next(torch_model.parameters()).is_cuda:
return "cuda"
else:
return "cpu"
def infer_model_precision(model):
counter = Counter()
for param in model.parameters():
counter[param.dtype] += 1
if counter[torch.float16] > 0:
return Precision.FP16
else:
return Precision.FP32
def _get_tensor_dtypes(dataloader, precision):
def _get_dtypes(t):
dtypes = {}
for k, v in t.items():
dtype = str(v.dtype)
if dtype == "float64":
dtype = "float32"
if precision == Precision.FP16 and dtype == "float32":
dtype = "float16"
dtypes[k] = dtype
return dtypes
input_dtypes = {}
output_dtypes = {}
for batch in dataloader:
_, x, y = batch
input_dtypes = _get_dtypes(x)
output_dtypes = _get_dtypes(y)
break
return input_dtypes, output_dtypes
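# Note: with Precision.FP16, float32 inputs/outputs (and float64, which is first downcast to
# float32) are reported as "float16"; integer dtypes pass through unchanged.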
### TODO assumption: floating point input
### type has same precision as the model
def _get_io_spec(model, dataloader_fn):
precision = model.precision
dataloader = dataloader_fn()
input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision)
input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader)
inputs = {
name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in model.inputs
}
outputs = {
name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name]))
for name in model.outputs
}
return InputOutputSpec(inputs, outputs)
class PyTorchModelLoader(BaseLoader):
required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME
def __init__(self, **kwargs):
self._model_args = kwargs
def load(self, model_path: Union[str, Path], **_) -> Model:
if isinstance(model_path, Path):
model_path = model_path.as_posix()
get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME)
model, tensor_infos = get_model(**self._model_args)
io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"])
precision = infer_model_precision(model)
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class TorchScriptLoader(BaseLoader):
def __init__(self, tensor_names_path: str = None, **kwargs):
self._model_args = kwargs
self._io_spec = None
if tensor_names_path is not None:
with Path(tensor_names_path).open("r") as fh:
tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader)
self._io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"])
def load(self, model_path: Union[str, Path], **_) -> Model:
if not isinstance(model_path, Path):
model_path = Path(model_path)
model = torch.jit.load(model_path.as_posix())
precision = infer_model_precision(model)
io_spec = self._io_spec
if not io_spec:
yaml_path = model_path.parent / f"{model_path.stem}.yaml"
if not yaml_path.is_file():
raise ValueError(
f"If `--tensor-names-path is not provided, "
f"TorchScript model loader expects file {yaml_path} with tensor information."
)
with yaml_path.open("r") as fh:
tensor_info = yaml.load(fh, Loader=yaml.SafeLoader)
io_spec = InputOutputSpec(tensor_info["inputs"], tensor_info["outputs"])
return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class TorchScriptTraceConverter(BaseConverter):
def __init__(self):
pass
def convert(self, model: Model, dataloader_fn) -> Model:
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
converted_model = torch.jit.trace_module(model.handle, {"forward": dummy_input})
io_spec = _get_io_spec(model, dataloader_fn)
return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class TorchScriptScriptConverter(BaseConverter):
def __init__(self):
pass
def convert(self, model: Model, dataloader_fn) -> Model:
converted_model = torch.jit.script(model.handle)
io_spec = _get_io_spec(model, dataloader_fn)
return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs)
class PYT2ONNXConverter(BaseConverter):
def __init__(self, onnx_opset: int = None):
self._onnx_opset = onnx_opset
def convert(self, model: Model, dataloader_fn) -> Model:
import tempfile
import onnx # pytype: disable=import-error
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted."
dynamic_axes = get_dynamic_axes(dataloader_fn())
device = get_model_device(model.handle)
dummy_input = get_sample_input(dataloader_fn(), device)
with tempfile.TemporaryDirectory() as tmpdirname:
export_path = os.path.join(tmpdirname, "model.onnx")
with torch.no_grad():
torch.onnx.export(
model.handle,
dummy_input,
export_path,
do_constant_folding=True,
input_names=list(model.inputs),
output_names=list(model.outputs),
dynamic_axes=dynamic_axes,
opset_version=self._onnx_opset,
enable_onnx_checker=True,
)
onnx_model = onnx.load(export_path)
onnx.checker.check_model(onnx_model)
onnx.helper.strip_doc_string(onnx_model)
onnx_model = onnx.shape_inference.infer_shapes(onnx_model)
return Model(
handle=onnx_model,
precision=model.precision,
inputs=model.inputs,
outputs=model.outputs,
)
class PYT2TensorRTConverter(BaseConverter):
def __init__(self, max_batch_size: int, max_workspace_size: int, onnx_opset: int, precision: str):
self._max_batch_size = max_batch_size
self._max_workspace_size = max_workspace_size
self._onnx_opset = onnx_opset
self._precision = Precision(precision)
def convert(self, model: Model, dataloader_fn) -> Model:
from .onnx import _infer_graph_precision
from .onnx2trt_conv import onnx2trt
pyt2onnx_converter = PYT2ONNXConverter(self._onnx_opset)
onnx_model = pyt2onnx_converter.convert(model, dataloader_fn).handle
precision = _infer_graph_precision(onnx_model.graph)
input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size)
cuda_engine = onnx2trt(
onnx_model,
shapes=input_shapes,
max_workspace_size=self._max_workspace_size,
max_batch_size=self._max_batch_size,
model_precision=self._precision.value,
)
return Model(
handle=cuda_engine,
precision=model.precision,
inputs=model.inputs,
outputs=model.outputs,
)
@staticmethod
def required_source_model_precision(requested_model_precision: Precision) -> Precision:
# TensorRT requires source models to be in FP32 precision
return Precision.FP32
class TorchScriptSaver(BaseSaver):
def save(self, model: Model, model_path: Union[str, Path]) -> None:
if not isinstance(model_path, Path):
model_path = Path(model_path)
if isinstance(model.handle, torch.jit.ScriptModule):
torch.jit.save(model.handle, model_path.as_posix())
else:
print("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.")
assert False # temporary error handling
def _format_tensor_spec(tensor_spec):
# wrapping shape with list and whole tensor_spec with dict() is required for correct yaml dump
tensor_spec = tensor_spec._replace(shape=list(tensor_spec.shape))
tensor_spec = dict(tensor_spec._asdict())
return tensor_spec
# store TensorSpecs from inputs and outputs in a yaml file
tensor_specs = {
"inputs": {k: _format_tensor_spec(v) for k, v in model.inputs.items()},
"outputs": {k: _format_tensor_spec(v) for k, v in model.outputs.items()},
}
yaml_path = model_path.parent / f"{model_path.stem}.yaml"
with Path(yaml_path).open("w") as fh:
yaml.dump(tensor_specs, fh, indent=4)
class PyTorchRunner(BaseRunner):
def __init__(self):
pass
def init_inference(self, model: Model):
return PyTorchRunnerSession(model=model)
class PyTorchRunnerSession(BaseRunnerSession):
def __init__(self, model: Model):
super().__init__(model)
assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance(
model.handle, torch.nn.Module
), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted."
self._model = model
self._output_names = None
def __enter__(self):
self._output_names = list(self._model.outputs)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._output_names = None
self._model = None
def __call__(self, x: Dict[str, object]):
with torch.no_grad():
feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()]
y_pred = self._model.handle(*feed_list)
if isinstance(y_pred, torch.Tensor):
y_pred = (y_pred,)
y_pred = [t.cpu().numpy() for t in y_pred]
y_pred = dict(zip(self._output_names, y_pred))
return y_pred
loaders.register_extension(Format.PYT.value, PyTorchModelLoader)
loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader)
loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader)
converters.register_extension(f"{Format.PYT.value}--{Format.TS_SCRIPT.value}", TorchScriptScriptConverter)
converters.register_extension(f"{Format.PYT.value}--{Format.TS_TRACE.value}", TorchScriptTraceConverter)
converters.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXConverter)
converters.register_extension(f"{Format.PYT.value}--{Format.TRT.value}", PYT2TensorRTConverter)
savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver)
savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver)
runners.register_extension(Format.PYT.value, PyTorchRunner)
runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner)
runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/triton/deployment_toolkit/bermuda/pyt.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W, _ = torch.linalg.qr(torch.randn(c, c))
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1*W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
def forward(self, z, reverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
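# Illustrative sketch, not part of the original module: round-tripping a random
# batch through Invertible1x1ConvLUS and its reverse pass should approximately
# recover the input, since W_inverse is the exact inverse of the fused PLU product.
# The function name and tensor sizes below are toy assumptions made for the demo.
def _demo_invertible1x1_roundtrip():
    torch.manual_seed(0)
    conv = Invertible1x1ConvLUS(c=8)
    z = torch.randn(2, 8, 16)               # (batch, channels, time)
    y, log_det_W = conv(z)                  # forward pass also returns log|det W|
    z_rec = conv(y, reverse=True)           # inverse pass caches W_inverse
    assert torch.allclose(z, z_rec, atol=1e-4)
    return log_det_W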
class ConvAttention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_speaker_dim=128,
n_text_channels=512, n_att_channels=80, temperature=1.0,
n_mel_convs=2, align_query_enc_type='3xconv',
use_query_proj=True):
super(ConvAttention, self).__init__()
self.temperature = temperature
self.att_scaling_factor = np.sqrt(n_att_channels)
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
self.attn_proj = torch.nn.Conv2d(n_att_channels, 1, kernel_size=1)
self.align_query_enc_type = align_query_enc_type
self.use_query_proj = bool(use_query_proj)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels,
n_text_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels * 2,
n_att_channels,
kernel_size=1,
bias=True))
self.align_query_enc_type = align_query_enc_type
if align_query_enc_type == "inv_conv":
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
elif align_query_enc_type == "3xconv":
self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels,
n_mel_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels * 2,
n_mel_channels,
kernel_size=1,
bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels,
n_att_channels,
kernel_size=1,
bias=True))
else:
raise ValueError("Unknown query encoder type specified")
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens)
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def encode_query(self, query, query_lens):
query = query.permute(2, 0, 1) # seq_len, batch, feature dim
lens, ids = torch.sort(query_lens, descending=True)
original_ids = [0] * lens.size(0)
for i in range(len(ids)):
original_ids[ids[i]] = i
query_encoded = self.run_padded_sequence(ids, original_ids, lens,
query, self.query_lstm)
query_encoded = query_encoded.permute(1, 2, 0)
return query_encoded
def forward(self, queries, keys, query_lens, mask=None, key_lens=None,
keys_encoded=None, attn_prior=None):
"""Attention mechanism for flowtron parallel
Unlike in Flowtron, we have no restrictions such as causality etc,
since we only need this during training.
Args:
queries (torch.tensor): B x C x T1 tensor
(probably going to be mel data)
keys (torch.tensor): B x C2 x T2 tensor (text data)
query_lens: lengths for sorting the queries in descending order
mask (torch.tensor): uint8 binary mask for variable length entries
(should be in the T2 domain)
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask.
Final dim T2 should sum to 1
"""
keys_enc = self.key_proj(keys) # B x n_attn_dims x T2
        # Beware: this only works because query_dim = attn_dim = n_mel_channels
if self.use_query_proj:
if self.align_query_enc_type == "inv_conv":
queries_enc, log_det_W = self.query_proj(queries)
elif self.align_query_enc_type == "3xconv":
queries_enc = self.query_proj(queries)
log_det_W = 0.0
else:
queries_enc, log_det_W = self.query_proj(queries)
else:
queries_enc, log_det_W = queries, 0.0
# different ways of computing attn,
        # one is isotropic Gaussians (per phoneme)
        # Simplistic isotropic Gaussian attention
# B x n_attn_dims x T1 x T2
attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2
# compute log likelihood from a gaussian
attn = -0.0005 * attn.sum(1, keepdim=True)
if attn_prior is not None:
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None]+1e-8)
attn_logprob = attn.clone()
if mask is not None:
attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2),
-float("inf"))
attn = self.softmax(attn) # Softmax along T2
return attn, attn_logprob
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/attention.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Optional
import torch
from torch import nn as nn
from common import filter_warnings
from fastpitch.model import TemporalPredictor
from fastpitch.transformer_jit import FFTransformer
def regulate_len(durations, enc_out, pace: float = 1.0,
mel_max_len: Optional[int] = None):
"""If target=None, then predicted durations are applied"""
reps = torch.round(durations.float() / pace).long()
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
bsz, _, hid = enc_out.size()
reps_padded = torch.cat([reps, (max_len - dec_lens)[:, None]], dim=1)
pad_vec = torch.zeros(bsz, 1, hid, dtype=enc_out.dtype,
device=enc_out.device)
enc_rep = torch.cat([enc_out, pad_vec], dim=1)
enc_rep = torch.repeat_interleave(
enc_rep.view(-1, hid), reps_padded.view(-1), dim=0
).view(bsz, -1, hid)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
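# Illustrative sketch, not part of the original module: with durations [[2, 1, 3]]
# and pace=1.0, every encoder frame is repeated according to its duration, so a
# (1, 3, hidden) encoder output becomes a (1, 6, hidden) decoder input and dec_lens
# is [6]. The function name and toy shapes are assumptions made for the demo.
def _demo_regulate_len():
    enc_out = torch.arange(6, dtype=torch.float32).view(1, 3, 2)  # (B=1, T=3, H=2)
    durations = torch.tensor([[2, 1, 3]])
    enc_rep, dec_lens = regulate_len(durations, enc_out, pace=1.0)
    assert enc_rep.shape == (1, 6, 2) and dec_lens.tolist() == [6]
    return enc_rep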
class FastPitchJIT(nn.Module):
__constants__ = ['energy_conditioning']
def __init__(self, n_mel_channels, n_symbols, padding_idx,
symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads,
in_fft_d_head,
in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size,
in_fft_output_size,
p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb,
out_fft_n_layers, out_fft_n_heads, out_fft_d_head,
out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size,
out_fft_output_size,
p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb,
dur_predictor_kernel_size, dur_predictor_filter_size,
p_dur_predictor_dropout, dur_predictor_n_layers,
pitch_predictor_kernel_size, pitch_predictor_filter_size,
p_pitch_predictor_dropout, pitch_predictor_n_layers,
pitch_embedding_kernel_size,
energy_conditioning,
energy_predictor_kernel_size, energy_predictor_filter_size,
p_energy_predictor_dropout, energy_predictor_n_layers,
energy_embedding_kernel_size,
n_speakers, speaker_emb_weight, pitch_conditioning_formants=1):
super(FastPitchJIT, self).__init__()
self.encoder = FFTransformer(
n_layer=in_fft_n_layers, n_head=in_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=in_fft_d_head,
d_inner=in_fft_conv1d_filter_size,
kernel_size=in_fft_conv1d_kernel_size,
dropout=p_in_fft_dropout,
dropatt=p_in_fft_dropatt,
dropemb=p_in_fft_dropemb,
embed_input=True,
d_embed=symbols_embedding_dim,
n_embed=n_symbols,
padding_idx=padding_idx)
if n_speakers > 1:
self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.speaker_emb_weight = speaker_emb_weight
self.duration_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=dur_predictor_filter_size,
kernel_size=dur_predictor_kernel_size,
dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers
)
self.decoder = FFTransformer(
n_layer=out_fft_n_layers, n_head=out_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=out_fft_d_head,
d_inner=out_fft_conv1d_filter_size,
kernel_size=out_fft_conv1d_kernel_size,
dropout=p_out_fft_dropout,
dropatt=p_out_fft_dropatt,
dropemb=p_out_fft_dropemb,
embed_input=False,
d_embed=symbols_embedding_dim
)
self.pitch_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=pitch_predictor_filter_size,
kernel_size=pitch_predictor_kernel_size,
dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers,
n_predictions=pitch_conditioning_formants
)
self.pitch_emb = nn.Conv1d(
pitch_conditioning_formants, symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2))
# Store values precomputed for training data within the model
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.energy_conditioning = energy_conditioning
if energy_conditioning:
self.energy_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=energy_predictor_filter_size,
kernel_size=energy_predictor_kernel_size,
dropout=p_energy_predictor_dropout,
n_layers=energy_predictor_n_layers,
n_predictions=1
)
self.energy_emb = nn.Conv1d(
1, symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2))
self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True)
# skip self.attention (used only in training)
def infer(self, inputs, pace: float = 1.0,
dur_tgt: Optional[torch.Tensor] = None,
pitch_tgt: Optional[torch.Tensor] = None,
energy_tgt: Optional[torch.Tensor] = None,
speaker: int = 0):
if self.speaker_emb is None:
spk_emb = None
else:
speaker = (torch.ones(inputs.size(0)).long().to(inputs.device)
* speaker)
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, 100.0)
# Pitch over chars
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
if pitch_tgt is None:
pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2)
else:
pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2)
enc_out = enc_out + pitch_emb
# Predict energy
if self.energy_conditioning:
if energy_tgt is None:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2)
else:
energy_pred = None
energy_emb = self.energy_emb(energy_tgt).transpose(1, 2)
enc_out = enc_out + energy_emb
else:
energy_pred = None
len_regulated, dec_lens = regulate_len(
dur_pred if dur_tgt is None else dur_tgt,
enc_out, pace, mel_max_len=None)
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
# mel_lens = dec_mask.squeeze(2).sum(axis=1).long()
mel_out = mel_out.permute(0, 2, 1) # For inference.py
return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/model_jit.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit, prange
@jit(nopython=True)
def mas(log_attn_map, width=1):
# assumes mel x text
opt = np.zeros_like(log_attn_map)
log_attn_map = log_attn_map.copy()
log_attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(log_attn_map)
log_p[0, :] = log_attn_map[0, :]
prev_ind = np.zeros_like(log_attn_map, dtype=np.int64)
for i in range(1, log_attn_map.shape[0]):
for j in range(log_attn_map.shape[1]): # for each text dim
prev_j = np.arange(max(0, j-width), j+1)
prev_log = np.array([log_p[i-1, prev_idx] for prev_idx in prev_j])
ind = np.argmax(prev_log)
log_p[i, j] = log_attn_map[i, j] + prev_log[ind]
prev_ind[i, j] = prev_j[ind]
# now backtrack
curr_text_idx = log_attn_map.shape[1]-1
for i in range(log_attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True)
def mas_width1(log_attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
neg_inf = log_attn_map.dtype.type(-np.inf)
log_p = log_attn_map.copy()
log_p[0, 1:] = neg_inf
for i in range(1, log_p.shape[0]):
prev_log1 = neg_inf
for j in range(log_p.shape[1]):
prev_log2 = log_p[i-1, j]
log_p[i, j] += max(prev_log1, prev_log2)
prev_log1 = prev_log2
# now backtrack
opt = np.zeros_like(log_p)
one = opt.dtype.type(1)
j = log_p.shape[1]-1
for i in range(log_p.shape[0]-1, 0, -1):
opt[i, j] = one
if log_p[i-1, j-1] >= log_p[i-1, j]:
j -= 1
if j == 0:
opt[1:i, j] = one
break
opt[0, j] = one
return opt
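# Illustrative sketch, not part of the original module: on a toy 4x3 (mel x text)
# log-attention map, mas_width1 returns a hard 0/1 alignment that is monotonic and
# assigns every mel frame to exactly one text token. Values are toy assumptions.
def _demo_mas_width1():
    log_attn = np.log(np.array([[0.7, 0.2, 0.1],
                                [0.6, 0.3, 0.1],
                                [0.1, 0.8, 0.1],
                                [0.1, 0.2, 0.7]], dtype=np.float32))
    hard = mas_width1(log_attn)
    assert hard.sum(axis=1).tolist() == [1.0, 1.0, 1.0, 1.0]  # one token per frame
    return hard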
@jit(nopython=True, parallel=True)
def b_mas(b_log_attn_map, in_lens, out_lens, width=1):
assert width == 1
attn_out = np.zeros_like(b_log_attn_map)
for b in prange(b_log_attn_map.shape[0]):
out = mas_width1(b_log_attn_map[b, 0, :out_lens[b], :in_lens[b]])
attn_out[b, 0, :out_lens[b], :in_lens[b]] = out
return attn_out
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/alignment.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def pitch_transform_custom(pitch, pitch_lens):
"""Apply a custom pitch transformation to predicted pitch values.
This sample modification linearly increases the pitch throughout
the utterance from 0.5 of predicted pitch to 1.5 of predicted pitch.
In other words, it starts low and ends high.
PARAMS
------
pitch: torch.Tensor (bs, max_len)
Predicted pitch values for each lexical unit, padded to max_len (in Hz).
    pitch_lens: torch.Tensor (bs, )
        Number of lexical units in each utterance.
RETURNS
-------
pitch: torch.Tensor
Modified pitch (in Hz).
"""
weights = torch.arange(pitch.size(1), dtype=torch.float32, device=pitch.device)
# The weights increase linearly from 0.0 to 1.0 in every i-th row
# in the range (0, pitch_lens[i])
weights = weights.unsqueeze(0) / pitch_lens.unsqueeze(1)
# Shift the range from (0.0, 1.0) to (0.5, 1.5)
weights += 0.5
return pitch * weights
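# Illustrative sketch, not part of the original module: for a single utterance of
# six lexical units with a flat 200 Hz prediction, the transform tilts the contour
# from 0.5x (100 Hz) up to just under 1.5x (about 267 Hz) across the utterance.
# The function name and toy values are assumptions made for the demo.
def _demo_pitch_transform_custom():
    pitch = torch.full((1, 6), 200.0)
    pitch_lens = torch.tensor([6])
    tilted = pitch_transform_custom(pitch, pitch_lens)
    assert abs(tilted[0, 0].item() - 100.0) < 1e-4    # 200 * (0/6 + 0.5)
    assert abs(tilted[0, -1].item() - 266.67) < 0.01  # 200 * (5/6 + 0.5)
    return tilted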
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/pitch_transform.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.utils import mask_from_lens
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz: Optional[int] = None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
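# Illustrative sketch, not part of the original module: the sinusoidal embedding
# concatenates sin and cos halves, so d_model=8 yields a (1, seq_len, 8) tensor and
# position 0 is all zeros in the sin half and all ones in the cos half.
# The function name and toy sizes are assumptions made for the demo.
def _demo_positional_embedding():
    pos_emb_layer = PositionalEmbedding(8)
    pos_seq = torch.arange(4, dtype=torch.float32)
    pos_emb = pos_emb_layer(pos_seq)
    assert pos_emb.shape == (1, 4, 8)
    assert torch.allclose(pos_emb[0, 0, :4], torch.zeros(4))  # sin(0) = 0
    assert torch.allclose(pos_emb[0, 0, 4:], torch.ones(4))   # cos(0) = 1
    return pos_emb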
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.CoreNet(inp)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.dropout = dropout
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask: Optional[torch.Tensor] = None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=-1)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
output = residual + attn_out
else:
# residual connection + layer normalization
# XXX Running TorchScript on 20.02 and 20.03 containers crashes here
# XXX Works well with 20.01-py3 container.
# XXX dirty fix is:
# XXX output = self.layer_norm(residual + attn_out).half()
output = self.layer_norm(residual + attn_out)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
**kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, mask):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
output *= mask
output = self.pos_ff(output)
output *= mask
return output
class FFTransformer(nn.Module):
def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,
dropout, dropatt, dropemb=0.0, embed_input=True,
n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False):
super(FFTransformer, self).__init__()
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.padding_idx = padding_idx
self.n_embed = n_embed
self.embed_input = embed_input
if embed_input:
self.word_emb = nn.Embedding(n_embed, d_embed or d_model,
padding_idx=self.padding_idx)
else:
self.word_emb = nn.Identity()
self.pos_emb = PositionalEmbedding(self.d_model)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layer):
self.layers.append(
TransformerLayer(
n_head, d_model, d_head, d_inner, kernel_size, dropout,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
def forward(self, dec_inp, seq_lens: Optional[torch.Tensor] = None,
conditioning: Optional[torch.Tensor] = None):
if not self.embed_input:
inp = dec_inp
assert seq_lens is not None
mask = mask_from_lens(seq_lens).unsqueeze(2)
else:
inp = self.word_emb(dec_inp)
# [bsz x L x 1]
mask = (dec_inp != self.padding_idx).unsqueeze(2)
pos_seq = torch.arange(inp.size(1), device=inp.device, dtype=inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
if conditioning is not None:
out = self.drop(inp + pos_emb + conditioning)
else:
out = self.drop(inp + pos_emb)
for layer in self.layers:
out = layer(out, mask=mask)
# out = self.drop(out)
return out, mask
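# Illustrative sketch, not part of the original module: a tiny FFTransformer over a
# padded batch of token ids returns per-position features plus the padding mask that
# downstream predictors reuse. The function name and all hyperparameters below are
# toy assumptions made for the demo.
def _demo_fftransformer():
    torch.manual_seed(0)
    model = FFTransformer(n_layer=1, n_head=2, d_model=8, d_head=4, d_inner=16,
                          kernel_size=3, dropout=0.0, dropatt=0.0,
                          embed_input=True, n_embed=10, d_embed=8, padding_idx=0)
    tokens = torch.tensor([[4, 7, 2, 0], [5, 3, 0, 0]])  # 0 = padding
    out, mask = model(tokens)
    assert out.shape == (2, 4, 8) and mask.shape == (2, 4, 1)
    assert mask.sum().item() == 5  # five non-padding positions
    return out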
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/transformer_jit.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import functools
import json
import re
from pathlib import Path
import librosa
import numpy as np
import torch
import torch.nn.functional as F
from scipy import ndimage
from scipy.stats import betabinom
import common.layers as layers
from common.text.text_processing import get_text_processing
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class BetaBinomialInterpolator:
"""Interpolates alignment prior matrices to save computation.
    Calculating beta-binomial priors is costly. Instead, cache popular sizes
    and use image interpolation to get priors faster.
"""
def __init__(self, round_mel_len_to=100, round_text_len_to=20):
self.round_mel_len_to = round_mel_len_to
self.round_text_len_to = round_text_len_to
self.bank = functools.lru_cache(beta_binomial_prior_distribution)
def round(self, val, to):
return max(1, int(np.round((val + 1) / to))) * to
def __call__(self, w, h):
bw = self.round(w, to=self.round_mel_len_to)
bh = self.round(h, to=self.round_text_len_to)
ret = ndimage.zoom(self.bank(bw, bh).T, zoom=(w / bw, h / bh), order=1)
assert ret.shape[0] == w, ret.shape
assert ret.shape[1] == h, ret.shape
return ret
def beta_binomial_prior_distribution(phoneme_count, mel_count, scaling=1.0):
P = phoneme_count
M = mel_count
x = np.arange(0, P)
mel_text_probs = []
for i in range(1, M+1):
a, b = scaling * i, scaling * (M + 1 - i)
rv = betabinom(P, a, b)
mel_i_prob = rv.pmf(x)
mel_text_probs.append(mel_i_prob)
return torch.tensor(np.array(mel_text_probs))
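# Illustrative sketch, not part of the original module: the prior is a
# (mel_count, phoneme_count) matrix of beta-binomial probabilities whose mass drifts
# monotonically from the first phoneme (early mel frames) to the last phoneme (late
# mel frames). The function name and toy sizes are assumptions made for the demo.
def _demo_beta_binomial_prior():
    prior = beta_binomial_prior_distribution(phoneme_count=5, mel_count=20)
    assert prior.shape == (20, 5)
    assert prior[0].argmax().item() == 0   # first mel frame favours the first phoneme
    assert prior[-1].argmax().item() == 4  # last mel frame favours the last phoneme
    return prior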
def estimate_pitch(wav, mel_len, method='pyin', normalize_mean=None,
normalize_std=None, n_formants=1):
if type(normalize_mean) is float or type(normalize_mean) is list:
normalize_mean = torch.tensor(normalize_mean)
if type(normalize_std) is float or type(normalize_std) is list:
normalize_std = torch.tensor(normalize_std)
if method == 'pyin':
snd, sr = librosa.load(wav)
pitch_mel, voiced_flag, voiced_probs = librosa.pyin(
snd, fmin=librosa.note_to_hz('C2'),
fmax=librosa.note_to_hz('C7'), frame_length=1024)
assert np.abs(mel_len - pitch_mel.shape[0]) <= 1.0
pitch_mel = np.where(np.isnan(pitch_mel), 0.0, pitch_mel)
pitch_mel = torch.from_numpy(pitch_mel).unsqueeze(0)
pitch_mel = F.pad(pitch_mel, (0, mel_len - pitch_mel.size(1)))
if n_formants > 1:
raise NotImplementedError
else:
raise ValueError
pitch_mel = pitch_mel.float()
if normalize_mean is not None:
assert normalize_std is not None
pitch_mel = normalize_pitch(pitch_mel, normalize_mean, normalize_std)
return pitch_mel
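# Illustrative sketch, not part of the original module: running pyin directly on a
# synthetic 220 Hz sine shows the kind of frame-level F0 track that estimate_pitch
# builds from a wav file; unvoiced frames come back as NaN and are zeroed above.
# The function name, sampling rate and test tone are assumptions made for the demo.
def _demo_pyin_on_sine(sr=22050, f0_hz=220.0):
    t = np.linspace(0, 1.0, sr, endpoint=False)
    sine = np.sin(2 * np.pi * f0_hz * t).astype(np.float32)
    f0, voiced_flag, voiced_probs = librosa.pyin(
        sine, fmin=librosa.note_to_hz('C2'), fmax=librosa.note_to_hz('C7'),
        sr=sr, frame_length=1024)
    voiced_f0 = f0[~np.isnan(f0)]
    assert abs(np.median(voiced_f0) - f0_hz) < 5.0  # close to the true 220 Hz
    return f0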
def normalize_pitch(pitch, mean, std):
zeros = (pitch == 0.0)
pitch -= mean[:, None]
pitch /= std[:, None]
pitch[zeros] = 0.0
return pitch
class TTSDataset(torch.utils.data.Dataset):
"""
        1) loads audio, text pairs
        2) normalizes text and converts it to sequences of symbol IDs
        3) computes mel-spectrograms from audio files.
"""
def __init__(self,
dataset_path,
audiopaths_and_text,
text_cleaners,
n_mel_channels,
symbol_set='english_basic',
p_arpabet=1.0,
n_speakers=1,
load_mel_from_disk=True,
load_pitch_from_disk=True,
pitch_mean=214.72203, # LJSpeech defaults
pitch_std=65.72038,
max_wav_value=None,
sampling_rate=None,
filter_length=None,
hop_length=None,
win_length=None,
mel_fmin=None,
mel_fmax=None,
prepend_space_to_text=False,
append_space_to_text=False,
pitch_online_dir=None,
betabinomial_online_dir=None,
use_betabinomial_interpolator=True,
pitch_online_method='pyin',
**ignored):
# Expect a list of filenames
if type(audiopaths_and_text) is str:
audiopaths_and_text = [audiopaths_and_text]
self.dataset_path = dataset_path
self.audiopaths_and_text = load_filepaths_and_text(
dataset_path, audiopaths_and_text,
has_speakers=(n_speakers > 1))
self.load_mel_from_disk = load_mel_from_disk
if not load_mel_from_disk:
self.max_wav_value = max_wav_value
self.sampling_rate = sampling_rate
self.stft = layers.TacotronSTFT(
filter_length, hop_length, win_length,
n_mel_channels, sampling_rate, mel_fmin, mel_fmax)
self.load_pitch_from_disk = load_pitch_from_disk
self.prepend_space_to_text = prepend_space_to_text
self.append_space_to_text = append_space_to_text
assert p_arpabet == 0.0 or p_arpabet == 1.0, (
'Only 0.0 and 1.0 p_arpabet is currently supported. '
'Variable probability breaks caching of betabinomial matrices.')
self.tp = get_text_processing(symbol_set, text_cleaners, p_arpabet)
self.n_speakers = n_speakers
self.pitch_tmp_dir = pitch_online_dir
self.f0_method = pitch_online_method
self.betabinomial_tmp_dir = betabinomial_online_dir
self.use_betabinomial_interpolator = use_betabinomial_interpolator
if use_betabinomial_interpolator:
self.betabinomial_interpolator = BetaBinomialInterpolator()
expected_columns = (2 + int(load_pitch_from_disk) + (n_speakers > 1))
assert not (load_pitch_from_disk and self.pitch_tmp_dir is not None)
if len(self.audiopaths_and_text[0]) < expected_columns:
raise ValueError(f'Expected {expected_columns} columns in audiopaths file. '
'The format is <mel_or_wav>|[<pitch>|]<text>[|<speaker_id>]')
if len(self.audiopaths_and_text[0]) > expected_columns:
print('WARNING: Audiopaths file has more columns than expected')
to_tensor = lambda x: torch.Tensor([x]) if type(x) is float else x
self.pitch_mean = to_tensor(pitch_mean)
self.pitch_std = to_tensor(pitch_std)
def __getitem__(self, index):
# Separate filename and text
if self.n_speakers > 1:
audiopath, *extra, text, speaker = self.audiopaths_and_text[index]
speaker = int(speaker)
else:
audiopath, *extra, text = self.audiopaths_and_text[index]
speaker = None
mel = self.get_mel(audiopath)
text = self.get_text(text)
pitch = self.get_pitch(index, mel.size(-1))
energy = torch.norm(mel.float(), dim=0, p=2)
attn_prior = self.get_prior(index, mel.shape[1], text.shape[0])
assert pitch.size(-1) == mel.size(-1)
# No higher formants?
if len(pitch.size()) == 1:
pitch = pitch[None, :]
return (text, mel, len(text), pitch, energy, speaker, attn_prior,
audiopath)
def __len__(self):
return len(self.audiopaths_and_text)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm,
requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.load(filename)
# assert melspec.size(0) == self.stft.n_mel_channels, (
# 'Mel dimension mismatch: given {}, expected {}'.format(
# melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text = self.tp.encode_text(text)
space = [self.tp.encode_text("A A")[1]]
if self.prepend_space_to_text:
text = space + text
if self.append_space_to_text:
text = text + space
return torch.LongTensor(text)
def get_prior(self, index, mel_len, text_len):
if self.use_betabinomial_interpolator:
return torch.from_numpy(self.betabinomial_interpolator(mel_len,
text_len))
if self.betabinomial_tmp_dir is not None:
audiopath, *_ = self.audiopaths_and_text[index]
fname = Path(audiopath).relative_to(self.dataset_path)
fname = fname.with_suffix('.pt')
cached_fpath = Path(self.betabinomial_tmp_dir, fname)
if cached_fpath.is_file():
return torch.load(cached_fpath)
attn_prior = beta_binomial_prior_distribution(text_len, mel_len)
if self.betabinomial_tmp_dir is not None:
cached_fpath.parent.mkdir(parents=True, exist_ok=True)
torch.save(attn_prior, cached_fpath)
return attn_prior
def get_pitch(self, index, mel_len=None):
audiopath, *fields = self.audiopaths_and_text[index]
if self.n_speakers > 1:
spk = int(fields[-1])
else:
spk = 0
if self.load_pitch_from_disk:
pitchpath = fields[0]
pitch = torch.load(pitchpath)
if self.pitch_mean is not None:
assert self.pitch_std is not None
pitch = normalize_pitch(pitch, self.pitch_mean, self.pitch_std)
return pitch
if self.pitch_tmp_dir is not None:
fname = Path(audiopath).relative_to(self.dataset_path)
fname_method = fname.with_suffix('.pt')
cached_fpath = Path(self.pitch_tmp_dir, fname_method)
if cached_fpath.is_file():
return torch.load(cached_fpath)
# No luck so far - calculate
wav = audiopath
if not wav.endswith('.wav'):
wav = re.sub('/mels/', '/wavs/', wav)
wav = re.sub('.pt$', '.wav', wav)
pitch_mel = estimate_pitch(wav, mel_len, self.f0_method,
self.pitch_mean, self.pitch_std)
if self.pitch_tmp_dir is not None and not cached_fpath.is_file():
cached_fpath.parent.mkdir(parents=True, exist_ok=True)
torch.save(pitch_mel, cached_fpath)
return pitch_mel
def ensure_disjoint(*tts_datasets):
paths = [set(list(zip(*d.audiopaths_and_text))[0]) for d in tts_datasets]
assert sum(len(p) for p in paths) == len(set().union(*paths)), (
"Your datasets (train, val) are not disjoint. "
"Review filelists and restart training."
)
class TTSCollate:
"""Zero-pads model inputs and targets based on number of frames per step"""
def __call__(self, batch):
"""Collate training batch from normalized text and mel-spec"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
        # Allocate the zero-padded mel tensor
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
output_lengths[i] = mel.size(1)
n_formants = batch[0][3].shape[0]
pitch_padded = torch.zeros(mel_padded.size(0), n_formants,
mel_padded.size(2), dtype=batch[0][3].dtype)
energy_padded = torch.zeros_like(pitch_padded[:, 0, :])
for i in range(len(ids_sorted_decreasing)):
pitch = batch[ids_sorted_decreasing[i]][3]
energy = batch[ids_sorted_decreasing[i]][4]
pitch_padded[i, :, :pitch.shape[1]] = pitch
energy_padded[i, :energy.shape[0]] = energy
if batch[0][5] is not None:
speaker = torch.zeros_like(input_lengths)
for i in range(len(ids_sorted_decreasing)):
speaker[i] = batch[ids_sorted_decreasing[i]][5]
else:
speaker = None
attn_prior_padded = torch.zeros(len(batch), max_target_len,
max_input_len)
attn_prior_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
prior = batch[ids_sorted_decreasing[i]][6]
attn_prior_padded[i, :prior.size(0), :prior.size(1)] = prior
# Count number of items - characters in text
len_x = [x[2] for x in batch]
len_x = torch.Tensor(len_x)
audiopaths = [batch[i][7] for i in ids_sorted_decreasing]
return (text_padded, input_lengths, mel_padded, output_lengths, len_x,
pitch_padded, energy_padded, speaker, attn_prior_padded,
audiopaths)
def batch_to_gpu(batch):
(text_padded, input_lengths, mel_padded, output_lengths, len_x,
pitch_padded, energy_padded, speaker, attn_prior, audiopaths) = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
mel_padded = to_gpu(mel_padded).float()
output_lengths = to_gpu(output_lengths).long()
pitch_padded = to_gpu(pitch_padded).float()
energy_padded = to_gpu(energy_padded).float()
attn_prior = to_gpu(attn_prior).float()
if speaker is not None:
speaker = to_gpu(speaker).long()
# Alignments act as both inputs and targets - pass shallow copies
x = [text_padded, input_lengths, mel_padded, output_lengths,
pitch_padded, energy_padded, speaker, attn_prior, audiopaths]
y = [mel_padded, input_lengths, output_lengths]
len_x = torch.sum(output_lengths)
return (x, y, len_x)
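# Illustrative sketch, not part of the original module: TTSCollate can be exercised
# without any audio on disk by building synthetic (text, mel, text_len, pitch,
# energy, speaker, attn_prior, audiopath) tuples; the collate sorts the batch by
# text length and zero-pads every tensor to the longest example. The function name
# and all shapes below are toy assumptions made for the demo.
def _demo_tts_collate():
    def fake_item(text_len, mel_len, n_mels=80):
        return (torch.randint(1, 100, (text_len,)),  # encoded text
                torch.randn(n_mels, mel_len),        # mel-spectrogram
                text_len,
                torch.randn(1, mel_len),             # pitch (single formant)
                torch.randn(mel_len),                # energy
                None,                                # single-speaker setup
                torch.ones(mel_len, text_len),       # attention prior
                'fake.wav')
    batch = [fake_item(5, 20), fake_item(3, 12)]
    (text_padded, input_lengths, mel_padded, output_lengths, _len_x, pitch_padded,
     energy_padded, speaker, attn_prior_padded, _paths) = TTSCollate()(batch)
    assert text_padded.shape == (2, 5) and mel_padded.shape == (2, 80, 20)
    assert pitch_padded.shape == (2, 1, 20) and attn_prior_padded.shape == (2, 20, 5)
    assert speaker is None and input_lengths.tolist() == [5, 3]
    return mel_padded, output_lengths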
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/data_function.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from common import filter_warnings
from common.layers import ConvReLUNorm
from common.utils import mask_from_lens
from fastpitch.alignment import b_mas, mas_width1
from fastpitch.attention import ConvAttention
from fastpitch.transformer import FFTransformer
def regulate_len(durations, enc_out, pace: float = 1.0,
mel_max_len: Optional[int] = None):
"""If target=None, then predicted durations are applied"""
dtype = enc_out.dtype
reps = durations.float() / pace
reps = (reps + 0.5).long()
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
reps_cumsum = torch.cumsum(F.pad(reps, (1, 0, 0, 0), value=0.0),
dim=1)[:, None, :]
reps_cumsum = reps_cumsum.to(dtype)
range_ = torch.arange(max_len, device=enc_out.device)[None, :, None]
mult = ((reps_cumsum[:, :, :-1] <= range_) &
(reps_cumsum[:, :, 1:] > range_))
mult = mult.to(dtype)
enc_rep = torch.matmul(mult, enc_out)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
def average_pitch(pitch, durs):
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = F.pad(durs_cums_ends[:, :-1], (1, 0))
pitch_nonzero_cums = F.pad(torch.cumsum(pitch != 0.0, dim=2), (1, 0))
pitch_cums = F.pad(torch.cumsum(pitch, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = pitch.size(1)
dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l)
dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l)
pitch_sums = (torch.gather(pitch_cums, 2, dce)
- torch.gather(pitch_cums, 2, dcs)).float()
pitch_nelems = (torch.gather(pitch_nonzero_cums, 2, dce)
- torch.gather(pitch_nonzero_cums, 2, dcs)).float()
pitch_avg = torch.where(pitch_nelems == 0.0, pitch_nelems,
pitch_sums / pitch_nelems)
return pitch_avg
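# Illustrative sketch, not part of the original module: averaging a frame-level
# pitch track over character durations [2, 3] collapses five frames into two
# per-character values, with zero (unvoiced) frames excluded from each segment's
# mean. The function name and toy values are assumptions made for the demo.
def _demo_average_pitch():
    pitch = torch.tensor([[[100.0, 110.0, 0.0, 200.0, 220.0]]])  # (B=1, formants=1, T=5)
    durs = torch.tensor([[2, 3]])                                # two characters
    avg = average_pitch(pitch, durs)
    assert avg.shape == (1, 1, 2)
    assert torch.allclose(avg[0, 0], torch.tensor([105.0, 210.0]))
    return avg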
class TemporalPredictor(nn.Module):
"""Predicts a single float per each temporal location"""
def __init__(self, input_size, filter_size, kernel_size, dropout,
n_layers=2, n_predictions=1):
super(TemporalPredictor, self).__init__()
self.layers = nn.Sequential(*[
ConvReLUNorm(input_size if i == 0 else filter_size, filter_size,
kernel_size=kernel_size, dropout=dropout)
for i in range(n_layers)]
)
self.n_predictions = n_predictions
self.fc = nn.Linear(filter_size, self.n_predictions, bias=True)
def forward(self, enc_out, enc_out_mask):
out = enc_out * enc_out_mask
out = self.layers(out.transpose(1, 2)).transpose(1, 2)
out = self.fc(out) * enc_out_mask
return out
class FastPitch(nn.Module):
def __init__(self, n_mel_channels, n_symbols, padding_idx,
symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads,
in_fft_d_head,
in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size,
in_fft_output_size,
p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb,
out_fft_n_layers, out_fft_n_heads, out_fft_d_head,
out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size,
out_fft_output_size,
p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb,
dur_predictor_kernel_size, dur_predictor_filter_size,
p_dur_predictor_dropout, dur_predictor_n_layers,
pitch_predictor_kernel_size, pitch_predictor_filter_size,
p_pitch_predictor_dropout, pitch_predictor_n_layers,
pitch_embedding_kernel_size,
energy_conditioning,
energy_predictor_kernel_size, energy_predictor_filter_size,
p_energy_predictor_dropout, energy_predictor_n_layers,
energy_embedding_kernel_size,
n_speakers, speaker_emb_weight, pitch_conditioning_formants=1):
super(FastPitch, self).__init__()
self.encoder = FFTransformer(
n_layer=in_fft_n_layers, n_head=in_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=in_fft_d_head,
d_inner=in_fft_conv1d_filter_size,
kernel_size=in_fft_conv1d_kernel_size,
dropout=p_in_fft_dropout,
dropatt=p_in_fft_dropatt,
dropemb=p_in_fft_dropemb,
embed_input=True,
d_embed=symbols_embedding_dim,
n_embed=n_symbols,
padding_idx=padding_idx)
if n_speakers > 1:
self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.speaker_emb_weight = speaker_emb_weight
self.duration_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=dur_predictor_filter_size,
kernel_size=dur_predictor_kernel_size,
dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers
)
self.decoder = FFTransformer(
n_layer=out_fft_n_layers, n_head=out_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=out_fft_d_head,
d_inner=out_fft_conv1d_filter_size,
kernel_size=out_fft_conv1d_kernel_size,
dropout=p_out_fft_dropout,
dropatt=p_out_fft_dropatt,
dropemb=p_out_fft_dropemb,
embed_input=False,
d_embed=symbols_embedding_dim
)
self.pitch_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=pitch_predictor_filter_size,
kernel_size=pitch_predictor_kernel_size,
dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers,
n_predictions=pitch_conditioning_formants
)
self.pitch_emb = nn.Conv1d(
pitch_conditioning_formants, symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2))
# Store values precomputed for training data within the model
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.energy_conditioning = energy_conditioning
if energy_conditioning:
self.energy_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=energy_predictor_filter_size,
kernel_size=energy_predictor_kernel_size,
dropout=p_energy_predictor_dropout,
n_layers=energy_predictor_n_layers,
n_predictions=1
)
self.energy_emb = nn.Conv1d(
1, symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2))
self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True)
self.attention = ConvAttention(
n_mel_channels, 0, symbols_embedding_dim,
use_query_proj=True, align_query_enc_type='3xconv')
def binarize_attention(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
        These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
b_size = attn.shape[0]
with torch.no_grad():
attn_out_cpu = np.zeros(attn.data.shape, dtype=np.float32)
log_attn_cpu = torch.log(attn.data).to(device='cpu', dtype=torch.float32)
log_attn_cpu = log_attn_cpu.numpy()
out_lens_cpu = out_lens.cpu()
in_lens_cpu = in_lens.cpu()
for ind in range(b_size):
hard_attn = mas_width1(
log_attn_cpu[ind, 0, :out_lens_cpu[ind], :in_lens_cpu[ind]])
attn_out_cpu[ind, 0, :out_lens_cpu[ind], :in_lens_cpu[ind]] = hard_attn
attn_out = torch.tensor(
attn_out_cpu, device=attn.get_device(), dtype=attn.dtype)
return attn_out
def binarize_attention_parallel(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
        These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
with torch.no_grad():
log_attn_cpu = torch.log(attn.data).cpu().numpy()
attn_out = b_mas(log_attn_cpu, in_lens.cpu().numpy(),
out_lens.cpu().numpy(), width=1)
return torch.from_numpy(attn_out).to(attn.get_device())
def forward(self, inputs, use_gt_pitch=True, pace=1.0, max_duration=75):
(inputs, input_lens, mel_tgt, mel_lens, pitch_dense, energy_dense,
speaker, attn_prior, audiopaths) = inputs
text_max_len = inputs.size(1)
mel_max_len = mel_tgt.size(2)
# Calculate speaker embedding
if self.speaker_emb is None:
spk_emb = 0
else:
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration)
# Predict pitch
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
# Alignment
text_emb = self.encoder.word_emb(inputs)
# make sure to do the alignments before folding
attn_mask = mask_from_lens(input_lens, max_len=text_max_len)
attn_mask = attn_mask[..., None] == 0
        # attn_mask should be True for unused (padded) timesteps in the encoded text
attn_soft, attn_logprob = self.attention(
mel_tgt, text_emb.permute(0, 2, 1), mel_lens, attn_mask,
key_lens=input_lens, keys_encoded=enc_out, attn_prior=attn_prior)
attn_hard = self.binarize_attention(attn_soft, input_lens, mel_lens)
# Viterbi --> durations
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
dur_tgt = attn_hard_dur
assert torch.all(torch.eq(dur_tgt.sum(dim=1), mel_lens))
# Average pitch over characters
pitch_tgt = average_pitch(pitch_dense, dur_tgt)
if use_gt_pitch and pitch_tgt is not None:
pitch_emb = self.pitch_emb(pitch_tgt)
else:
pitch_emb = self.pitch_emb(pitch_pred)
enc_out = enc_out + pitch_emb.transpose(1, 2)
# Predict energy
if self.energy_conditioning:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
# Average energy over characters
energy_tgt = average_pitch(energy_dense.unsqueeze(1), dur_tgt)
energy_tgt = torch.log(1.0 + energy_tgt)
energy_emb = self.energy_emb(energy_tgt)
energy_tgt = energy_tgt.squeeze(1)
enc_out = enc_out + energy_emb.transpose(1, 2)
else:
energy_pred = None
energy_tgt = None
len_regulated, dec_lens = regulate_len(
dur_tgt, enc_out, pace, mel_max_len)
# Output FFT
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
return (mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred,
pitch_tgt, energy_pred, energy_tgt, attn_soft, attn_hard,
attn_hard_dur, attn_logprob)
def infer(self, inputs, pace=1.0, dur_tgt=None, pitch_tgt=None,
energy_tgt=None, pitch_transform=None, max_duration=75,
speaker=0):
if self.speaker_emb is None:
spk_emb = 0
else:
speaker = (torch.ones(inputs.size(0)).long().to(inputs.device)
* speaker)
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration)
# Pitch over chars
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
if pitch_transform is not None:
if self.pitch_std[0] == 0.0:
# XXX LJSpeech-1.1 defaults
mean, std = 218.14, 67.24
else:
mean, std = self.pitch_mean[0], self.pitch_std[0]
pitch_pred = pitch_transform(pitch_pred, enc_mask.sum(dim=(1,2)),
mean, std)
if pitch_tgt is None:
pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2)
else:
pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2)
enc_out = enc_out + pitch_emb
# Predict energy
if self.energy_conditioning:
if energy_tgt is None:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2)
else:
energy_emb = self.energy_emb(energy_tgt).transpose(1, 2)
enc_out = enc_out + energy_emb
else:
energy_pred = None
len_regulated, dec_lens = regulate_len(
dur_pred if dur_tgt is None else dur_tgt,
enc_out, pace, mel_max_len=None)
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
# mel_lens = dec_mask.squeeze(2).sum(axis=1).long()
mel_out = mel_out.permute(0, 2, 1) # For inference.py
return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/model.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from torch import nn
from common.utils import mask_from_lens
from fastpitch.attn_loss_function import AttentionCTCLoss
class FastPitchLoss(nn.Module):
def __init__(self, dur_predictor_loss_scale=1.0,
pitch_predictor_loss_scale=1.0, attn_loss_scale=1.0,
energy_predictor_loss_scale=0.1):
super(FastPitchLoss, self).__init__()
self.dur_predictor_loss_scale = dur_predictor_loss_scale
self.pitch_predictor_loss_scale = pitch_predictor_loss_scale
self.energy_predictor_loss_scale = energy_predictor_loss_scale
self.attn_loss_scale = attn_loss_scale
self.attn_ctc_loss = AttentionCTCLoss()
def forward(self, model_out, targets, is_training=True, meta_agg='mean'):
(mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred, pitch_tgt,
energy_pred, energy_tgt, attn_soft, attn_hard, attn_dur,
attn_logprob) = model_out
(mel_tgt, in_lens, out_lens) = targets
dur_tgt = attn_dur
dur_lens = in_lens
mel_tgt.requires_grad = False
# (B,H,T) => (B,T,H)
mel_tgt = mel_tgt.transpose(1, 2)
dur_mask = mask_from_lens(dur_lens, max_len=dur_tgt.size(1))
log_dur_tgt = torch.log(dur_tgt.float() + 1)
loss_fn = F.mse_loss
dur_pred_loss = loss_fn(log_dur_pred, log_dur_tgt, reduction='none')
dur_pred_loss = (dur_pred_loss * dur_mask).sum() / dur_mask.sum()
ldiff = mel_tgt.size(1) - mel_out.size(1)
mel_out = F.pad(mel_out, (0, 0, 0, ldiff, 0, 0), value=0.0)
mel_mask = mel_tgt.ne(0).float()
loss_fn = F.mse_loss
mel_loss = loss_fn(mel_out, mel_tgt, reduction='none')
mel_loss = (mel_loss * mel_mask).sum() / mel_mask.sum()
ldiff = pitch_tgt.size(2) - pitch_pred.size(2)
pitch_pred = F.pad(pitch_pred, (0, ldiff, 0, 0, 0, 0), value=0.0)
pitch_loss = F.mse_loss(pitch_tgt, pitch_pred, reduction='none')
pitch_loss = (pitch_loss * dur_mask.unsqueeze(1)).sum() / dur_mask.sum()
if energy_pred is not None:
energy_pred = F.pad(energy_pred, (0, ldiff, 0, 0), value=0.0)
energy_loss = F.mse_loss(energy_tgt, energy_pred, reduction='none')
energy_loss = (energy_loss * dur_mask).sum() / dur_mask.sum()
else:
energy_loss = 0
# Attention loss
attn_loss = self.attn_ctc_loss(attn_logprob, in_lens, out_lens)
loss = (mel_loss
+ dur_pred_loss * self.dur_predictor_loss_scale
+ pitch_loss * self.pitch_predictor_loss_scale
+ energy_loss * self.energy_predictor_loss_scale
+ attn_loss * self.attn_loss_scale)
meta = {
'loss': loss.clone().detach(),
'mel_loss': mel_loss.clone().detach(),
'duration_predictor_loss': dur_pred_loss.clone().detach(),
'pitch_loss': pitch_loss.clone().detach(),
'attn_loss': attn_loss.clone().detach(),
'dur_error': (torch.abs(dur_pred - dur_tgt).sum()
/ dur_mask.sum()).detach(),
}
if energy_pred is not None:
meta['energy_loss'] = energy_loss.clone().detach()
assert meta_agg in ('sum', 'mean')
if meta_agg == 'sum':
bsz = mel_out.size(0)
meta = {k: v * bsz for k, v in meta.items()}
return loss, meta
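# Illustrative sketch (not part of the original file): running the loss on
# synthetic tensors to document the expected layout of `model_out` and
# `targets`. All sizes below are arbitrary example values, not defaults.
if __name__ == '__main__':
    B, T_text, T_mel, n_mel = 2, 6, 20, 80
    in_lens = torch.tensor([6, 4])
    out_lens = torch.tensor([20, 15])
    dur_pred = torch.rand(B, T_text) * 4
    model_out = (
        torch.randn(B, T_mel, n_mel),      # mel_out
        None,                              # dec_mask (unused by this loss)
        dur_pred,                          # dur_pred
        torch.log(dur_pred + 1),           # log_dur_pred
        torch.randn(B, 1, T_text),         # pitch_pred
        torch.randn(B, 1, T_text),         # pitch_tgt
        None, None,                        # energy_pred, energy_tgt
        None, None,                        # attn_soft, attn_hard (unused by this loss)
        torch.full((B, T_text), 3.0),      # attn_dur
        torch.randn(B, 1, T_mel, T_text),  # attn_logprob
    )
    targets = (torch.randn(B, n_mel, T_mel), in_lens, out_lens)
    loss, meta = FastPitchLoss()(model_out, targets)
    print(loss.item(), sorted(meta.keys()))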
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/loss_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def parse_fastpitch_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
io = parser.add_argument_group('io parameters')
io.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
io.add_argument('--max-seq-len', default=2048, type=int,
                    help='Maximum sequence length')
symbols = parser.add_argument_group('symbols parameters')
symbols.add_argument('--n-symbols', default=148, type=int,
help='Number of symbols in dictionary')
symbols.add_argument('--padding-idx', default=0, type=int,
help='Index of padding symbol in dictionary')
symbols.add_argument('--symbols-embedding-dim', default=384, type=int,
help='Input embedding dimension')
in_fft = parser.add_argument_group('input FFT parameters')
in_fft.add_argument('--in-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
in_fft.add_argument('--in-fft-n-heads', default=1, type=int,
help='Number of attention heads')
in_fft.add_argument('--in-fft-d-head', default=64, type=int,
help='Dim of attention heads')
in_fft.add_argument('--in-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
in_fft.add_argument('--in-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
in_fft.add_argument('--in-fft-output-size', default=384, type=int,
help='Output dim')
in_fft.add_argument('--p-in-fft-dropout', default=0.1, type=float,
help='Dropout probability')
in_fft.add_argument('--p-in-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
in_fft.add_argument('--p-in-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
out_fft = parser.add_argument_group('output FFT parameters')
out_fft.add_argument('--out-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
out_fft.add_argument('--out-fft-n-heads', default=1, type=int,
help='Number of attention heads')
out_fft.add_argument('--out-fft-d-head', default=64, type=int,
help='Dim of attention head')
out_fft.add_argument('--out-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
out_fft.add_argument('--out-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
out_fft.add_argument('--out-fft-output-size', default=384, type=int,
help='Output dim')
out_fft.add_argument('--p-out-fft-dropout', default=0.1, type=float,
help='Dropout probability for out_fft')
out_fft.add_argument('--p-out-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
out_fft.add_argument('--p-out-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
dur_pred = parser.add_argument_group('duration predictor parameters')
dur_pred.add_argument('--dur-predictor-kernel-size', default=3, type=int,
help='Duration predictor conv-1D kernel size')
dur_pred.add_argument('--dur-predictor-filter-size', default=256, type=int,
help='Duration predictor conv-1D filter size')
dur_pred.add_argument('--p-dur-predictor-dropout', default=0.1, type=float,
help='Dropout probability for duration predictor')
dur_pred.add_argument('--dur-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
pitch_pred = parser.add_argument_group('pitch predictor parameters')
pitch_pred.add_argument('--pitch-predictor-kernel-size', default=3, type=int,
help='Pitch predictor conv-1D kernel size')
pitch_pred.add_argument('--pitch-predictor-filter-size', default=256, type=int,
help='Pitch predictor conv-1D filter size')
pitch_pred.add_argument('--p-pitch-predictor-dropout', default=0.1, type=float,
                            help='Dropout probability for pitch predictor')
pitch_pred.add_argument('--pitch-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
energy_pred = parser.add_argument_group('energy predictor parameters')
    energy_pred.add_argument('--energy-conditioning', action='store_true',
                             help='Add energy conditioning')
energy_pred.add_argument('--energy-predictor-kernel-size', default=3, type=int,
                             help='Energy predictor conv-1D kernel size')
energy_pred.add_argument('--energy-predictor-filter-size', default=256, type=int,
                             help='Energy predictor conv-1D filter size')
energy_pred.add_argument('--p-energy-predictor-dropout', default=0.1, type=float,
                             help='Dropout probability for energy predictor')
energy_pred.add_argument('--energy-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
cond = parser.add_argument_group('conditioning parameters')
cond.add_argument('--pitch-embedding-kernel-size', default=3, type=int,
help='Pitch embedding conv-1D kernel size')
cond.add_argument('--energy-embedding-kernel-size', default=3, type=int,
                      help='Energy embedding conv-1D kernel size')
cond.add_argument('--speaker-emb-weight', type=float, default=1.0,
help='Scale speaker embedding')
return parser
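# Illustrative sketch (not part of the original file): composing this parser
# with an empty parent parser, similar to what the training entry point does.
if __name__ == '__main__':
    parent = argparse.ArgumentParser(add_help=False)
    args = parse_fastpitch_args(parent, add_help=True).parse_args(
        ['--energy-conditioning'])
    print(args.n_mel_channels, args.symbols_embedding_dim,
          args.energy_conditioning)  # 80 384 True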
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/arg_parser.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.utils import mask_from_lens
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.matmul(torch.unsqueeze(pos_seq, -1),
torch.unsqueeze(self.inv_freq, 0))
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
return self._forward(inp)
def _forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out).to(inp.dtype))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out).to(inp.dtype)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask=None):
return self._forward(inp, attn_mask)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(2, 0, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
output = residual + attn_out
else:
# residual connection + layer normalization
output = self.layer_norm(residual + attn_out)
output = output.to(attn_out.dtype)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
**kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, mask=None):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
output *= mask
output = self.pos_ff(output)
output *= mask
return output
class FFTransformer(nn.Module):
def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,
dropout, dropatt, dropemb=0.0, embed_input=True,
n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False):
super(FFTransformer, self).__init__()
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.padding_idx = padding_idx
if embed_input:
self.word_emb = nn.Embedding(n_embed, d_embed or d_model,
padding_idx=self.padding_idx)
else:
self.word_emb = None
self.pos_emb = PositionalEmbedding(self.d_model)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layer):
self.layers.append(
TransformerLayer(
n_head, d_model, d_head, d_inner, kernel_size, dropout,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
def forward(self, dec_inp, seq_lens=None, conditioning=0):
if self.word_emb is None:
inp = dec_inp
mask = mask_from_lens(seq_lens).unsqueeze(2)
else:
inp = self.word_emb(dec_inp)
# [bsz x L x 1]
mask = (dec_inp != self.padding_idx).unsqueeze(2)
pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
out = self.drop(inp + pos_emb + conditioning)
for layer in self.layers:
out = layer(out, mask=mask)
# out = self.drop(out)
return out, mask
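# Illustrative sketch (not part of the original file): a tiny encoder-style
# FFTransformer run on a padded batch of token ids. The hyperparameters are
# example values, not the FastPitch defaults.
if __name__ == '__main__':
    enc = FFTransformer(n_layer=2, n_head=1, d_model=64, d_head=64,
                        d_inner=256, kernel_size=3, dropout=0.1,
                        dropatt=0.1, embed_input=True, n_embed=100)
    tokens = torch.tensor([[5, 7, 9, 2], [3, 4, 0, 0]])  # 0 = padding_idx
    out, mask = enc(tokens)
    print(out.shape, mask.shape)  # torch.Size([2, 4, 64]) torch.Size([2, 4, 1])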
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/transformer.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionCTCLoss(torch.nn.Module):
def __init__(self, blank_logprob=-1):
super(AttentionCTCLoss, self).__init__()
self.log_softmax = torch.nn.LogSoftmax(dim=-1)
self.blank_logprob = blank_logprob
self.CTCLoss = nn.CTCLoss(zero_infinity=True)
def forward(self, attn_logprob, in_lens, out_lens):
key_lens = in_lens
query_lens = out_lens
max_key_len = attn_logprob.size(-1)
# Reorder input to [query_len, batch_size, key_len]
attn_logprob = attn_logprob.squeeze(1)
attn_logprob = attn_logprob.permute(1, 0, 2)
# Add blank label
attn_logprob = F.pad(
input=attn_logprob,
pad=(1, 0, 0, 0, 0, 0),
value=self.blank_logprob)
# Convert to log probabilities
# Note: Mask out probs beyond key_len
key_inds = torch.arange(
max_key_len+1,
device=attn_logprob.device,
dtype=torch.long)
attn_logprob.masked_fill_(
key_inds.view(1,1,-1) > key_lens.view(1,-1,1), # key_inds >= key_lens+1
-float("inf"))
attn_logprob = self.log_softmax(attn_logprob)
# Target sequences
target_seqs = key_inds[1:].unsqueeze(0)
target_seqs = target_seqs.repeat(key_lens.numel(), 1)
# Evaluate CTC loss
cost = self.CTCLoss(
attn_logprob, target_seqs,
input_lengths=query_lens, target_lengths=key_lens)
return cost
class AttentionBinarizationLoss(torch.nn.Module):
def __init__(self):
super(AttentionBinarizationLoss, self).__init__()
def forward(self, hard_attention, soft_attention, eps=1e-12):
log_sum = torch.log(torch.clamp(soft_attention[hard_attention == 1],
min=eps)).sum()
return -log_sum / hard_attention.sum()
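# Illustrative sketch (not part of the original file): both losses consume
# attention maps of shape (B, 1, mel_len, text_len); all sizes here are
# arbitrary example values.
if __name__ == '__main__':
    B, mel_len, text_len = 2, 50, 12
    attn_logprob = torch.randn(B, 1, mel_len, text_len)
    in_lens = torch.tensor([12, 9])    # text (key) lengths
    out_lens = torch.tensor([50, 37])  # mel (query) lengths
    print(AttentionCTCLoss()(attn_logprob, in_lens, out_lens).item())
    hard = (torch.rand(B, 1, mel_len, text_len) > 0.5).float()
    soft = torch.rand(B, 1, mel_len, text_len)
    print(AttentionBinarizationLoss()(hard, soft).item())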
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/fastpitch/attn_loss_function.py |
import atexit
import glob
import re
from itertools import product
from pathlib import Path
import dllogger
import torch
import numpy as np
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from torch.utils.tensorboard import SummaryWriter
tb_loggers = {}
class TBLogger:
"""
xyz_dummies: stretch the screen with empty plots so the legend would
always fit for other plots
"""
def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
self.enabled = enabled
self.interval = interval
self.cache = {}
if self.enabled:
self.summary_writer = SummaryWriter(
log_dir=Path(log_dir, name), flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
if dummies:
for key in ('_', '✕'):
self.summary_writer.add_scalar(key, 0.0, 1)
def log(self, step, data):
for k, v in data.items():
self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)
def log_value(self, step, key, val, stat='mean'):
if self.enabled:
if key not in self.cache:
self.cache[key] = []
self.cache[key].append(val)
if len(self.cache[key]) == self.interval:
agg_val = getattr(np, stat)(self.cache[key])
self.summary_writer.add_scalar(key, agg_val, step)
del self.cache[key]
def log_grads(self, step, model):
if self.enabled:
norms = [p.grad.norm().item() for p in model.parameters()
if p.grad is not None]
for stat in ('max', 'min', 'mean'):
self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
stat=stat)
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
    log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def stdout_step_format(step):
if isinstance(step, str):
return step
fields = []
if len(step) > 0:
fields.append("epoch {:>4}".format(step[0]))
if len(step) > 1:
fields.append("iter {:>3}".format(step[1]))
if len(step) > 2:
fields[-1] += "/{}".format(step[2])
return " | ".join(fields)
def stdout_metric_format(metric, metadata, value):
name = metadata.get("name", metric + " : ")
unit = metadata.get("unit", None)
format = f'{{{metadata.get("format", "")}}}'
fields = [name, format.format(value) if value is not None else value, unit]
fields = [f for f in fields if f is not None]
return "| " + " ".join(fields)
def init(log_fpath, log_dir, enabled=True, tb_subsets=[], **tb_kw):
if enabled:
backends = [
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)
]
else:
backends = []
dllogger.init(backends=backends)
dllogger.metadata("train_lrate", {"name": "lrate", "unit": None, "format": ":>3.2e"})
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('val', ' avg val '), ('val_ema', ' EMA val ')]:
dllogger.metadata(f"{id_}_loss",
{"name": f"{pref}loss", "unit": None, "format": ":>5.2f"})
dllogger.metadata(f"{id_}_mel_loss",
{"name": f"{pref}mel loss", "unit": None, "format": ":>5.2f"})
dllogger.metadata(f"{id_}_kl_loss",
{"name": f"{pref}kl loss", "unit": None, "format": ":>5.5f"})
dllogger.metadata(f"{id_}_kl_weight",
{"name": f"{pref}kl weight", "unit": None, "format": ":>5.5f"})
dllogger.metadata(f"{id_}_frames/s",
{"name": None, "unit": "frames/s", "format": ":>10.2f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>3.2f"})
global tb_loggers
tb_loggers = {s: TBLogger(enabled, log_dir, name=s, **tb_kw)
for s in tb_subsets}
def init_inference_metadata(batch_size=None):
modalities = [('latency', 's', ':>10.5f'), ('RTF', 'x', ':>10.2f'),
('frames/s', 'frames/s', ':>10.2f'), ('samples/s', 'samples/s', ':>10.2f'),
('letters/s', 'letters/s', ':>10.2f'), ('tokens/s', 'tokens/s', ':>10.2f')]
if batch_size is not None:
modalities.append((f'RTF@{batch_size}', 'x', ':>10.2f'))
percs = ['', 'avg', '90%', '95%', '99%']
models = ['', 'fastpitch', 'waveglow', 'hifigan']
for perc, model, (mod, unit, fmt) in product(percs, models, modalities):
        name = f'{perc} {model} {mod}'.strip().replace('  ', ' ')
dllogger.metadata(name.replace(' ', '_'),
{'name': f'{name: <26}', 'unit': unit, 'format': fmt})
def log(step, tb_total_steps=None, data={}, subset='train'):
if tb_total_steps is not None:
tb_loggers[subset].log(tb_total_steps, data)
if subset != '':
data = {f'{subset}_{key}': v for key, v in data.items()}
dllogger.log(step, data=data)
def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
tb_loggers[tb_subset].log_grads(tb_total_steps, grads)
def parameters(data, verbosity=0, tb_subset=None):
for k, v in data.items():
dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity)
if tb_subset is not None and tb_loggers[tb_subset].enabled:
tb_data = {k: v for k, v in data.items()
if type(v) in (str, bool, int, float)}
tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})
def flush():
dllogger.flush()
for tbl in tb_loggers.values():
if tbl.enabled:
tbl.summary_writer.flush()
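# Illustrative sketch (not part of the original file): minimal logging setup
# mirroring how the training script is expected to drive this module; the
# file and directory names below are placeholders.
if __name__ == '__main__':
    init(log_fpath='nvlog.json', log_dir='logs', enabled=True,
         tb_subsets=['train', 'val'])
    log((1, 10, 100), tb_total_steps=10,
        data={'loss': 1.23, 'frames/s': 1000.0}, subset='train')
    flush()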
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/tb_dllogger.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data pipeline elements which wrap the data N times
A RepeatedDataLoader resets its iterator less frequently. This saves time
on multi-GPU platforms and is invisible to the training loop.
NOTE: Repeating puts a block of (len(dataset) * repeats) int64 indices into RAM.
Do not use more repeats than necessary (e.g., avoid passing 10**6 just to
simulate an infinite loader).
"""
import itertools
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
class RepeatedDataLoader(DataLoader):
def __init__(self, repeats, *args, **kwargs):
self.repeats = repeats
super().__init__(*args, **kwargs)
def __iter__(self):
if self._iterator is None or self.repeats_done >= self.repeats:
self.repeats_done = 1
return super().__iter__()
else:
self.repeats_done += 1
return self._iterator
class RepeatedDistributedSampler(DistributedSampler):
def __init__(self, repeats, *args, **kwargs):
self.repeats = repeats
assert self.repeats <= 10000, "Too many repeats overload RAM."
super().__init__(*args, **kwargs)
def __iter__(self):
# Draw indices for `self.repeats` epochs forward
start_epoch = self.epoch
iters = []
for r in range(self.repeats):
self.set_epoch(start_epoch + r)
iters.append(super().__iter__())
self.set_epoch(start_epoch)
return itertools.chain.from_iterable(iters)
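# Illustrative sketch (not part of the original file): pairing the two classes
# so each rank draws indices for `repeats` epochs at once and the loader only
# rebuilds its iterator every `repeats` epochs. num_replicas/rank are fixed
# here only to keep the example runnable without torch.distributed.
if __name__ == '__main__':
    dataset = list(range(16))
    sampler = RepeatedDistributedSampler(4, dataset, num_replicas=2, rank=0)
    loader = RepeatedDataLoader(4, dataset, batch_size=4, sampler=sampler,
                                num_workers=0)
    print(sum(1 for _ in loader))  # 8 batches: 4 repeats x (16 / 2 / 4)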
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/repeated_dataloader.py |
import os
import shutil
from collections import defaultdict
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class DefaultAttrDict(defaultdict):
def __init__(self, *args, **kwargs):
super(DefaultAttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def __getattr__(self, item):
return self[item]
def build_env(config, config_name, path):
t_path = os.path.join(path, config_name)
if config != t_path:
os.makedirs(path, exist_ok=True)
shutil.copyfile(config, os.path.join(path, config_name))
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/env.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
device_ids = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in device_ids]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
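# Illustrative sketch (not part of the original file): typical call at worker
# startup; the GPU index and process count below are placeholders, and the
# call requires an NVIDIA driver (pynvml) plus Linux sched_setaffinity.
if __name__ == '__main__':
    affinity = set_affinity(gpu_id=0, nproc_per_node=1, mode='socket')
    print(f'Bound to {len(affinity)} CPU cores')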
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/gpu_affinity.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mutes known and unrelated PyTorch warnings.
The warnings module keeps a list of filters. Importing it as late as possible
prevents its filters from being overriden.
"""
import warnings
# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
"ignore",
message='positional arguments and argument "destination" are deprecated.'
' nn.Module.state_dict will not accept them in the future.')
# 22.08-py3 container
warnings.filterwarnings(
"ignore",
message="is_namedtuple is deprecated, please use the python checks")
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/filter_warnings.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import librosa.util as librosa_util
import numpy as np
import torch
from scipy.signal import get_window
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, size=n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
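# Illustrative sketch (not part of the original file): the compression pair is
# a clamped log and its exp inverse, used to normalize mel magnitudes.
if __name__ == '__main__':
    mags = torch.rand(2, 80, 100)
    restored = dynamic_range_decompression(dynamic_range_compression(mags))
    print(torch.allclose(mags.clamp(min=1e-5), restored, atol=1e-6))  # True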
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/audio_processing.py |
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from common.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].copy())
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, size=filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
with torch.no_grad():
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase, self.inverse_basis,
stride=self.hop_length, padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
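# Illustrative sketch (not part of the original file): analysis/synthesis
# round trip on a random batch of waveforms; sizes are example values.
if __name__ == '__main__':
    stft = STFT(filter_length=800, hop_length=200, win_length=800)
    audio = torch.rand(2, 4000) * 2 - 1
    magnitude, phase = stft.transform(audio)
    reconstruction = stft.inverse(magnitude, phase)
    print(magnitude.shape, reconstruction.shape)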
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/stft.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# init_weights, get_padding, AttrDict
import ctypes
import glob
import os
import re
import shutil
import warnings
from collections import defaultdict, OrderedDict
from pathlib import Path
from typing import Optional
import librosa
import numpy as np
import torch
import torch.distributed as dist
from scipy.io.wavfile import read
def mask_from_lens(lens, max_len: Optional[int] = None):
if max_len is None:
max_len = lens.max()
ids = torch.arange(0, max_len, device=lens.device, dtype=lens.dtype)
mask = torch.lt(ids, lens.unsqueeze(1))
return mask
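# Illustrative example (not part of the original file), doctest-style:
# >>> mask_from_lens(torch.tensor([3, 1, 2]))
# tensor([[ True,  True,  True],
#         [ True, False, False],
#         [ True,  True, False]])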
def load_wav(full_path, torch_tensor=False):
import soundfile # flac
data, sampling_rate = soundfile.read(full_path, dtype='int16')
if torch_tensor:
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
else:
return data, sampling_rate
def load_wav_to_torch(full_path, force_sampling_rate=None):
if force_sampling_rate is not None:
data, sampling_rate = librosa.load(full_path, sr=force_sampling_rate)
else:
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(dataset_path, fnames, has_speakers=False, split="|"):
def split_line(root, line):
parts = line.strip().split(split)
if has_speakers:
paths, non_paths = parts[:-2], parts[-2:]
else:
paths, non_paths = parts[:-1], parts[-1:]
return tuple(str(Path(root, p)) for p in paths) + tuple(non_paths)
fpaths_and_text = []
for fname in fnames:
with open(fname, encoding='utf-8') as f:
fpaths_and_text += [split_line(dataset_path, line) for line in f]
return fpaths_and_text
def to_gpu(x):
x = x.contiguous()
return x.cuda(non_blocking=True) if torch.cuda.is_available() else x
def l2_promote():
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def prepare_tmp(path):
if path is None:
return
p = Path(path)
if p.is_dir():
warnings.warn(f'{p} exists. Removing...')
shutil.rmtree(p, ignore_errors=True)
p.mkdir(parents=False, exist_ok=False)
def print_once(*msg):
if not dist.is_initialized() or dist.get_rank() == 0:
print(*msg)
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
def load_pretrained_weights(model, ckpt_fpath):
model = getattr(model, "module", model)
weights = torch.load(ckpt_fpath, map_location="cpu")["state_dict"]
weights = {re.sub("^module.", "", k): v for k, v in weights.items()}
ckpt_emb = weights["encoder.word_emb.weight"]
new_emb = model.state_dict()["encoder.word_emb.weight"]
ckpt_vocab_size = ckpt_emb.size(0)
new_vocab_size = new_emb.size(0)
if ckpt_vocab_size != new_vocab_size:
print("WARNING: Resuming from a checkpoint with a different size "
"of embedding table. For best results, extend the vocab "
"and ensure the common symbols' indices match.")
min_len = min(ckpt_vocab_size, new_vocab_size)
weights["encoder.word_emb.weight"] = ckpt_emb if ckpt_vocab_size > new_vocab_size else new_emb
weights["encoder.word_emb.weight"][:min_len] = ckpt_emb[:min_len]
model.load_state_dict(weights)
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class DefaultAttrDict(defaultdict):
def __init__(self, *args, **kwargs):
super(DefaultAttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def __getattr__(self, item):
return self[item]
class BenchmarkStats:
""" Tracks statistics used for benchmarking. """
def __init__(self):
self.num_frames = []
self.losses = []
self.mel_losses = []
self.took = []
def update(self, num_frames, losses, mel_losses, took):
self.num_frames.append(num_frames)
self.losses.append(losses)
self.mel_losses.append(mel_losses)
self.took.append(took)
def get(self, n_epochs):
frames_s = sum(self.num_frames[-n_epochs:]) / sum(self.took[-n_epochs:])
return {'frames/s': frames_s,
'loss': np.mean(self.losses[-n_epochs:]),
'mel_loss': np.mean(self.mel_losses[-n_epochs:]),
'took': np.mean(self.took[-n_epochs:]),
'benchmark_epochs_num': n_epochs}
def __len__(self):
return len(self.losses)
class Checkpointer:
def __init__(self, save_dir, keep_milestones=[]):
self.save_dir = save_dir
self.keep_milestones = keep_milestones
find = lambda name: [
            (int(re.search(r"_(\d+).pt", fn).group(1)), fn)
for fn in glob.glob(f"{save_dir}/{name}_checkpoint_*.pt")]
tracked = sorted(find("FastPitch"), key=lambda t: t[0])
self.tracked = OrderedDict(tracked)
def last_checkpoint(self, output):
def corrupted(fpath):
try:
torch.load(fpath, map_location="cpu")
return False
except:
warnings.warn(f"Cannot load {fpath}")
return True
saved = sorted(
glob.glob(f"{output}/FastPitch_checkpoint_*.pt"),
            key=lambda f: int(re.search(r"_(\d+).pt", f).group(1)))
if len(saved) >= 1 and not corrupted(saved[-1]):
return saved[-1]
elif len(saved) >= 2:
return saved[-2]
else:
return None
def maybe_load(self, model, optimizer, scaler, train_state, args,
ema_model=None):
assert args.checkpoint_path is None or args.resume is False, (
"Specify a single checkpoint source")
fpath = None
if args.checkpoint_path is not None:
fpath = args.checkpoint_path
self.tracked = OrderedDict() # Do not track/delete prev ckpts
elif args.resume:
fpath = self.last_checkpoint(args.output)
if fpath is None:
return
print_once(f"Loading model and optimizer state from {fpath}")
ckpt = torch.load(fpath, map_location="cpu")
train_state["epoch"] = ckpt["epoch"] + 1
train_state["total_iter"] = ckpt["iteration"]
no_pref = lambda sd: {re.sub("^module.", "", k): v for k, v in sd.items()}
unwrap = lambda m: getattr(m, "module", m)
unwrap(model).load_state_dict(no_pref(ckpt["state_dict"]))
if ema_model is not None:
unwrap(ema_model).load_state_dict(no_pref(ckpt["ema_state_dict"]))
optimizer.load_state_dict(ckpt["optimizer"])
if "scaler" in ckpt:
scaler.load_state_dict(ckpt["scaler"])
else:
warnings.warn("AMP scaler state missing from the checkpoint.")
def maybe_save(self, args, model, ema_model, optimizer, scaler, epoch,
total_iter, config):
intermediate = (args.epochs_per_checkpoint > 0
and epoch % args.epochs_per_checkpoint == 0)
final = epoch == args.epochs
if not intermediate and not final and epoch not in self.keep_milestones:
return
rank = 0
if dist.is_initialized():
dist.barrier()
rank = dist.get_rank()
if rank != 0:
return
unwrap = lambda m: getattr(m, "module", m)
ckpt = {"epoch": epoch,
"iteration": total_iter,
"config": config,
"train_setup": args.__dict__,
"state_dict": unwrap(model).state_dict(),
"optimizer": optimizer.state_dict(),
"scaler": scaler.state_dict()}
if ema_model is not None:
ckpt["ema_state_dict"] = unwrap(ema_model).state_dict()
fpath = Path(args.output, f"FastPitch_checkpoint_{epoch}.pt")
print(f"Saving model and optimizer state at epoch {epoch} to {fpath}")
torch.save(ckpt, fpath)
# Remove old checkpoints; keep milestones and the last two
self.tracked[epoch] = fpath
for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
try:
os.remove(self.tracked[epoch])
except:
pass
del self.tracked[epoch]
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/utils.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from librosa.filters import mel as librosa_mel_fn
from common.audio_processing import (dynamic_range_compression,
dynamic_range_decompression)
from common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear',
batch_norm=False):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
        self.norm = torch.nn.BatchNorm1d(out_channels) if batch_norm else None
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
if self.norm is None:
return self.conv(signal)
else:
return self.norm(self.conv(signal))
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size,
padding=(kernel_size // 2))
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = F.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2).to(signal.dtype)
return self.dropout(out)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sr=sampling_rate,
n_fft=filter_length,
n_mels=n_mel_channels,
fmin=mel_fmin,
fmax=mel_fmax
)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
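# Illustrative sketch (not part of the original file): extracting mels from
# one second of random audio with the default 22.05 kHz configuration.
if __name__ == '__main__':
    taco_stft = TacotronSTFT()
    audio = torch.rand(1, 22050) * 2 - 1
    mel = taco_stft.mel_spectrogram(audio)
    print(mel.shape)  # (B, n_mel_channels, frames) == (1, 80, ~87)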
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/layers.py |
##############################################################################
# Utility functions borrowed from HiFi-GAN's utils.py
##############################################################################
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/utils_hfg.py |
import re
_letters_and_numbers_re = re.compile(
r"((?:[a-zA-Z]+[0-9]|[0-9]+[a-zA-Z])[a-zA-Z0-9']*)", re.IGNORECASE)
_hardware_re = re.compile(
    r'([0-9]+(?:[.,][0-9]+)?)(?:\s?)(tb|gb|mb|kb|ghz|mhz|khz|hz|mm)', re.IGNORECASE)
_hardware_key = {'tb': 'terabyte',
'gb': 'gigabyte',
'mb': 'megabyte',
'kb': 'kilobyte',
'ghz': 'gigahertz',
'mhz': 'megahertz',
'khz': 'kilohertz',
'hz': 'hertz',
'mm': 'millimeter',
'cm': 'centimeter',
'km': 'kilometer'}
_dimension_re = re.compile(
r'\b(\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?(?:in|inch|m)?)\b|\b(\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?(?:in|inch|m)?)\b')
_dimension_key = {'m': 'meter',
'in': 'inch',
'inch': 'inch'}
def _expand_letters_and_numbers(m):
text = re.split(r'(\d+)', m.group(0))
    # drop empty leading/trailing fragments produced by the split
if text[-1] == '':
text = text[:-1]
elif text[0] == '':
text = text[1:]
    # re-attach suffixes such as 1920s, AK47's, 20th, 1st, 2nd, 3rd to their digits
if text[-1] in ("'s", "s", "th", "nd", "st", "rd") and text[-2].isdigit():
text[-2] = text[-2] + text[-1]
text = text[:-1]
# for combining digits 2 by 2
new_text = []
for i in range(len(text)):
string = text[i]
if string.isdigit() and len(string) < 5:
# heuristics
if len(string) > 2 and string[-2] == '0':
if string[-1] == '0':
string = [string]
else:
string = [string[:-2], string[-2], string[-1]]
elif len(string) % 2 == 0:
string = [string[i:i+2] for i in range(0, len(string), 2)]
elif len(string) > 2:
string = [string[0]] + [string[i:i+2] for i in range(1, len(string), 2)]
new_text.extend(string)
else:
new_text.append(string)
text = new_text
text = " ".join(text)
return text
def _expand_hardware(m):
quantity, measure = m.groups(0)
measure = _hardware_key[measure.lower()]
if measure[-1] != 'z' and float(quantity.replace(',', '')) > 1:
return "{} {}s".format(quantity, measure)
return "{} {}".format(quantity, measure)
def _expand_dimension(m):
text = "".join([x for x in m.groups(0) if x != 0])
text = text.replace(' x ', ' by ')
text = text.replace('x', ' by ')
if text.endswith(tuple(_dimension_key.keys())):
if text[-2].isdigit():
text = "{} {}".format(text[:-1], _dimension_key[text[-1:]])
elif text[-3].isdigit():
text = "{} {}".format(text[:-2], _dimension_key[text[-2:]])
return text
def normalize_letters_and_numbers(text):
text = re.sub(_hardware_re, _expand_hardware, text)
text = re.sub(_dimension_re, _expand_dimension, text)
text = re.sub(_letters_and_numbers_re, _expand_letters_and_numbers, text)
return text
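if __name__ == '__main__':
    # Quick smoke test (this module only depends on `re`); the printed outputs
    # are illustrative, not asserted.
    for example in ['GTX1080', '4GB of RAM', '1920x1080']:
        print(example, '->', normalize_letters_and_numbers(example))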
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/letters_and_numbers.py |
""" from https://github.com/keithito/tacotron """
import re
import sys
import urllib.request
from pathlib import Path
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path=None, heteronyms_path=None, keep_ambiguous=True):
self._entries = {}
self.heteronyms = []
if file_or_path is not None:
self.initialize(file_or_path, heteronyms_path, keep_ambiguous)
def initialize(self, file_or_path, heteronyms_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
if not Path(file_or_path).exists():
print("CMUdict missing. Downloading to data/cmudict/.")
self.download()
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
if heteronyms_path is not None:
with open(heteronyms_path, encoding='utf-8') as f:
self.heteronyms = [l.rstrip() for l in f]
def __len__(self):
if len(self._entries) == 0:
raise ValueError("CMUDict not initialized")
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
if len(self._entries) == 0:
raise ValueError("CMUDict not initialized")
return self._entries.get(word.upper())
def download(self):
url = 'https://github.com/Alexir/CMUdict/raw/master/cmudict-0.7b'
try:
Path('cmudict').mkdir(parents=False, exist_ok=True)
urllib.request.urlretrieve(url, filename='cmudict/cmudict-0.7b')
        except Exception:
print("Automatic download of CMUdict failed. Try manually with:")
print()
print(" bash scripts/download_cmudict.sh")
print()
print("and re-run the script.")
sys.exit(0)
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
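if __name__ == '__main__':
    # Minimal usage sketch. Assumes the dictionary file is available locally;
    # otherwise initialize() falls back to the automatic download above.
    d = CMUDict('cmudict/cmudict-0.7b')
    print(len(d), 'entries')
    print(d.lookup('SPEECH'))  # list of ARPAbet pronunciations, or None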
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/cmudict.py |
import re
_no_period_re = re.compile(r'(No[.])(?=[ ]?[0-9])')
_percent_re = re.compile(r'([ ]?[%])')
_half_re = re.compile('([0-9]½)|(½)')
_url_re = re.compile(r'([a-zA-Z])\.(com|gov|org)')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('ms', 'miss'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
('sen', 'senator'),
('etc', 'et cetera'),
]]
def _expand_no_period(m):
word = m.group(0)
if word[0] == 'N':
return 'Number'
return 'number'
def _expand_percent(m):
return ' percent'
def _expand_half(m):
word = m.group(1)
if word is None:
return 'half'
return word[0] + ' and a half'
def _expand_urls(m):
return f'{m.group(1)} dot {m.group(2)}'
def normalize_abbreviations(text):
text = re.sub(_no_period_re, _expand_no_period, text)
text = re.sub(_percent_re, _expand_percent, text)
text = re.sub(_half_re, _expand_half, text)
text = re.sub('&', ' and ', text)
text = re.sub('@', ' at ', text)
text = re.sub(_url_re, _expand_urls, text)
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
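if __name__ == '__main__':
    # Quick smoke test (this module only depends on `re`); the printed outputs
    # are illustrative, not asserted.
    for example in ['Dr. Jones met Mr. & Mrs. Smith at 50% capacity.',
                    'Visit example.com for details.']:
        print(example, '->', normalize_abbreviations(example))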
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/abbreviations.py |
""" adapted from https://github.com/keithito/tacotron """
import re
import numpy as np
from . import cleaners
from .symbols import get_symbols
from . import cmudict
from .numerical import _currency_re, _expand_currency
#########
# REGEX #
#########
# Regular expression matching text enclosed in curly braces for encoding
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching words and not words
_words_re = re.compile(r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]{1,2}|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)")
# Regular expression separating words enclosed in curly braces for cleaning
_arpa_re = re.compile(r'{[^}]+}|\S+')
class TextProcessing(object):
def __init__(self, symbol_set, cleaner_names, p_arpabet=0.0,
handle_arpabet='word', handle_arpabet_ambiguous='ignore',
expand_currency=True):
self.symbols = get_symbols(symbol_set)
self.cleaner_names = cleaner_names
# Mappings from symbol to numeric ID and vice versa:
self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}
self.expand_currency = expand_currency
# cmudict
self.p_arpabet = p_arpabet
self.handle_arpabet = handle_arpabet
self.handle_arpabet_ambiguous = handle_arpabet_ambiguous
def text_to_sequence(self, text):
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += self.symbols_to_sequence(text)
break
sequence += self.symbols_to_sequence(m.group(1))
sequence += self.arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(self, sequence):
result = ''
for symbol_id in sequence:
if symbol_id in self.id_to_symbol:
s = self.id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def clean_text(self, text):
for name in self.cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def symbols_to_sequence(self, symbols):
return [self.symbol_to_id[s] for s in symbols if s in self.symbol_to_id]
def arpabet_to_sequence(self, text):
return self.symbols_to_sequence(['@' + s for s in text.split()])
def get_arpabet(self, word):
arpabet_suffix = ''
if word.lower() in cmudict.heteronyms:
return word
if len(word) > 2 and word.endswith("'s"):
arpabet = cmudict.lookup(word)
if arpabet is None:
arpabet = self.get_arpabet(word[:-2])
arpabet_suffix = ' Z'
elif len(word) > 1 and word.endswith("s"):
arpabet = cmudict.lookup(word)
if arpabet is None:
arpabet = self.get_arpabet(word[:-1])
arpabet_suffix = ' Z'
else:
arpabet = cmudict.lookup(word)
if arpabet is None:
return word
elif arpabet[0] == '{':
arpabet = [arpabet[1:-1]]
# XXX arpabet might not be a list here
if type(arpabet) is not list:
return word
if len(arpabet) > 1:
if self.handle_arpabet_ambiguous == 'first':
arpabet = arpabet[0]
elif self.handle_arpabet_ambiguous == 'random':
arpabet = np.random.choice(arpabet)
elif self.handle_arpabet_ambiguous == 'ignore':
return word
else:
arpabet = arpabet[0]
arpabet = "{" + arpabet + arpabet_suffix + "}"
return arpabet
def encode_text(self, text, return_all=False):
if self.expand_currency:
text = re.sub(_currency_re, _expand_currency, text)
text_clean = [self.clean_text(split) if split[0] != '{' else split
for split in _arpa_re.findall(text)]
text_clean = ' '.join(text_clean)
text_clean = cleaners.collapse_whitespace(text_clean)
text = text_clean
text_arpabet = ''
if self.p_arpabet > 0:
if self.handle_arpabet == 'sentence':
if np.random.uniform() < self.p_arpabet:
words = _words_re.findall(text)
text_arpabet = [
self.get_arpabet(word[0])
if (word[0] != '') else word[1]
for word in words]
text_arpabet = ''.join(text_arpabet)
text = text_arpabet
elif self.handle_arpabet == 'word':
words = _words_re.findall(text)
text_arpabet = [
word[1] if word[0] == '' else (
self.get_arpabet(word[0])
if np.random.uniform() < self.p_arpabet
else word[0])
for word in words]
text_arpabet = ''.join(text_arpabet)
text = text_arpabet
elif self.handle_arpabet != '':
raise Exception("{} handle_arpabet is not supported".format(
self.handle_arpabet))
text_encoded = self.text_to_sequence(text)
if return_all:
return text_encoded, text_clean, text_arpabet
return text_encoded
def get_text_processing(symbol_set, text_cleaners, p_arpabet):
if symbol_set in ['english_basic', 'english_basic_lowercase', 'english_expanded']:
return TextProcessing(symbol_set, text_cleaners, p_arpabet=p_arpabet)
elif symbol_set == 'english_mandarin_basic':
from common.text.zh.mandarin_text_processing import MandarinTextProcessing
return MandarinTextProcessing(symbol_set, text_cleaners, p_arpabet=p_arpabet)
else:
raise ValueError(f"No TextProcessing for symbol set {symbol_set} unknown.")
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/text_processing.py |
from .cmudict import CMUDict
cmudict = CMUDict()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/__init__.py |
""" adapted from https://github.com/keithito/tacotron """
import inflect
import re
_magnitudes = ['trillion', 'billion', 'million', 'thousand', 'hundred', 'm', 'b', 't']
_magnitudes_key = {'m': 'million', 'b': 'billion', 't': 'trillion'}
_measurements = '(f|c|k|d|m)'
_measurements_key = {'f': 'fahrenheit',
'c': 'celsius',
'k': 'thousand',
'm': 'meters'}
_currency_key = {'$': 'dollar', '£': 'pound', '€': 'euro', '₩': 'won'}
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_currency_re = re.compile(r'([\$€£₩])([0-9\.\,]*[0-9]+)(?:[ ]?({})(?=[^a-zA-Z]|$))?'.format("|".join(_magnitudes)), re.IGNORECASE)
_measurement_re = re.compile(r'([0-9\.\,]*[0-9]+(\s)?{}\b)'.format(_measurements), re.IGNORECASE)
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
# _range_re = re.compile(r'(?<=[0-9])+(-)(?=[0-9])+.*?')
_roman_re = re.compile(r'\b(?=[MDCLXVI]+\b)M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{2,3})\b') # avoid I
_multiply_re = re.compile(r'(\b[0-9]+)(x)([0-9]+)')
_number_re = re.compile(r"[0-9]+'s|[0-9]+s|[0-9]+")
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_currency(m):
currency = _currency_key[m.group(1)]
quantity = m.group(2)
magnitude = m.group(3)
# remove commas from quantity to be able to convert to numerical
quantity = quantity.replace(',', '')
# check for million, billion, etc...
if magnitude is not None and magnitude.lower() in _magnitudes:
if len(magnitude) == 1:
magnitude = _magnitudes_key[magnitude.lower()]
return "{} {} {}".format(_expand_hundreds(quantity), magnitude, currency+'s')
parts = quantity.split('.')
if len(parts) > 2:
return quantity + " " + currency + "s" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = currency if dollars == 1 else currency+'s'
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}, {} {}".format(
_expand_hundreds(dollars), dollar_unit,
_inflect.number_to_words(cents), cent_unit)
elif dollars:
dollar_unit = currency if dollars == 1 else currency+'s'
return "{} {}".format(_expand_hundreds(dollars), dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}".format(_inflect.number_to_words(cents), cent_unit)
else:
return 'zero' + ' ' + currency + 's'
def _expand_hundreds(text):
number = float(text)
if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
return _inflect.number_to_words(int(number / 100)) + " hundred"
else:
return _inflect.number_to_words(text)
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_measurement(m):
    _, number, measurement = re.split(r'(\d+(?:\.\d+)?)', m.group(0))
number = _inflect.number_to_words(number)
measurement = "".join(measurement.split())
measurement = _measurements_key[measurement.lower()]
return "{} {}".format(number, measurement)
def _expand_range(m):
return ' to '
def _expand_multiply(m):
left = m.group(1)
right = m.group(3)
return "{} by {}".format(left, right)
def _expand_roman(m):
# from https://stackoverflow.com/questions/19308177/converting-roman-numerals-to-integers-in-python
roman_numerals = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}
result = 0
num = m.group(0)
for i, c in enumerate(num):
if (i+1) == len(num) or roman_numerals[c] >= roman_numerals[num[i+1]]:
result += roman_numerals[c]
else:
result -= roman_numerals[c]
return str(result)
def _expand_number(m):
_, number, suffix = re.split(r"(\d+(?:'?\d+)?)", m.group(0))
number = int(number)
    if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
text = _inflect.number_to_words(number // 100) + " hundred"
elif number > 1000 and number < 3000:
if number == 2000:
text = 'two thousand'
elif number > 2000 and number < 2010:
text = 'two thousand ' + _inflect.number_to_words(number % 100)
elif number % 100 == 0:
text = _inflect.number_to_words(number // 100) + ' hundred'
else:
number = _inflect.number_to_words(number, andword='', zero='oh', group=2).replace(', ', ' ')
number = re.sub(r'-', ' ', number)
text = number
else:
number = _inflect.number_to_words(number, andword='and')
number = re.sub(r'-', ' ', number)
number = re.sub(r',', '', number)
text = number
if suffix in ("'s", "s"):
if text[-1] == 'y':
text = text[:-1] + 'ies'
else:
text = text + suffix
return text
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_currency_re, _expand_currency, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
# text = re.sub(_range_re, _expand_range, text)
# text = re.sub(_measurement_re, _expand_measurement, text)
text = re.sub(_roman_re, _expand_roman, text)
text = re.sub(_multiply_re, _expand_multiply, text)
text = re.sub(_number_re, _expand_number, text)
return text
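if __name__ == '__main__':
    # Quick smoke test (uses the third-party `inflect` package imported above);
    # the printed outputs are illustrative, not asserted.
    for example in ['$3.50', 'March 3rd, 2011', 'Chapter XIV', '1080x1920 pixels']:
        print(example, '->', normalize_numbers(example))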
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/numerical.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text
that has been run through Unidecode. For other data, you can modify
_characters. See TRAINING_DATA.md for details. '''
from .cmudict import valid_symbols
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in valid_symbols]
def get_symbols(symbol_set='english_basic'):
if symbol_set == 'english_basic':
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_basic_lowercase':
_pad = '_'
_punctuation = '!\'"(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_expanded':
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_punctuation + _math + _special + _accented + _letters) + _arpabet
elif symbol_set == 'english_mandarin_basic':
from .zh.chinese import chinese_punctuations, valid_symbols as mandarin_valid_symbols
# Prepend "#" to mandarin phonemes to ensure uniqueness (some are the same as uppercase letters):
_mandarin_phonemes = ['#' + s for s in mandarin_valid_symbols]
_pad = '_'
_punctuation = '!\'(),.:;? '
_chinese_punctuation = ["#" + p for p in chinese_punctuations]
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet + _mandarin_phonemes + _chinese_punctuation
else:
raise Exception("{} symbol set does not exist".format(symbol_set))
return symbols
def get_pad_idx(symbol_set='english_basic'):
if symbol_set in {'english_basic', 'english_basic_lowercase', 'english_mandarin_basic'}:
return 0
else:
raise Exception("{} symbol set not used yet".format(symbol_set))
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/symbols.py |
""" adapted from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .abbreviations import normalize_abbreviations
from .acronyms import normalize_acronyms, spell_acronyms
from .datestime import normalize_datestime
from .letters_and_numbers import normalize_letters_and_numbers
from .numerical import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
def expand_abbreviations(text):
return normalize_abbreviations(text)
def expand_numbers(text):
return normalize_numbers(text)
def expand_acronyms(text):
return normalize_acronyms(text)
def expand_datestime(text):
return normalize_datestime(text)
def expand_letters_and_numbers(text):
return normalize_letters_and_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def separate_acronyms(text):
text = re.sub(r"([0-9]+)([a-zA-Z]+)", r"\1 \2", text)
text = re.sub(r"([a-zA-Z]+)([0-9]+)", r"\1 \2", text)
return text
def convert_to_ascii(text):
return unidecoder(text)
def basic_cleaners(text):
'''Basic pipeline that collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, with number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
def english_cleaners_v2(text):
text = convert_to_ascii(text)
text = expand_datestime(text)
text = expand_letters_and_numbers(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = spell_acronyms(text)
text = lowercase(text)
text = collapse_whitespace(text)
# compatibility with basic_english symbol set
text = re.sub(r'/+', ' ', text)
return text
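# Example (a sketch; relies on the sibling `common.text` modules imported above):
#
#     >>> english_cleaners_v2('Dr. Smith bought 2 GPUs for $1,500 at 9:30am.')
#     # -> lower-cased ASCII text with the abbreviation, numbers, currency,
#     #    time and acronym spelled out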
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/cleaners.py |
import re
_ampm_re = re.compile(
r'([0-9]|0[0-9]|1[0-9]|2[0-3]):?([0-5][0-9])?\s*([AaPp][Mm]\b)')
def _expand_ampm(m):
matches = list(m.groups(0))
txt = matches[0]
txt = txt if int(matches[1]) == 0 else txt + ' ' + matches[1]
if matches[2][0].lower() == 'a':
txt += ' a.m.'
elif matches[2][0].lower() == 'p':
txt += ' p.m.'
return txt
def normalize_datestime(text):
text = re.sub(_ampm_re, _expand_ampm, text)
#text = re.sub(r"([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])?", r"\1 \2", text)
return text
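if __name__ == '__main__':
    # Quick smoke test (this module only depends on `re`); the printed output
    # is illustrative, not asserted.
    print(normalize_datestime('Doors open at 7:30pm and close at 11 PM.'))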
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/datestime.py |
import re
from . import cmudict
_letter_to_arpabet = {
'A': 'EY1',
'B': 'B IY1',
'C': 'S IY1',
'D': 'D IY1',
'E': 'IY1',
'F': 'EH1 F',
'G': 'JH IY1',
'H': 'EY1 CH',
'I': 'AY1',
'J': 'JH EY1',
'K': 'K EY1',
'L': 'EH1 L',
'M': 'EH1 M',
'N': 'EH1 N',
'O': 'OW1',
'P': 'P IY1',
'Q': 'K Y UW1',
'R': 'AA1 R',
'S': 'EH1 S',
'T': 'T IY1',
'U': 'Y UW1',
'V': 'V IY1',
'X': 'EH1 K S',
'Y': 'W AY1',
'W': 'D AH1 B AH0 L Y UW0',
'Z': 'Z IY1',
's': 'Z'
}
# Acronyms that should not be expanded
hardcoded_acronyms = [
'BMW', 'MVD', 'WDSU', 'GOP', 'UK', 'AI', 'GPS', 'BP', 'FBI', 'HD',
'CES', 'LRA', 'PC', 'NBA', 'BBL', 'OS', 'IRS', 'SAC', 'UV', 'CEO', 'TV',
'CNN', 'MSS', 'GSA', 'USSR', 'DNA', 'PRS', 'TSA', 'US', 'GPU', 'USA',
'FPCC', 'CIA']
# Words and acronyms that should be read as regular words, e.g., NATO, HAPPY, etc.
uppercase_whitelist = []
acronyms_exceptions = {
'NVIDIA': 'N.VIDIA',
}
non_uppercase_exceptions = {
'email': 'e-mail',
}
# must ignore roman numerals
_acronym_re = re.compile(r'([a-z]*[A-Z][A-Z]+)s?\.?')
_non_uppercase_re = re.compile(r'\b({})\b'.format('|'.join(non_uppercase_exceptions.keys())), re.IGNORECASE)
def _expand_acronyms_to_arpa(m, add_spaces=True):
acronym = m.group(0)
# remove dots if they exist
    acronym = re.sub(r'\.', '', acronym)
acronym = "".join(acronym.split())
arpabet = cmudict.lookup(acronym)
if arpabet is None:
acronym = list(acronym)
arpabet = ["{" + _letter_to_arpabet[letter] + "}" for letter in acronym]
        # fold a trailing plural 's' (spelled as '{Z}') into the previous letter's phonemes
if arpabet[-1] == '{Z}' and len(arpabet) > 1:
arpabet[-2] = arpabet[-2][:-1] + ' ' + arpabet[-1][1:]
del arpabet[-1]
arpabet = ' '.join(arpabet)
elif len(arpabet) == 1:
arpabet = "{" + arpabet[0] + "}"
else:
arpabet = acronym
return arpabet
def normalize_acronyms(text):
text = re.sub(_acronym_re, _expand_acronyms_to_arpa, text)
return text
def expand_acronyms(m):
text = m.group(1)
if text in acronyms_exceptions:
text = acronyms_exceptions[text]
    elif text in uppercase_whitelist:
text = text
else:
text = '.'.join(text) + '.'
if 's' in m.group(0):
text = text + '\'s'
if text[-1] != '.' and m.group(0)[-1] == '.':
return text + '.'
else:
return text
def spell_acronyms(text):
text = re.sub(_non_uppercase_re, lambda m: non_uppercase_exceptions[m.group(0).lower()], text)
text = re.sub(_acronym_re, expand_acronyms, text)
return text
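# Example (a sketch; `spell_acronyms` is the entry point used by the cleaners):
#
#     >>> spell_acronyms('The GPU is made by NVIDIA.')
#     # -> roughly 'The G.P.U. is made by N.VIDIA.'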
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/acronyms.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Sindre Sorhus <[email protected]> (https://sindresorhus.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/sindresorhus/transliterate/blob/main/replacements.js
#
replacements = [
# German umlauts
['ß', 'ss'],
['ẞ', 'Ss'],
['ä', 'ae'],
['Ä', 'Ae'],
['ö', 'oe'],
['Ö', 'Oe'],
['ü', 'ue'],
['Ü', 'Ue'],
# Latin
['À', 'A'],
['Á', 'A'],
['Â', 'A'],
['Ã', 'A'],
['Ä', 'Ae'],
['Å', 'A'],
['Æ', 'AE'],
['Ç', 'C'],
['È', 'E'],
['É', 'E'],
['Ê', 'E'],
['Ë', 'E'],
['Ì', 'I'],
['Í', 'I'],
['Î', 'I'],
['Ï', 'I'],
['Ð', 'D'],
['Ñ', 'N'],
['Ò', 'O'],
['Ó', 'O'],
['Ô', 'O'],
['Õ', 'O'],
['Ö', 'Oe'],
['Ő', 'O'],
['Ø', 'O'],
['Ù', 'U'],
['Ú', 'U'],
['Û', 'U'],
['Ü', 'Ue'],
['Ű', 'U'],
['Ý', 'Y'],
['Þ', 'TH'],
['ß', 'ss'],
['à', 'a'],
['á', 'a'],
['â', 'a'],
['ã', 'a'],
['ä', 'ae'],
['å', 'a'],
['æ', 'ae'],
['ç', 'c'],
['è', 'e'],
['é', 'e'],
['ê', 'e'],
['ë', 'e'],
['ì', 'i'],
['í', 'i'],
['î', 'i'],
['ï', 'i'],
['ð', 'd'],
['ñ', 'n'],
['ò', 'o'],
['ó', 'o'],
['ô', 'o'],
['õ', 'o'],
['ö', 'oe'],
['ő', 'o'],
['ø', 'o'],
['ù', 'u'],
['ú', 'u'],
['û', 'u'],
['ü', 'ue'],
['ű', 'u'],
['ý', 'y'],
['þ', 'th'],
['ÿ', 'y'],
['ẞ', 'SS'],
# Vietnamese
['à', 'a'],
['À', 'A'],
['á', 'a'],
['Á', 'A'],
['â', 'a'],
['Â', 'A'],
['ã', 'a'],
['Ã', 'A'],
['è', 'e'],
['È', 'E'],
['é', 'e'],
['É', 'E'],
['ê', 'e'],
['Ê', 'E'],
['ì', 'i'],
['Ì', 'I'],
['í', 'i'],
['Í', 'I'],
['ò', 'o'],
['Ò', 'O'],
['ó', 'o'],
['Ó', 'O'],
['ô', 'o'],
['Ô', 'O'],
['õ', 'o'],
['Õ', 'O'],
['ù', 'u'],
['Ù', 'U'],
['ú', 'u'],
['Ú', 'U'],
['ý', 'y'],
['Ý', 'Y'],
['ă', 'a'],
['Ă', 'A'],
['Đ', 'D'],
['đ', 'd'],
['ĩ', 'i'],
['Ĩ', 'I'],
['ũ', 'u'],
['Ũ', 'U'],
['ơ', 'o'],
['Ơ', 'O'],
['ư', 'u'],
['Ư', 'U'],
['ạ', 'a'],
['Ạ', 'A'],
['ả', 'a'],
['Ả', 'A'],
['ấ', 'a'],
['Ấ', 'A'],
['ầ', 'a'],
['Ầ', 'A'],
['ẩ', 'a'],
['Ẩ', 'A'],
['ẫ', 'a'],
['Ẫ', 'A'],
['ậ', 'a'],
['Ậ', 'A'],
['ắ', 'a'],
['Ắ', 'A'],
['ằ', 'a'],
['Ằ', 'A'],
['ẳ', 'a'],
['Ẳ', 'A'],
['ẵ', 'a'],
['Ẵ', 'A'],
['ặ', 'a'],
['Ặ', 'A'],
['ẹ', 'e'],
['Ẹ', 'E'],
['ẻ', 'e'],
['Ẻ', 'E'],
['ẽ', 'e'],
['Ẽ', 'E'],
['ế', 'e'],
['Ế', 'E'],
['ề', 'e'],
['Ề', 'E'],
['ể', 'e'],
['Ể', 'E'],
['ễ', 'e'],
['Ễ', 'E'],
['ệ', 'e'],
['Ệ', 'E'],
['ỉ', 'i'],
['Ỉ', 'I'],
['ị', 'i'],
['Ị', 'I'],
['ọ', 'o'],
['Ọ', 'O'],
['ỏ', 'o'],
['Ỏ', 'O'],
['ố', 'o'],
['Ố', 'O'],
['ồ', 'o'],
['Ồ', 'O'],
['ổ', 'o'],
['Ổ', 'O'],
['ỗ', 'o'],
['Ỗ', 'O'],
['ộ', 'o'],
['Ộ', 'O'],
['ớ', 'o'],
['Ớ', 'O'],
['ờ', 'o'],
['Ờ', 'O'],
['ở', 'o'],
['Ở', 'O'],
['ỡ', 'o'],
['Ỡ', 'O'],
['ợ', 'o'],
['Ợ', 'O'],
['ụ', 'u'],
['Ụ', 'U'],
['ủ', 'u'],
['Ủ', 'U'],
['ứ', 'u'],
['Ứ', 'U'],
['ừ', 'u'],
['Ừ', 'U'],
['ử', 'u'],
['Ử', 'U'],
['ữ', 'u'],
['Ữ', 'U'],
['ự', 'u'],
['Ự', 'U'],
['ỳ', 'y'],
['Ỳ', 'Y'],
['ỵ', 'y'],
['Ỵ', 'Y'],
['ỷ', 'y'],
['Ỷ', 'Y'],
['ỹ', 'y'],
['Ỹ', 'Y'],
# Arabic
['ء', 'e'],
['آ', 'a'],
['أ', 'a'],
['ؤ', 'w'],
['إ', 'i'],
['ئ', 'y'],
['ا', 'a'],
['ب', 'b'],
['ة', 't'],
['ت', 't'],
['ث', 'th'],
['ج', 'j'],
['ح', 'h'],
['خ', 'kh'],
['د', 'd'],
['ذ', 'dh'],
['ر', 'r'],
['ز', 'z'],
['س', 's'],
['ش', 'sh'],
['ص', 's'],
['ض', 'd'],
['ط', 't'],
['ظ', 'z'],
['ع', 'e'],
['غ', 'gh'],
['ـ', '_'],
['ف', 'f'],
['ق', 'q'],
['ك', 'k'],
['ل', 'l'],
['م', 'm'],
['ن', 'n'],
['ه', 'h'],
['و', 'w'],
['ى', 'a'],
['ي', 'y'],
['َ', 'a'],
['ُ', 'u'],
['ِ', 'i'],
['٠', '0'],
['١', '1'],
['٢', '2'],
['٣', '3'],
['٤', '4'],
['٥', '5'],
['٦', '6'],
['٧', '7'],
['٨', '8'],
['٩', '9'],
# Persian / Farsi
['چ', 'ch'],
['ک', 'k'],
['گ', 'g'],
['پ', 'p'],
['ژ', 'zh'],
['ی', 'y'],
['۰', '0'],
['۱', '1'],
['۲', '2'],
['۳', '3'],
['۴', '4'],
['۵', '5'],
['۶', '6'],
['۷', '7'],
['۸', '8'],
['۹', '9'],
# Pashto
['ټ', 'p'],
['ځ', 'z'],
['څ', 'c'],
['ډ', 'd'],
['ﺫ', 'd'],
['ﺭ', 'r'],
['ړ', 'r'],
['ﺯ', 'z'],
['ږ', 'g'],
['ښ', 'x'],
['ګ', 'g'],
['ڼ', 'n'],
['ۀ', 'e'],
['ې', 'e'],
['ۍ', 'ai'],
# Urdu
['ٹ', 't'],
['ڈ', 'd'],
['ڑ', 'r'],
['ں', 'n'],
['ہ', 'h'],
['ھ', 'h'],
['ے', 'e'],
# Russian
['А', 'A'],
['а', 'a'],
['Б', 'B'],
['б', 'b'],
['В', 'V'],
['в', 'v'],
['Г', 'G'],
['г', 'g'],
['Д', 'D'],
['д', 'd'],
['ъе', 'ye'],
['Ъе', 'Ye'],
['ъЕ', 'yE'],
['ЪЕ', 'YE'],
['Е', 'E'],
['е', 'e'],
['Ё', 'Yo'],
['ё', 'yo'],
['Ж', 'Zh'],
['ж', 'zh'],
['З', 'Z'],
['з', 'z'],
['И', 'I'],
['и', 'i'],
['ый', 'iy'],
['Ый', 'Iy'],
['ЫЙ', 'IY'],
['ыЙ', 'iY'],
['Й', 'Y'],
['й', 'y'],
['К', 'K'],
['к', 'k'],
['Л', 'L'],
['л', 'l'],
['М', 'M'],
['м', 'm'],
['Н', 'N'],
['н', 'n'],
['О', 'O'],
['о', 'o'],
['П', 'P'],
['п', 'p'],
['Р', 'R'],
['р', 'r'],
['С', 'S'],
['с', 's'],
['Т', 'T'],
['т', 't'],
['У', 'U'],
['у', 'u'],
['Ф', 'F'],
['ф', 'f'],
['Х', 'Kh'],
['х', 'kh'],
['Ц', 'Ts'],
['ц', 'ts'],
['Ч', 'Ch'],
['ч', 'ch'],
['Ш', 'Sh'],
['ш', 'sh'],
['Щ', 'Sch'],
['щ', 'sch'],
['Ъ', ''],
['ъ', ''],
['Ы', 'Y'],
['ы', 'y'],
['Ь', ''],
['ь', ''],
['Э', 'E'],
['э', 'e'],
['Ю', 'Yu'],
['ю', 'yu'],
['Я', 'Ya'],
['я', 'ya'],
# Romanian
['ă', 'a'],
['Ă', 'A'],
['ș', 's'],
['Ș', 'S'],
['ț', 't'],
['Ț', 'T'],
['ţ', 't'],
['Ţ', 'T'],
# Turkish
['ş', 's'],
['Ş', 'S'],
['ç', 'c'],
['Ç', 'C'],
['ğ', 'g'],
['Ğ', 'G'],
['ı', 'i'],
['İ', 'I'],
# Armenian
['ա', 'a'],
['Ա', 'A'],
['բ', 'b'],
['Բ', 'B'],
['գ', 'g'],
['Գ', 'G'],
['դ', 'd'],
['Դ', 'D'],
['ե', 'ye'],
['Ե', 'Ye'],
['զ', 'z'],
['Զ', 'Z'],
['է', 'e'],
['Է', 'E'],
['ը', 'y'],
['Ը', 'Y'],
['թ', 't'],
['Թ', 'T'],
['ժ', 'zh'],
['Ժ', 'Zh'],
['ի', 'i'],
['Ի', 'I'],
['լ', 'l'],
['Լ', 'L'],
['խ', 'kh'],
['Խ', 'Kh'],
['ծ', 'ts'],
['Ծ', 'Ts'],
['կ', 'k'],
['Կ', 'K'],
['հ', 'h'],
['Հ', 'H'],
['ձ', 'dz'],
['Ձ', 'Dz'],
['ղ', 'gh'],
['Ղ', 'Gh'],
['ճ', 'tch'],
['Ճ', 'Tch'],
['մ', 'm'],
['Մ', 'M'],
['յ', 'y'],
['Յ', 'Y'],
['ն', 'n'],
['Ն', 'N'],
['շ', 'sh'],
['Շ', 'Sh'],
['ո', 'vo'],
['Ո', 'Vo'],
['չ', 'ch'],
['Չ', 'Ch'],
['պ', 'p'],
['Պ', 'P'],
['ջ', 'j'],
['Ջ', 'J'],
['ռ', 'r'],
['Ռ', 'R'],
['ս', 's'],
['Ս', 'S'],
['վ', 'v'],
['Վ', 'V'],
['տ', 't'],
['Տ', 'T'],
['ր', 'r'],
['Ր', 'R'],
['ց', 'c'],
['Ց', 'C'],
['ու', 'u'],
['ՈՒ', 'U'],
['Ու', 'U'],
['փ', 'p'],
['Փ', 'P'],
['ք', 'q'],
['Ք', 'Q'],
['օ', 'o'],
['Օ', 'O'],
['ֆ', 'f'],
['Ֆ', 'F'],
['և', 'yev'],
# Georgian
['ა', 'a'],
['ბ', 'b'],
['გ', 'g'],
['დ', 'd'],
['ე', 'e'],
['ვ', 'v'],
['ზ', 'z'],
['თ', 't'],
['ი', 'i'],
['კ', 'k'],
['ლ', 'l'],
['მ', 'm'],
['ნ', 'n'],
['ო', 'o'],
['პ', 'p'],
['ჟ', 'zh'],
['რ', 'r'],
['ს', 's'],
['ტ', 't'],
['უ', 'u'],
['ფ', 'ph'],
['ქ', 'q'],
['ღ', 'gh'],
['ყ', 'k'],
['შ', 'sh'],
['ჩ', 'ch'],
['ც', 'ts'],
['ძ', 'dz'],
['წ', 'ts'],
['ჭ', 'tch'],
['ხ', 'kh'],
['ჯ', 'j'],
['ჰ', 'h'],
# Czech
['č', 'c'],
['ď', 'd'],
['ě', 'e'],
['ň', 'n'],
['ř', 'r'],
['š', 's'],
['ť', 't'],
['ů', 'u'],
['ž', 'z'],
['Č', 'C'],
['Ď', 'D'],
['Ě', 'E'],
['Ň', 'N'],
['Ř', 'R'],
['Š', 'S'],
['Ť', 'T'],
['Ů', 'U'],
['Ž', 'Z'],
# Dhivehi
['ހ', 'h'],
['ށ', 'sh'],
['ނ', 'n'],
['ރ', 'r'],
['ބ', 'b'],
['ޅ', 'lh'],
['ކ', 'k'],
['އ', 'a'],
['ވ', 'v'],
['މ', 'm'],
['ފ', 'f'],
['ދ', 'dh'],
['ތ', 'th'],
['ލ', 'l'],
['ގ', 'g'],
['ޏ', 'gn'],
['ސ', 's'],
['ޑ', 'd'],
['ޒ', 'z'],
['ޓ', 't'],
['ޔ', 'y'],
['ޕ', 'p'],
['ޖ', 'j'],
['ޗ', 'ch'],
['ޘ', 'tt'],
['ޙ', 'hh'],
['ޚ', 'kh'],
['ޛ', 'th'],
['ޜ', 'z'],
['ޝ', 'sh'],
['ޞ', 's'],
['ޟ', 'd'],
['ޠ', 't'],
['ޡ', 'z'],
['ޢ', 'a'],
['ޣ', 'gh'],
['ޤ', 'q'],
['ޥ', 'w'],
['ަ', 'a'],
['ާ', 'aa'],
['ި', 'i'],
['ީ', 'ee'],
['ު', 'u'],
['ޫ', 'oo'],
['ެ', 'e'],
['ޭ', 'ey'],
['ޮ', 'o'],
['ޯ', 'oa'],
['ް', ''],
# Greek
['α', 'a'],
['β', 'v'],
['γ', 'g'],
['δ', 'd'],
['ε', 'e'],
['ζ', 'z'],
['η', 'i'],
['θ', 'th'],
['ι', 'i'],
['κ', 'k'],
['λ', 'l'],
['μ', 'm'],
['ν', 'n'],
['ξ', 'ks'],
['ο', 'o'],
['π', 'p'],
['ρ', 'r'],
['σ', 's'],
['τ', 't'],
['υ', 'y'],
['φ', 'f'],
['χ', 'x'],
['ψ', 'ps'],
['ω', 'o'],
['ά', 'a'],
['έ', 'e'],
['ί', 'i'],
['ό', 'o'],
['ύ', 'y'],
['ή', 'i'],
['ώ', 'o'],
['ς', 's'],
['ϊ', 'i'],
['ΰ', 'y'],
['ϋ', 'y'],
['ΐ', 'i'],
['Α', 'A'],
['Β', 'B'],
['Γ', 'G'],
['Δ', 'D'],
['Ε', 'E'],
['Ζ', 'Z'],
['Η', 'I'],
['Θ', 'TH'],
['Ι', 'I'],
['Κ', 'K'],
['Λ', 'L'],
['Μ', 'M'],
['Ν', 'N'],
['Ξ', 'KS'],
['Ο', 'O'],
['Π', 'P'],
['Ρ', 'R'],
['Σ', 'S'],
['Τ', 'T'],
['Υ', 'Y'],
['Φ', 'F'],
['Χ', 'X'],
['Ψ', 'PS'],
['Ω', 'O'],
['Ά', 'A'],
['Έ', 'E'],
['Ί', 'I'],
['Ό', 'O'],
['Ύ', 'Y'],
['Ή', 'I'],
['Ώ', 'O'],
['Ϊ', 'I'],
['Ϋ', 'Y'],
# Disabled as it conflicts with German and Latin.
# Hungarian
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ö', 'o'],
# ['Ö', 'O'],
# ['ü', 'u'],
# ['Ü', 'U'],
# ['ű', 'u'],
# ['Ű', 'U'],
# Latvian
['ā', 'a'],
['ē', 'e'],
['ģ', 'g'],
['ī', 'i'],
['ķ', 'k'],
['ļ', 'l'],
['ņ', 'n'],
['ū', 'u'],
['Ā', 'A'],
['Ē', 'E'],
['Ģ', 'G'],
['Ī', 'I'],
['Ķ', 'K'],
['Ļ', 'L'],
['Ņ', 'N'],
['Ū', 'U'],
['č', 'c'],
['š', 's'],
['ž', 'z'],
['Č', 'C'],
['Š', 'S'],
['Ž', 'Z'],
# Lithuanian
['ą', 'a'],
['č', 'c'],
['ę', 'e'],
['ė', 'e'],
['į', 'i'],
['š', 's'],
['ų', 'u'],
['ū', 'u'],
['ž', 'z'],
['Ą', 'A'],
['Č', 'C'],
['Ę', 'E'],
['Ė', 'E'],
['Į', 'I'],
['Š', 'S'],
['Ų', 'U'],
['Ū', 'U'],
# Macedonian
['Ќ', 'Kj'],
['ќ', 'kj'],
['Љ', 'Lj'],
['љ', 'lj'],
['Њ', 'Nj'],
['њ', 'nj'],
['Тс', 'Ts'],
['тс', 'ts'],
# Polish
['ą', 'a'],
['ć', 'c'],
['ę', 'e'],
['ł', 'l'],
['ń', 'n'],
['ś', 's'],
['ź', 'z'],
['ż', 'z'],
['Ą', 'A'],
['Ć', 'C'],
['Ę', 'E'],
['Ł', 'L'],
['Ń', 'N'],
['Ś', 'S'],
['Ź', 'Z'],
['Ż', 'Z'],
# Disabled as it conflicts with Vietnamese.
# Serbian
# ['љ', 'lj'],
# ['њ', 'nj'],
# ['Љ', 'Lj'],
# ['Њ', 'Nj'],
# ['đ', 'dj'],
# ['Đ', 'Dj'],
# ['ђ', 'dj'],
# ['ј', 'j'],
# ['ћ', 'c'],
# ['џ', 'dz'],
# ['Ђ', 'Dj'],
# ['Ј', 'j'],
# ['Ћ', 'C'],
# ['Џ', 'Dz'],
# Disabled as it conflicts with German and Latin.
# Slovak
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ľ', 'l'],
# ['ĺ', 'l'],
# ['ŕ', 'r'],
# ['Ľ', 'L'],
# ['Ĺ', 'L'],
# ['Ŕ', 'R'],
# Disabled as it conflicts with German and Latin.
# Swedish
# ['å', 'o'],
# ['Å', 'o'],
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ë', 'e'],
# ['Ë', 'E'],
# ['ö', 'o'],
# ['Ö', 'O'],
# Ukrainian
['Є', 'Ye'],
['І', 'I'],
['Ї', 'Yi'],
['Ґ', 'G'],
['є', 'ye'],
['і', 'i'],
['ї', 'yi'],
['ґ', 'g'],
# Dutch
['IJ', 'IJ'],
['ij', 'ij'],
# Danish
# ['Æ', 'Ae'],
# ['Ø', 'Oe'],
# ['Å', 'Aa'],
# ['æ', 'ae'],
# ['ø', 'oe'],
# ['å', 'aa']
# Currencies
['¢', 'c'],
['¥', 'Y'],
['߿', 'b'],
['৳', 't'],
['૱', 'Bo'],
['฿', 'B'],
['₠', 'CE'],
['₡', 'C'],
['₢', 'Cr'],
['₣', 'F'],
['₥', 'm'],
['₦', 'N'],
['₧', 'Pt'],
['₨', 'Rs'],
['₩', 'W'],
['₫', 's'],
['€', 'E'],
['₭', 'K'],
['₮', 'T'],
['₯', 'Dp'],
['₰', 'S'],
['₱', 'P'],
['₲', 'G'],
['₳', 'A'],
['₴', 'S'],
['₵', 'C'],
['₶', 'tt'],
['₷', 'S'],
['₸', 'T'],
['₹', 'R'],
['₺', 'L'],
['₽', 'P'],
['₿', 'B'],
['﹩', '$'],
['¢', 'c'],
['¥', 'Y'],
['₩', 'W'],
# Latin
['𝐀', 'A'],
['𝐁', 'B'],
['𝐂', 'C'],
['𝐃', 'D'],
['𝐄', 'E'],
['𝐅', 'F'],
['𝐆', 'G'],
['𝐇', 'H'],
['𝐈', 'I'],
['𝐉', 'J'],
['𝐊', 'K'],
['𝐋', 'L'],
['𝐌', 'M'],
['𝐍', 'N'],
['𝐎', 'O'],
['𝐏', 'P'],
['𝐐', 'Q'],
['𝐑', 'R'],
['𝐒', 'S'],
['𝐓', 'T'],
['𝐔', 'U'],
['𝐕', 'V'],
['𝐖', 'W'],
['𝐗', 'X'],
['𝐘', 'Y'],
['𝐙', 'Z'],
['𝐚', 'a'],
['𝐛', 'b'],
['𝐜', 'c'],
['𝐝', 'd'],
['𝐞', 'e'],
['𝐟', 'f'],
['𝐠', 'g'],
['𝐡', 'h'],
['𝐢', 'i'],
['𝐣', 'j'],
['𝐤', 'k'],
['𝐥', 'l'],
['𝐦', 'm'],
['𝐧', 'n'],
['𝐨', 'o'],
['𝐩', 'p'],
['𝐪', 'q'],
['𝐫', 'r'],
['𝐬', 's'],
['𝐭', 't'],
['𝐮', 'u'],
['𝐯', 'v'],
['𝐰', 'w'],
['𝐱', 'x'],
['𝐲', 'y'],
['𝐳', 'z'],
['𝐴', 'A'],
['𝐵', 'B'],
['𝐶', 'C'],
['𝐷', 'D'],
['𝐸', 'E'],
['𝐹', 'F'],
['𝐺', 'G'],
['𝐻', 'H'],
['𝐼', 'I'],
['𝐽', 'J'],
['𝐾', 'K'],
['𝐿', 'L'],
['𝑀', 'M'],
['𝑁', 'N'],
['𝑂', 'O'],
['𝑃', 'P'],
['𝑄', 'Q'],
['𝑅', 'R'],
['𝑆', 'S'],
['𝑇', 'T'],
['𝑈', 'U'],
['𝑉', 'V'],
['𝑊', 'W'],
['𝑋', 'X'],
['𝑌', 'Y'],
['𝑍', 'Z'],
['𝑎', 'a'],
['𝑏', 'b'],
['𝑐', 'c'],
['𝑑', 'd'],
['𝑒', 'e'],
['𝑓', 'f'],
['𝑔', 'g'],
['𝑖', 'i'],
['𝑗', 'j'],
['𝑘', 'k'],
['𝑙', 'l'],
['𝑚', 'm'],
['𝑛', 'n'],
['𝑜', 'o'],
['𝑝', 'p'],
['𝑞', 'q'],
['𝑟', 'r'],
['𝑠', 's'],
['𝑡', 't'],
['𝑢', 'u'],
['𝑣', 'v'],
['𝑤', 'w'],
['𝑥', 'x'],
['𝑦', 'y'],
['𝑧', 'z'],
['𝑨', 'A'],
['𝑩', 'B'],
['𝑪', 'C'],
['𝑫', 'D'],
['𝑬', 'E'],
['𝑭', 'F'],
['𝑮', 'G'],
['𝑯', 'H'],
['𝑰', 'I'],
['𝑱', 'J'],
['𝑲', 'K'],
['𝑳', 'L'],
['𝑴', 'M'],
['𝑵', 'N'],
['𝑶', 'O'],
['𝑷', 'P'],
['𝑸', 'Q'],
['𝑹', 'R'],
['𝑺', 'S'],
['𝑻', 'T'],
['𝑼', 'U'],
['𝑽', 'V'],
['𝑾', 'W'],
['𝑿', 'X'],
['𝒀', 'Y'],
['𝒁', 'Z'],
['𝒂', 'a'],
['𝒃', 'b'],
['𝒄', 'c'],
['𝒅', 'd'],
['𝒆', 'e'],
['𝒇', 'f'],
['𝒈', 'g'],
['𝒉', 'h'],
['𝒊', 'i'],
['𝒋', 'j'],
['𝒌', 'k'],
['𝒍', 'l'],
['𝒎', 'm'],
['𝒏', 'n'],
['𝒐', 'o'],
['𝒑', 'p'],
['𝒒', 'q'],
['𝒓', 'r'],
['𝒔', 's'],
['𝒕', 't'],
['𝒖', 'u'],
['𝒗', 'v'],
['𝒘', 'w'],
['𝒙', 'x'],
['𝒚', 'y'],
['𝒛', 'z'],
['𝒜', 'A'],
['𝒞', 'C'],
['𝒟', 'D'],
    ['𝒢', 'G'],
['𝒥', 'J'],
['𝒦', 'K'],
['𝒩', 'N'],
['𝒪', 'O'],
['𝒫', 'P'],
['𝒬', 'Q'],
['𝒮', 'S'],
['𝒯', 'T'],
['𝒰', 'U'],
['𝒱', 'V'],
['𝒲', 'W'],
['𝒳', 'X'],
['𝒴', 'Y'],
['𝒵', 'Z'],
['𝒶', 'a'],
['𝒷', 'b'],
['𝒸', 'c'],
['𝒹', 'd'],
['𝒻', 'f'],
['𝒽', 'h'],
['𝒾', 'i'],
['𝒿', 'j'],
    ['𝓀', 'k'],
['𝓁', 'l'],
['𝓂', 'm'],
['𝓃', 'n'],
['𝓅', 'p'],
['𝓆', 'q'],
['𝓇', 'r'],
['𝓈', 's'],
['𝓉', 't'],
['𝓊', 'u'],
['𝓋', 'v'],
['𝓌', 'w'],
['𝓍', 'x'],
['𝓎', 'y'],
['𝓏', 'z'],
['𝓐', 'A'],
['𝓑', 'B'],
['𝓒', 'C'],
['𝓓', 'D'],
['𝓔', 'E'],
['𝓕', 'F'],
['𝓖', 'G'],
['𝓗', 'H'],
['𝓘', 'I'],
['𝓙', 'J'],
['𝓚', 'K'],
['𝓛', 'L'],
['𝓜', 'M'],
['𝓝', 'N'],
['𝓞', 'O'],
['𝓟', 'P'],
['𝓠', 'Q'],
['𝓡', 'R'],
['𝓢', 'S'],
['𝓣', 'T'],
['𝓤', 'U'],
['𝓥', 'V'],
['𝓦', 'W'],
['𝓧', 'X'],
['𝓨', 'Y'],
['𝓩', 'Z'],
['𝓪', 'a'],
['𝓫', 'b'],
['𝓬', 'c'],
['𝓭', 'd'],
['𝓮', 'e'],
['𝓯', 'f'],
['𝓰', 'g'],
['𝓱', 'h'],
['𝓲', 'i'],
['𝓳', 'j'],
['𝓴', 'k'],
['𝓵', 'l'],
['𝓶', 'm'],
['𝓷', 'n'],
['𝓸', 'o'],
['𝓹', 'p'],
['𝓺', 'q'],
['𝓻', 'r'],
['𝓼', 's'],
['𝓽', 't'],
['𝓾', 'u'],
['𝓿', 'v'],
['𝔀', 'w'],
['𝔁', 'x'],
['𝔂', 'y'],
['𝔃', 'z'],
['𝔄', 'A'],
['𝔅', 'B'],
['𝔇', 'D'],
['𝔈', 'E'],
['𝔉', 'F'],
['𝔊', 'G'],
['𝔍', 'J'],
['𝔎', 'K'],
['𝔏', 'L'],
['𝔐', 'M'],
['𝔑', 'N'],
['𝔒', 'O'],
['𝔓', 'P'],
['𝔔', 'Q'],
['𝔖', 'S'],
['𝔗', 'T'],
['𝔘', 'U'],
['𝔙', 'V'],
['𝔚', 'W'],
['𝔛', 'X'],
['𝔜', 'Y'],
['𝔞', 'a'],
['𝔟', 'b'],
['𝔠', 'c'],
['𝔡', 'd'],
['𝔢', 'e'],
['𝔣', 'f'],
['𝔤', 'g'],
['𝔥', 'h'],
['𝔦', 'i'],
['𝔧', 'j'],
['𝔨', 'k'],
['𝔩', 'l'],
['𝔪', 'm'],
['𝔫', 'n'],
['𝔬', 'o'],
['𝔭', 'p'],
['𝔮', 'q'],
['𝔯', 'r'],
['𝔰', 's'],
['𝔱', 't'],
['𝔲', 'u'],
['𝔳', 'v'],
['𝔴', 'w'],
['𝔵', 'x'],
['𝔶', 'y'],
['𝔷', 'z'],
['𝔸', 'A'],
['𝔹', 'B'],
['𝔻', 'D'],
['𝔼', 'E'],
['𝔽', 'F'],
['𝔾', 'G'],
['𝕀', 'I'],
['𝕁', 'J'],
['𝕂', 'K'],
['𝕃', 'L'],
['𝕄', 'M'],
    ['𝕆', 'O'],
['𝕊', 'S'],
['𝕋', 'T'],
['𝕌', 'U'],
['𝕍', 'V'],
['𝕎', 'W'],
['𝕏', 'X'],
['𝕐', 'Y'],
['𝕒', 'a'],
['𝕓', 'b'],
['𝕔', 'c'],
['𝕕', 'd'],
['𝕖', 'e'],
['𝕗', 'f'],
['𝕘', 'g'],
['𝕙', 'h'],
['𝕚', 'i'],
['𝕛', 'j'],
['𝕜', 'k'],
['𝕝', 'l'],
['𝕞', 'm'],
['𝕟', 'n'],
['𝕠', 'o'],
['𝕡', 'p'],
['𝕢', 'q'],
['𝕣', 'r'],
['𝕤', 's'],
['𝕥', 't'],
['𝕦', 'u'],
['𝕧', 'v'],
['𝕨', 'w'],
['𝕩', 'x'],
['𝕪', 'y'],
['𝕫', 'z'],
['𝕬', 'A'],
['𝕭', 'B'],
['𝕮', 'C'],
['𝕯', 'D'],
['𝕰', 'E'],
['𝕱', 'F'],
['𝕲', 'G'],
['𝕳', 'H'],
['𝕴', 'I'],
['𝕵', 'J'],
['𝕶', 'K'],
['𝕷', 'L'],
['𝕸', 'M'],
['𝕹', 'N'],
['𝕺', 'O'],
['𝕻', 'P'],
['𝕼', 'Q'],
['𝕽', 'R'],
['𝕾', 'S'],
['𝕿', 'T'],
['𝖀', 'U'],
['𝖁', 'V'],
['𝖂', 'W'],
['𝖃', 'X'],
['𝖄', 'Y'],
['𝖅', 'Z'],
['𝖆', 'a'],
['𝖇', 'b'],
['𝖈', 'c'],
['𝖉', 'd'],
['𝖊', 'e'],
['𝖋', 'f'],
['𝖌', 'g'],
['𝖍', 'h'],
['𝖎', 'i'],
['𝖏', 'j'],
['𝖐', 'k'],
['𝖑', 'l'],
['𝖒', 'm'],
['𝖓', 'n'],
['𝖔', 'o'],
['𝖕', 'p'],
['𝖖', 'q'],
['𝖗', 'r'],
['𝖘', 's'],
['𝖙', 't'],
['𝖚', 'u'],
['𝖛', 'v'],
['𝖜', 'w'],
['𝖝', 'x'],
['𝖞', 'y'],
['𝖟', 'z'],
['𝖠', 'A'],
['𝖡', 'B'],
['𝖢', 'C'],
['𝖣', 'D'],
['𝖤', 'E'],
['𝖥', 'F'],
['𝖦', 'G'],
['𝖧', 'H'],
['𝖨', 'I'],
['𝖩', 'J'],
['𝖪', 'K'],
['𝖫', 'L'],
['𝖬', 'M'],
['𝖭', 'N'],
['𝖮', 'O'],
['𝖯', 'P'],
['𝖰', 'Q'],
['𝖱', 'R'],
['𝖲', 'S'],
['𝖳', 'T'],
['𝖴', 'U'],
['𝖵', 'V'],
['𝖶', 'W'],
['𝖷', 'X'],
['𝖸', 'Y'],
['𝖹', 'Z'],
['𝖺', 'a'],
['𝖻', 'b'],
['𝖼', 'c'],
['𝖽', 'd'],
['𝖾', 'e'],
['𝖿', 'f'],
['𝗀', 'g'],
['𝗁', 'h'],
['𝗂', 'i'],
['𝗃', 'j'],
['𝗄', 'k'],
['𝗅', 'l'],
['𝗆', 'm'],
['𝗇', 'n'],
['𝗈', 'o'],
['𝗉', 'p'],
['𝗊', 'q'],
['𝗋', 'r'],
['𝗌', 's'],
['𝗍', 't'],
['𝗎', 'u'],
['𝗏', 'v'],
['𝗐', 'w'],
['𝗑', 'x'],
['𝗒', 'y'],
['𝗓', 'z'],
['𝗔', 'A'],
['𝗕', 'B'],
['𝗖', 'C'],
['𝗗', 'D'],
['𝗘', 'E'],
['𝗙', 'F'],
['𝗚', 'G'],
['𝗛', 'H'],
['𝗜', 'I'],
['𝗝', 'J'],
['𝗞', 'K'],
['𝗟', 'L'],
['𝗠', 'M'],
['𝗡', 'N'],
['𝗢', 'O'],
['𝗣', 'P'],
['𝗤', 'Q'],
['𝗥', 'R'],
['𝗦', 'S'],
['𝗧', 'T'],
['𝗨', 'U'],
['𝗩', 'V'],
['𝗪', 'W'],
['𝗫', 'X'],
['𝗬', 'Y'],
['𝗭', 'Z'],
['𝗮', 'a'],
['𝗯', 'b'],
['𝗰', 'c'],
['𝗱', 'd'],
['𝗲', 'e'],
['𝗳', 'f'],
['𝗴', 'g'],
['𝗵', 'h'],
['𝗶', 'i'],
['𝗷', 'j'],
['𝗸', 'k'],
['𝗹', 'l'],
['𝗺', 'm'],
['𝗻', 'n'],
['𝗼', 'o'],
['𝗽', 'p'],
['𝗾', 'q'],
['𝗿', 'r'],
['𝘀', 's'],
['𝘁', 't'],
['𝘂', 'u'],
['𝘃', 'v'],
['𝘄', 'w'],
['𝘅', 'x'],
['𝘆', 'y'],
['𝘇', 'z'],
['𝘈', 'A'],
['𝘉', 'B'],
['𝘊', 'C'],
['𝘋', 'D'],
['𝘌', 'E'],
['𝘍', 'F'],
['𝘎', 'G'],
['𝘏', 'H'],
['𝘐', 'I'],
['𝘑', 'J'],
['𝘒', 'K'],
['𝘓', 'L'],
['𝘔', 'M'],
['𝘕', 'N'],
['𝘖', 'O'],
['𝘗', 'P'],
['𝘘', 'Q'],
['𝘙', 'R'],
['𝘚', 'S'],
['𝘛', 'T'],
['𝘜', 'U'],
['𝘝', 'V'],
['𝘞', 'W'],
['𝘟', 'X'],
['𝘠', 'Y'],
['𝘡', 'Z'],
['𝘢', 'a'],
['𝘣', 'b'],
['𝘤', 'c'],
['𝘥', 'd'],
['𝘦', 'e'],
['𝘧', 'f'],
['𝘨', 'g'],
['𝘩', 'h'],
['𝘪', 'i'],
['𝘫', 'j'],
['𝘬', 'k'],
['𝘭', 'l'],
['𝘮', 'm'],
['𝘯', 'n'],
['𝘰', 'o'],
['𝘱', 'p'],
['𝘲', 'q'],
['𝘳', 'r'],
['𝘴', 's'],
['𝘵', 't'],
['𝘶', 'u'],
['𝘷', 'v'],
['𝘸', 'w'],
['𝘹', 'x'],
['𝘺', 'y'],
['𝘻', 'z'],
['𝘼', 'A'],
['𝘽', 'B'],
['𝘾', 'C'],
['𝘿', 'D'],
['𝙀', 'E'],
['𝙁', 'F'],
['𝙂', 'G'],
['𝙃', 'H'],
['𝙄', 'I'],
['𝙅', 'J'],
['𝙆', 'K'],
['𝙇', 'L'],
['𝙈', 'M'],
['𝙉', 'N'],
['𝙊', 'O'],
['𝙋', 'P'],
['𝙌', 'Q'],
['𝙍', 'R'],
['𝙎', 'S'],
['𝙏', 'T'],
['𝙐', 'U'],
['𝙑', 'V'],
['𝙒', 'W'],
['𝙓', 'X'],
['𝙔', 'Y'],
['𝙕', 'Z'],
['𝙖', 'a'],
['𝙗', 'b'],
['𝙘', 'c'],
['𝙙', 'd'],
['𝙚', 'e'],
['𝙛', 'f'],
['𝙜', 'g'],
['𝙝', 'h'],
['𝙞', 'i'],
['𝙟', 'j'],
['𝙠', 'k'],
['𝙡', 'l'],
['𝙢', 'm'],
['𝙣', 'n'],
['𝙤', 'o'],
['𝙥', 'p'],
['𝙦', 'q'],
['𝙧', 'r'],
['𝙨', 's'],
['𝙩', 't'],
['𝙪', 'u'],
['𝙫', 'v'],
['𝙬', 'w'],
['𝙭', 'x'],
['𝙮', 'y'],
['𝙯', 'z'],
['𝙰', 'A'],
['𝙱', 'B'],
['𝙲', 'C'],
['𝙳', 'D'],
['𝙴', 'E'],
['𝙵', 'F'],
['𝙶', 'G'],
['𝙷', 'H'],
['𝙸', 'I'],
['𝙹', 'J'],
['𝙺', 'K'],
['𝙻', 'L'],
['𝙼', 'M'],
['𝙽', 'N'],
['𝙾', 'O'],
['𝙿', 'P'],
['𝚀', 'Q'],
['𝚁', 'R'],
['𝚂', 'S'],
['𝚃', 'T'],
['𝚄', 'U'],
['𝚅', 'V'],
['𝚆', 'W'],
['𝚇', 'X'],
['𝚈', 'Y'],
['𝚉', 'Z'],
['𝚊', 'a'],
['𝚋', 'b'],
['𝚌', 'c'],
['𝚍', 'd'],
['𝚎', 'e'],
['𝚏', 'f'],
['𝚐', 'g'],
['𝚑', 'h'],
['𝚒', 'i'],
['𝚓', 'j'],
['𝚔', 'k'],
['𝚕', 'l'],
['𝚖', 'm'],
['𝚗', 'n'],
['𝚘', 'o'],
['𝚙', 'p'],
['𝚚', 'q'],
['𝚛', 'r'],
['𝚜', 's'],
['𝚝', 't'],
['𝚞', 'u'],
['𝚟', 'v'],
['𝚠', 'w'],
['𝚡', 'x'],
['𝚢', 'y'],
['𝚣', 'z'],
# Dotless letters
['𝚤', 'l'],
['𝚥', 'j'],
# Greek
['𝛢', 'A'],
['𝛣', 'B'],
['𝛤', 'G'],
['𝛥', 'D'],
['𝛦', 'E'],
['𝛧', 'Z'],
['𝛨', 'I'],
['𝛩', 'TH'],
['𝛪', 'I'],
['𝛫', 'K'],
['𝛬', 'L'],
['𝛭', 'M'],
['𝛮', 'N'],
['𝛯', 'KS'],
['𝛰', 'O'],
['𝛱', 'P'],
['𝛲', 'R'],
['𝛳', 'TH'],
['𝛴', 'S'],
['𝛵', 'T'],
['𝛶', 'Y'],
['𝛷', 'F'],
    ['𝛸', 'X'],
['𝛹', 'PS'],
['𝛺', 'O'],
['𝛻', 'D'],
['𝛼', 'a'],
['𝛽', 'b'],
['𝛾', 'g'],
['𝛿', 'd'],
['𝜀', 'e'],
['𝜁', 'z'],
['𝜂', 'i'],
['𝜃', 'th'],
['𝜄', 'i'],
['𝜅', 'k'],
['𝜆', 'l'],
['𝜇', 'm'],
['𝜈', 'n'],
['𝜉', 'ks'],
['𝜊', 'o'],
['𝜋', 'p'],
['𝜌', 'r'],
['𝜍', 's'],
['𝜎', 's'],
['𝜏', 't'],
['𝜐', 'y'],
['𝜑', 'f'],
['𝜒', 'x'],
['𝜓', 'ps'],
['𝜔', 'o'],
['𝜕', 'd'],
['𝜖', 'E'],
['𝜗', 'TH'],
['𝜘', 'K'],
['𝜙', 'f'],
['𝜚', 'r'],
['𝜛', 'p'],
['𝜜', 'A'],
['𝜝', 'V'],
['𝜞', 'G'],
['𝜟', 'D'],
['𝜠', 'E'],
['𝜡', 'Z'],
['𝜢', 'I'],
['𝜣', 'TH'],
['𝜤', 'I'],
['𝜥', 'K'],
['𝜦', 'L'],
['𝜧', 'M'],
['𝜨', 'N'],
['𝜩', 'KS'],
['𝜪', 'O'],
['𝜫', 'P'],
['𝜬', 'S'],
['𝜭', 'TH'],
['𝜮', 'S'],
['𝜯', 'T'],
['𝜰', 'Y'],
['𝜱', 'F'],
['𝜲', 'X'],
['𝜳', 'PS'],
['𝜴', 'O'],
['𝜵', 'D'],
['𝜶', 'a'],
['𝜷', 'v'],
['𝜸', 'g'],
['𝜹', 'd'],
['𝜺', 'e'],
['𝜻', 'z'],
['𝜼', 'i'],
['𝜽', 'th'],
['𝜾', 'i'],
['𝜿', 'k'],
['𝝀', 'l'],
['𝝁', 'm'],
['𝝂', 'n'],
['𝝃', 'ks'],
['𝝄', 'o'],
['𝝅', 'p'],
['𝝆', 'r'],
['𝝇', 's'],
['𝝈', 's'],
['𝝉', 't'],
['𝝊', 'y'],
['𝝋', 'f'],
['𝝌', 'x'],
['𝝍', 'ps'],
['𝝎', 'o'],
['𝝏', 'a'],
['𝝐', 'e'],
['𝝑', 'i'],
['𝝒', 'k'],
['𝝓', 'f'],
['𝝔', 'r'],
['𝝕', 'p'],
['𝝖', 'A'],
['𝝗', 'B'],
['𝝘', 'G'],
['𝝙', 'D'],
['𝝚', 'E'],
['𝝛', 'Z'],
['𝝜', 'I'],
['𝝝', 'TH'],
['𝝞', 'I'],
['𝝟', 'K'],
['𝝠', 'L'],
['𝝡', 'M'],
['𝝢', 'N'],
['𝝣', 'KS'],
['𝝤', 'O'],
['𝝥', 'P'],
['𝝦', 'R'],
['𝝧', 'TH'],
['𝝨', 'S'],
['𝝩', 'T'],
['𝝪', 'Y'],
['𝝫', 'F'],
['𝝬', 'X'],
['𝝭', 'PS'],
['𝝮', 'O'],
['𝝯', 'D'],
['𝝰', 'a'],
['𝝱', 'v'],
['𝝲', 'g'],
['𝝳', 'd'],
['𝝴', 'e'],
['𝝵', 'z'],
['𝝶', 'i'],
['𝝷', 'th'],
['𝝸', 'i'],
['𝝹', 'k'],
['𝝺', 'l'],
['𝝻', 'm'],
['𝝼', 'n'],
['𝝽', 'ks'],
['𝝾', 'o'],
['𝝿', 'p'],
['𝞀', 'r'],
['𝞁', 's'],
['𝞂', 's'],
['𝞃', 't'],
['𝞄', 'y'],
['𝞅', 'f'],
['𝞆', 'x'],
['𝞇', 'ps'],
['𝞈', 'o'],
['𝞉', 'a'],
['𝞊', 'e'],
['𝞋', 'i'],
['𝞌', 'k'],
['𝞍', 'f'],
['𝞎', 'r'],
['𝞏', 'p'],
['𝞐', 'A'],
['𝞑', 'V'],
['𝞒', 'G'],
['𝞓', 'D'],
['𝞔', 'E'],
['𝞕', 'Z'],
['𝞖', 'I'],
['𝞗', 'TH'],
['𝞘', 'I'],
['𝞙', 'K'],
['𝞚', 'L'],
['𝞛', 'M'],
['𝞜', 'N'],
['𝞝', 'KS'],
['𝞞', 'O'],
['𝞟', 'P'],
['𝞠', 'S'],
['𝞡', 'TH'],
['𝞢', 'S'],
['𝞣', 'T'],
['𝞤', 'Y'],
['𝞥', 'F'],
['𝞦', 'X'],
['𝞧', 'PS'],
['𝞨', 'O'],
['𝞩', 'D'],
    ['𝞪', 'a'],
    ['𝞫', 'v'],
    ['𝞬', 'g'],
    ['𝞭', 'd'],
    ['𝞮', 'e'],
    ['𝞯', 'z'],
['𝞰', 'i'],
['𝞱', 'th'],
['𝞲', 'i'],
['𝞳', 'k'],
['𝞴', 'l'],
['𝞵', 'm'],
['𝞶', 'n'],
['𝞷', 'ks'],
['𝞸', 'o'],
['𝞹', 'p'],
['𝞺', 'r'],
['𝞻', 's'],
['𝞼', 's'],
['𝞽', 't'],
['𝞾', 'y'],
['𝞿', 'f'],
['𝟀', 'x'],
['𝟁', 'ps'],
['𝟂', 'o'],
['𝟃', 'a'],
['𝟄', 'e'],
['𝟅', 'i'],
['𝟆', 'k'],
['𝟇', 'f'],
['𝟈', 'r'],
['𝟉', 'p'],
['𝟊', 'F'],
['𝟋', 'f'],
['⒜', '(a)'],
['⒝', '(b)'],
['⒞', '(c)'],
['⒟', '(d)'],
['⒠', '(e)'],
['⒡', '(f)'],
['⒢', '(g)'],
['⒣', '(h)'],
['⒤', '(i)'],
['⒥', '(j)'],
['⒦', '(k)'],
['⒧', '(l)'],
['⒨', '(m)'],
['⒩', '(n)'],
['⒪', '(o)'],
['⒫', '(p)'],
['⒬', '(q)'],
['⒭', '(r)'],
['⒮', '(s)'],
['⒯', '(t)'],
['⒰', '(u)'],
['⒱', '(v)'],
['⒲', '(w)'],
['⒳', '(x)'],
['⒴', '(y)'],
['⒵', '(z)'],
['Ⓐ', '(A)'],
['Ⓑ', '(B)'],
['Ⓒ', '(C)'],
['Ⓓ', '(D)'],
['Ⓔ', '(E)'],
['Ⓕ', '(F)'],
['Ⓖ', '(G)'],
['Ⓗ', '(H)'],
['Ⓘ', '(I)'],
['Ⓙ', '(J)'],
['Ⓚ', '(K)'],
['Ⓛ', '(L)'],
['Ⓝ', '(N)'],
['Ⓞ', '(O)'],
['Ⓟ', '(P)'],
['Ⓠ', '(Q)'],
['Ⓡ', '(R)'],
['Ⓢ', '(S)'],
['Ⓣ', '(T)'],
['Ⓤ', '(U)'],
['Ⓥ', '(V)'],
['Ⓦ', '(W)'],
['Ⓧ', '(X)'],
['Ⓨ', '(Y)'],
['Ⓩ', '(Z)'],
['ⓐ', '(a)'],
['ⓑ', '(b)'],
    ['ⓒ', '(c)'],
    ['ⓓ', '(d)'],
['ⓔ', '(e)'],
['ⓕ', '(f)'],
['ⓖ', '(g)'],
['ⓗ', '(h)'],
['ⓘ', '(i)'],
['ⓙ', '(j)'],
['ⓚ', '(k)'],
['ⓛ', '(l)'],
['ⓜ', '(m)'],
['ⓝ', '(n)'],
['ⓞ', '(o)'],
['ⓟ', '(p)'],
['ⓠ', '(q)'],
['ⓡ', '(r)'],
['ⓢ', '(s)'],
['ⓣ', '(t)'],
['ⓤ', '(u)'],
['ⓥ', '(v)'],
['ⓦ', '(w)'],
['ⓧ', '(x)'],
['ⓨ', '(y)'],
['ⓩ', '(z)'],
# Numbers
['𝟎', '0'],
['𝟏', '1'],
['𝟐', '2'],
['𝟑', '3'],
['𝟒', '4'],
['𝟓', '5'],
['𝟔', '6'],
['𝟕', '7'],
['𝟖', '8'],
['𝟗', '9'],
['𝟘', '0'],
['𝟙', '1'],
['𝟚', '2'],
['𝟛', '3'],
['𝟜', '4'],
['𝟝', '5'],
['𝟞', '6'],
['𝟟', '7'],
['𝟠', '8'],
['𝟡', '9'],
['𝟢', '0'],
['𝟣', '1'],
['𝟤', '2'],
['𝟥', '3'],
['𝟦', '4'],
['𝟧', '5'],
['𝟨', '6'],
['𝟩', '7'],
['𝟪', '8'],
['𝟫', '9'],
['𝟬', '0'],
['𝟭', '1'],
['𝟮', '2'],
['𝟯', '3'],
['𝟰', '4'],
['𝟱', '5'],
['𝟲', '6'],
['𝟳', '7'],
['𝟴', '8'],
['𝟵', '9'],
['𝟶', '0'],
['𝟷', '1'],
['𝟸', '2'],
['𝟹', '3'],
['𝟺', '4'],
['𝟻', '5'],
['𝟼', '6'],
['𝟽', '7'],
['𝟾', '8'],
['𝟿', '9'],
['①', '1'],
['②', '2'],
['③', '3'],
['④', '4'],
['⑤', '5'],
['⑥', '6'],
['⑦', '7'],
['⑧', '8'],
['⑨', '9'],
['⑩', '10'],
['⑪', '11'],
['⑫', '12'],
['⑬', '13'],
['⑭', '14'],
['⑮', '15'],
['⑯', '16'],
['⑰', '17'],
['⑱', '18'],
['⑲', '19'],
['⑳', '20'],
['⑴', '1'],
['⑵', '2'],
['⑶', '3'],
['⑷', '4'],
['⑸', '5'],
['⑹', '6'],
['⑺', '7'],
['⑻', '8'],
['⑼', '9'],
['⑽', '10'],
['⑾', '11'],
['⑿', '12'],
['⒀', '13'],
['⒁', '14'],
['⒂', '15'],
['⒃', '16'],
['⒄', '17'],
['⒅', '18'],
['⒆', '19'],
['⒇', '20'],
['⒈', '1.'],
['⒉', '2.'],
['⒊', '3.'],
['⒋', '4.'],
['⒌', '5.'],
['⒍', '6.'],
['⒎', '7.'],
['⒏', '8.'],
['⒐', '9.'],
['⒑', '10.'],
['⒒', '11.'],
['⒓', '12.'],
['⒔', '13.'],
['⒕', '14.'],
['⒖', '15.'],
['⒗', '16.'],
['⒘', '17.'],
['⒙', '18.'],
['⒚', '19.'],
['⒛', '20.'],
['⓪', '0'],
['⓫', '11'],
['⓬', '12'],
['⓭', '13'],
['⓮', '14'],
['⓯', '15'],
['⓰', '16'],
['⓱', '17'],
['⓲', '18'],
['⓳', '19'],
['⓴', '20'],
['⓵', '1'],
['⓶', '2'],
['⓷', '3'],
['⓸', '4'],
['⓹', '5'],
['⓺', '6'],
['⓻', '7'],
['⓼', '8'],
['⓽', '9'],
['⓾', '10'],
['⓿', '0'],
# Punctuation
['🙰', '&'],
['🙱', '&'],
['🙲', '&'],
['🙳', '&'],
['🙴', '&'],
['🙵', '&'],
['🙶', '"'],
['🙷', '"'],
['🙸', '"'],
['‽', '?!'],
['🙹', '?!'],
['🙺', '?!'],
['🙻', '?!'],
['🙼', '/'],
['🙽', '\\'],
# Alchemy
['🜇', 'AR'],
['🜈', 'V'],
['🜉', 'V'],
['🜆', 'VR'],
['🜅', 'VF'],
['🜩', '2'],
['🜪', '5'],
['🝡', 'f'],
['🝢', 'W'],
['🝣', 'U'],
['🝧', 'V'],
['🝨', 'T'],
['🝪', 'V'],
['🝫', 'MB'],
['🝬', 'VB'],
['🝲', '3B'],
['🝳', '3B'],
# Emojis
['💯', '100'],
['🔙', 'BACK'],
['🔚', 'END'],
['🔛', 'ON!'],
['🔜', 'SOON'],
['🔝', 'TOP'],
['🔞', '18'],
['🔤', 'abc'],
['🔠', 'ABCD'],
['🔡', 'abcd'],
['🔢', '1234'],
['🔣', 'T&@%'],
['#️⃣', '#'],
['*️⃣', '*'],
['0️⃣', '0'],
['1️⃣', '1'],
['2️⃣', '2'],
['3️⃣', '3'],
['4️⃣', '4'],
['5️⃣', '5'],
['6️⃣', '6'],
['7️⃣', '7'],
['8️⃣', '8'],
['9️⃣', '9'],
['🔟', '10'],
['🅰️', 'A'],
['🅱️', 'B'],
['🆎', 'AB'],
['🆑', 'CL'],
['🅾️', 'O'],
['🅿', 'P'],
['🆘', 'SOS'],
['🅲', 'C'],
['🅳', 'D'],
['🅴', 'E'],
['🅵', 'F'],
['🅶', 'G'],
['🅷', 'H'],
['🅸', 'I'],
['🅹', 'J'],
['🅺', 'K'],
['🅻', 'L'],
['🅼', 'M'],
['🅽', 'N'],
['🆀', 'Q'],
['🆁', 'R'],
['🆂', 'S'],
['🆃', 'T'],
['🆄', 'U'],
['🆅', 'V'],
['🆆', 'W'],
['🆇', 'X'],
['🆈', 'Y'],
['🆉', 'Z'],
]
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/unidecoder/replacements.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
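# Example (a sketch):
#
#     >>> unidecoder('Übergrößenträger & smörgåsbord')
#     # -> an ASCII-only approximation ('Ue', 'oe', 'ss', 'ae', 'a', ...);
#     #    characters with no replacement are dropped after a one-time warning.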
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/unidecoder/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
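# Usage sketch (assumes the dict above is bound to a name such as ``homoglyphs``):
# the table maps a canonical ASCII character to glyphs that look like it, so
# normalising text needs the inverted lookup, e.g.
#
#     reverse = {glyph: canon
#                for canon, glyphs in homoglyphs.items()
#                for glyph in glyphs}
#     cleaned = ''.join(reverse.get(ch, ch) for ch in text)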
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/unidecoder/homoglyphs.py |
import re
import numpy as np
from .chinese import split_text, is_chinese, chinese_text_to_symbols
from ..text_processing import TextProcessing
class MandarinTextProcessing(TextProcessing):
def __init__(self, symbol_set, cleaner_names, p_arpabet=0.0,
handle_arpabet='word', handle_arpabet_ambiguous='ignore',
expand_currency=True):
super().__init__(symbol_set, cleaner_names, p_arpabet, handle_arpabet,
handle_arpabet_ambiguous, expand_currency)
def sequence_to_text(self, sequence):
result = ''
tmp = ''
for symbol_id in sequence:
if symbol_id in self.id_to_symbol:
s = self.id_to_symbol[symbol_id]
# Enclose ARPAbet and mandarin phonemes back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
elif len(s) > 1 and s[0] == '#' and s[1].isdigit(): # mandarin tone
tmp += s[1] + '} '
result += tmp
tmp = ''
elif len(s) > 1 and s[0] == '#' and (s[1].isalpha() or s[1] == '^'): # mandarin phoneme
if tmp == '':
tmp += ' {' + s[1:] + ' '
else:
tmp += s[1:] + ' '
elif len(s) > 1 and s[0] == '#': # chinese punctuation
s = s[1]
result += s
else:
result += s
        return result.replace('}{', ' ').replace('  ', ' ')  # collapse double spaces
def chinese_symbols_to_sequence(self, symbols):
return self.symbols_to_sequence(['#' + s for s in symbols])
def encode_text(self, text, return_all=False):
# split the text into English and Chinese segments
segments = [segment for segment in split_text(text) if segment != ""]
text_encoded = []
text_clean = ""
text_arpabet = ""
for segment in segments:
if is_chinese(segment[0]): # process the Chinese segment
chinese_symbols, segment_arpabet = chinese_text_to_symbols(segment)
segment_encoded = self.chinese_symbols_to_sequence(chinese_symbols)
segment_clean = segment
segment_encoded = segment_encoded
else: # process the English segment
segment_encoded, segment_clean, segment_arpabet = \
super().encode_text(segment, return_all=True)
text_encoded += segment_encoded
text_clean += segment_clean
text_arpabet += segment_arpabet
if return_all:
return text_encoded, text_clean, text_arpabet
        return text_encoded
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/zh/mandarin_text_processing.py
# *****************************************************************************
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import re
from pypinyin import lazy_pinyin, Style
valid_symbols = ['^', 'A', 'AI', 'AN', 'ANG', 'AO', 'B', 'C', 'CH', 'D',
'E', 'EI', 'EN', 'ENG', 'ER', 'F', 'G', 'H', 'I', 'IE',
'IN', 'ING', 'IU', 'J', 'K', 'L', 'M', 'N', 'O', 'ONG',
'OU', 'P', 'Q', 'R', 'S', 'SH', 'T', 'U', 'UI', 'UN',
'V', 'VE', 'VN', 'W', 'X', 'Y', 'Z', 'ZH']
tones = ['1', '2', '3', '4', '5']
chinese_punctuations = ",。?!;:、‘’“”()【】「」《》"
valid_symbols += tones
def load_pinyin_dict(path="common/text/zh/pinyin_dict.txt"):
with open(path) as f:
return {l.split()[0]: l.split()[1:] for l in f}
pinyin_dict = load_pinyin_dict()
def is_chinese(text):
return u'\u4e00' <= text[0] <= u'\u9fff' or text[0] in chinese_punctuations
def split_text(text):
regex = r'([\u4e00-\u9fff' + chinese_punctuations + ']+)'
return re.split(regex, text)
def chinese_text_to_symbols(text):
symbols = []
phonemes_and_tones = ""
# convert text to mandarin pinyin sequence
    # ignore polyphonic words, as this has little effect on training
pinyin_seq = lazy_pinyin(text, style=Style.TONE3)
for item in pinyin_seq:
if item in chinese_punctuations:
symbols += [item]
phonemes_and_tones += ' ' + item
continue
if not item[-1].isdigit():
item += '5'
item, tone = item[:-1], item[-1]
phonemes = pinyin_dict[item.upper()]
symbols += phonemes
symbols += [tone]
phonemes_and_tones += '{' + ' '.join(phonemes + [tone]) + '}'
return symbols, phonemes_and_tones
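# Rough usage sketch of chinese_text_to_symbols(); the exact phoneme split
# depends on the bundled pinyin_dict.txt, so the dictionary entries shown here
# are assumptions:
#
#     symbols, phonemes = chinese_text_to_symbols('你好')
#     # lazy_pinyin(..., style=Style.TONE3) gives ['ni3', 'hao3']; with dict
#     # entries NI -> N I and HAO -> H AO this yields
#     #     symbols  == ['N', 'I', '3', 'H', 'AO', '3']
#     #     phonemes == '{N I 3}{H AO 3}'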
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/common/text/zh/chinese.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from pathlib import Path
# Define val and test; the remaining ones will be train IDs
val_ids = {
'com_SF_ce227', 'com_SF_ce832', 'com_SF_ce912','com_SF_ce979',
'com_SF_ce998', 'com_SF_ce1045', 'com_SF_ce1282','com_SF_ce1329',
'com_SF_ce1350', 'com_SF_ce1376', 'com_SF_ce1519','com_SF_ce1664',
'com_SF_ce1777', 'com_SF_ce1843', 'com_SF_ce2017','com_SF_ce2042',
'com_SF_ce2100', 'com_SF_ce2251', 'com_SF_ce2443','com_SF_ce2566',
}
test_ids = {
'com_SF_ce161', 'com_SF_ce577', 'com_SF_ce781', 'com_SF_ce814',
'com_SF_ce1042', 'com_SF_ce1089', 'com_SF_ce1123', 'com_SF_ce1425',
'com_SF_ce1514', 'com_SF_ce1577', 'com_SF_ce1780', 'com_SF_ce1857',
'com_SF_ce1940', 'com_SF_ce2051', 'com_SF_ce2181', 'com_SF_ce2258',
'com_SF_ce2406', 'com_SF_ce2512', 'com_SF_ce2564', 'com_SF_ce2657'
}
def generate(fpath, ids_text, pitch=True, text=True):
with open(fpath, 'w') as f:
for id_, txt in ids_text.items():
row = f"wavs/{id_}.wav"
row += "|" + f"pitch/{id_}.pt" if pitch else ""
row += "|" + txt if text else ""
f.write(row + "\n")
def generate_inference_tsv(fpath, ids_text):
with open(fpath, 'w') as f:
f.write("output\ttext\n")
for id_, txt in ids_text.items():
f.write(f"{id_}.wav\t{txt}\n")
def main():
parser = argparse.ArgumentParser(
description='SF bilingual dataset filelists generator')
parser.add_argument('transcripts', type=Path, default='./text_SF.txt',
                        help='Path to the SF bilingual dataset transcripts (e.g. text_SF.txt)')
parser.add_argument('output_dir', default='data/filelists', type=Path,
help='Directory to generate filelists to')
args = parser.parse_args()
with open(args.transcripts) as f:
# A dict of ID:transcript pairs
transcripts = dict(line.replace("\ufeff", "").replace("-", "-").strip().split(' ', 1)
for line in f)
transcripts = {id_.replace("com_DL", "com_SF"): text.lower()
for id_, text in transcripts.items()}
val_ids_text = {id_: transcripts[id_] for id_ in val_ids}
test_ids_text = {id_: transcripts[id_] for id_ in test_ids}
train_ids_text = {id_: transcripts[id_] for id_ in transcripts
if id_ not in test_ids and id_ not in val_ids}
prefix = Path(args.output_dir, "sf_audio_pitch_text_")
generate(str(prefix) + "val.txt", val_ids_text)
generate(str(prefix) + "test.txt", test_ids_text)
generate(str(prefix) + "train.txt", train_ids_text)
prefix = Path(args.output_dir, "sf_audio_")
generate(str(prefix) + "val.txt", val_ids_text, False, False)
generate(str(prefix) + "test.txt", test_ids_text, False, False)
generate(str(prefix) + "train.txt", train_ids_text, False, False)
# train + val + test for pre-processing
generate(Path(args.output_dir, "sf_audio_text.txt"),
{**val_ids_text, **test_ids_text, **train_ids_text}, False, True)
generate_inference_tsv(Path(args.output_dir, "sf_test.tsv"), test_ids_text)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/scripts/mandarin_chinese/split_sf.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn as nn
from tacotron2.loss_function import Tacotron2Loss
from waveglow.loss_function import WaveGlowLoss
def get_loss_function(loss_function, sigma=1.0):
if loss_function == 'Tacotron2':
loss = Tacotron2Loss()
elif loss_function == 'WaveGlow':
loss = WaveGlowLoss(sigma=sigma)
else:
raise NotImplementedError(
"unknown loss function requested: {}".format(loss_function))
loss.cuda()
return loss
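# Minimal usage sketch of the factory above (it calls .cuda(), so a GPU is
# assumed to be available):
#
#     criterion = get_loss_function('Tacotron2')            # Tacotron2Loss
#     criterion = get_loss_function('WaveGlow', sigma=1.0)  # WaveGlowLoss(sigma)
#     loss = criterion(y_pred, y)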
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/loss_functions.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
from os.path import abspath, dirname
# enabling modules discovery from global entrypoint
sys.path.append(abspath(dirname(__file__)+'/'))
from tacotron2.model import Tacotron2
from waveglow.model import WaveGlow
import torch
def model_parser(model_name, parser, add_help=False):
if model_name == 'Tacotron2':
from tacotron2.arg_parser import tacotron2_parser
return tacotron2_parser(parser, add_help)
if model_name == 'WaveGlow':
from waveglow.arg_parser import waveglow_parser
return waveglow_parser(parser, add_help)
else:
raise NotImplementedError(model_name)
def batchnorm_to_float(module):
"""Converts batch norm to FP32"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
batchnorm_to_float(child)
return module
def init_bn(module):
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
if module.affine:
module.weight.data.uniform_()
for child in module.children():
init_bn(child)
def get_model(model_name, model_config, cpu_run,
uniform_initialize_bn_weight=False, forward_is_infer=False,
jittable=False):
""" Code chooses a model based on name"""
model = None
if model_name == 'Tacotron2':
if forward_is_infer:
class Tacotron2__forward_is_infer(Tacotron2):
def forward(self, inputs, input_lengths):
return self.infer(inputs, input_lengths)
model = Tacotron2__forward_is_infer(**model_config)
else:
model = Tacotron2(**model_config)
elif model_name == 'WaveGlow':
model = WaveGlow(**model_config)
if forward_is_infer:
model.forward = model.infer
else:
raise NotImplementedError(model_name)
if uniform_initialize_bn_weight:
init_bn(model)
if not cpu_run:
model = model.cuda()
return model
def get_model_config(model_name, args):
""" Code chooses a model based on name"""
if model_name == 'Tacotron2':
model_config = dict(
# optimization
mask_padding=args.mask_padding,
# audio
n_mel_channels=args.n_mel_channels,
# symbols
n_symbols=args.n_symbols,
symbols_embedding_dim=args.symbols_embedding_dim,
# encoder
encoder_kernel_size=args.encoder_kernel_size,
encoder_n_convolutions=args.encoder_n_convolutions,
encoder_embedding_dim=args.encoder_embedding_dim,
# attention
attention_rnn_dim=args.attention_rnn_dim,
attention_dim=args.attention_dim,
# attention location
attention_location_n_filters=args.attention_location_n_filters,
attention_location_kernel_size=args.attention_location_kernel_size,
# decoder
n_frames_per_step=args.n_frames_per_step,
decoder_rnn_dim=args.decoder_rnn_dim,
prenet_dim=args.prenet_dim,
max_decoder_steps=args.max_decoder_steps,
gate_threshold=args.gate_threshold,
p_attention_dropout=args.p_attention_dropout,
p_decoder_dropout=args.p_decoder_dropout,
# postnet
postnet_embedding_dim=args.postnet_embedding_dim,
postnet_kernel_size=args.postnet_kernel_size,
postnet_n_convolutions=args.postnet_n_convolutions,
decoder_no_early_stopping=args.decoder_no_early_stopping
)
return model_config
elif model_name == 'WaveGlow':
model_config = dict(
n_mel_channels=args.n_mel_channels,
n_flows=args.flows,
n_group=args.groups,
n_early_every=args.early_every,
n_early_size=args.early_size,
WN_config=dict(
n_layers=args.wn_layers,
kernel_size=args.wn_kernel_size,
n_channels=args.wn_channels
)
)
return model_config
else:
raise NotImplementedError(model_name)
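# Minimal sketch mirroring how train.py wires these helpers together; argument
# values beyond the defaults of the model-specific parser are assumed:
#
#     import argparse
#     parser = model_parser('Tacotron2', argparse.ArgumentParser(add_help=False))
#     args, _ = parser.parse_known_args()
#     config = get_model_config('Tacotron2', args)
#     model = get_model('Tacotron2', config, cpu_run=True, forward_is_infer=True)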
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/models.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
import sys
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('--waveglow', type=str,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
run_mode = parser.add_mutually_exclusive_group()
run_mode.add_argument('--fp16', action='store_true',
help='Run inference with FP16')
run_mode.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
waveglow_latency = measurements_all['waveglow_latency']
tacotron2_latency = measurements_all['tacotron2_latency']
denoiser_latency = measurements_all['denoiser_latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.0f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {:.0f}".format(np.mean(num_mels_per_audio)))
print("Tacotron2 latency average (seconds) = {:.2f}".format(np.mean(tacotron2_latency)))
print("WaveGlow latency average (seconds) = {:.2f}".format(np.mean(waveglow_latency)))
print("Denoiser latency average (seconds) = {:.4f}".format(np.mean(denoiser_latency)))
print("Latency average (seconds) = {:.2f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.2f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.2f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.2f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.2f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.2f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.2f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU or CPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"denoiser_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
args.fp16, args.cpu, forward_is_infer=True)
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
args.fp16, args.cpu, forward_is_infer=True)
denoiser = Denoiser(waveglow)
if not args.cpu:
denoiser.cuda()
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing", args.cpu):
sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)
with torch.no_grad():
with MeasureTime(measurements, "latency", args.cpu):
with MeasureTime(measurements, "tacotron2_latency", args.cpu):
mel, mel_lengths, _ = tacotron2.infer(sequences_padded, input_lengths)
with MeasureTime(measurements, "waveglow_latency", args.cpu):
audios = waveglow.infer(mel, sigma=args.sigma_infer)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion", args.cpu):
audios = audios.float()
with torch.no_grad(), MeasureTime(measurements, "denoiser_latency", args.cpu):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
with MeasureTime(measurements, "data_transfer", args.cpu):
audios = audios.cpu()
with MeasureTime(measurements, "storage", args.cpu):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/test_infer.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import models
import torch
import argparse
import numpy as np
import json
import time
import os
import sys
import random
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, MeasureTime, prepare_input_sequence
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-m', '--model-name', type=str, default='',
required=True, help='Model to train')
parser.add_argument('--model', type=str, default='',
help='Full path to the model checkpoint file')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--fp16', action='store_true',
help='inference with AMP')
parser.add_argument('-bs', '--batch-size', type=int, default=1)
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save results')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--synth-data', action='store_true',
help='Test with synthetic data')
return parser
def gen_text(use_synthetic_data):
batch_size = 1
text_len = 170
if use_synthetic_data:
text_padded = torch.randint(low=0, high=148,
size=(batch_size, text_len),
dtype=torch.long).cuda()
input_lengths = torch.IntTensor([text_padded.size(1)]*
batch_size).cuda().long()
else:
text = 'The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. '*2
text = [text[:text_len]]
text_padded, input_lengths = prepare_input_sequence(text)
return (text_padded, input_lengths)
def gen_mel(use_synthetic_data, n_mel_channels, fp16):
if use_synthetic_data:
batch_size = 1
num_mels = 895
mel_padded = torch.zeros(batch_size, n_mel_channels,
num_mels).normal_(-5.62, 1.98).cuda()
else:
mel_padded = torch.load("data/mel.pt")
if fp16:
mel_padded = mel_padded.half()
return mel_padded
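# With --synth-data the mel batch is drawn as shape (1, n_mel_channels, 895)
# from a normal distribution (mean -5.62, std 1.98), presumably matching
# typical mel statistics of the training data; otherwise data/mel.pt is
# expected to hold a precomputed mel tensor of the same layout.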
def main():
"""
Launches inference benchmark.
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
log_file = os.path.join(args.output, args.log_file)
torch.manual_seed(1234)
random.seed(1234)
np.random.seed(1234)
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
DLLogger.metadata('infer_latency', {'unit': 's'})
DLLogger.metadata('infer_items_per_sec', {'unit': 'items/s'})
if args.synth_data:
model = load_and_setup_model(args.model_name, parser, None, args.fp16,
cpu_run=False, forward_is_infer=True)
else:
if not os.path.isfile(args.model):
print(f"File {args.model} does not exist!")
sys.exit(1)
model = load_and_setup_model(args.model_name, parser, args.model,
args.fp16, cpu_run=False,
forward_is_infer=True)
if args.model_name == "Tacotron2":
model = torch.jit.script(model)
warmup_iters = 6
num_iters = warmup_iters + 1
for i in range(num_iters):
measurements = {}
if args.model_name == 'Tacotron2':
text_padded, input_lengths = gen_text(args.synth_data)
with torch.no_grad(), MeasureTime(measurements, "inference_time"):
mels, _, _ = model(text_padded, input_lengths)
num_items = mels.size(0)*mels.size(2)
if args.model_name == 'WaveGlow':
n_mel_channels = model.upsample.in_channels
mel_padded = gen_mel(args.synth_data, n_mel_channels, args.fp16)
with torch.no_grad(), MeasureTime(measurements, "inference_time"):
audios = model(mel_padded)
audios = audios.float()
num_items = audios.size(0)*audios.size(1)
if i >= warmup_iters:
DLLogger.log(step=(i-warmup_iters,), data={"latency": measurements['inference_time']})
DLLogger.log(step=(i-warmup_iters,), data={"items_per_sec": num_items/measurements['inference_time']})
DLLogger.log(step=tuple(),
data={'infer_latency': measurements['inference_time']})
DLLogger.log(step=tuple(),
data={'infer_items_per_sec': num_items/measurements['inference_time']})
DLLogger.flush()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/inference_perf.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from tacotron2.data_function import TextMelCollate
from tacotron2.data_function import TextMelLoader
from waveglow.data_function import MelAudioLoader
from tacotron2.data_function import batch_to_gpu as batch_to_gpu_tacotron2
from waveglow.data_function import batch_to_gpu as batch_to_gpu_waveglow
def get_collate_function(model_name, n_frames_per_step=1):
if model_name == 'Tacotron2':
collate_fn = TextMelCollate(n_frames_per_step)
elif model_name == 'WaveGlow':
collate_fn = torch.utils.data.dataloader.default_collate
else:
raise NotImplementedError(
"unknown collate function requested: {}".format(model_name))
return collate_fn
def get_data_loader(model_name, dataset_path, audiopaths_and_text, args):
if model_name == 'Tacotron2':
data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)
elif model_name == 'WaveGlow':
data_loader = MelAudioLoader(dataset_path, audiopaths_and_text, args)
else:
raise NotImplementedError(
"unknown data loader requested: {}".format(model_name))
return data_loader
def get_batch_to_gpu(model_name):
if model_name == 'Tacotron2':
batch_to_gpu = batch_to_gpu_tacotron2
elif model_name == 'WaveGlow':
batch_to_gpu = batch_to_gpu_waveglow
else:
raise NotImplementedError(
"unknown batch_to_gpu requested: {}".format(model_name))
return batch_to_gpu
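# Usage sketch mirroring train.py; `args` must carry the dataset fields the
# chosen loader expects (filelists, sampling rate, STFT parameters, ...):
#
#     collate_fn = get_collate_function('Tacotron2', n_frames_per_step=1)
#     trainset = get_data_loader('Tacotron2', args.dataset_path,
#                                args.training_files, args)
#     batch_to_gpu = get_batch_to_gpu('Tacotron2')
#     # for each batch from a DataLoader built with collate_fn:
#     #     x, y, num_items = batch_to_gpu(batch)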
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/data_functions.py |
import argparse
import torch
from tacotron2.data_function import TextMelLoader
from tacotron2_common.utils import load_filepaths_and_text
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('--wav-files', required=True,
type=str, help='Path to filelist with audio paths and text')
parser.add_argument('--mel-files', required=True,
type=str, help='Path to filelist with mel paths and text')
parser.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
parser.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
parser.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
parser.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
parser.add_argument('--win-length', default=1024, type=int,
help='Window length')
parser.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
parser.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
return parser
def audio2mel(dataset_path, audiopaths_and_text, melpaths_and_text, args):
melpaths_and_text_list = load_filepaths_and_text(dataset_path, melpaths_and_text)
audiopaths_and_text_list = load_filepaths_and_text(dataset_path, audiopaths_and_text)
data_loader = TextMelLoader(dataset_path, audiopaths_and_text, args)
for i in range(len(melpaths_and_text_list)):
if i%100 == 0:
print("done", i, "/", len(melpaths_and_text_list))
mel = data_loader.get_mel(audiopaths_and_text_list[i][0])
torch.save(mel, melpaths_and_text_list[i][0])
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args = parser.parse_args()
args.load_mel_from_disk = False
audio2mel(args.dataset_path, args.wav_files, args.mel_files, args)
if __name__ == '__main__':
main()
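# Example invocation (filelist names are illustrative); the mel filelist must
# mirror the wav filelist line for line, pointing at the .pt files to create:
#
#     python preprocess_audio2mel.py --dataset-path ./LJSpeech-1.1 \
#         --wav-files filelists/ljs_audio_text_train_filelist.txt \
#         --mel-files filelists/ljs_mel_text_train_filelist.txt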
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/preprocess_audio2mel.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import time
import argparse
import numpy as np
from contextlib import contextmanager
import torch
from torch.utils.data import DataLoader
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
import models
import loss_functions
import data_functions
from tacotron2_common.utils import ParseFromConfigFile
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('-m', '--model-name', type=str, default='', required=True,
help='Model to train')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--anneal-steps', nargs='*',
                        help='Epochs after which to decrease learning rate')
parser.add_argument('--anneal-factor', type=float, choices=[0.1, 0.3], default=0.1,
help='Factor for annealing learning rate')
parser.add_argument('--config-file', action=ParseFromConfigFile,
type=str, help='Path to configuration file')
parser.add_argument('--seed', default=None, type=int,
help='Seed for random number generators')
# training
training = parser.add_argument_group('training setup')
training.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
training.add_argument('--epochs-per-checkpoint', type=int, default=50,
help='Number of epochs per checkpoint')
training.add_argument('--checkpoint-path', type=str, default='',
help='Checkpoint path to resume training')
training.add_argument('--resume-from-last', action='store_true',
help='Resumes training from the last checkpoint; uses the directory provided with \'--output\' option to search for the checkpoint \"checkpoint_<model_name>_last.pt\"')
training.add_argument('--dynamic-loss-scaling', type=bool, default=True,
help='Enable dynamic loss scaling')
training.add_argument('--amp', action='store_true',
help='Enable AMP')
training.add_argument('--cudnn-enabled', action='store_true',
help='Enable cudnn')
training.add_argument('--cudnn-benchmark', action='store_true',
help='Run cudnn benchmark')
training.add_argument('--disable-uniform-initialize-bn-weight', action='store_true',
help='disable uniform initialization of batchnorm layer weight')
optimization = parser.add_argument_group('optimization setup')
optimization.add_argument(
'--use-saved-learning-rate', default=False, type=bool)
optimization.add_argument('-lr', '--learning-rate', type=float, required=True,
                              help='Learning rate')
optimization.add_argument('--weight-decay', default=1e-6, type=float,
help='Weight decay')
optimization.add_argument('--grad-clip-thresh', default=1.0, type=float,
help='Clip threshold for gradients')
optimization.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
optimization.add_argument('--grad-clip', default=5.0, type=float,
help='Enables gradient clipping and sets maximum gradient norm value')
# dataset parameters
dataset = parser.add_argument_group('dataset parameters')
dataset.add_argument('--load-mel-from-disk', action='store_true',
help='Loads mel spectrograms from disk instead of computing them on the fly')
dataset.add_argument('--training-files',
default='filelists/ljs_audio_text_train_filelist.txt',
type=str, help='Path to training filelist')
dataset.add_argument('--validation-files',
default='filelists/ljs_audio_text_val_filelist.txt',
type=str, help='Path to validation filelist')
dataset.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
# audio parameters
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
distributed = parser.add_argument_group('distributed setup')
# distributed.add_argument('--distributed-run', default=True, type=bool,
# help='enable distributed run')
distributed.add_argument('--rank', default=0, type=int,
help='Rank of the process, do not set! Done by multiproc module')
distributed.add_argument('--world-size', default=1, type=int,
help='Number of processes, do not set! Done by multiproc module')
distributed.add_argument('--dist-url', type=str, default='tcp://localhost:23456',
help='Url used to set up distributed training')
distributed.add_argument('--group-name', type=str, default='group_name',
required=False, help='Distributed group name')
distributed.add_argument('--dist-backend', default='nccl', type=str, choices={'nccl'},
help='Distributed run backend')
benchmark = parser.add_argument_group('benchmark')
benchmark.add_argument('--bench-class', type=str, default='')
return parser
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if rt.is_floating_point():
rt = rt/num_gpus
else:
rt = torch.div(rt, num_gpus, rounding_mode='floor')
return rt
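# reduce_tensor() sums a tensor across all ranks and then averages
# floating-point values (losses) over num_gpus, while integer values (item
# counts) are floor-divided so they stay integral; callers that want the plain
# global sum pass num_gpus=1.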
def init_distributed(args, world_size, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=args.dist_backend, init_method=args.dist_url,
world_size=world_size, rank=rank, group_name=group_name)
print("Done initializing distributed")
def save_checkpoint(model, optimizer, scaler, epoch, config, output_dir,
model_name, local_rank, world_size):
random_rng_state = torch.random.get_rng_state().cuda()
cuda_rng_state = torch.cuda.get_rng_state(local_rank).cuda()
random_rng_states_all = [torch.empty_like(random_rng_state) for _ in range(world_size)]
cuda_rng_states_all = [torch.empty_like(cuda_rng_state) for _ in range(world_size)]
if world_size > 1:
dist.all_gather(random_rng_states_all, random_rng_state)
dist.all_gather(cuda_rng_states_all, cuda_rng_state)
else:
random_rng_states_all = [random_rng_state]
cuda_rng_states_all = [cuda_rng_state]
random_rng_states_all = torch.stack(random_rng_states_all).cpu()
cuda_rng_states_all = torch.stack(cuda_rng_states_all).cpu()
if local_rank == 0:
checkpoint = {'epoch': epoch,
'cuda_rng_state_all': cuda_rng_states_all,
'random_rng_states_all': random_rng_states_all,
'config': config,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'scaler': scaler.state_dict()}
checkpoint_filename = "checkpoint_{}_{}.pt".format(model_name, epoch)
checkpoint_path = os.path.join(output_dir, checkpoint_filename)
print("Saving model and optimizer state at epoch {} to {}".format(
epoch, checkpoint_path))
torch.save(checkpoint, checkpoint_path)
symlink_src = checkpoint_filename
symlink_dst = os.path.join(
output_dir, "checkpoint_{}_last.pt".format(model_name))
if os.path.exists(symlink_dst) and os.path.islink(symlink_dst):
print("Updating symlink", symlink_dst, "to point to", symlink_src)
os.remove(symlink_dst)
os.symlink(symlink_src, symlink_dst)
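# Rank 0 writes checkpoint_<model_name>_<epoch>.pt and repoints the
# checkpoint_<model_name>_last.pt symlink at it; --resume-from-last resolves
# that symlink via get_last_checkpoint_filename() below. RNG states are
# gathered from every rank so load_checkpoint() can restore per-device state.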
def get_last_checkpoint_filename(output_dir, model_name):
symlink = os.path.join(output_dir, "checkpoint_{}_last.pt".format(model_name))
if os.path.exists(symlink):
print("Loading checkpoint from symlink", symlink)
return os.path.join(output_dir, os.readlink(symlink))
else:
print("No last checkpoint available - starting from epoch 0 ")
return ""
def load_checkpoint(model, optimizer, scaler, epoch, filepath, local_rank):
checkpoint = torch.load(filepath, map_location='cpu')
epoch[0] = checkpoint['epoch']+1
device_id = local_rank % torch.cuda.device_count()
torch.cuda.set_rng_state(checkpoint['cuda_rng_state_all'][device_id])
if 'random_rng_states_all' in checkpoint:
torch.random.set_rng_state(checkpoint['random_rng_states_all'][device_id])
elif 'random_rng_state' in checkpoint:
torch.random.set_rng_state(checkpoint['random_rng_state'])
else:
raise Exception("Model checkpoint must have either 'random_rng_state' or 'random_rng_states_all' key.")
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scaler.load_state_dict(checkpoint['scaler'])
return checkpoint['config']
# adapted from: https://discuss.pytorch.org/t/opinion-eval-should-be-a-context-manager/18998/3
# Following snippet is licensed under MIT license
@contextmanager
def evaluating(model):
'''Temporarily switch to evaluation mode.'''
istrain = model.training
try:
model.eval()
yield model
finally:
if istrain:
model.train()
def validate(model, criterion, valset, epoch, batch_iter, batch_size,
world_size, collate_fn, distributed_run, perf_bench, batch_to_gpu, amp_run):
"""Handles all the validation scoring and printing"""
with evaluating(model), torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, num_workers=1, shuffle=False,
sampler=val_sampler,
batch_size=batch_size, pin_memory=False,
collate_fn=collate_fn,
drop_last=(True if perf_bench else False))
val_loss = 0.0
num_iters = 0
val_items_per_sec = 0.0
for i, batch in enumerate(val_loader):
torch.cuda.synchronize()
iter_start_time = time.perf_counter()
x, y, num_items = batch_to_gpu(batch)
#AMP upstream autocast
with torch.cuda.amp.autocast(enabled=amp_run):
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, world_size).item()
reduced_num_items = reduce_tensor(num_items.data, 1).item()
            else:
reduced_val_loss = loss.item()
reduced_num_items = num_items.item()
val_loss += reduced_val_loss
torch.cuda.synchronize()
iter_stop_time = time.perf_counter()
iter_time = iter_stop_time - iter_start_time
items_per_sec = reduced_num_items/iter_time
DLLogger.log(step=(epoch, batch_iter, i), data={'val_items_per_sec': items_per_sec})
val_items_per_sec += items_per_sec
num_iters += 1
val_loss = val_loss/num_iters
val_items_per_sec = val_items_per_sec/num_iters
DLLogger.log(step=(epoch,), data={'val_loss': val_loss})
DLLogger.log(step=(epoch,), data={'val_items_per_sec': val_items_per_sec})
return val_loss, val_items_per_sec
def adjust_learning_rate(iteration, epoch, optimizer, learning_rate,
anneal_steps, anneal_factor, rank):
p = 0
if anneal_steps is not None:
for i, a_step in enumerate(anneal_steps):
if epoch >= int(a_step):
p = p+1
if anneal_factor == 0.3:
lr = learning_rate*((0.1 ** (p//2))*(1.0 if p % 2 == 0 else 0.3))
else:
lr = learning_rate*(anneal_factor ** p)
if optimizer.param_groups[0]['lr'] != lr:
DLLogger.log(step=(epoch, iteration), data={'learning_rate changed': str(optimizer.param_groups[0]['lr'])+" -> "+str(lr)})
for param_group in optimizer.param_groups:
param_group['lr'] = lr
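# Worked example: with --anneal-steps 500 1000 1500, p counts how many of those
# thresholds the epoch has passed. anneal_factor 0.1 gives lr, 0.1*lr, 0.01*lr,
# 0.001*lr; anneal_factor 0.3 alternates the factors: lr, 0.3*lr, 0.1*lr,
# 0.03*lr.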
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Training')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
if 'LOCAL_RANK' in os.environ and 'WORLD_SIZE' in os.environ:
local_rank = int(os.environ['LOCAL_RANK'])
world_size = int(os.environ['WORLD_SIZE'])
else:
local_rank = args.rank
world_size = args.world_size
distributed_run = world_size > 1
if args.seed is not None:
torch.manual_seed(args.seed + local_rank)
np.random.seed(args.seed + local_rank)
if local_rank == 0:
log_file = os.path.join(args.output, args.log_file)
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
StdOutBackend(Verbosity.VERBOSE)])
else:
DLLogger.init(backends=[])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
DLLogger.metadata('run_time', {'unit': 's'})
DLLogger.metadata('val_loss', {'unit': None})
DLLogger.metadata('train_items_per_sec', {'unit': 'items/s'})
DLLogger.metadata('val_items_per_sec', {'unit': 'items/s'})
model_name = args.model_name
parser = models.model_parser(model_name, parser)
args, _ = parser.parse_known_args()
torch.backends.cudnn.enabled = args.cudnn_enabled
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if distributed_run:
init_distributed(args, world_size, local_rank, args.group_name)
torch.cuda.synchronize()
run_start_time = time.perf_counter()
model_config = models.get_model_config(model_name, args)
model = models.get_model(model_name, model_config,
cpu_run=False,
uniform_initialize_bn_weight=not args.disable_uniform_initialize_bn_weight)
if distributed_run:
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate,
weight_decay=args.weight_decay)
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
try:
sigma = args.sigma
except AttributeError:
sigma = None
start_epoch = [0]
if args.resume_from_last:
args.checkpoint_path = get_last_checkpoint_filename(args.output, model_name)
if args.checkpoint_path != "":
model_config = load_checkpoint(model, optimizer, scaler, start_epoch,
args.checkpoint_path, local_rank)
start_epoch = start_epoch[0]
criterion = loss_functions.get_loss_function(model_name, sigma)
try:
n_frames_per_step = args.n_frames_per_step
except AttributeError:
n_frames_per_step = None
collate_fn = data_functions.get_collate_function(
model_name, n_frames_per_step)
trainset = data_functions.get_data_loader(
model_name, args.dataset_path, args.training_files, args)
if distributed_run:
train_sampler = DistributedSampler(trainset, seed=(args.seed or 0))
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=1, shuffle=shuffle,
sampler=train_sampler,
batch_size=args.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
valset = data_functions.get_data_loader(
model_name, args.dataset_path, args.validation_files, args)
batch_to_gpu = data_functions.get_batch_to_gpu(model_name)
iteration = 0
train_epoch_items_per_sec = 0.0
val_loss = 0.0
num_iters = 0
model.train()
for epoch in range(start_epoch, args.epochs):
torch.cuda.synchronize()
epoch_start_time = time.perf_counter()
# used to calculate avg items/sec over epoch
reduced_num_items_epoch = 0
train_epoch_items_per_sec = 0.0
num_iters = 0
reduced_loss = 0
if distributed_run:
train_loader.sampler.set_epoch(epoch)
for i, batch in enumerate(train_loader):
torch.cuda.synchronize()
iter_start_time = time.perf_counter()
DLLogger.log(step=(epoch, i),
data={'glob_iter/iters_per_epoch': str(iteration)+"/"+str(len(train_loader))})
adjust_learning_rate(iteration, epoch, optimizer, args.learning_rate,
args.anneal_steps, args.anneal_factor, local_rank)
model.zero_grad()
x, y, num_items = batch_to_gpu(batch)
#AMP upstream autocast
with torch.cuda.amp.autocast(enabled=args.amp):
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_loss = reduce_tensor(loss.data, world_size).item()
reduced_num_items = reduce_tensor(num_items.data, 1).item()
else:
reduced_loss = loss.item()
reduced_num_items = num_items.item()
if np.isnan(reduced_loss):
raise Exception("loss is NaN")
DLLogger.log(step=(epoch,i), data={'train_loss': reduced_loss})
num_iters += 1
# accumulate number of items processed in this epoch
reduced_num_items_epoch += reduced_num_items
if args.amp:
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
optimizer.step()
model.zero_grad(set_to_none=True)
torch.cuda.synchronize()
iter_stop_time = time.perf_counter()
iter_time = iter_stop_time - iter_start_time
items_per_sec = reduced_num_items/iter_time
train_epoch_items_per_sec += items_per_sec
DLLogger.log(step=(epoch, i), data={'train_items_per_sec': items_per_sec})
DLLogger.log(step=(epoch, i), data={'train_iter_time': iter_time})
iteration += 1
torch.cuda.synchronize()
epoch_stop_time = time.perf_counter()
epoch_time = epoch_stop_time - epoch_start_time
DLLogger.log(step=(epoch,), data={'train_items_per_sec':
(train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})
DLLogger.log(step=(epoch,), data={'train_loss': reduced_loss})
DLLogger.log(step=(epoch,), data={'train_epoch_time': epoch_time})
val_loss, val_items_per_sec = validate(model, criterion, valset, epoch,
iteration, args.batch_size,
world_size, collate_fn,
distributed_run, args.bench_class=="perf-train",
batch_to_gpu,
args.amp)
if (epoch % args.epochs_per_checkpoint == 0) and (args.bench_class == "" or args.bench_class == "train"):
save_checkpoint(model, optimizer, scaler, epoch, model_config,
args.output, args.model_name, local_rank, world_size)
if local_rank == 0:
DLLogger.flush()
torch.cuda.synchronize()
run_stop_time = time.perf_counter()
run_time = run_stop_time - run_start_time
DLLogger.log(step=tuple(), data={'run_time': run_time})
DLLogger.log(step=tuple(), data={'val_loss': val_loss})
DLLogger.log(step=tuple(), data={'train_loss': reduced_loss})
DLLogger.log(step=tuple(), data={'train_items_per_sec':
(train_epoch_items_per_sec/num_iters if num_iters > 0 else 0.0)})
DLLogger.log(step=tuple(), data={'val_items_per_sec': val_items_per_sec})
if local_rank == 0:
DLLogger.flush()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/train.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import os
import numpy as np
from scipy.io.wavfile import write
import matplotlib
import matplotlib.pyplot as plt
import sys
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, required=True,
                        help='full path to the input text (phrases separated by newlines)')
parser.add_argument('-o', '--output', required=True,
help='output folder to save audio (file per phrase)')
parser.add_argument('--suffix', type=str, default="", help="output filename suffix")
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('--waveglow', type=str,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.9, type=float)
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
run_mode = parser.add_mutually_exclusive_group()
run_mode.add_argument('--fp16', action='store_true',
help='Run inference with mixed precision')
run_mode.add_argument('--cpu', action='store_true',
help='Run inference on CPU')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--include-warmup', action='store_true',
help='Include warmup')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
return parser
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
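# Illustration of the unwrapping above on a hypothetical state dict key:
#   unwrap_distributed({'module.decoder.linear_projection.weight': w})
#   -> {'decoder.linear_projection.weight': w}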
def load_and_setup_model(model_name, parser, checkpoint, fp16_run, cpu_run,
forward_is_infer=False, jittable=False):
model_parser = models.model_parser(model_name, parser, add_help=False)
model_args, _ = model_parser.parse_known_args()
model_config = models.get_model_config(model_name, model_args)
model = models.get_model(model_name, model_config, cpu_run=cpu_run,
forward_is_infer=forward_is_infer,
jittable=jittable)
if checkpoint is not None:
if cpu_run:
state_dict = torch.load(checkpoint, map_location=torch.device('cpu'))['state_dict']
else:
state_dict = torch.load(checkpoint)['state_dict']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
model.load_state_dict(state_dict)
if model_name == "WaveGlow":
model = model.remove_weightnorm(model)
model.eval()
if fp16_run:
model.half()
return model
# taken from tacotron2/data_function.py:TextMelCollate.__call__
def pad_sequences(batch):
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]]
text_padded[i, :text.size(0)] = text
return text_padded, input_lengths
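# Example of the sorting/padding above with two hypothetical token sequences:
#   batch = [torch.IntTensor([3, 4]), torch.IntTensor([5, 6, 7])]
#   pad_sequences(batch) returns
#       text_padded   = [[5, 6, 7],
#                        [3, 4, 0]]   (sorted by length, right zero-padded)
#       input_lengths = [3, 2]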
def prepare_input_sequence(texts, cpu_run=False):
d = []
for i,text in enumerate(texts):
d.append(torch.IntTensor(
text_to_sequence(text, ['english_cleaners'])[:]))
text_padded, input_lengths = pad_sequences(d)
if not cpu_run:
text_padded = text_padded.cuda().long()
input_lengths = input_lengths.cuda().long()
else:
text_padded = text_padded.long()
input_lengths = input_lengths.long()
return text_padded, input_lengths
class MeasureTime():
def __init__(self, measurements, key, cpu_run=False):
self.measurements = measurements
self.key = key
self.cpu_run = cpu_run
def __enter__(self):
if not self.cpu_run:
torch.cuda.synchronize()
self.t0 = time.perf_counter()
def __exit__(self, exc_type, exc_value, exc_traceback):
if not self.cpu_run:
torch.cuda.synchronize()
self.measurements[self.key] = time.perf_counter() - self.t0
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU or CPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
log_file = os.path.join(args.output, args.log_file)
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
args.fp16, args.cpu, forward_is_infer=True)
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
args.fp16, args.cpu, forward_is_infer=True,
jittable=True)
denoiser = Denoiser(waveglow)
if not args.cpu:
denoiser.cuda()
waveglow.make_ts_scriptable()
jitted_waveglow = torch.jit.script(waveglow)
jitted_tacotron2 = torch.jit.script(tacotron2)
texts = []
try:
        with open(args.input, 'r') as f:
            texts = f.readlines()
    except OSError:
        print("Could not read file '{}'".format(args.input))
sys.exit(1)
if args.include_warmup:
sequence = torch.randint(low=0, high=148, size=(1,50)).long()
input_lengths = torch.IntTensor([sequence.size(1)]).long()
if not args.cpu:
sequence = sequence.cuda()
input_lengths = input_lengths.cuda()
for i in range(3):
with torch.no_grad():
mel, mel_lengths, _ = jitted_tacotron2(sequence, input_lengths)
_ = jitted_waveglow(mel)
measurements = {}
sequences_padded, input_lengths = prepare_input_sequence(texts, args.cpu)
with torch.no_grad(), MeasureTime(measurements, "tacotron2_time", args.cpu):
mel, mel_lengths, alignments = jitted_tacotron2(sequences_padded, input_lengths)
with torch.no_grad(), MeasureTime(measurements, "waveglow_time", args.cpu):
audios = jitted_waveglow(mel, sigma=args.sigma_infer)
audios = audios.float()
with torch.no_grad(), MeasureTime(measurements, "denoiser_time", args.cpu):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
print("Stopping after",mel.size(2),"decoder steps")
tacotron2_infer_perf = mel.size(0)*mel.size(2)/measurements['tacotron2_time']
waveglow_infer_perf = audios.size(0)*audios.size(1)/measurements['waveglow_time']
DLLogger.log(step=0, data={"tacotron2_items_per_sec": tacotron2_infer_perf})
DLLogger.log(step=0, data={"tacotron2_latency": measurements['tacotron2_time']})
DLLogger.log(step=0, data={"waveglow_items_per_sec": waveglow_infer_perf})
DLLogger.log(step=0, data={"waveglow_latency": measurements['waveglow_time']})
DLLogger.log(step=0, data={"denoiser_latency": measurements['denoiser_time']})
DLLogger.log(step=0, data={"latency": (measurements['tacotron2_time']+measurements['waveglow_time']+measurements['denoiser_time'])})
for i, audio in enumerate(audios):
plt.imshow(alignments[i].float().data.cpu().numpy().T, aspect="auto", origin="lower")
figure_path = os.path.join(args.output,"alignment_"+str(i)+args.suffix+".png")
plt.savefig(figure_path)
audio = audio[:mel_lengths[i]*args.stft_hop_length]
audio = audio/torch.max(torch.abs(audio))
audio_path = os.path.join(args.output,"audio_"+str(i)+args.suffix+".wav")
write(audio_path, args.sampling_rate, audio.cpu().numpy())
DLLogger.flush()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/inference.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
from train import main as main_train
from inference_perf import main as main_infer
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--bench-class', type=str, choices=['train', 'perf-infer', 'perf-train'], required=True, help='Choose test class')
return parser
def main():
parser = argparse.ArgumentParser(description='PyTorch Tacotron 2 Testing')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
if "train" in args.bench_class:
main_train()
else:
main_infer()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/main.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
import subprocess
import torch
def main():
argslist = list(sys.argv)[1:]
world_size = torch.cuda.device_count()
if '--world-size' in argslist:
argslist[argslist.index('--world-size') + 1] = str(world_size)
else:
argslist.append('--world-size')
argslist.append(str(world_size))
workers = []
for i in range(world_size):
if '--rank' in argslist:
argslist[argslist.index('--rank') + 1] = str(i)
else:
argslist.append('--rank')
argslist.append(str(i))
stdout = None if i == 0 else subprocess.DEVNULL
worker = subprocess.Popen(
[str(sys.executable)] + argslist, stdout=stdout)
workers.append(worker)
returncode = 0
try:
pending = len(workers)
while pending > 0:
for worker in workers:
try:
worker_returncode = worker.wait(1)
except subprocess.TimeoutExpired:
continue
pending -= 1
if worker_returncode != 0:
if returncode != 1:
for worker in workers:
worker.terminate()
returncode = 1
except KeyboardInterrupt:
print('Pressed CTRL-C, TERMINATING')
for worker in workers:
worker.terminate()
for worker in workers:
worker.wait()
raise
sys.exit(returncode)
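# Typical usage (script name and flags are examples only): launch one training
# process per visible GPU, e.g.
#   python multiproc.py train.py -m Tacotron2 -o ./output/ --amp ...
# The wrapper fills in --world-size from torch.cuda.device_count(), gives each
# child process its own --rank, and keeps stdout only for rank 0.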
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/multiproc.py |
from .entrypoints import nvidia_waveglow
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/__init__.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************\
import torch
import tacotron2_common.layers as layers
from tacotron2_common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class MelAudioLoader(torch.utils.data.Dataset):
"""
    1) loads audio from an audiopath/text filelist (only the audio is used)
    2) computes mel-spectrograms from the audio files.
"""
def __init__(self, dataset_path, audiopaths_and_text, args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.max_wav_value = args.max_wav_value
self.sampling_rate = args.sampling_rate
self.stft = layers.TacotronSTFT(
args.filter_length, args.hop_length, args.win_length,
args.n_mel_channels, args.sampling_rate, args.mel_fmin,
args.mel_fmax)
self.segment_length = args.segment_length
def get_mel_audio_pair(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = torch.randint(0, max_audio_start + 1, size=(1,)).item()
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(
audio, (0, self.segment_length - audio.size(0)), 'constant').data
audio = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = melspec.squeeze(0)
return (melspec, audio, len(audio))
def __getitem__(self, index):
return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])
def __len__(self):
return len(self.audiopaths_and_text)
def batch_to_gpu(batch):
x, y, len_y = batch
x = to_gpu(x).float()
y = to_gpu(y).float()
len_y = to_gpu(torch.sum(len_y))
return ((x, y), y, len_y)
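# batch_to_gpu returns ((mel, audio), audio, num_samples): the (mel, audio)
# pair is the model input, audio doubles as the (unused) target passed to the
# loss, and num_samples is the total number of audio samples in the batch,
# used for items/sec reporting during training.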
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/data_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
torch._C._jit_set_autocast_mode(False)
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels : int):
n_channels_int = n_channels
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
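# The helper above is the WaveNet-style gated activation: the first n_channels
# rows of (input_a + input_b) pass through tanh, the remaining n_channels rows
# through sigmoid, and the two halves are multiplied elementwise.
# Shape sketch (assumed): input_a and input_b are (B, 2*n_channels, T) and the
# result is (B, n_channels, T).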
class Invertible1x1Conv(torch.nn.Module):
"""
The layer outputs both the convolution, and the log determinant
of its weight matrix. If reverse=True it does convolution with
inverse
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.linalg.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
W = W.contiguous()
self.conv.weight.data = W
def forward(self, z):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze()
z = self.conv(z)
return z, log_det_W
def infer(self, z):
self._invert()
return F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
def _invert(self):
if not hasattr(self, 'W_inverse'):
W = self.conv.weight.squeeze()
self.W_inverse = W.float().inverse().unsqueeze(-1).to(W.dtype)
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary
difference from WaveNet is the convolutions need not be causal. There is
also no dilation size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.cond_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
self.cond_layers.append(cond_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(
res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, audio, spect):
audio = self.start(audio)
output = 0
for i, (in_layer, cond_layer, res_skip_layer) in enumerate(
zip(self.in_layers, self.cond_layers, self.res_skip_layers)):
acts = fused_add_tanh_sigmoid_multiply(
in_layer(audio),
cond_layer(spect),
self.n_channels)
res_skip_acts = res_skip_layer(acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:, :self.n_channels, :] + audio
skip_acts = res_skip_acts[:, self.n_channels:, :]
else:
skip_acts = res_skip_acts
output += skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, :self.n_early_size, :])
audio = audio[:, self.n_early_size:, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) // 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k](audio_0, spect)
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = torch.randn(spect.size(0),
self.n_remaining_channels,
spect.size(2), device=spect.device).to(spect.dtype)
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k](audio_0, spect)
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k].infer(audio)
if k % self.n_early_every == 0 and k > 0:
z = torch.randn(spect.size(0), self.n_early_size, spect.size(
2), device=spect.device).to(spect.dtype)
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(
0, 2, 1).contiguous().view(
audio.size(0), -1).data
return audio
def infer_onnx(self, spect, z, sigma=0.9):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
length_spect_group = spect.size(2)//8
mel_dim = 80
batch_size = spect.size(0)
spect = spect.view((batch_size, mel_dim, length_spect_group, self.n_group))
spect = spect.permute(0, 2, 1, 3)
spect = spect.contiguous()
spect = spect.view((batch_size, length_spect_group, self.n_group*mel_dim))
spect = spect.permute(0, 2, 1)
spect = spect.contiguous()
audio = z[:, :self.n_remaining_channels, :]
z = z[:, self.n_remaining_channels:self.n_group, :]
audio = sigma*audio
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) // 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:(n_half+n_half), :]
output = self.WN[k](audio_0, spect)
s = output[:, n_half:(n_half+n_half), :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k].infer(audio)
if k % self.n_early_every == 0 and k > 0:
audio = torch.cat((z[:, :self.n_early_size, :], audio), 1)
z = z[:, self.n_early_size:self.n_group, :]
audio = audio.permute(0,2,1).contiguous().view(batch_size, (length_spect_group * self.n_group))
return audio
def _infer_ts(self, spect, sigma : float=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = torch.randn(spect.size(0), self.n_remaining_channels,
spect.size(2), device=spect.device,
dtype=spect.dtype)
audio *= sigma
for kk, (wn, convinv) in enumerate(zip(self.WN_rev, self.convinv_rev)):
k = self.n_flows - kk - 1
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = wn(audio_0, spect)
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = convinv.infer(audio)
if k % self.n_early_every == 0 and k > 0:
z = torch.randn(spect.size(0), self.n_early_size,
spect.size(2), device=spect.device,
dtype=spect.dtype)
audio = torch.cat((sigma * z, audio), 1)
return audio.permute(0, 2, 1).contiguous().view(audio.size(0), -1).data
def make_ts_scriptable(self, forward_is_infer=True):
self.WN_rev = torch.nn.ModuleList(reversed(self.WN))
self.convinv_rev = torch.nn.ModuleList(reversed(self.convinv))
for conv in self.convinv_rev:
conv._invert()
self.infer = self._infer_ts
if forward_is_infer:
self.forward = self._infer_ts
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layers = remove(WN.cond_layers)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
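# Minimal inference sketch (config values assumed; a trained checkpoint is
# needed to produce meaningful audio):
#   config = {'n_mel_channels': 80, 'n_flows': 12, 'n_group': 8,
#             'n_early_every': 4, 'n_early_size': 2,
#             'WN_config': {'n_layers': 8, 'kernel_size': 3, 'n_channels': 512}}
#   glow = WaveGlow(**config).cuda().eval()
#   glow = WaveGlow.remove_weightnorm(glow)
#   mel = torch.randn(1, 80, 620, device='cuda')
#   with torch.no_grad():
#       audio = glow.infer(mel, sigma=0.9)   # roughly 256 samples per mel frame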
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/model.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
sys.path.append('tacotron2')
import torch
from tacotron2_common.layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
device = waveglow.upsample.weight.device
dtype = waveglow.upsample.weight.dtype
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).to(device)
if mode == 'zeros':
mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device)
elif mode == 'normal':
mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio)
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
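# How the denoiser works: at construction time WaveGlow is run on an all-zero
# (or random) mel input with sigma=0 to capture the model's output bias, and
# the magnitude spectrum of that output is stored as bias_spec. At inference
# time a scaled copy of bias_spec is subtracted from the audio's STFT
# magnitudes (and clamped at zero) before the inverse transform, removing the
# constant background artifact.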
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/denoiser.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output, clean_audio):
# clean_audio is unused;
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(
z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total # noqa: E501
return loss / (z.size(0) * z.size(1) * z.size(2))
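# The expression above is the WaveGlow negative log-likelihood (up to an
# additive constant): sum(z**2) / (2 * sigma**2) minus the sum of all log-scale
# terms from the affine coupling layers and the log-determinants of the
# invertible 1x1 convolutions, averaged over every element of z
# (batch * channels * time).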
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/loss_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def waveglow_parser(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help)
# misc parameters
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
parser.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
parser.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
parser.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
parser.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
parser.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
parser.add_argument('--segment-length', default=4000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = parser.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
                         help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=512, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
return parser
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/arg_parser.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import urllib.request
import torch
import os
import sys
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_waveglow(pretrained=True, **kwargs):
"""Constructs a WaveGlow model (nn.module with additional infer(input) method).
    For detailed information on model input and output, training recipes, inference and performance
visit: github.com/NVIDIA/DeepLearningExamples and/or ngc.nvidia.com
Args:
pretrained (bool): If True, returns a model pretrained on LJ Speech dataset.
model_math (str, 'fp32'): returns a model in given precision ('fp32' or 'fp16')
"""
from waveglow import model as waveglow
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
if pretrained:
if fp16:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ckpt_amp/versions/19.09.0/files/nvidia_waveglowpyt_fp16_20190427'
else:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/waveglow_ckpt_fp32/versions/19.09.0/files/nvidia_waveglowpyt_fp32_20190427'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
state_dict = ckpt['state_dict']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
config = ckpt['config']
else:
config = {'n_mel_channels': 80, 'n_flows': 12, 'n_group': 8,
'n_early_every': 4, 'n_early_size': 2,
'WN_config': {'n_layers': 8, 'kernel_size': 3,
'n_channels': 512}}
for k,v in kwargs.items():
if k in config.keys():
config[k] = v
elif k in config['WN_config'].keys():
config['WN_config'][k] = v
m = waveglow.WaveGlow(**config)
if pretrained:
m.load_state_dict(state_dict)
return m
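# Typical usage through torch.hub (hub tag and arguments assumed; requires
# network access to download the pretrained checkpoint):
#   waveglow = torch.hub.load('NVIDIA/DeepLearningExamples:torchhub',
#                             'nvidia_waveglow', model_math='fp32')
#   waveglow = waveglow.remove_weightnorm(waveglow).cuda().eval()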
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/waveglow/entrypoints.py |
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import torch
import sys
import os
from scipy.signal import get_window
import librosa.util as librosa_util
WAVEGLOW_CONFIG = {
"n_mel_channels": 80,
"n_flows": 12,
"n_group": 8,
"n_early_every": 4,
"n_early_size": 2,
"WN_config": {
"n_layers": 8,
"kernel_size": 3,
"n_channels": 256
}
}
def gen_win_sq(
denoiser):
window = denoiser.stft.window
win_length = denoiser.stft.win_length
n_fft = denoiser.stft.filter_length
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=None)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
return win_sq
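# gen_win_sq returns the squared, normalized analysis window padded to n_fft
# (the same quantity librosa's window_sumsquare is built from), which is
# typically used to renormalize the overlap-add when inverting the STFT.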
if len(sys.argv) < 4 or len(sys.argv) > 5:
print("USAGE:")
print(
"\t%s <tacotron2 directory> <waveglow checkpoint> <json output> [strength, default=0.1]" % sys.argv[0])
sys.exit(1)
json_path = sys.argv[3]
sys.path.append(sys.argv[1])
# must be imported after path is modified
from import_utils import load_waveglow
from waveglow.denoiser import Denoiser
strength = 0.1
if len(sys.argv) == 5:
strength = float(sys.argv[4])
print("Building denoiser")
waveglow = load_waveglow(sys.argv[2], WAVEGLOW_CONFIG)
denoiser = Denoiser(waveglow).cuda()
statedict = {}
statedict["denoiser.stft.forward_basis"] = denoiser.stft.forward_basis.cpu(
).numpy().tolist()
statedict["denoiser.stft.inverse_basis"] = denoiser.stft.inverse_basis.cpu(
).numpy().tolist()
statedict["denoiser.stft.win_sq"] = gen_win_sq(denoiser).tolist()
statedict["denoiser.bias_spec"] = (
denoiser.bias_spec*strength).cpu().numpy().tolist()
with open(json_path, "w") as fout:
json.dump(statedict, fout, indent=2)
print("Wrote to '%s'" % json_path)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/denoiser_to_json.py |
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
if len(sys.argv) != 3:
print("Must specify path to PyTorch Tacotron2 containing 'text' module o load and text file to write")
sys.exit(1)
modulePath = sys.argv[1]
outputPath = sys.argv[2]
sys.path.append(modulePath)
from text import symbols
i=0
with open(outputPath, "w") as fout:
print("# sequence-number symbol", file=fout)
for s in symbols:
print("%d %s" % (i, s), file=fout)
i+=1
print("Successfully wrote %d symbols to '%s'." % (i, outputPath))
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/export_symbols.py |
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import sys
import onnx
import numpy as np
from scipy.io.wavfile import write
import argparse
import torch
args = None
def convert_conv_1d_to_2d(conv1d):
conv2d = torch.nn.Conv2d(conv1d.weight.size(1),
conv1d.weight.size(0),
(conv1d.weight.size(2), 1),
stride=(conv1d.stride[0], 1),
dilation=(conv1d.dilation[0], 1),
padding=(conv1d.padding[0], 0))
conv2d.weight.data[:, :, :, 0] = conv1d.weight.data
conv2d.bias.data = conv1d.bias.data
return conv2d
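# Sanity-check sketch for the conversion above (sizes are arbitrary): the 2-d
# convolution applied to an input with a trailing singleton dimension should
# match the original 1-d convolution.
#   c1 = torch.nn.Conv1d(4, 8, 3, padding=1)
#   c2 = convert_conv_1d_to_2d(c1)
#   x = torch.randn(2, 4, 16)
#   torch.allclose(c1(x), c2(x.unsqueeze(3)).squeeze(3), atol=1e-6)  # True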
def convert_WN_1d_to_2d_(WN):
"""
Modifies the WaveNet like affine coupling layer in-place to use 2-d convolutions
"""
WN.start = convert_conv_1d_to_2d(WN.start)
WN.end = convert_conv_1d_to_2d(WN.end)
for i in range(len(WN.in_layers)):
WN.in_layers[i] = convert_conv_1d_to_2d(WN.in_layers[i])
for i in range(len(WN.res_skip_layers)):
WN.res_skip_layers[i] = convert_conv_1d_to_2d(WN.res_skip_layers[i])
for i in range(len(WN.res_skip_layers)):
WN.cond_layers[i] = convert_conv_1d_to_2d(WN.cond_layers[i])
def convert_convinv_1d_to_2d(convinv):
"""
Takes an invertible 1x1 1-d convolution and returns a 2-d convolution that does
the inverse
"""
conv2d = torch.nn.Conv2d(convinv.W_inverse.size(1),
convinv.W_inverse.size(0),
1, bias=False)
conv2d.weight.data[:, :, :, 0] = convinv.W_inverse.data
return conv2d
def convert_1d_to_2d_(glow):
"""
Caffe2 and TensorRT don't seem to support 1-d convolutions or properly
convert ONNX exports with 1d convolutions to 2d convolutions yet, so we
do the conversion to 2-d convolutions before ONNX export
"""
# Convert upsample to 2d
upsample = torch.nn.ConvTranspose2d(glow.upsample.weight.size(0),
glow.upsample.weight.size(1),
(glow.upsample.weight.size(2), 1),
stride=(glow.upsample.stride[0], 1))
upsample.weight.data[:, :, :, 0] = glow.upsample.weight.data
upsample.bias.data = glow.upsample.bias.data
glow.upsample = upsample
# Convert WN to 2d
for WN in glow.WN:
convert_WN_1d_to_2d_(WN)
# Convert invertible conv to 2d
for i in range(len(glow.convinv)):
glow.convinv[i] = convert_convinv_1d_to_2d(glow.convinv[i])
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
in_act = input_a+input_b
in_left = in_act[:, 0:n_channels, :, :]
in_right = in_act[:, n_channels:2*n_channels, :, :]
t_act = torch.tanh(in_left)
s_act = torch.sigmoid(in_right)
acts = t_act * s_act
return acts
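# Shape contract of the gated activation above (illustrative sketch with
# arbitrary sizes, not called by the script): the inputs carry 2*n_channels
# channels (a tanh half and a sigmoid half) and the output keeps n_channels.
def _example_fused_gate():
    n_channels = 4
    a = torch.randn(1, 2 * n_channels, 10, 1)
    b = torch.randn(1, 2 * n_channels, 10, 1)
    out = fused_add_tanh_sigmoid_multiply(a, b, n_channels)
    assert out.shape == (1, n_channels, 10, 1)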
def WN_forward(self, forward_input):
"""
    This is a forward replacement for the WN forward. It is required because
    the code was written for 1-d convolutions, which are not yet supported by
    ONNX exports.
"""
audio, spect = forward_input
audio = self.start(audio)
for i in range(self.n_layers):
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
self.cond_layers[i](spect),
self.n_channels)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:, 0:self.n_channels, :, :] + audio
skip_acts = res_skip_acts[:,
self.n_channels:2*self.n_channels, :, :]
else:
skip_acts = res_skip_acts
if i == 0:
output = skip_acts
else:
output = skip_acts + output
return self.end(output)
def infer_o(self, spect, z):
"""
    In order for the trace to work when running through ONNX with 2-d
    convolutions, we need to overwrite the forward method. All shape
    information is pre-calculated so that ONNX doesn't export "Dynamic"
    outputs, which are not yet supported by TensorRT.
"""
batch_size = spect.size(0)
spect = spect.permute(0, 3, 2, 1).contiguous()
spect = self.upsample(spect)
spect = torch.squeeze(spect, 3)
spect = spect.view(batch_size, self.upsample_weight_size, self.length_spect_group, self.n_group)
spect = spect.permute(0, 2, 1, 3)
spect = spect.contiguous()
spect = spect.view(batch_size, self.length_spect_group, self.upsample_weight_size*self.n_group)
spect = spect.permute(0, 2, 1)
spect = torch.unsqueeze(spect, 3)
spect = spect.contiguous()
audio = z[:, :self.n_remaining_channels, :, :]
z = z[:, self.n_remaining_channels:self.n_group, :, :]
for k in reversed(range(self.n_flows)):
n_half = self.n_halves[k]
audio_0 = audio[:, 0:n_half, :, :]
audio_1 = audio[:, n_half:2*n_half, :, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:2*n_half, :, :]
b = output[:, 0:n_half, :, :]
audio_1 = (audio_1 - b)/torch.exp(s)
audio_0 = audio_0.expand(audio_1.size(0), audio_0.size(1),
audio_0.size(2), audio_0.size(3))
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k](audio)
if k % self.n_early_every == 0 and k > 0:
zb = z[:, 0:self.n_early_size, :, :].expand(audio.size(0),
self.n_early_size, z.size(2), z.size(3))
audio = torch.cat((zb, audio), 1)
z = z[:, self.n_early_size:self.n_group -
self.n_remaining_channels, :, :]
audio = torch.squeeze(audio, 3)
audio = audio.permute(0, 2, 1).contiguous().view(
audio.size(0), (self.length_spect_group * self.n_group))
return audio
def main(waveglow_path, output_path, batch_size, length_mels):
"""
    Takes a WaveGlow model, a batch size, and a length in mels, and outputs a
    static ONNX representation using 2-d convolutions.
"""
torch.manual_seed(0)
model = load_waveglow(waveglow_path, waveglow_config)
length_spect = length_mels
length_samples = 768 + 256*length_spect
model.upsample_weight_size = model.upsample.weight.size(0)
spect = torch.cuda.FloatTensor(
batch_size, model.upsample_weight_size, length_spect).normal_()
spect = torch.autograd.Variable(spect.cuda(), requires_grad=False)
# Run inference because it forces inverses to be calculated
with torch.no_grad():
test_out1 = model.infer(spect)
assert(length_samples % model.n_group == 0)
model.length_spect_group = int(length_samples / model.n_group)
# Pre-calculating the sizes of noise to use so it's not dynamic
n_halves = []
n_half = int(model.n_remaining_channels/2)
for k in reversed(range(model.n_flows)):
n_halves.append(n_half)
if k % model.n_early_every == 0 and k > 0:
n_half = n_half + int(model.n_early_size/2)
n_halves.reverse()
model.n_halves = n_halves
spect = torch.cuda.FloatTensor(
batch_size, 1, length_spect, model.upsample.weight.size(0)).normal_()
z = torch.cuda.FloatTensor(
1, model.n_group, model.length_spect_group, 1).normal_()
spect = torch.autograd.Variable(spect.cuda(), requires_grad=False)
z = torch.autograd.Variable(z, requires_grad=False)
# Replace old forward with inference
glow.WaveGlow.forward = infer_o
#glow.WN.forward = WN_forward
# Convert whole model to 2d convolutions
convert_1d_to_2d_(model)
model.cuda()
# Get output for comparison with Caffe2
with torch.no_grad():
test_out2 = model(spect, z)
# Export model
torch.onnx.export(model, (spect, z),
output_path,
dynamic_axes={'spect': {0: 'batch_size'},
'audio': {0: 'batch_size'}},
input_names=['spect', 'z'],
output_names=['audio'],
opset_version=10,
verbose=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--waveglow_path',
help='Path to waveglow decoder checkpoint with model',
required=True)
parser.add_argument('-W', '--tacotron2_home', help='Path to DeepLearningExamples/PyTorch/SpeechSynthesis/Tacotron2 directory.',
required=True)
parser.add_argument('-o', "--onnx_path",
help="Path to output ONNX file", required=True)
parser.add_argument("--batch_size", default=1, type=int)
parser.add_argument("--length_mels", default=160, type=int)
# add wave glow arguments
waveglow = parser.add_argument_group("WaveGlow parameters")
waveglow.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
waveglow.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
waveglow.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
waveglow.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
waveglow.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
waveglow.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
waveglow.add_argument('--segment-length', default=4000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = waveglow.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
                         help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=256, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
args = parser.parse_args()
# do imports as needed
sys.path.append(args.tacotron2_home)
import waveglow.model as glow
from import_utils import load_waveglow
global waveglow_config
waveglow_config = {
"n_mel_channels": args.n_mel_channels,
"n_flows": args.flows,
"n_group": args.groups,
"n_early_every": args.early_every,
"n_early_size": args.early_size,
"WN_config": {
"n_layers": args.wn_layers,
"kernel_size": args.wn_kernel_size,
"n_channels": args.wn_channels
}
}
main(args.waveglow_path, args.onnx_path, args.batch_size, args.length_mels)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/waveglow_to_onnx.py |
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import torch
import sys
if len(sys.argv) != 3:
print("Must specify statedict to load and json to write")
sys.exit(1)
statedict_path = sys.argv[1]
json_path = sys.argv[2]
print("Reading from '%s' and writing to '%s'." % (statedict_path, json_path))
statedict = dict(torch.load(statedict_path)["state_dict"])
outdict = {}
for k, v in dict(statedict).items():
if k.startswith("module."):
k = k[len("module."):]
print(k)
outdict[k] = v.cpu().numpy().tolist()
with open(json_path, "w") as fout:
json.dump(outdict, fout)
print("Wrote to '%s'" % json_path)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/tacotron2_to_json.py |
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from .waveglow import load_waveglow
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/import_utils/__init__.py |
#!/usr/bin/env python3
##
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pickle
import torch
from waveglow.model import WaveGlow
def split_cond_layers(model):
for WN in model.WN:
if hasattr(WN, "cond_layer"):
n_layers = len(WN.res_skip_layers)
conv_weights = WN.cond_layer.weight
conv_bias = WN.cond_layer.bias
conv_stride = WN.cond_layer.stride
conv_dilation = WN.cond_layer.dilation
conv_padding = WN.cond_layer.padding
num_in_channels = conv_weights.size(1)
num_out_channels = conv_weights.size(0)//n_layers
kernel_size = conv_weights.size(2)
WN.cond_layers = []
for i in range(n_layers):
layer = torch.nn.Conv1d(
in_channels=num_in_channels,
out_channels=num_out_channels,
kernel_size=kernel_size,
stride=conv_stride,
padding=conv_padding,
dilation=conv_dilation)
layer.weight.data[:, :, :] = conv_weights.data[
i*num_out_channels:(i+1)*num_out_channels, :, :]
layer.bias.data[:] = conv_bias.data[
i*num_out_channels:(i+1)*num_out_channels]
layer = torch.nn.utils.weight_norm(layer, name='weight')
WN.cond_layers.append(layer)
return model
def load_waveglow(filename, waveglow_config):
class RenamingUnpickler(pickle.Unpickler):
def find_class(self, module, name):
if module == 'glow':
module = 'waveglow.model'
return super().find_class(module, name)
class RenamingPickleModule:
def load(self, f, *args, **kw_args):
return self.Unpickler(f, *args, **kw_args).load()
def Unpickler(self, f, **pickle_load_args):
return RenamingUnpickler(f, **pickle_load_args)
pickle_module = RenamingPickleModule()
blob = torch.load(filename, pickle_module=pickle_module)
if 'state_dict' in blob:
waveglow = WaveGlow(**waveglow_config).cuda()
state_dict = {}
for key, value in blob["state_dict"].items():
newKey = key
if key.startswith("module."):
newKey = key[len("module."):]
state_dict[newKey] = value
waveglow.load_state_dict(state_dict)
else:
waveglow = blob['model']
waveglow = split_cond_layers(waveglow)
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow.cuda().eval()
return waveglow
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/trtis_cpp/scripts/import_utils/waveglow.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
    observations in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, size=n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
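# Illustrative usage sketch (hypothetical helper, arbitrary frame count): with
# the defaults above, the envelope length is n_fft + hop_length * (n_frames - 1).
def _example_window_sumsquare():
    env = window_sumsquare('hann', n_frames=100, hop_length=200,
                           win_length=800, n_fft=800)
    assert env.shape == (800 + 200 * 99,)  # 20600 samples
    return env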
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
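# Illustrative usage sketch (hypothetical helper): reconstruct a waveform from
# STFT magnitudes. STFT comes from the sibling tacotron2_common.stft module and
# is imported locally because that module imports window_sumsquare from here.
# The signal length and iteration count are arbitrary example values.
def _example_griffin_lim():
    from tacotron2_common.stft import STFT
    stft_fn = STFT(filter_length=800, hop_length=200, win_length=800)
    magnitudes, _ = stft_fn.transform(torch.randn(1, 20000))
    return griffin_lim(magnitudes, stft_fn, n_iters=5)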
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
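# Illustrative round trip (hypothetical helper): decompression inverts
# compression for values above the clip threshold, since
# exp(log(clamp(x) * C)) / C == x there.
def _example_dynamic_range_round_trip():
    x = torch.tensor([1e-6, 0.01, 1.0, 3.0])
    y = dynamic_range_decompression(dynamic_range_compression(x))
    assert torch.allclose(y[1:], x[1:])  # 1e-6 is clamped up to clip_val=1e-5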
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2_common/audio_processing.py |
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from tacotron2_common.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].astype(np.float32))
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, size=filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose2d(
recombine_magnitude_phase.unsqueeze(-1),
Variable(self.inverse_basis.unsqueeze(-1), requires_grad=False),
stride=(self.hop_length,1),
padding=(0,0))
inverse_transform = inverse_transform.squeeze(-1)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
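# Illustrative round trip (hypothetical helper, arbitrary sizes): forward() runs
# transform followed by inverse, so the output approximates the input waveform.
def _example_stft_round_trip():
    stft = STFT(filter_length=800, hop_length=200, win_length=800, window='hann')
    signal = torch.randn(2, 16000)
    reconstruction = stft(signal)  # shape (2, 1, 16000) with these settings
    return signal, reconstruction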
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2_common/stft.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import numpy as np
from scipy.io.wavfile import read
import torch
import os
import argparse
import json
class ParseFromConfigFile(argparse.Action):
def __init__(self, option_strings, type, dest, help=None, required=False):
super(ParseFromConfigFile, self).__init__(option_strings=option_strings, type=type, dest=dest, help=help, required=required)
def __call__(self, parser, namespace, values, option_string):
with open(values, 'r') as f:
data = json.load(f)
for group in data.keys():
for k,v in data[group].items():
underscore_k = k.replace('-', '_')
setattr(namespace, underscore_k, v)
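# Illustrative wiring of the action above into a parser (hypothetical helper;
# the option name and JSON path are example values). Every "group" ->
# {"some-option": value} entry in the file becomes namespace.some_option.
def _example_parse_from_config_file(config_path):
    parser = argparse.ArgumentParser()
    parser.add_argument('--config-file', type=str, dest='config_file',
                        action=ParseFromConfigFile,
                        help='Path to a JSON file with grouped options')
    return parser.parse_args(['--config-file', config_path])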
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, device=lengths.device, dtype=lengths.dtype)
mask = (ids < lengths.unsqueeze(1)).byte()
mask = torch.le(mask, 0)
return mask
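# Illustrative example (hypothetical helper): the returned mask is True at
# padded positions, e.g. for lengths [3, 1] with a maximum length of 3.
def _example_get_mask_from_lengths():
    mask = get_mask_from_lengths(torch.LongTensor([3, 1]))
    assert mask.tolist() == [[False, False, False], [False, True, True]]
    return mask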
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(dataset_path, filename, split="|"):
with open(filename, encoding='utf-8') as f:
def split_line(root, line):
parts = line.strip().split(split)
if len(parts) > 2:
raise Exception(
"incorrect line format for file: {}".format(filename))
path = os.path.join(root, parts[0])
text = parts[1]
return path,text
filepaths_and_text = [split_line(dataset_path, line) for line in f]
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return x
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2_common/utils.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from librosa.filters import mel as librosa_mel_fn
from tacotron2_common.audio_processing import dynamic_range_compression, dynamic_range_decompression
from tacotron2_common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
return self.conv(signal)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sr=sampling_rate,
n_fft=filter_length,
n_mels=n_mel_channels,
fmin=mel_fmin,
fmax=mel_fmax
)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
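# Illustrative usage sketch (hypothetical helper): one second of random audio at
# the default 22050 Hz, scaled into the [-1, 1] range mel_spectrogram expects.
def _example_mel_spectrogram():
    taco_stft = TacotronSTFT()
    audio = torch.rand(1, 22050) * 2.0 - 1.0
    return taco_stft.mel_spectrogram(audio)  # shape (1, 80, 87) with hop_length=256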
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tacotron2_common/layers.py |
# *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import argparse
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument("--trtis_model_name",
type=str,
default='waveglow',
help="exports to appropriate directory for TRTIS")
parser.add_argument("--trtis_model_version",
type=int,
default=1,
help="exports to appropriate directory for TRTIS")
parser.add_argument('--fp16', action='store_true',
help='inference with mixed precision')
return parser
def main():
parser = argparse.ArgumentParser(
description='PyTorch WaveGlow TRTIS config exporter')
parser = parse_args(parser)
args = parser.parse_args()
# prepare repository
model_folder = os.path.join('./trtis_repo', args.trtis_model_name)
version_folder = os.path.join(model_folder, str(args.trtis_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
# build the config for TRTIS
config_filename = os.path.join(model_folder, "config.pbtxt")
config_template = r"""
name: "{model_name}"
platform: "tensorrt_plan"
default_model_filename: "waveglow_fp16.engine"
max_batch_size: 1
input {{
name: "mel"
data_type: {fp_type}
dims: [80, -1, 1]
}}
input {{
name: "z"
data_type: {fp_type}
dims: [8, -1, 1]
}}
output {{
name: "audio"
data_type: {fp_type}
dims: [-1]
}}
"""
config_values = {
"model_name": args.trtis_model_name,
"fp_type": "TYPE_FP16" if args.fp16 else "TYPE_FP32"
}
    with open(config_filename, "w") as file:
final_config_str = config_template.format_map(config_values)
file.write(final_config_str)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/exports/export_waveglow_trt_config.py |
# *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import argparse
import sys
sys.path.append('./')
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str, required=True,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, default="trtis_repo/tacotron/1/model.pt",
help='filename for the Tacotron 2 TorchScript model')
parser.add_argument('--fp16', action='store_true',
help='inference with mixed precision')
return parser
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args = parser.parse_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
amp_run=args.fp16, cpu_run=False,
forward_is_infer=True)
jitted_tacotron2 = torch.jit.script(tacotron2)
torch.jit.save(jitted_tacotron2, args.output)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/exports/export_tacotron2_ts.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch import nn
from torch.nn import functional as F
import argparse
import sys
sys.path.append('./')
import models
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, prepare_input_sequence
from tacotron2_common.utils import to_gpu, get_mask_from_lengths
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported Tacotron 2 ONNX model')
parser.add_argument('--fp16', action='store_true',
help='Export with half precision to ONNX')
return parser
def encoder_infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, False)
x = x.transpose(1, 2)
input_lengths_cpu = input_lengths[:] # TODO
input_lengths_cpu = input_lengths_cpu.cpu().numpy() # TODO
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths_cpu, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
lens = input_lengths*2
return outputs, lens
class Encoder(torch.nn.Module):
def __init__(self, tacotron2):
super(Encoder, self).__init__()
self.tacotron2 = tacotron2
self.tacotron2.encoder.lstm.flatten_parameters()
self.infer = encoder_infer
def forward(self, sequence, sequence_lengths):
embedded_inputs = self.tacotron2.embedding(sequence).transpose(1, 2)
memory, lens = self.infer(self.tacotron2.encoder, embedded_inputs, sequence_lengths)
processed_memory = self.tacotron2.decoder.attention_layer.memory_layer(memory)
return memory, processed_memory, lens
class Postnet(torch.nn.Module):
def __init__(self, tacotron2):
super(Postnet, self).__init__()
self.tacotron2 = tacotron2
def forward(self, mel_outputs):
mel_outputs_postnet = self.tacotron2.postnet(mel_outputs)
return mel_outputs + mel_outputs_postnet
def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
lstm_mod.weight_ih_l0 = torch.nn.Parameter(lstmcell_mod.weight_ih)
lstm_mod.weight_hh_l0 = torch.nn.Parameter(lstmcell_mod.weight_hh)
lstm_mod.bias_ih_l0 = torch.nn.Parameter(lstmcell_mod.bias_ih)
lstm_mod.bias_hh_l0 = torch.nn.Parameter(lstmcell_mod.bias_hh)
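# Illustrative check (hypothetical helper, arbitrary sizes): a one-layer nn.LSTM
# initialised with the copy above reproduces a single nn.LSTMCell step, which is
# what lets DecoderIter below swap LSTMCell for the ONNX-exportable LSTM.
def _example_lstmcell2lstm():
    cell = nn.LSTMCell(8, 16)
    lstm = nn.LSTM(8, 16, 1)
    lstmcell2lstm_params(lstm, cell)
    x, h0, c0 = torch.randn(1, 8), torch.zeros(1, 16), torch.zeros(1, 16)
    h_cell, c_cell = cell(x, (h0, c0))
    _, (h_lstm, c_lstm) = lstm(x.unsqueeze(0), (h0.unsqueeze(0), c0.unsqueeze(0)))
    assert torch.allclose(h_cell, h_lstm.squeeze(0), atol=1e-5)
    assert torch.allclose(c_cell, c_lstm.squeeze(0), atol=1e-5)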
def prenet_infer(self, x):
x1 = x[:]
for linear in self.layers:
x1 = F.relu(linear(x1))
x0 = x1[0].unsqueeze(0)
mask = torch.le(torch.rand(256, device='cuda').to(x.dtype), 0.5).to(x.dtype)
mask = mask.expand(x1.size(0), x1.size(1))
x1 = x1*mask*2.0
return x1
class DecoderIter(torch.nn.Module):
def __init__(self, tacotron2):
super(DecoderIter, self).__init__()
self.tacotron2 = tacotron2
dec = tacotron2.decoder
self.p_attention_dropout = dec.p_attention_dropout
self.p_decoder_dropout = dec.p_decoder_dropout
self.prenet = dec.prenet
self.prenet.infer = prenet_infer
self.attention_rnn = nn.LSTM(dec.prenet_dim + dec.encoder_embedding_dim,
dec.attention_rnn_dim, 1)
lstmcell2lstm_params(self.attention_rnn, dec.attention_rnn)
self.attention_rnn.flatten_parameters()
self.attention_layer = dec.attention_layer
self.decoder_rnn = nn.LSTM(dec.attention_rnn_dim + dec.encoder_embedding_dim,
dec.decoder_rnn_dim, 1)
lstmcell2lstm_params(self.decoder_rnn, dec.decoder_rnn)
self.decoder_rnn.flatten_parameters()
self.linear_projection = dec.linear_projection
self.gate_layer = dec.gate_layer
def decode(self, decoder_input, in_attention_hidden, in_attention_cell,
in_decoder_hidden, in_decoder_cell, in_attention_weights,
in_attention_weights_cum, in_attention_context, memory,
processed_memory, mask):
cell_input = torch.cat((decoder_input, in_attention_context), -1)
_, (out_attention_hidden, out_attention_cell) = self.attention_rnn(
cell_input.unsqueeze(0), (in_attention_hidden.unsqueeze(0),
in_attention_cell.unsqueeze(0)))
out_attention_hidden = out_attention_hidden.squeeze(0)
out_attention_cell = out_attention_cell.squeeze(0)
out_attention_hidden = F.dropout(
out_attention_hidden, self.p_attention_dropout, False)
attention_weights_cat = torch.cat(
(in_attention_weights.unsqueeze(1),
in_attention_weights_cum.unsqueeze(1)), dim=1)
out_attention_context, out_attention_weights = self.attention_layer(
out_attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
out_attention_weights_cum = in_attention_weights_cum + out_attention_weights
decoder_input_tmp = torch.cat(
(out_attention_hidden, out_attention_context), -1)
_, (out_decoder_hidden, out_decoder_cell) = self.decoder_rnn(
decoder_input_tmp.unsqueeze(0), (in_decoder_hidden.unsqueeze(0),
in_decoder_cell.unsqueeze(0)))
out_decoder_hidden = out_decoder_hidden.squeeze(0)
out_decoder_cell = out_decoder_cell.squeeze(0)
out_decoder_hidden = F.dropout(
out_decoder_hidden, self.p_decoder_dropout, False)
decoder_hidden_attention_context = torch.cat(
(out_decoder_hidden, out_attention_context), 1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, out_attention_hidden,
out_attention_cell, out_decoder_hidden, out_decoder_cell,
out_attention_weights, out_attention_weights_cum, out_attention_context)
# @torch.jit.script
def forward(self,
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask):
decoder_input1 = self.prenet.infer(self.prenet, decoder_input)
outputs = self.decode(decoder_input1,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
return outputs
def test_inference(encoder, decoder_iter, postnet):
encoder.eval()
decoder_iter.eval()
postnet.eval()
from trt.inference_trt import init_decoder_inputs
texts = ["Hello World, good day."]
sequences, sequence_lengths = prepare_input_sequence(texts)
measurements = {}
print("Running Tacotron2 Encoder")
with torch.no_grad():
memory, processed_memory, lens = encoder(sequences, sequence_lengths)
print("Running Tacotron2 Decoder")
device = memory.device
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1), torch.zeros(1), torch.zeros(1))
gate_threshold = 0.6
max_decoder_steps = 1000
first_iter = True
(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory,
mask) = init_decoder_inputs(memory, processed_memory, sequence_lengths)
while True:
with torch.no_grad():
(mel_output, gate_output,
attention_hidden, attention_cell,
decoder_hidden, decoder_cell,
attention_weights, attention_weights_cum,
attention_context) = decoder_iter(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
if first_iter:
mel_outputs = torch.unsqueeze(mel_output, 2)
gate_outputs = torch.unsqueeze(gate_output, 2)
alignments = torch.unsqueeze(attention_weights, 2)
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(mel_output, 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(gate_output, 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(attention_weights, 2)), 2)
dec = torch.le(torch.sigmoid(gate_output), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after ",mel_outputs.size(2)," decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
print("Running Tacotron2 PostNet")
with torch.no_grad():
mel_outputs_postnet = postnet(mel_outputs)
return mel_outputs_postnet
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 export to TRT')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False)
opset_version = 10
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
sequence_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()
dummy_input = (sequences, sequence_lengths)
encoder = Encoder(tacotron2)
encoder.eval()
with torch.no_grad():
encoder(*dummy_input)
torch.onnx.export(encoder, dummy_input, args.output+"/"+"encoder.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["sequences", "sequence_lengths"],
output_names=["memory", "processed_memory", "lens"],
dynamic_axes={"sequences": {1: "text_seq"},
"memory": {1: "mem_seq"},
"processed_memory": {1: "mem_seq"}
})
decoder_iter = DecoderIter(tacotron2)
memory = torch.randn((1,sequence_lengths[0],512)).cuda() #encoder_outputs
if args.fp16:
memory = memory.half()
memory_lengths = sequence_lengths
# initialize decoder states for dummy_input
decoder_input = tacotron2.decoder.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = tacotron2.decoder.initialize_decoder_states(memory)
dummy_input = (decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
decoder_iter = DecoderIter(tacotron2)
decoder_iter.eval()
with torch.no_grad():
decoder_iter(*dummy_input)
torch.onnx.export(decoder_iter, dummy_input, args.output+"/"+"decoder_iter.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["decoder_input",
"attention_hidden",
"attention_cell",
"decoder_hidden",
"decoder_cell",
"attention_weights",
"attention_weights_cum",
"attention_context",
"memory",
"processed_memory",
"mask"],
output_names=["decoder_output",
"gate_prediction",
"out_attention_hidden",
"out_attention_cell",
"out_decoder_hidden",
"out_decoder_cell",
"out_attention_weights",
"out_attention_weights_cum",
"out_attention_context"],
dynamic_axes={"attention_weights" : {1: "seq_len"},
"attention_weights_cum" : {1: "seq_len"},
"memory" : {1: "seq_len"},
"processed_memory" : {1: "seq_len"},
"mask" : {1: "seq_len"},
"out_attention_weights" : {1: "seq_len"},
"out_attention_weights_cum" : {1: "seq_len"}
})
postnet = Postnet(tacotron2)
dummy_input = torch.randn((1,80,620)).cuda()
if args.fp16:
dummy_input = dummy_input.half()
torch.onnx.export(postnet, dummy_input, args.output+"/"+"postnet.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel_outputs"],
output_names=["mel_outputs_postnet"],
dynamic_axes={"mel_outputs": {2: "mel_seq"},
"mel_outputs_postnet": {2: "mel_seq"}})
mel = test_inference(encoder, decoder_iter, postnet)
torch.save(mel, "mel.pt")
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/exports/export_tacotron2_onnx.py |
# *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import types
import torch
import argparse
import sys
sys.path.append('./')
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported WaveGlow ONNX model')
parser.add_argument('--fp16', action='store_true',
help='inference with AMP')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
return parser
def infer_onnx(self, spect, z, sigma=0.9):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
length_spect_group = spect.size(2)//8
mel_dim = 80
batch_size = spect.size(0)
spect = spect.view((batch_size, mel_dim, length_spect_group, self.n_group))
spect = spect.permute(0, 2, 1, 3)
spect = spect.contiguous()
spect = spect.view((batch_size, length_spect_group, self.n_group*mel_dim))
spect = spect.permute(0, 2, 1)
spect = spect.contiguous()
audio = z[:, :self.n_remaining_channels, :]
z = z[:, self.n_remaining_channels:self.n_group, :]
audio = sigma*audio
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:(n_half+n_half), :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:(n_half+n_half), :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k].infer(audio)
if k % self.n_early_every == 0 and k > 0:
audio = torch.cat((z[:, :self.n_early_size, :], audio), 1)
z = z[:, self.n_early_size:self.n_group, :]
audio = audio.permute(0,2,1).contiguous().view(batch_size, (length_spect_group * self.n_group))
return audio
def export_onnx(parser, args):
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
fp16_run=args.fp16, cpu_run=False,
forward_is_infer=False)
    # 80 mel channels, 620 mel-spectrogram frames ~ 7 seconds of speech
mel = torch.randn(1, 80, 620).cuda()
stride = 256 # value from waveglow upsample
n_group = 8
z_size2 = (mel.size(2)*stride)//n_group
z = torch.randn(1, n_group, z_size2).cuda()
if args.fp16:
mel = mel.half()
z = z.half()
with torch.no_grad():
# run inference to force calculation of inverses
waveglow.infer(mel, sigma=args.sigma_infer)
# export to ONNX
if args.fp16:
waveglow = waveglow.half()
fType = types.MethodType
waveglow.forward = fType(infer_onnx, waveglow)
opset_version = 12
torch.onnx.export(waveglow, (mel, z), args.output+"/"+"waveglow.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel", "z"],
output_names=["audio"],
dynamic_axes={"mel": {0: "batch_size", 2: "mel_seq"},
"z": {0: "batch_size", 2: "z_seq"},
"audio": {0: "batch_size", 1: "audio_seq"}})
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
export_onnx(parser, args)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/exports/export_waveglow_onnx.py |
# *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import argparse
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument("--trtis_model_name",
type=str,
default='tacotron2',
help="exports to appropriate directory for TRTIS")
parser.add_argument("--trtis_model_version",
type=int,
default=1,
help="exports to appropriate directory for TRTIS")
parser.add_argument("--trtis_max_batch_size",
type=int,
default=1,
help="Specifies the 'max_batch_size' in the TRTIS model config.\
See the TRTIS documentation for more info.")
parser.add_argument('--fp16', action='store_true',
help='inference with mixed precision')
return parser
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 TRTIS config exporter')
parser = parse_args(parser)
args = parser.parse_args()
# prepare repository
model_folder = os.path.join('./trtis_repo', args.trtis_model_name)
version_folder = os.path.join(model_folder, str(args.trtis_model_version))
if not os.path.exists(version_folder):
os.makedirs(version_folder)
# build the config for TRTIS
config_filename = os.path.join(model_folder, "config.pbtxt")
config_template = r"""
name: "{model_name}"
platform: "pytorch_libtorch"
max_batch_size: {max_batch_size}
input [
{{
name: "sequence__0"
data_type: TYPE_INT64
dims: [-1]
}},
{{
name: "input_lengths__1"
data_type: TYPE_INT64
dims: [1]
reshape: {{ shape: [ ] }}
}}
]
output [
{{
name: "mel_outputs_postnet__0"
data_type: {fp_type}
dims: [80,-1]
}},
{{
name: "mel_lengths__1"
data_type: TYPE_INT32
dims: [1]
reshape: {{ shape: [ ] }}
}},
{{
name: "alignments__2"
data_type: {fp_type}
dims: [-1,-1]
}}
]
"""
config_values = {
"model_name": args.trtis_model_name,
"max_batch_size": args.trtis_max_batch_size,
"fp_type": "TYPE_FP16" if args.fp16 else "TYPE_FP32"
}
    with open(config_filename, "w") as file:
final_config_str = config_template.format_map(config_values)
file.write(final_config_str)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/exports/export_tacotron2_ts_config.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import sys
sys.path.append('./')
from tacotron2.text import text_to_sequence
import models
import torch
import argparse
import numpy as np
from scipy.io.wavfile import write
from inference import checkpoint_from_distributed, unwrap_distributed, MeasureTime, prepare_input_sequence, load_and_setup_model
from inference_trt import infer_tacotron2_trt, infer_waveglow_trt
from trt_utils import load_engine
import tensorrt as trt
import time
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from apex import amp
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--encoder', type=str, required=True,
help='full path to the Encoder engine')
parser.add_argument('--decoder', type=str, required=True,
help='full path to the DecoderIter engine')
parser.add_argument('--postnet', type=str, required=True,
help='full path to the Postnet engine')
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow engine')
parser.add_argument('--waveglow-ckpt', type=str, default="",
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--num-iters', type=int, default=10,
help='Number of iterations')
parser.add_argument('-il', '--input-length', type=int, default=64,
help='Input length')
parser.add_argument('-bs', '--batch-size', type=int, default=1,
help='Batch size')
return parser
def print_stats(measurements_all):
print(np.mean(measurements_all['latency'][1:]),
np.mean(measurements_all['throughput'][1:]),
np.mean(measurements_all['pre_processing'][1:]),
np.mean(measurements_all['type_conversion'][1:])+
np.mean(measurements_all['storage'][1:])+
np.mean(measurements_all['data_transfer'][1:]),
np.mean(measurements_all['num_mels_per_audio'][1:]))
throughput = measurements_all['throughput']
preprocessing = measurements_all['pre_processing']
type_conversion = measurements_all['type_conversion']
storage = measurements_all['storage']
data_transfer = measurements_all['data_transfer']
postprocessing = [sum(p) for p in zip(type_conversion,storage,data_transfer)]
latency = measurements_all['latency']
num_mels_per_audio = measurements_all['num_mels_per_audio']
latency.sort()
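    # Empirical latency percentiles: after sorting, the largest value within the
    # lowest p fraction of measurements approximates the p-th percentile
    # (e.g. cf_95 is roughly the 95th-percentile latency).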
cf_50 = max(latency[:int(len(latency)*0.50)])
cf_90 = max(latency[:int(len(latency)*0.90)])
cf_95 = max(latency[:int(len(latency)*0.95)])
cf_99 = max(latency[:int(len(latency)*0.99)])
cf_100 = max(latency[:int(len(latency)*1.0)])
print("Throughput average (samples/sec) = {:.4f}".format(np.mean(throughput)))
print("Preprocessing average (seconds) = {:.4f}".format(np.mean(preprocessing)))
print("Postprocessing average (seconds) = {:.4f}".format(np.mean(postprocessing)))
print("Number of mels per audio average = {}".format(np.mean(num_mels_per_audio))) #
print("Latency average (seconds) = {:.4f}".format(np.mean(latency)))
print("Latency std (seconds) = {:.4f}".format(np.std(latency)))
print("Latency cl 50 (seconds) = {:.4f}".format(cf_50))
print("Latency cl 90 (seconds) = {:.4f}".format(cf_90))
print("Latency cl 95 (seconds) = {:.4f}".format(cf_95))
print("Latency cl 99 (seconds) = {:.4f}".format(cf_99))
print("Latency cl 100 (seconds) = {:.4f}".format(cf_100))
def main():
"""
Launches text to speech (inference).
Inference is executed on a single GPU.
"""
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, unknown_args = parser.parse_known_args()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
for k,v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k:v})
DLLogger.log(step="PARAMETER", data={'model_name':'Tacotron2_PyT'})
measurements_all = {"pre_processing": [],
"tacotron2_encoder_time": [],
"tacotron2_decoder_time": [],
"tacotron2_postnet_time": [],
"tacotron2_latency": [],
"waveglow_latency": [],
"latency": [],
"type_conversion": [],
"data_transfer": [],
"storage": [],
"tacotron2_items_per_sec": [],
"waveglow_items_per_sec": [],
"num_mels_per_audio": [],
"throughput": []}
print("args:", args, unknown_args)
torch.cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
encoder = load_engine(args.encoder, TRT_LOGGER)
decoder_iter = load_engine(args.decoder, TRT_LOGGER)
postnet = load_engine(args.postnet, TRT_LOGGER)
waveglow = load_engine(args.waveglow, TRT_LOGGER)
if args.waveglow_ckpt != "":
# setup denoiser using WaveGlow PyTorch checkpoint
waveglow_ckpt = load_and_setup_model('WaveGlow', parser,
args.waveglow_ckpt,
fp16_run=args.fp16,
cpu_run=False,
forward_is_infer=True)
denoiser = Denoiser(waveglow_ckpt).cuda()
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow_ckpt
torch.cuda.empty_cache()
# create TRT contexts for each engine
encoder_context = encoder.create_execution_context()
decoder_context = decoder_iter.create_execution_context()
postnet_context = postnet.create_execution_context()
waveglow_context = waveglow.create_execution_context()
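    # Benchmark input: a fixed sentence truncated to --input-length characters
    # and replicated --batch-size times.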
texts = ["The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves. The forms of printed letters should be beautiful, and that their arrangement on the page should be reasonable and a help to the shapeliness of the letters themselves."]
texts = [texts[0][:args.input_length]]
texts = texts*args.batch_size
warmup_iters = 3
for iter in range(args.num_iters):
measurements = {}
with MeasureTime(measurements, "pre_processing"):
sequences_padded, input_lengths = prepare_input_sequence(texts)
sequences_padded = sequences_padded.to(torch.int32)
input_lengths = input_lengths.to(torch.int32)
with torch.no_grad():
with MeasureTime(measurements, "latency"):
with MeasureTime(measurements, "tacotron2_latency"):
mel, mel_lengths = infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences_padded, input_lengths, measurements, args.fp16)
with MeasureTime(measurements, "waveglow_latency"):
audios = infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16)
num_mels = mel.size(0)*mel.size(2)
num_samples = audios.size(0)*audios.size(1)
with MeasureTime(measurements, "type_conversion"):
audios = audios.float()
with MeasureTime(measurements, "data_transfer"):
audios = audios.cpu()
with MeasureTime(measurements, "storage"):
audios = audios.numpy()
for i, audio in enumerate(audios):
audio_path = "audio_"+str(i)+".wav"
write(audio_path, args.sampling_rate,
audio[:mel_lengths[i]*args.stft_hop_length])
measurements['tacotron2_items_per_sec'] = num_mels/measurements['tacotron2_latency']
measurements['waveglow_items_per_sec'] = num_samples/measurements['waveglow_latency']
measurements['num_mels_per_audio'] = mel.size(2)
measurements['throughput'] = num_samples/measurements['latency']
if iter >= warmup_iters:
for k,v in measurements.items():
if k in measurements_all.keys():
measurements_all[k].append(v)
DLLogger.log(step=(iter-warmup_iters), data={k: v})
DLLogger.flush()
print_stats(measurements_all)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tensorrt/test_infer_trt.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import tensorrt as trt
def is_dimension_dynamic(dim):
return dim is None or dim <= 0
def is_shape_dynamic(shape):
return any([is_dimension_dynamic(dim) for dim in shape])
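# run_trt_engine executes an engine synchronously on CUDA torch tensors keyed by
# the engine's binding names; a typical call site (see tensorrt/inference_trt.py,
# Tacotron2 encoder) looks like:
#   tensors = {"inputs":  {"sequences": sequences, "sequence_lengths": sequence_lengths},
#              "outputs": {"memory": memory, "lens": lens, "processed_memory": processed_memory}}
#   run_trt_engine(encoder_context, encoder, tensors)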
def run_trt_engine(context, engine, tensors):
bindings = [None]*engine.num_bindings
for name,tensor in tensors['inputs'].items():
idx = engine.get_binding_index(name)
bindings[idx] = tensor.data_ptr()
if engine.is_shape_binding(idx) and is_shape_dynamic(context.get_shape(idx)):
context.set_shape_input(idx, tensor)
elif is_shape_dynamic(engine.get_binding_shape(idx)):
context.set_binding_shape(idx, tensor.shape)
for name,tensor in tensors['outputs'].items():
idx = engine.get_binding_index(name)
bindings[idx] = tensor.data_ptr()
context.execute_v2(bindings=bindings)
def load_engine(engine_filepath, trt_logger):
with open(engine_filepath, "rb") as f, trt.Runtime(trt_logger) as runtime:
engine = runtime.deserialize_cuda_engine(f.read())
return engine
def engine_info(engine_filepath):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
engine = load_engine(engine_filepath, TRT_LOGGER)
binding_template = r"""
{btype} {{
name: "{bname}"
data_type: {dtype}
dims: {dims}
}}"""
type_mapping = {"DataType.HALF": "TYPE_FP16",
"DataType.FLOAT": "TYPE_FP32",
"DataType.INT32": "TYPE_INT32",
"DataType.BOOL" : "TYPE_BOOL"}
print("engine name", engine.name)
print("has_implicit_batch_dimension", engine.has_implicit_batch_dimension)
start_dim = 0 if engine.has_implicit_batch_dimension else 1
print("num_optimization_profiles", engine.num_optimization_profiles)
print("max_batch_size:", engine.max_batch_size)
print("device_memory_size:", engine.device_memory_size)
print("max_workspace_size:", engine.max_workspace_size)
print("num_layers:", engine.num_layers)
for i in range(engine.num_bindings):
btype = "input" if engine.binding_is_input(i) else "output"
bname = engine.get_binding_name(i)
dtype = engine.get_binding_dtype(i)
bdims = engine.get_binding_shape(i)
config_values = {
"btype": btype,
"bname": bname,
"dtype": type_mapping[str(dtype)],
"dims": list(bdims[start_dim:])
}
final_binding_str = binding_template.format_map(config_values)
print(final_binding_str)
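# Example usage of build_engine, with the shape profile used for the WaveGlow
# engine in tensorrt/convert_onnx2trt.py:
#   shapes = [{"name": "mel", "min": (1, 80, 32), "opt": (1, 80, 768), "max": (1, 80, 1664)},
#             {"name": "z", "min": (1, 8, 1024), "opt": (1, 8, 24576), "max": (1, 8, 53248)}]
#   engine = build_engine("waveglow.onnx", shapes=shapes, fp16=True)
#   with open("waveglow_fp16.engine", "wb") as f:
#       f.write(engine.serialize())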
def build_engine(model_file, shapes, max_ws=512*1024*1024, fp16=False):
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
builder = trt.Builder(TRT_LOGGER)
builder.fp16_mode = fp16
config = builder.create_builder_config()
config.max_workspace_size = max_ws
if fp16:
config.flags |= 1 << int(trt.BuilderFlag.FP16)
profile = builder.create_optimization_profile()
for s in shapes:
profile.set_shape(s['name'], min=s['min'], opt=s['opt'], max=s['max'])
config.add_optimization_profile(profile)
explicit_batch = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(explicit_batch)
with trt.OnnxParser(network, TRT_LOGGER) as parser:
with open(model_file, 'rb') as model:
parsed = parser.parse(model.read())
for i in range(parser.num_errors):
print("TensorRT ONNX parser error:", parser.get_error(i))
engine = builder.build_engine(network, config=config)
return engine
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tensorrt/trt_utils.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import tensorrt as trt
import numpy as np
from scipy.io.wavfile import write
import time
import torch
import argparse
import sys
sys.path.append('./')
from tacotron2_common.utils import to_gpu, get_mask_from_lengths
from tacotron2.text import text_to_sequence
from inference import MeasureTime, prepare_input_sequence, load_and_setup_model
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from trt_utils import load_engine, run_trt_engine
from waveglow.denoiser import Denoiser
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, required=True,
                        help='full path to the input text (phrases separated by new lines)')
parser.add_argument('-o', '--output', required=True,
help='output folder to save audio (file per phrase)')
parser.add_argument('--encoder', type=str, required=True,
help='full path to the Encoder engine')
parser.add_argument('--decoder', type=str, required=True,
help='full path to the DecoderIter engine')
parser.add_argument('--postnet', type=str, required=True,
help='full path to the Postnet engine')
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow engine')
parser.add_argument('--waveglow-ckpt', type=str, default="",
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('--log-file', type=str, default='nvlog.json',
help='Filename for logging')
parser.add_argument('-d', '--denoising-strength', default=0.01, type=float)
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--stft-hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
return parser
def init_decoder_inputs(memory, processed_memory, memory_lengths):
device = memory.device
dtype = memory.dtype
bs = memory.size(0)
seq_len = memory.size(1)
attention_rnn_dim = 1024
decoder_rnn_dim = 1024
encoder_embedding_dim = 512
n_mel_channels = 80
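    # These hard-coded sizes mirror the default Tacotron 2 hyperparameters
    # (1024-dim attention/decoder RNNs, 512-dim encoder embedding, 80 mel
    # channels) and must match the dimensions the engines were exported with.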
attention_hidden = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
attention_cell = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
decoder_hidden = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
decoder_cell = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
attention_weights = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_weights_cum = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_context = torch.zeros(bs, encoder_embedding_dim, device=device, dtype=dtype)
mask = get_mask_from_lengths(memory_lengths).to(device)
decoder_input = torch.zeros(bs, n_mel_channels, device=device, dtype=dtype)
return (decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
def init_decoder_outputs(memory, memory_lengths):
device = memory.device
dtype = memory.dtype
bs = memory.size(0)
seq_len = memory.size(1)
attention_rnn_dim = 1024
decoder_rnn_dim = 1024
encoder_embedding_dim = 512
n_mel_channels = 80
attention_hidden = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
attention_cell = torch.zeros(bs, attention_rnn_dim, device=device, dtype=dtype)
decoder_hidden = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
decoder_cell = torch.zeros(bs, decoder_rnn_dim, device=device, dtype=dtype)
attention_weights = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_weights_cum = torch.zeros(bs, seq_len, device=device, dtype=dtype)
attention_context = torch.zeros(bs, encoder_embedding_dim, device=device, dtype=dtype)
decoder_output = torch.zeros(bs, n_mel_channels, device=device, dtype=dtype)
gate_prediction = torch.zeros(bs, 1, device=device, dtype=dtype)
return (attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, decoder_output, gate_prediction)
def init_decoder_tensors(decoder_inputs, decoder_outputs):
decoder_tensors = {
"inputs" : {
'decoder_input': decoder_inputs[0],
'attention_hidden': decoder_inputs[1],
'attention_cell': decoder_inputs[2],
'decoder_hidden': decoder_inputs[3],
'decoder_cell': decoder_inputs[4],
'attention_weights': decoder_inputs[5],
'attention_weights_cum': decoder_inputs[6],
'attention_context': decoder_inputs[7],
'memory': decoder_inputs[8],
'processed_memory': decoder_inputs[9],
'mask': decoder_inputs[10]
},
"outputs" : {
'out_attention_hidden': decoder_outputs[0],
'out_attention_cell': decoder_outputs[1],
'out_decoder_hidden': decoder_outputs[2],
'out_decoder_cell': decoder_outputs[3],
'out_attention_weights': decoder_outputs[4],
'out_attention_weights_cum': decoder_outputs[5],
'out_attention_context': decoder_outputs[6],
'decoder_output': decoder_outputs[7],
'gate_prediction': decoder_outputs[8]
}
}
return decoder_tensors
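# swap_inputs_outputs ping-pongs the pre-allocated state tensors between the
# input and output roles on every decoder step, so the TRT decoder loop reuses
# the same GPU buffers instead of allocating new ones each iteration.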
def swap_inputs_outputs(decoder_inputs, decoder_outputs):
new_decoder_inputs = (decoder_outputs[7], # decoder_output
decoder_outputs[0], # attention_hidden
decoder_outputs[1], # attention_cell
decoder_outputs[2], # decoder_hidden
decoder_outputs[3], # decoder_cell
decoder_outputs[4], # attention_weights
decoder_outputs[5], # attention_weights_cum
decoder_outputs[6], # attention_context
decoder_inputs[8], # memory
decoder_inputs[9], # processed_memory
decoder_inputs[10]) # mask
new_decoder_outputs = (decoder_inputs[1], # attention_hidden
decoder_inputs[2], # attention_cell
decoder_inputs[3], # decoder_hidden
decoder_inputs[4], # decoder_cell
decoder_inputs[5], # attention_weights
decoder_inputs[6], # attention_weights_cum
decoder_inputs[7], # attention_context
decoder_inputs[0], # decoder_input
decoder_outputs[8])# gate_output
return new_decoder_inputs, new_decoder_outputs
def infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences, sequence_lengths, measurements, fp16):
memory = torch.zeros((len(sequence_lengths), sequence_lengths[0], 512)).cuda()
if fp16:
memory = memory.half()
device = memory.device
dtype = memory.dtype
processed_memory = torch.zeros((len(sequence_lengths),sequence_lengths[0],128), device=device, dtype=dtype)
lens = torch.zeros_like(sequence_lengths)
encoder_tensors = {
"inputs" :
{'sequences': sequences, 'sequence_lengths': sequence_lengths},
"outputs" :
{'memory': memory, 'lens': lens, 'processed_memory': processed_memory}
}
print("Running Tacotron2 Encoder")
with MeasureTime(measurements, "tacotron2_encoder_time"):
run_trt_engine(encoder_context, encoder, encoder_tensors)
device = memory.device
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1, device = device), torch.zeros(1, device = device), torch.zeros(1, device = device))
gate_threshold = 0.5
max_decoder_steps = 1664
first_iter = True
decoder_inputs = init_decoder_inputs(memory, processed_memory, sequence_lengths)
decoder_outputs = init_decoder_outputs(memory, sequence_lengths)
print("Running Tacotron2 Decoder")
measurements_decoder = {}
while True:
decoder_tensors = init_decoder_tensors(decoder_inputs, decoder_outputs)
with MeasureTime(measurements_decoder, "step"):
run_trt_engine(decoder_context, decoder_iter, decoder_tensors)
if first_iter:
mel_outputs = torch.unsqueeze(decoder_outputs[7], 2)
gate_outputs = torch.unsqueeze(decoder_outputs[8], 2)
alignments = torch.unsqueeze(decoder_outputs[4], 2)
measurements['tacotron2_decoder_time'] = measurements_decoder['step']
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(decoder_outputs[7], 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(decoder_outputs[8], 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(decoder_outputs[4], 2)), 2)
measurements['tacotron2_decoder_time'] += measurements_decoder['step']
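        # Stop criterion: sigmoid(gate) > gate_threshold marks a sequence as
        # finished; not_finished masks finished items so their mel_lengths stop
        # growing, and decoding ends once every item in the batch has finished.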
dec = torch.le(torch.sigmoid(decoder_outputs[8]), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after",mel_outputs.size(2),"decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_inputs, decoder_outputs = swap_inputs_outputs(decoder_inputs, decoder_outputs)
mel_outputs_postnet = torch.zeros_like(mel_outputs, device=device, dtype=dtype)
postnet_tensors = {
"inputs" :
{'mel_outputs': mel_outputs},
"outputs" :
{'mel_outputs_postnet': mel_outputs_postnet}
}
print("Running Tacotron2 Postnet")
with MeasureTime(measurements, "tacotron2_postnet_time"):
run_trt_engine(postnet_context, postnet, postnet_tensors)
print("Tacotron2 Postnet done")
return mel_outputs_postnet, mel_lengths
def infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, fp16):
mel_size = mel.size(2)
batch_size = mel.size(0)
stride = 256
n_group = 8
z_size = mel_size*stride
z_size = z_size//n_group
z = torch.randn(batch_size, n_group, z_size).cuda()
audios = torch.zeros(batch_size, mel_size*stride).cuda()
if fp16:
z = z.half()
mel = mel.half()
audios = audios.half()
waveglow_tensors = {
"inputs" :
{'input__0': mel, 'input__1': z},
"outputs" :
{'output__0': audios}
}
print("Running WaveGlow")
with MeasureTime(measurements, "waveglow_time"):
run_trt_engine(waveglow_context, waveglow, waveglow_tensors)
return audios
def main():
parser = argparse.ArgumentParser(
description='TensorRT Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
# initialize CUDA state
torch.cuda.init()
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
encoder = load_engine(args.encoder, TRT_LOGGER)
decoder_iter = load_engine(args.decoder, TRT_LOGGER)
postnet = load_engine(args.postnet, TRT_LOGGER)
waveglow = load_engine(args.waveglow, TRT_LOGGER)
if args.waveglow_ckpt != "":
# setup denoiser using WaveGlow PyTorch checkpoint
waveglow_ckpt = load_and_setup_model('WaveGlow', parser, args.waveglow_ckpt,
True, forward_is_infer=True)
denoiser = Denoiser(waveglow_ckpt).cuda()
# after initialization, we don't need WaveGlow PyTorch checkpoint
# anymore - deleting
del waveglow_ckpt
torch.cuda.empty_cache()
# create TRT contexts for each engine
encoder_context = encoder.create_execution_context()
decoder_context = decoder_iter.create_execution_context()
postnet_context = postnet.create_execution_context()
waveglow_context = waveglow.create_execution_context()
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT,
args.output+'/'+args.log_file),
StdOutBackend(Verbosity.VERBOSE)])
texts = []
    try:
        with open(args.input, 'r') as f:
            texts = f.readlines()
    except IOError:
        print("Could not read file", args.input)
        sys.exit(1)
measurements = {}
sequences, sequence_lengths = prepare_input_sequence(texts)
sequences = sequences.to(torch.int32)
sequence_lengths = sequence_lengths.to(torch.int32)
with MeasureTime(measurements, "latency"):
mel, mel_lengths = infer_tacotron2_trt(encoder, decoder_iter, postnet,
encoder_context, decoder_context, postnet_context,
sequences, sequence_lengths, measurements, args.fp16)
audios = infer_waveglow_trt(waveglow, waveglow_context, mel, measurements, args.fp16)
with encoder_context, decoder_context, postnet_context, waveglow_context:
pass
audios = audios.float()
if args.waveglow_ckpt != "":
with MeasureTime(measurements, "denoiser"):
audios = denoiser(audios, strength=args.denoising_strength).squeeze(1)
for i, audio in enumerate(audios):
audio = audio[:mel_lengths[i]*args.stft_hop_length]
audio = audio/torch.max(torch.abs(audio))
audio_path = args.output + "audio_"+str(i)+"_trt.wav"
write(audio_path, args.sampling_rate, audio.cpu().numpy())
DLLogger.log(step=0, data={"tacotron2_encoder_latency": measurements['tacotron2_encoder_time']})
DLLogger.log(step=0, data={"tacotron2_decoder_latency": measurements['tacotron2_decoder_time']})
DLLogger.log(step=0, data={"tacotron2_postnet_latency": measurements['tacotron2_postnet_time']})
DLLogger.log(step=0, data={"waveglow_latency": measurements['waveglow_time']})
DLLogger.log(step=0, data={"latency": measurements['latency']})
if args.waveglow_ckpt != "":
DLLogger.log(step=0, data={"denoiser": measurements['denoiser']})
DLLogger.flush()
prec = "fp16" if args.fp16 else "fp32"
latency = measurements['latency']
throughput = audios.size(1)/latency
log_data = "1,"+str(sequence_lengths[0].item())+","+prec+","+str(latency)+","+str(throughput)+","+str(mel_lengths[0].item())+"\n"
with open("log_bs1_"+prec+".log", 'a') as f:
f.write(log_data)
if __name__ == "__main__":
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tensorrt/inference_trt.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import argparse
import os
import sys
sys.path.append('./')
from tacotron2_common.utils import ParseFromConfigFile
from inference import load_and_setup_model
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--waveglow', type=str, required=True,
help='full path to the WaveGlow model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported WaveGlow ONNX model')
parser.add_argument('--fp16', action='store_true',
help='inference with AMP')
parser.add_argument('-s', '--sigma-infer', default=0.6, type=float)
parser.add_argument('--config-file', action=ParseFromConfigFile,
type=str, help='Path to configuration file')
return parser
def export_onnx(parser, args):
waveglow = load_and_setup_model('WaveGlow', parser, args.waveglow,
fp16_run=args.fp16, cpu_run=False,
forward_is_infer=False)
    # 80 mel channels, 620 mel-spectrogram frames ~ 7 seconds of speech
mel = torch.randn(1, 80, 620).cuda()
stride = 256 # value from waveglow upsample
n_group = 8
z_size2 = (mel.size(2)*stride)//n_group
z = torch.randn(1, n_group, z_size2).cuda()
if args.fp16:
mel = mel.half()
z = z.half()
with torch.no_grad():
# run inference to force calculation of inverses
waveglow.infer(mel, sigma=args.sigma_infer)
# export to ONNX
if args.fp16:
waveglow = waveglow.half()
waveglow.forward = waveglow.infer_onnx
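    # torch.onnx.export traces forward(), so the ONNX-compatible inference path
    # (infer_onnx) is substituted for the training-time forward pass.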
opset_version = 12
output_path = os.path.join(args.output, "waveglow.onnx")
torch.onnx.export(waveglow, (mel, z), output_path,
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel", "z"],
output_names=["audio"],
dynamic_axes={"mel": {0: "batch_size", 2: "mel_seq"},
"z": {0: "batch_size", 2: "z_seq"},
"audio": {0: "batch_size", 1: "audio_seq"}})
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 Inference')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
export_onnx(parser, args)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tensorrt/convert_waveglow2onnx.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch import nn
from torch.nn import functional as F
import argparse
import sys
sys.path.append('./')
import models
from inference import checkpoint_from_distributed, unwrap_distributed, load_and_setup_model, prepare_input_sequence
from tacotron2_common.utils import to_gpu, get_mask_from_lengths
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('--tacotron2', type=str,
help='full path to the Tacotron2 model checkpoint file')
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory for the exported Tacotron 2 ONNX model')
parser.add_argument('--fp16', action='store_true',
help='Export with half precision to ONNX')
return parser
def encoder_infer(self, x, input_lengths):
device = x.device
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x.to(device))), 0.5, False)
x = x.transpose(1, 2)
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
lens = input_lengths*2
return outputs, lens
class Encoder(torch.nn.Module):
def __init__(self, tacotron2):
super(Encoder, self).__init__()
self.tacotron2 = tacotron2
self.tacotron2.encoder.lstm.flatten_parameters()
self.infer = encoder_infer
def forward(self, sequence, sequence_lengths):
embedded_inputs = self.tacotron2.embedding(sequence).transpose(1, 2)
memory, lens = self.infer(self.tacotron2.encoder, embedded_inputs, sequence_lengths)
processed_memory = self.tacotron2.decoder.attention_layer.memory_layer(memory)
return memory, processed_memory, lens
class Postnet(torch.nn.Module):
def __init__(self, tacotron2):
super(Postnet, self).__init__()
self.tacotron2 = tacotron2
def forward(self, mel_outputs):
mel_outputs_postnet = self.tacotron2.postnet(mel_outputs)
return mel_outputs + mel_outputs_postnet
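# The decoder's LSTMCell modules are re-expressed as single-layer nn.LSTM modules
# for export: nn.LSTM maps onto the standard ONNX LSTM operator, and the
# parameters are simply copied over unchanged.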
def lstmcell2lstm_params(lstm_mod, lstmcell_mod):
lstm_mod.weight_ih_l0 = torch.nn.Parameter(lstmcell_mod.weight_ih)
lstm_mod.weight_hh_l0 = torch.nn.Parameter(lstmcell_mod.weight_hh)
lstm_mod.bias_ih_l0 = torch.nn.Parameter(lstmcell_mod.bias_ih)
lstm_mod.bias_hh_l0 = torch.nn.Parameter(lstmcell_mod.bias_hh)
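# Tacotron 2 keeps prenet dropout active at inference time; for export the
# Bernoulli mask is materialized explicitly (keep probability 0.5, scaled by
# 2.0 = 1/0.5) instead of calling F.dropout in training mode.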
def prenet_infer(self, x):
x1 = x[:]
for linear in self.layers:
x1 = F.relu(linear(x1))
x0 = x1[0].unsqueeze(0)
mask = torch.le(torch.rand(256, device='cuda').to(x.dtype), 0.5).to(x.dtype)
mask = mask.expand(x1.size(0), x1.size(1))
x1 = x1*mask*2.0
return x1
class DecoderIter(torch.nn.Module):
def __init__(self, tacotron2):
super(DecoderIter, self).__init__()
self.tacotron2 = tacotron2
dec = tacotron2.decoder
self.p_attention_dropout = dec.p_attention_dropout
self.p_decoder_dropout = dec.p_decoder_dropout
self.prenet = dec.prenet
self.prenet.infer = prenet_infer
self.attention_rnn = nn.LSTM(dec.prenet_dim + dec.encoder_embedding_dim,
dec.attention_rnn_dim, 1)
lstmcell2lstm_params(self.attention_rnn, dec.attention_rnn)
self.attention_rnn.flatten_parameters()
self.attention_layer = dec.attention_layer
self.decoder_rnn = nn.LSTM(dec.attention_rnn_dim + dec.encoder_embedding_dim,
dec.decoder_rnn_dim, 1)
lstmcell2lstm_params(self.decoder_rnn, dec.decoder_rnn)
self.decoder_rnn.flatten_parameters()
self.linear_projection = dec.linear_projection
self.gate_layer = dec.gate_layer
def decode(self, decoder_input, in_attention_hidden, in_attention_cell,
in_decoder_hidden, in_decoder_cell, in_attention_weights,
in_attention_weights_cum, in_attention_context, memory,
processed_memory, mask):
cell_input = torch.cat((decoder_input, in_attention_context), -1)
_, (out_attention_hidden, out_attention_cell) = self.attention_rnn(
cell_input.unsqueeze(0), (in_attention_hidden.unsqueeze(0),
in_attention_cell.unsqueeze(0)))
out_attention_hidden = out_attention_hidden.squeeze(0)
out_attention_cell = out_attention_cell.squeeze(0)
out_attention_hidden = F.dropout(
out_attention_hidden, self.p_attention_dropout, False)
attention_weights_cat = torch.cat(
(in_attention_weights.unsqueeze(1),
in_attention_weights_cum.unsqueeze(1)), dim=1)
out_attention_context, out_attention_weights = self.attention_layer(
out_attention_hidden, memory, processed_memory,
attention_weights_cat, mask)
out_attention_weights_cum = in_attention_weights_cum + out_attention_weights
decoder_input_tmp = torch.cat(
(out_attention_hidden, out_attention_context), -1)
_, (out_decoder_hidden, out_decoder_cell) = self.decoder_rnn(
decoder_input_tmp.unsqueeze(0), (in_decoder_hidden.unsqueeze(0),
in_decoder_cell.unsqueeze(0)))
out_decoder_hidden = out_decoder_hidden.squeeze(0)
out_decoder_cell = out_decoder_cell.squeeze(0)
out_decoder_hidden = F.dropout(
out_decoder_hidden, self.p_decoder_dropout, False)
decoder_hidden_attention_context = torch.cat(
(out_decoder_hidden, out_attention_context), 1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return (decoder_output, gate_prediction, out_attention_hidden,
out_attention_cell, out_decoder_hidden, out_decoder_cell,
out_attention_weights, out_attention_weights_cum, out_attention_context)
# @torch.jit.script
def forward(self,
decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask):
decoder_input1 = self.prenet.infer(self.prenet, decoder_input)
outputs = self.decode(decoder_input1,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
return outputs
def test_inference(encoder, decoder_iter, postnet):
encoder.eval()
decoder_iter.eval()
postnet.eval()
sys.path.append('./tensorrt')
from inference_trt import init_decoder_inputs
texts = ["Hello World, good day."]
sequences, sequence_lengths = prepare_input_sequence(texts)
measurements = {}
print("Running Tacotron2 Encoder")
with torch.no_grad():
memory, processed_memory, lens = encoder(sequences, sequence_lengths)
print("Running Tacotron2 Decoder")
device = memory.device
dtype = memory.dtype
mel_lengths = torch.zeros([memory.size(0)], dtype=torch.int32, device = device)
not_finished = torch.ones([memory.size(0)], dtype=torch.int32, device = device)
mel_outputs, gate_outputs, alignments = (torch.zeros(1), torch.zeros(1), torch.zeros(1))
gate_threshold = 0.6
max_decoder_steps = 1000
first_iter = True
(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory,
mask) = init_decoder_inputs(memory, processed_memory, sequence_lengths)
while True:
with torch.no_grad():
(mel_output, gate_output,
attention_hidden, attention_cell,
decoder_hidden, decoder_cell,
attention_weights, attention_weights_cum,
attention_context) = decoder_iter(decoder_input, attention_hidden, attention_cell, decoder_hidden,
decoder_cell, attention_weights, attention_weights_cum,
attention_context, memory, processed_memory, mask)
if first_iter:
mel_outputs = torch.unsqueeze(mel_output, 2)
gate_outputs = torch.unsqueeze(gate_output, 2)
alignments = torch.unsqueeze(attention_weights, 2)
first_iter = False
else:
mel_outputs = torch.cat((mel_outputs, torch.unsqueeze(mel_output, 2)), 2)
gate_outputs = torch.cat((gate_outputs, torch.unsqueeze(gate_output, 2)), 2)
alignments = torch.cat((alignments, torch.unsqueeze(attention_weights, 2)), 2)
dec = torch.le(torch.sigmoid(gate_output), gate_threshold).to(torch.int32).squeeze(1)
not_finished = not_finished*dec
mel_lengths += not_finished
if torch.sum(not_finished) == 0:
print("Stopping after ",mel_outputs.size(2)," decoder steps")
break
if mel_outputs.size(2) == max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
print("Running Tacotron2 PostNet")
with torch.no_grad():
mel_outputs_postnet = postnet(mel_outputs)
return mel_outputs_postnet
def main():
parser = argparse.ArgumentParser(
description='PyTorch Tacotron 2 export to TRT')
parser = parse_args(parser)
args, _ = parser.parse_known_args()
tacotron2 = load_and_setup_model('Tacotron2', parser, args.tacotron2,
fp16_run=args.fp16, cpu_run=False)
opset_version = 10
sequences = torch.randint(low=0, high=148, size=(1,50),
dtype=torch.long).cuda()
sequence_lengths = torch.IntTensor([sequences.size(1)]).cuda().long()
dummy_input = (sequences, sequence_lengths)
encoder = Encoder(tacotron2)
encoder.eval()
with torch.no_grad():
encoder(*dummy_input)
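    # dynamic_axes below marks the text/memory sequence-length dimensions as
    # variable, so a single exported encoder handles any input length.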
torch.onnx.export(encoder, dummy_input, args.output+"/"+"encoder.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["sequences", "sequence_lengths"],
output_names=["memory", "processed_memory", "lens"],
dynamic_axes={"sequences": {1: "text_seq"},
"memory": {1: "mem_seq"},
"processed_memory": {1: "mem_seq"}
})
decoder_iter = DecoderIter(tacotron2)
memory = torch.randn((1,sequence_lengths[0],512)).cuda() #encoder_outputs
if args.fp16:
memory = memory.half()
memory_lengths = sequence_lengths
# initialize decoder states for dummy_input
decoder_input = tacotron2.decoder.get_go_frame(memory)
mask = get_mask_from_lengths(memory_lengths)
(attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
processed_memory) = tacotron2.decoder.initialize_decoder_states(memory)
dummy_input = (decoder_input,
attention_hidden,
attention_cell,
decoder_hidden,
decoder_cell,
attention_weights,
attention_weights_cum,
attention_context,
memory,
processed_memory,
mask)
    decoder_iter.eval()
with torch.no_grad():
decoder_iter(*dummy_input)
torch.onnx.export(decoder_iter, dummy_input, args.output+"/"+"decoder_iter.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["decoder_input",
"attention_hidden",
"attention_cell",
"decoder_hidden",
"decoder_cell",
"attention_weights",
"attention_weights_cum",
"attention_context",
"memory",
"processed_memory",
"mask"],
output_names=["decoder_output",
"gate_prediction",
"out_attention_hidden",
"out_attention_cell",
"out_decoder_hidden",
"out_decoder_cell",
"out_attention_weights",
"out_attention_weights_cum",
"out_attention_context"],
dynamic_axes={"attention_weights" : {1: "seq_len"},
"attention_weights_cum" : {1: "seq_len"},
"memory" : {1: "seq_len"},
"processed_memory" : {1: "seq_len"},
"mask" : {1: "seq_len"},
"out_attention_weights" : {1: "seq_len"},
"out_attention_weights_cum" : {1: "seq_len"}
})
postnet = Postnet(tacotron2)
dummy_input = torch.randn((1,80,620)).cuda()
if args.fp16:
dummy_input = dummy_input.half()
torch.onnx.export(postnet, dummy_input, args.output+"/"+"postnet.onnx",
opset_version=opset_version,
do_constant_folding=True,
input_names=["mel_outputs"],
output_names=["mel_outputs_postnet"],
dynamic_axes={"mel_outputs": {2: "mel_seq"},
"mel_outputs_postnet": {2: "mel_seq"}})
mel = test_inference(encoder, decoder_iter, postnet)
torch.save(mel, "mel.pt")
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tensorrt/convert_tacotron22onnx.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import pycuda.driver as cuda
import pycuda.autoinit
import onnx
import argparse
import tensorrt as trt
import os
import sys
sys.path.append('./')
from trt_utils import build_engine
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-o', '--output', required=True,
help='output folder to save audio (file per phrase)')
parser.add_argument('--encoder', type=str, default="",
help='full path to the Encoder ONNX')
parser.add_argument('--decoder', type=str, default="",
help='full path to the DecoderIter ONNX')
parser.add_argument('--postnet', type=str, default="",
help='full path to the Postnet ONNX')
parser.add_argument('--waveglow', type=str, default="",
help='full path to the WaveGlow ONNX')
parser.add_argument('--fp16', action='store_true',
help='inference with FP16')
return parser
def main():
parser = argparse.ArgumentParser(
description='Export from ONNX to TensorRT for Tacotron 2 and WaveGlow')
parser = parse_args(parser)
args = parser.parse_args()
engine_prec = "_fp16" if args.fp16 else "_fp32"
# Encoder
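    # Each entry below describes one dynamic input of a TensorRT optimization
    # profile: the engine accepts any shape between "min" and "max" and is
    # tuned for the "opt" shape.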
shapes=[{"name": "sequences", "min": (1,4), "opt": (1,128), "max": (1,256)},
{"name": "sequence_lengths", "min": (1,), "opt": (1,), "max": (1,)}]
if args.encoder != "":
print("Building Encoder ...")
encoder_engine = build_engine(args.encoder, shapes=shapes, fp16=args.fp16)
if encoder_engine is not None:
with open(args.output+"/"+"encoder"+engine_prec+".engine", 'wb') as f:
f.write(encoder_engine.serialize())
else:
print("Failed to build engine from", args.encoder)
sys.exit()
# DecoderIter
shapes=[{"name": "decoder_input", "min": (1,80), "opt": (1,80), "max": (1,80)},
{"name": "attention_hidden", "min": (1,1024), "opt": (1,1024), "max": (1,1024)},
{"name": "attention_cell", "min": (1,1024), "opt": (1,1024), "max": (1,1024)},
{"name": "decoder_hidden", "min": (1,1024), "opt": (1,1024), "max": (1,1024)},
{"name": "decoder_cell", "min": (1,1024), "opt": (1,1024), "max": (1,1024)},
{"name": "attention_weights", "min": (1,4), "opt": (1,128), "max": (1,256)},
{"name": "attention_weights_cum", "min": (1,4), "opt": (1,128), "max": (1,256)},
{"name": "attention_context", "min": (1,512), "opt": (1,512), "max": (1,512)},
{"name": "memory", "min": (1,4,512), "opt": (1,128,512), "max": (1,256,512)},
{"name": "processed_memory", "min": (1,4,128), "opt": (1,128,128), "max": (1,256,128)},
{"name": "mask", "min": (1,4), "opt": (1,128), "max": (1,256)}]
if args.decoder != "":
print("Building Decoder ...")
decoder_iter_engine = build_engine(args.decoder, shapes=shapes, fp16=args.fp16)
if decoder_iter_engine is not None:
with open(args.output+"/"+"decoder_iter"+engine_prec+".engine", 'wb') as f:
f.write(decoder_iter_engine.serialize())
else:
print("Failed to build engine from", args.decoder)
sys.exit()
# Postnet
shapes=[{"name": "mel_outputs", "min": (1,80,32), "opt": (1,80,768), "max": (1,80,1664)}]
if args.postnet != "":
print("Building Postnet ...")
postnet_engine = build_engine(args.postnet, shapes=shapes, fp16=args.fp16)
if postnet_engine is not None:
with open(args.output+"/"+"postnet"+engine_prec+".engine", 'wb') as f:
f.write(postnet_engine.serialize())
else:
print("Failed to build engine from", args.postnet)
sys.exit()
# WaveGlow
shapes=[{"name": "mel", "min": (1,80,32), "opt": (1,80,768), "max": (1,80,1664)},
{"name": "z", "min": (1,8,1024), "opt": (1,8,24576), "max": (1,8,53248)}]
if args.waveglow != "":
print("Building WaveGlow ...")
waveglow_engine = build_engine(args.waveglow, shapes=shapes, fp16=args.fp16)
if waveglow_engine is not None:
engine_path = os.path.join(args.output, "waveglow"+engine_prec+".engine")
with open(engine_path, 'wb') as f:
f.write(waveglow_engine.serialize())
else:
print("Failed to build engine from", args.waveglow)
sys.exit()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/Tacotron2/tensorrt/convert_onnx2trt.py |