# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.pnasnet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets.nasnet import pnasnet
slim = tf.contrib.slim
class PNASNetTest(tf.test.TestCase):
def testBuildLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
logits, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
auxlogits = end_points['AuxLogits']
predictions = end_points['Predictions']
self.assertListEqual(auxlogits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertListEqual(predictions.get_shape().as_list(),
[batch_size, num_classes])
def testBuildNonExistingLayerLargeModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 331, 331, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
pnasnet.build_pnasnet_large(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildNonExistingLayerMobileModel(self):
"""Tests that the model is built correctly without unnecessary layers."""
inputs = tf.random_uniform((5, 224, 224, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
pnasnet.build_pnasnet_mobile(inputs, 1000)
vars_names = [x.op.name for x in tf.trainable_variables()]
self.assertIn('cell_stem_0/1x1/weights', vars_names)
self.assertNotIn('cell_stem_1/comb_iter_0/right/1x1/weights', vars_names)
def testBuildPreLogitsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
net, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 4320])
def testBuildPreLogitsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = None
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
net, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
self.assertFalse('AuxLogits' in end_points)
self.assertFalse('Predictions' in end_points)
self.assertTrue(net.op.name.startswith('final_layer/Mean'))
self.assertListEqual(net.get_shape().as_list(), [batch_size, 1080])
def testAllEndPointsShapesLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes)
endpoints_shapes = {'Stem': [batch_size, 42, 42, 540],
'Cell_0': [batch_size, 42, 42, 1080],
'Cell_1': [batch_size, 42, 42, 1080],
'Cell_2': [batch_size, 42, 42, 1080],
'Cell_3': [batch_size, 42, 42, 1080],
'Cell_4': [batch_size, 21, 21, 2160],
'Cell_5': [batch_size, 21, 21, 2160],
'Cell_6': [batch_size, 21, 21, 2160],
'Cell_7': [batch_size, 21, 21, 2160],
'Cell_8': [batch_size, 11, 11, 4320],
'Cell_9': [batch_size, 11, 11, 4320],
'Cell_10': [batch_size, 11, 11, 4320],
'Cell_11': [batch_size, 11, 11, 4320],
'global_pool': [batch_size, 4320],
# Logits and predictions
'AuxLogits': [batch_size, 1000],
'Predictions': [batch_size, 1000],
'Logits': [batch_size, 1000],
}
self.assertEqual(len(end_points), 17)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testAllEndPointsShapesMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(inputs, num_classes)
endpoints_shapes = {
'Stem': [batch_size, 28, 28, 135],
'Cell_0': [batch_size, 28, 28, 270],
'Cell_1': [batch_size, 28, 28, 270],
'Cell_2': [batch_size, 28, 28, 270],
'Cell_3': [batch_size, 14, 14, 540],
'Cell_4': [batch_size, 14, 14, 540],
'Cell_5': [batch_size, 14, 14, 540],
'Cell_6': [batch_size, 7, 7, 1080],
'Cell_7': [batch_size, 7, 7, 1080],
'Cell_8': [batch_size, 7, 7, 1080],
'global_pool': [batch_size, 1080],
# Logits and predictions
'AuxLogits': [batch_size, num_classes],
'Predictions': [batch_size, num_classes],
'Logits': [batch_size, num_classes],
}
self.assertEqual(len(end_points), 14)
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
tf.logging.info('Endpoint name: {}'.format(endpoint_name))
expected_shape = endpoints_shapes[endpoint_name]
self.assertIn(endpoint_name, end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testNoAuxHeadLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(inputs, num_classes,
config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testNoAuxHeadMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for use_aux_head in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_aux_head', int(use_aux_head))
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertEqual('AuxLogits' in end_points, use_aux_head)
def testOverrideHParamsLargeModel(self):
batch_size = 5
height, width = 331, 331
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.large_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_large_arg_scope()):
_, end_points = pnasnet.build_pnasnet_large(
inputs, num_classes, config=config)
self.assertListEqual(
end_points['Stem'].shape.as_list(), [batch_size, 540, 42, 42])
def testOverrideHParamsMobileModel(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
tf.train.create_global_step()
config = pnasnet.mobile_imagenet_config()
config.set_hparam('data_format', 'NCHW')
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, end_points = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
self.assertListEqual(end_points['Stem'].shape.as_list(),
[batch_size, 135, 28, 28])
def testUseBoundedActivationMobileModel(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
for use_bounded_activation in (True, False):
tf.reset_default_graph()
inputs = tf.random_uniform((batch_size, height, width, 3))
config = pnasnet.mobile_imagenet_config()
config.set_hparam('use_bounded_activation', use_bounded_activation)
with slim.arg_scope(pnasnet.pnasnet_mobile_arg_scope()):
_, _ = pnasnet.build_pnasnet_mobile(
inputs, num_classes, config=config)
for node in tf.get_default_graph().as_graph_def().node:
if node.op.startswith('Relu'):
self.assertEqual(node.op == 'Relu6', use_bounded_activation)
if __name__ == '__main__':
tf.test.main()
# Source file: TensorFlow/Detection/SSD/models/research/slim/nets/nasnet/pnasnet_test.py (DeepLearningExamples-master)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition for the NASNet classification networks.
Paper: https://arxiv.org/abs/1707.07012
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import tensorflow as tf
from nets.nasnet import nasnet_utils
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
# Notes for training NASNet Cifar Model
# -------------------------------------
# batch_size: 32
# learning rate: 0.025
# cosine (single period) learning rate decay
# auxiliary head loss weighting: 0.4
# clip global norm of all gradients by 5
def cifar_config():
return tf.contrib.training.HParams(
stem_multiplier=3.0,
drop_path_keep_prob=0.6,
num_cells=18,
use_aux_head=1,
num_conv_filters=32,
dense_dropout_keep_prob=1.0,
filter_scaling_rate=2.0,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
# 600 epochs with a batch size of 32
# This is used for the drop path probabilities since it needs to increase
# the drop out probability over the course of training.
total_training_steps=937500,
use_bounded_activation=False,
)
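# Hedged sketch (not called anywhere in this module): the single-period cosine
# schedule from the Cifar training notes above, decayed over the 937500 steps
# that cifar_config() assumes. The helper name and the choice of
# tf.train.cosine_decay are illustrative assumptions, not the published setup.
def _example_cifar_cosine_learning_rate(global_step):
  """Illustrative cosine learning rate matching the notes above."""
  return tf.train.cosine_decay(0.025, global_step, decay_steps=937500)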
# Notes for training large NASNet model on ImageNet
# -------------------------------------
# batch size (per replica): 16
# learning rate: 0.015 * 100
# learning rate decay factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 100 replicas
# auxiliary head loss weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def large_imagenet_config():
return tf.contrib.training.HParams(
stem_multiplier=3.0,
dense_dropout_keep_prob=0.5,
num_cells=18,
filter_scaling_rate=2.0,
num_conv_filters=168,
drop_path_keep_prob=0.7,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=1,
total_training_steps=250000,
use_bounded_activation=False,
)
# Notes for training the mobile NASNet ImageNet model
# -------------------------------------
# batch size (per replica): 32
# learning rate: 0.04 * 50
# learning rate scaling factor: 0.97
# num epochs per decay: 2.4
# sync sgd with 50 replicas
# auxiliary head weighting: 0.4
# label smoothing: 0.1
# clip global norm of all gradients by 10
def mobile_imagenet_config():
return tf.contrib.training.HParams(
stem_multiplier=1.0,
dense_dropout_keep_prob=0.5,
num_cells=12,
filter_scaling_rate=2.0,
drop_path_keep_prob=1.0,
num_conv_filters=44,
use_aux_head=1,
num_reduction_layers=2,
data_format='NHWC',
skip_reduction_layer_input=0,
total_training_steps=250000,
use_bounded_activation=False,
)
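# Hedged sketch: the HParams returned by the config functions above can be
# tweaked before building a model, mirroring what the slim tests do with
# set_hparam; disabling the auxiliary head here is purely illustrative.
def _example_mobile_config_without_aux_head():
  config = mobile_imagenet_config()
  config.set_hparam('use_aux_head', 0)
  return config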
def _update_hparams(hparams, is_training):
"""Update hparams for given is_training option."""
if not is_training:
hparams.set_hparam('drop_path_keep_prob', 1.0)
def nasnet_cifar_arg_scope(weight_decay=5e-4,
batch_norm_decay=0.9,
batch_norm_epsilon=1e-5):
"""Defines the default arg scope for the NASNet-A Cifar model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Cifar Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_mobile_arg_scope(weight_decay=4e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Mobile ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Mobile Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def nasnet_large_arg_scope(weight_decay=5e-5,
batch_norm_decay=0.9997,
batch_norm_epsilon=1e-3):
"""Defines the default arg scope for the NASNet-A Large ImageNet model.
Args:
weight_decay: The weight decay to use for regularizing the model.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
Returns:
An `arg_scope` to use for the NASNet Large Model.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
'scale': True,
'fused': True,
}
weights_regularizer = tf.contrib.layers.l2_regularizer(weight_decay)
weights_initializer = tf.contrib.layers.variance_scaling_initializer(
mode='FAN_OUT')
with arg_scope([slim.fully_connected, slim.conv2d, slim.separable_conv2d],
weights_regularizer=weights_regularizer,
weights_initializer=weights_initializer):
with arg_scope([slim.fully_connected],
activation_fn=None, scope='FC'):
with arg_scope([slim.conv2d, slim.separable_conv2d],
activation_fn=None, biases_initializer=None):
with arg_scope([slim.batch_norm], **batch_norm_params) as sc:
return sc
def _build_aux_head(net, end_points, num_classes, hparams, scope):
"""Auxiliary head used for all models across all datasets."""
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
with tf.variable_scope(scope):
aux_logits = tf.identity(net)
with tf.variable_scope('aux_logits'):
aux_logits = slim.avg_pool2d(
aux_logits, [5, 5], stride=3, padding='VALID')
aux_logits = slim.conv2d(aux_logits, 128, [1, 1], scope='proj')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn0')
aux_logits = activation_fn(aux_logits)
# Shape of feature map before the final layer.
shape = aux_logits.shape
if hparams.data_format == 'NHWC':
shape = shape[1:3]
else:
shape = shape[2:4]
aux_logits = slim.conv2d(aux_logits, 768, shape, padding='VALID')
aux_logits = slim.batch_norm(aux_logits, scope='aux_bn1')
aux_logits = activation_fn(aux_logits)
aux_logits = tf.contrib.layers.flatten(aux_logits)
aux_logits = slim.fully_connected(aux_logits, num_classes)
end_points['AuxLogits'] = aux_logits
def _imagenet_stem(inputs, hparams, stem_cell, current_step=None):
"""Stem used for models trained on ImageNet."""
num_stem_cells = 2
# 149 x 149 x 32
num_stem_filters = int(32 * hparams.stem_multiplier)
net = slim.conv2d(
inputs, num_stem_filters, [3, 3], stride=2, scope='conv0',
padding='VALID')
net = slim.batch_norm(net, scope='conv0_bn')
# Run the reduction cells
cell_outputs = [None, net]
filter_scaling = 1.0 / (hparams.filter_scaling_rate**num_stem_cells)
for cell_num in range(num_stem_cells):
net = stem_cell(
net,
scope='cell_stem_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=cell_num,
current_step=current_step)
cell_outputs.append(net)
filter_scaling *= hparams.filter_scaling_rate
return net, cell_outputs
def _cifar_stem(inputs, hparams):
"""Stem used for models trained on Cifar."""
num_stem_filters = int(hparams.num_conv_filters * hparams.stem_multiplier)
net = slim.conv2d(
inputs,
num_stem_filters,
3,
scope='l1_stem_3x3')
net = slim.batch_norm(net, scope='l1_stem_bn')
return net, [None, net]
def build_nasnet_cifar(images, num_classes,
is_training=True,
config=None,
current_step=None):
"""Build NASNet model for the Cifar Dataset."""
hparams = cifar_config() if config is None else copy.deepcopy(config)
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='cifar',
current_step=current_step)
build_nasnet_cifar.default_image_size = 32
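# Hedged usage sketch (not exercised by this module): builds the Cifar model
# under its default arg scope. `images` is assumed to be a [batch, 32, 32, 3]
# tensor, matching default_image_size above, and num_classes=10 is an
# illustrative CIFAR-10 assumption.
def _example_build_nasnet_cifar(images, is_training=True):
  with arg_scope(nasnet_cifar_arg_scope()):
    return build_nasnet_cifar(images, num_classes=10, is_training=is_training)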
def build_nasnet_mobile(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Mobile model for the ImageNet Dataset."""
hparams = (mobile_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_mobile.default_image_size = 224
def build_nasnet_large(images, num_classes,
is_training=True,
final_endpoint=None,
config=None,
current_step=None):
"""Build NASNet Large model for the ImageNet Dataset."""
hparams = (large_imagenet_config() if config is None
else copy.deepcopy(config))
_update_hparams(hparams, is_training)
if tf.test.is_gpu_available() and hparams.data_format == 'NHWC':
tf.logging.info('A GPU is available on the machine, consider using NCHW '
'data format for increased speed on GPU.')
if hparams.data_format == 'NCHW':
images = tf.transpose(images, [0, 3, 1, 2])
# Calculate the total number of cells in the network
# Add 2 for the reduction cells
total_num_cells = hparams.num_cells + 2
# If ImageNet, then add an additional two for the stem cells
total_num_cells += 2
normal_cell = nasnet_utils.NasNetANormalCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
reduction_cell = nasnet_utils.NasNetAReductionCell(
hparams.num_conv_filters, hparams.drop_path_keep_prob,
total_num_cells, hparams.total_training_steps,
hparams.use_bounded_activation)
with arg_scope([slim.dropout, nasnet_utils.drop_path, slim.batch_norm],
is_training=is_training):
with arg_scope([slim.avg_pool2d,
slim.max_pool2d,
slim.conv2d,
slim.batch_norm,
slim.separable_conv2d,
nasnet_utils.factorized_reduction,
nasnet_utils.global_avg_pool,
nasnet_utils.get_channel_index,
nasnet_utils.get_channel_dim],
data_format=hparams.data_format):
return _build_nasnet_base(images,
normal_cell=normal_cell,
reduction_cell=reduction_cell,
num_classes=num_classes,
hparams=hparams,
is_training=is_training,
stem_type='imagenet',
final_endpoint=final_endpoint,
current_step=current_step)
build_nasnet_large.default_image_size = 331
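# Hedged usage sketch mirroring the hparam-override tests in the slim test
# files: switches the large model to NCHW before building it. The 1000-class
# setting is an illustrative assumption.
def _example_build_nasnet_large_nchw(images, is_training=True):
  config = large_imagenet_config()
  config.set_hparam('data_format', 'NCHW')
  with arg_scope(nasnet_large_arg_scope()):
    return build_nasnet_large(images, num_classes=1000,
                              is_training=is_training, config=config)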
def _build_nasnet_base(images,
normal_cell,
reduction_cell,
num_classes,
hparams,
is_training,
stem_type,
final_endpoint=None,
current_step=None):
"""Constructs a NASNet image model."""
end_points = {}
def add_and_check_endpoint(endpoint_name, net):
end_points[endpoint_name] = net
return final_endpoint and (endpoint_name == final_endpoint)
# Find where to place the reduction cells or stride normal cells
reduction_indices = nasnet_utils.calc_reduction_layers(
hparams.num_cells, hparams.num_reduction_layers)
stem_cell = reduction_cell
if stem_type == 'imagenet':
stem = lambda: _imagenet_stem(images, hparams, stem_cell)
elif stem_type == 'cifar':
stem = lambda: _cifar_stem(images, hparams)
else:
raise ValueError('Unknown stem_type: ', stem_type)
net, cell_outputs = stem()
if add_and_check_endpoint('Stem', net): return net, end_points
# Setup for building in the auxiliary head.
aux_head_cell_idxes = []
if len(reduction_indices) >= 2:
aux_head_cell_idxes.append(reduction_indices[1] - 1)
# Run the cells
filter_scaling = 1.0
# true_cell_num accounts for the stem cells
true_cell_num = 2 if stem_type == 'imagenet' else 0
activation_fn = tf.nn.relu6 if hparams.use_bounded_activation else tf.nn.relu
for cell_num in range(hparams.num_cells):
stride = 1
if hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
if cell_num in reduction_indices:
filter_scaling *= hparams.filter_scaling_rate
net = reduction_cell(
net,
scope='reduction_cell_{}'.format(reduction_indices.index(cell_num)),
filter_scaling=filter_scaling,
stride=2,
prev_layer=cell_outputs[-2],
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint(
'Reduction_Cell_{}'.format(reduction_indices.index(cell_num)), net):
return net, end_points
true_cell_num += 1
cell_outputs.append(net)
if not hparams.skip_reduction_layer_input:
prev_layer = cell_outputs[-2]
net = normal_cell(
net,
scope='cell_{}'.format(cell_num),
filter_scaling=filter_scaling,
stride=stride,
prev_layer=prev_layer,
cell_num=true_cell_num,
current_step=current_step)
if add_and_check_endpoint('Cell_{}'.format(cell_num), net):
return net, end_points
true_cell_num += 1
if (hparams.use_aux_head and cell_num in aux_head_cell_idxes and
num_classes and is_training):
aux_net = activation_fn(net)
_build_aux_head(aux_net, end_points, num_classes, hparams,
scope='aux_{}'.format(cell_num))
cell_outputs.append(net)
# Final softmax layer
with tf.variable_scope('final_layer'):
net = activation_fn(net)
net = nasnet_utils.global_avg_pool(net)
if add_and_check_endpoint('global_pool', net) or not num_classes:
return net, end_points
net = slim.dropout(net, hparams.dense_dropout_keep_prob, scope='dropout')
logits = slim.fully_connected(net, num_classes)
if add_and_check_endpoint('Logits', logits):
return net, end_points
predictions = tf.nn.softmax(logits, name='predictions')
if add_and_check_endpoint('Predictions', predictions):
return net, end_points
return logits, end_points
# Source file: TensorFlow/Detection/SSD/models/research/slim/nets/nasnet/nasnet.py (DeepLearningExamples-master)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A custom module for some common operations used by NASNet.
Functions exposed in this file:
- calc_reduction_layers
- get_channel_index
- get_channel_dim
- global_avg_pool
- factorized_reduction
- drop_path
Classes exposed in this file:
- NasNetABaseCell
- NasNetANormalCell
- NasNetAReductionCell
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
arg_scope = tf.contrib.framework.arg_scope
slim = tf.contrib.slim
DATA_FORMAT_NCHW = 'NCHW'
DATA_FORMAT_NHWC = 'NHWC'
INVALID = 'null'
# The cap for tf.clip_by_value. The observed activation distribution suggests
# that the majority of activation values lie in the range [-6, 6].
CLIP_BY_VALUE_CAP = 6
def calc_reduction_layers(num_cells, num_reduction_layers):
"""Figure out what layers should have reductions."""
reduction_layers = []
for pool_num in range(1, num_reduction_layers + 1):
layer_num = (float(pool_num) / (num_reduction_layers + 1)) * num_cells
layer_num = int(layer_num)
reduction_layers.append(layer_num)
return reduction_layers
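# Worked example of the formula above: with num_cells=18 and
# num_reduction_layers=2 (the large ImageNet config), layer_num evaluates to
# int(18 / 3) = 6 and int(2 * 18 / 3) = 12, so calc_reduction_layers(18, 2)
# returns [6, 12].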
@tf.contrib.framework.add_arg_scope
def get_channel_index(data_format=INVALID):
assert data_format != INVALID
axis = 3 if data_format == 'NHWC' else 1
return axis
@tf.contrib.framework.add_arg_scope
def get_channel_dim(shape, data_format=INVALID):
assert data_format != INVALID
assert len(shape) == 4
if data_format == 'NHWC':
return int(shape[3])
elif data_format == 'NCHW':
return int(shape[1])
else:
raise ValueError('Not a valid data_format', data_format)
@tf.contrib.framework.add_arg_scope
def global_avg_pool(x, data_format=INVALID):
"""Average pool away the height and width spatial dimensions of x."""
assert data_format != INVALID
assert data_format in ['NHWC', 'NCHW']
assert x.shape.ndims == 4
if data_format == 'NHWC':
return tf.reduce_mean(x, [1, 2])
else:
return tf.reduce_mean(x, [2, 3])
@tf.contrib.framework.add_arg_scope
def factorized_reduction(net, output_filters, stride, data_format=INVALID):
"""Reduces the shape of net without information loss due to striding."""
assert data_format != INVALID
if stride == 1:
net = slim.conv2d(net, output_filters, 1, scope='path_conv')
net = slim.batch_norm(net, scope='path_bn')
return net
if data_format == 'NHWC':
stride_spec = [1, stride, stride, 1]
else:
stride_spec = [1, 1, stride, stride]
# Skip path 1
path1 = tf.nn.avg_pool(
net, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
path1 = slim.conv2d(path1, int(output_filters / 2), 1, scope='path1_conv')
# Skip path 2
# First pad with 0's on the right and bottom, then shift the filter to
# include those 0's that were added.
if data_format == 'NHWC':
pad_arr = [[0, 0], [0, 1], [0, 1], [0, 0]]
path2 = tf.pad(net, pad_arr)[:, 1:, 1:, :]
concat_axis = 3
else:
pad_arr = [[0, 0], [0, 0], [0, 1], [0, 1]]
path2 = tf.pad(net, pad_arr)[:, :, 1:, 1:]
concat_axis = 1
path2 = tf.nn.avg_pool(
path2, [1, 1, 1, 1], stride_spec, 'VALID', data_format=data_format)
# If odd number of filters, add an additional one to the second path.
final_filter_size = int(output_filters / 2) + int(output_filters % 2)
path2 = slim.conv2d(path2, final_filter_size, 1, scope='path2_conv')
# Concat and apply BN
final_path = tf.concat(values=[path1, path2], axis=concat_axis)
final_path = slim.batch_norm(final_path, scope='final_path_bn')
return final_path
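# Worked example of the reduction above (illustrative shapes): a stride-2 call
# on an NHWC tensor of shape [5, 42, 42, 1080] yields
# [5, 21, 21, output_filters], with path 1 producing output_filters // 2
# channels and path 2 the remaining output_filters // 2 + output_filters % 2
# before the concat.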
@tf.contrib.framework.add_arg_scope
def drop_path(net, keep_prob, is_training=True):
"""Drops out a whole example hiddenstate with the specified probability."""
if is_training:
batch_size = tf.shape(net)[0]
noise_shape = [batch_size, 1, 1, 1]
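# keep_prob + U[0, 1) floors to 1 with probability keep_prob and to 0
# otherwise, so each example's entire hiddenstate is either kept or dropped;
# the 1 / keep_prob rescaling below keeps the expected activation unchanged.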
random_tensor = keep_prob
random_tensor += tf.random_uniform(noise_shape, dtype=tf.float32)
binary_tensor = tf.cast(tf.floor(random_tensor), net.dtype)
keep_prob_inv = tf.cast(1.0 / keep_prob, net.dtype)
net = net * keep_prob_inv * binary_tensor
return net
def _operation_to_filter_shape(operation):
splitted_operation = operation.split('x')
filter_shape = int(splitted_operation[0][-1])
assert filter_shape == int(
splitted_operation[1][0]), 'Rectangular filters not supported.'
return filter_shape
def _operation_to_num_layers(operation):
splitted_operation = operation.split('_')
if 'x' in splitted_operation[-1]:
return 1
return int(splitted_operation[-1])
def _operation_to_info(operation):
"""Takes in operation name and returns meta information.
An example would be 'separable_3x3_4' -> (3, 4).
Args:
operation: String that corresponds to convolution operation.
Returns:
Tuple of (num layers, filter shape).
"""
num_layers = _operation_to_num_layers(operation)
filter_shape = _operation_to_filter_shape(operation)
return num_layers, filter_shape
def _stacked_separable_conv(net, stride, operation, filter_size,
use_bounded_activation):
"""Takes in an operations and parses it to the correct sep operation."""
num_layers, kernel_size = _operation_to_info(operation)
activation_fn = tf.nn.relu6 if use_bounded_activation else tf.nn.relu
for layer_num in range(num_layers - 1):
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, layer_num + 1),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, layer_num + 1))
stride = 1
net = activation_fn(net)
net = slim.separable_conv2d(
net,
filter_size,
kernel_size,
depth_multiplier=1,
scope='separable_{0}x{0}_{1}'.format(kernel_size, num_layers),
stride=stride)
net = slim.batch_norm(
net, scope='bn_sep_{0}x{0}_{1}'.format(kernel_size, num_layers))
return net
def _operation_to_pooling_type(operation):
"""Takes in the operation string and returns the pooling type."""
splitted_operation = operation.split('_')
return splitted_operation[0]
def _operation_to_pooling_shape(operation):
"""Takes in the operation string and returns the pooling kernel shape."""
splitted_operation = operation.split('_')
shape = splitted_operation[-1]
assert 'x' in shape
filter_height, filter_width = shape.split('x')
assert filter_height == filter_width
return int(filter_height)
def _operation_to_pooling_info(operation):
"""Parses the pooling operation string to return its type and shape."""
pooling_type = _operation_to_pooling_type(operation)
pooling_shape = _operation_to_pooling_shape(operation)
return pooling_type, pooling_shape
def _pooling(net, stride, operation, use_bounded_activation):
"""Parses operation and performs the correct pooling operation on net."""
padding = 'SAME'
pooling_type, pooling_shape = _operation_to_pooling_info(operation)
if use_bounded_activation:
net = tf.nn.relu6(net)
if pooling_type == 'avg':
net = slim.avg_pool2d(net, pooling_shape, stride=stride, padding=padding)
elif pooling_type == 'max':
net = slim.max_pool2d(net, pooling_shape, stride=stride, padding=padding)
else:
raise NotImplementedError('Unimplemented pooling type: ', pooling_type)
return net
class NasNetABaseCell(object):
"""NASNet Cell class that is used as a 'layer' in image architectures.
Args:
num_conv_filters: The number of filters for each convolution operation.
operations: List of operations that are performed in the NASNet Cell in
order.
used_hiddenstates: Binary array that signals if the hiddenstate was used
within the cell. This is used to determine what outputs of the cell
should be concatenated together.
hiddenstate_indices: Determines what hiddenstates should be combined
together with the specified operations to create the NASNet cell.
use_bounded_activation: Whether or not to use bounded activations. Bounded
activations better lend themselves to quantized inference.
"""
def __init__(self, num_conv_filters, operations, used_hiddenstates,
hiddenstate_indices, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
self._num_conv_filters = num_conv_filters
self._operations = operations
self._used_hiddenstates = used_hiddenstates
self._hiddenstate_indices = hiddenstate_indices
self._drop_path_keep_prob = drop_path_keep_prob
self._total_num_cells = total_num_cells
self._total_training_steps = total_training_steps
self._use_bounded_activation = use_bounded_activation
def _reduce_prev_layer(self, prev_layer, curr_layer):
"""Matches dimension of prev_layer to the curr_layer."""
# Set the prev layer to the current layer if it is none
if prev_layer is None:
return curr_layer
curr_num_filters = self._filter_size
prev_num_filters = get_channel_dim(prev_layer.shape)
curr_filter_shape = int(curr_layer.shape[2])
prev_filter_shape = int(prev_layer.shape[2])
activation_fn = tf.nn.relu6 if self._use_bounded_activation else tf.nn.relu
if curr_filter_shape != prev_filter_shape:
prev_layer = activation_fn(prev_layer)
prev_layer = factorized_reduction(
prev_layer, curr_num_filters, stride=2)
elif curr_num_filters != prev_num_filters:
prev_layer = activation_fn(prev_layer)
prev_layer = slim.conv2d(
prev_layer, curr_num_filters, 1, scope='prev_1x1')
prev_layer = slim.batch_norm(prev_layer, scope='prev_bn')
return prev_layer
def _cell_base(self, net, prev_layer):
"""Runs the beginning of the conv cell before the predicted ops are run."""
num_filters = self._filter_size
# Check to be sure prev layer stuff is setup correctly
prev_layer = self._reduce_prev_layer(prev_layer, net)
net = tf.nn.relu6(net) if self._use_bounded_activation else tf.nn.relu(net)
net = slim.conv2d(net, num_filters, 1, scope='1x1')
net = slim.batch_norm(net, scope='beginning_bn')
# num_or_size_splits=1
net = [net]
net.append(prev_layer)
return net
def __call__(self, net, scope=None, filter_scaling=1, stride=1,
prev_layer=None, cell_num=-1, current_step=None):
"""Runs the conv cell."""
self._cell_num = cell_num
self._filter_scaling = filter_scaling
self._filter_size = int(self._num_conv_filters * filter_scaling)
i = 0
with tf.variable_scope(scope):
net = self._cell_base(net, prev_layer)
for iteration in range(5):
with tf.variable_scope('comb_iter_{}'.format(iteration)):
left_hiddenstate_idx, right_hiddenstate_idx = (
self._hiddenstate_indices[i],
self._hiddenstate_indices[i + 1])
original_input_left = left_hiddenstate_idx < 2
original_input_right = right_hiddenstate_idx < 2
h1 = net[left_hiddenstate_idx]
h2 = net[right_hiddenstate_idx]
operation_left = self._operations[i]
operation_right = self._operations[i+1]
i += 2
# Apply conv operations
with tf.variable_scope('left'):
h1 = self._apply_conv_operation(h1, operation_left,
stride, original_input_left,
current_step)
with tf.variable_scope('right'):
h2 = self._apply_conv_operation(h2, operation_right,
stride, original_input_right,
current_step)
# Combine hidden states using 'add'.
with tf.variable_scope('combine'):
h = h1 + h2
if self._use_bounded_activation:
h = tf.nn.relu6(h)
# Add hiddenstate to the list of hiddenstates we can choose from
net.append(h)
with tf.variable_scope('cell_output'):
net = self._combine_unused_states(net)
return net
def _apply_conv_operation(self, net, operation,
stride, is_from_original_input, current_step):
"""Applies the predicted conv operation to net."""
# Don't stride if this is not one of the original hiddenstates
if stride > 1 and not is_from_original_input:
stride = 1
input_filters = get_channel_dim(net.shape)
filter_size = self._filter_size
if 'separable' in operation:
net = _stacked_separable_conv(net, stride, operation, filter_size,
self._use_bounded_activation)
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif operation in ['none']:
if self._use_bounded_activation:
net = tf.nn.relu6(net)
# Check if a stride is needed, then use a strided 1x1 here
if stride > 1 or (input_filters != filter_size):
if not self._use_bounded_activation:
net = tf.nn.relu(net)
net = slim.conv2d(net, filter_size, 1, stride=stride, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
elif 'pool' in operation:
net = _pooling(net, stride, operation, self._use_bounded_activation)
if input_filters != filter_size:
net = slim.conv2d(net, filter_size, 1, stride=1, scope='1x1')
net = slim.batch_norm(net, scope='bn_1')
if self._use_bounded_activation:
net = tf.clip_by_value(net, -CLIP_BY_VALUE_CAP, CLIP_BY_VALUE_CAP)
else:
raise ValueError('Unimplemented operation', operation)
if operation != 'none':
net = self._apply_drop_path(net, current_step=current_step)
return net
def _combine_unused_states(self, net):
"""Concatenate the unused hidden states of the cell."""
used_hiddenstates = self._used_hiddenstates
final_height = int(net[-1].shape[2])
final_num_filters = get_channel_dim(net[-1].shape)
assert len(used_hiddenstates) == len(net)
for idx, used_h in enumerate(used_hiddenstates):
curr_height = int(net[idx].shape[2])
curr_num_filters = get_channel_dim(net[idx].shape)
# Determine if a reduction should be applied to make the number of
# filters match.
should_reduce = final_num_filters != curr_num_filters
should_reduce = (final_height != curr_height) or should_reduce
should_reduce = should_reduce and not used_h
if should_reduce:
stride = 2 if final_height != curr_height else 1
with tf.variable_scope('reduction_{}'.format(idx)):
net[idx] = factorized_reduction(
net[idx], final_num_filters, stride)
states_to_combine = (
[h for h, is_used in zip(net, used_hiddenstates) if not is_used])
# Return the concat of all the states
concat_axis = get_channel_index()
net = tf.concat(values=states_to_combine, axis=concat_axis)
return net
@tf.contrib.framework.add_arg_scope # No public API. For internal use only.
def _apply_drop_path(self, net, current_step=None,
use_summaries=False, drop_connect_version='v3'):
"""Apply drop_path regularization.
Args:
net: the Tensor that gets drop_path regularization applied.
current_step: a float32 Tensor with the current global_step value,
to be divided by hparams.total_training_steps. Usually None, which
defaults to tf.train.get_or_create_global_step() properly cast.
use_summaries: a Python boolean. If set to False, no summaries are output.
drop_connect_version: one of 'v1', 'v2', 'v3', controlling whether
the dropout rate is scaled by current_step (v1), layer (v2), or
both (v3, the default).
Returns:
The dropped-out value of `net`.
"""
drop_path_keep_prob = self._drop_path_keep_prob
if drop_path_keep_prob < 1.0:
assert drop_connect_version in ['v1', 'v2', 'v3']
if drop_connect_version in ['v2', 'v3']:
# Scale keep prob by layer number
assert self._cell_num != -1
# The added 2 is for the reduction cells
num_cells = self._total_num_cells
layer_ratio = (self._cell_num + 1)/float(num_cells)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('layer_ratio', layer_ratio)
drop_path_keep_prob = 1 - layer_ratio * (1 - drop_path_keep_prob)
if drop_connect_version in ['v1', 'v3']:
# Decrease the keep probability over time
if current_step is None:
current_step = tf.train.get_or_create_global_step()
current_step = tf.cast(current_step, tf.float32)
drop_path_burn_in_steps = self._total_training_steps
current_ratio = current_step / drop_path_burn_in_steps
current_ratio = tf.minimum(1.0, current_ratio)
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('current_ratio', current_ratio)
drop_path_keep_prob = (1 - current_ratio * (1 - drop_path_keep_prob))
if use_summaries:
with tf.device('/cpu:0'):
tf.summary.scalar('drop_path_keep_prob', drop_path_keep_prob)
net = drop_path(net, drop_path_keep_prob)
return net
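# Worked example of the scaling above (illustrative numbers): with
# drop_path_keep_prob=0.7 in a 20-cell network, cell_num=5 gives
# layer_ratio=0.3 and a layer-scaled keep prob of 1 - 0.3 * 0.3 = 0.91;
# halfway through training (current_ratio=0.5) the final keep prob becomes
# 1 - 0.5 * (1 - 0.91) = 0.955.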
class NasNetANormalCell(NasNetABaseCell):
"""NASNetA Normal Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_3x3_2',
'separable_5x5_2',
'separable_3x3_2',
'avg_pool_3x3',
'none',
'avg_pool_3x3',
'avg_pool_3x3',
'separable_3x3_2',
'none']
used_hiddenstates = [1, 0, 0, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 1, 1, 0, 1, 1, 1, 0, 0]
super(NasNetANormalCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
class NasNetAReductionCell(NasNetABaseCell):
"""NASNetA Reduction Cell."""
def __init__(self, num_conv_filters, drop_path_keep_prob, total_num_cells,
total_training_steps, use_bounded_activation=False):
operations = ['separable_5x5_2',
'separable_7x7_2',
'max_pool_3x3',
'separable_7x7_2',
'avg_pool_3x3',
'separable_5x5_2',
'none',
'avg_pool_3x3',
'separable_3x3_2',
'max_pool_3x3']
used_hiddenstates = [1, 1, 1, 0, 0, 0, 0]
hiddenstate_indices = [0, 1, 0, 1, 0, 1, 3, 2, 2, 0]
super(NasNetAReductionCell, self).__init__(num_conv_filters, operations,
used_hiddenstates,
hiddenstate_indices,
drop_path_keep_prob,
total_num_cells,
total_training_steps,
use_bounded_activation)
# Source file: TensorFlow/Detection/SSD/models/research/slim/nets/nasnet/nasnet_utils.py (DeepLearningExamples-master)
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deploy Slim models across multiple clones and replicas.
# TODO(sguada) docstring paragraph by (a) motivating the need for the file and
# (b) defining clones.
# TODO(sguada) describe the high-level components of model deployment.
# E.g. "each model deployment is composed of several parts: a DeploymentConfig,
# which captures A, B and C, an input_fn which loads data.. etc
To easily train a model on multiple GPUs or across multiple machines this
module provides a set of helper functions: `create_clones`,
`optimize_clones` and `deploy`.
Usage:
g = tf.Graph()
# Set up DeploymentConfig
config = model_deploy.DeploymentConfig(num_clones=2, clone_on_cpu=True)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.create_global_step()
# Define the inputs
with tf.device(config.inputs_device()):
images, labels = LoadData(...)
inputs_queue = slim.data.prefetch_queue((images, labels))
# Define the optimizer.
with tf.device(config.optimizer_device()):
optimizer = tf.train.MomentumOptimizer(FLAGS.learning_rate, FLAGS.momentum)
# Define the model including the loss.
def model_fn(inputs_queue):
images, labels = inputs_queue.dequeue()
predictions = CreateNetwork(images)
slim.losses.log_loss(predictions, labels)
model_dp = model_deploy.deploy(config, model_fn, [inputs_queue],
optimizer=optimizer)
# Run training.
slim.learning.train(model_dp.train_op, my_log_dir,
summary_op=model_dp.summary_op)
The Clone namedtuple holds together the values associated with each call to
model_fn:
* outputs: The return values of the calls to `model_fn()`.
* scope: The scope used to create the clone.
* device: The device used to create the clone.
DeployedModel namedtuple, holds together the values needed to train multiple
clones:
* train_op: An operation that runs the optimizer training op and includes
all the update ops created by `model_fn`. Present only if an optimizer
was specified.
* summary_op: An operation that runs the summaries created by `model_fn`
and process_gradients.
* total_loss: A `Tensor` that contains the sum of all losses created by
`model_fn` plus the regularization losses.
* clones: List of `Clone` tuples returned by `create_clones()`.
DeploymentConfig parameters:
* num_clones: Number of model clones to deploy in each replica.
* clone_on_cpu: True if clones should be placed on CPU.
* replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
* num_replicas: Number of replicas to use.
* num_ps_tasks: Number of tasks for the `ps` job. 0 to not use replicas.
* worker_job_name: A name for the worker job.
* ps_job_name: A name for the parameter server job.
TODO(sguada):
- describe side effect to the graph.
- what happens to summaries and update_ops.
- which graph collections are altered.
- write a tutorial on how to use this.
- analyze the possibility of calling deploy more than once.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
slim = tf.contrib.slim
__all__ = ['create_clones',
'deploy',
'optimize_clones',
'DeployedModel',
'DeploymentConfig',
'Clone',
]
# Namedtuple used to represent a clone during deployment.
Clone = collections.namedtuple('Clone',
['outputs', # Whatever model_fn() returned.
'scope', # The scope used to create it.
'device', # The device used to create.
])
# Namedtuple used to represent a DeployedModel, returned by deploy().
DeployedModel = collections.namedtuple('DeployedModel',
['train_op', # The `train_op`
'summary_op', # The `summary_op`
'total_loss', # The loss `Tensor`
'clones', # A list of `Clones` tuples.
])
# Default parameters for DeploymentConfig
_deployment_params = {'num_clones': 1,
'clone_on_cpu': False,
'replica_id': 0,
'num_replicas': 1,
'num_ps_tasks': 0,
'worker_job_name': 'worker',
'ps_job_name': 'ps'}
def create_clones(config, model_fn, args=None, kwargs=None):
"""Creates multiple clones according to config using a `model_fn`.
The returned values of `model_fn(*args, **kwargs)` are collected along with
the scope and device used to created it in a namedtuple
`Clone(outputs, scope, device)`
Note: it is assumed that any loss created by `model_fn` is collected at
the tf.GraphKeys.LOSSES collection.
To recover the losses, summaries or update_ops created by the clone use:
```python
losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, clone.scope)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
```
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
If `config` specifies deployment on multiple replicas then the default
tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A DeploymentConfig object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional dict of keyword arguments to pass to `model_fn`.
Returns:
A list of namedtuples `Clone`.
"""
clones = []
args = args or []
kwargs = kwargs or {}
with slim.arg_scope([slim.model_variable, slim.variable],
device=config.variables_device()):
# Create clones.
for i in range(0, config.num_clones):
with tf.name_scope(config.clone_scope(i)) as clone_scope:
clone_device = config.clone_device(i)
with tf.device(clone_device):
with tf.variable_scope(tf.get_variable_scope(),
reuse=True if i > 0 else None):
outputs = model_fn(*args, **kwargs)
clones.append(Clone(outputs, clone_scope, clone_device))
return clones
def _gather_clone_loss(clone, num_clones, regularization_losses):
"""Gather the loss for a single clone.
Args:
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
Returns:
A tensor for the total loss for the clone. Can be None.
"""
# The return value.
sum_loss = None
# Individual components of the loss that will need summaries.
clone_loss = None
regularization_loss = None
# Compute and aggregate losses on the clone device.
with tf.device(clone.device):
all_losses = []
clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
if clone_losses:
clone_loss = tf.add_n(clone_losses, name='clone_loss')
if num_clones > 1:
clone_loss = tf.div(clone_loss, 1.0 * num_clones,
name='scaled_clone_loss')
all_losses.append(clone_loss)
if regularization_losses:
regularization_loss = tf.add_n(regularization_losses,
name='regularization_loss')
all_losses.append(regularization_loss)
if all_losses:
sum_loss = tf.add_n(all_losses)
# Add the summaries out of the clone device block.
if clone_loss is not None:
tf.summary.scalar('/'.join(filter(None,
['Losses', clone.scope, 'clone_loss'])),
clone_loss)
if regularization_loss is not None:
tf.summary.scalar('Losses/regularization_loss', regularization_loss)
return sum_loss
def _optimize_clone(optimizer, clone, num_clones, regularization_losses,
**kwargs):
"""Compute losses and gradients for a single clone.
Args:
optimizer: A tf.Optimizer object.
clone: A Clone namedtuple.
num_clones: The number of clones being deployed.
regularization_losses: Possibly empty list of regularization_losses
to add to the clone losses.
**kwargs: Keyword arguments to pass to compute_gradients().
Returns:
A tuple (clone_loss, clone_grads_and_vars).
- clone_loss: A tensor for the total loss for the clone. Can be None.
- clone_grads_and_vars: List of (gradient, variable) for the clone.
Can be empty.
"""
sum_loss = _gather_clone_loss(clone, num_clones, regularization_losses)
clone_grad = None
if sum_loss is not None:
with tf.device(clone.device):
clone_grad = optimizer.compute_gradients(sum_loss, **kwargs)
return sum_loss, clone_grad
def optimize_clones(clones, optimizer,
regularization_losses=None,
**kwargs):
"""Compute clone losses and gradients for the given list of `Clones`.
Note: The regularization_losses are added to the first clone losses.
Args:
clones: List of `Clones` created by `create_clones()`.
optimizer: An `Optimizer` object.
regularization_losses: Optional list of regularization losses. If None it
will gather them from tf.GraphKeys.REGULARIZATION_LOSSES. Pass `[]` to
exclude them.
**kwargs: Optional list of keyword arguments to pass to `compute_gradients`.
Returns:
A tuple (total_loss, grads_and_vars).
- total_loss: A Tensor containing the average of the clone losses including
the regularization loss.
- grads_and_vars: A List of tuples (gradient, variable) containing the sum
of the gradients for each variable.
"""
grads_and_vars = []
clones_losses = []
num_clones = len(clones)
if regularization_losses is None:
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss, clone_grad = _optimize_clone(
optimizer, clone, num_clones, regularization_losses, **kwargs)
if clone_loss is not None:
clones_losses.append(clone_loss)
grads_and_vars.append(clone_grad)
# Only use regularization_losses for the first clone
regularization_losses = None
# Compute the total_loss summing all the clones_losses.
total_loss = tf.add_n(clones_losses, name='total_loss')
# Sum the gradients across clones.
grads_and_vars = _sum_clones_gradients(grads_and_vars)
return total_loss, grads_and_vars
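# Hedged sketch of driving optimize_clones() directly (deploy() below wraps
# this for you): `clones` would come from create_clones() as in the module
# docstring, and the optimizer argument is whatever tf.train.Optimizer the
# caller chose.
def _example_apply_clone_gradients(clones, optimizer):
  total_loss, grads_and_vars = optimize_clones(clones, optimizer)
  train_step = optimizer.apply_gradients(grads_and_vars)
  return total_loss, train_step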
def deploy(config,
model_fn,
args=None,
kwargs=None,
optimizer=None,
summarize_gradients=False):
"""Deploys a Slim-constructed model across multiple clones.
The deployment options are specified by the config object and support
deploying one or several clones on different GPUs and one or several replicas
of such clones.
The argument `model_fn` is called `config.num_clones` times to create the
model clones as `model_fn(*args, **kwargs)`.
The optional argument `optimizer` is an `Optimizer` object. If not `None`,
the deployed model is configured for training with that optimizer.
If `config` specifies deployment on multiple replicas then the default
tensorflow device is set appropriately for each call to `model_fn` and for the
slim variable creation functions: model and global variables will be created
on the `ps` device, the clone operations will be on the `worker` device.
Args:
config: A `DeploymentConfig` object.
model_fn: A callable. Called as `model_fn(*args, **kwargs)`
args: Optional list of arguments to pass to `model_fn`.
kwargs: Optional dict of keyword arguments to pass to `model_fn`.
optimizer: Optional `Optimizer` object. If passed the model is deployed
for training with that optimizer.
summarize_gradients: Whether or not add summaries to the gradients.
Returns:
A `DeployedModel` namedtuple.
"""
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Create Clones.
clones = create_clones(config, model_fn, args, kwargs)
first_clone = clones[0]
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone.scope)
train_op = None
total_loss = None
with tf.device(config.optimizer_device()):
if optimizer:
# Place the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = slim.get_or_create_global_step()
# Compute the gradients for the clones.
total_loss, clones_gradients = optimize_clones(clones, optimizer)
if clones_gradients:
if summarize_gradients:
# Add summaries to the gradients.
summaries |= set(_add_gradients_summaries(clones_gradients))
# Create gradient updates.
grad_updates = optimizer.apply_gradients(clones_gradients,
global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
with tf.control_dependencies([update_op]):
train_op = tf.identity(total_loss, name='train_op')
else:
clones_losses = []
regularization_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
for clone in clones:
with tf.name_scope(clone.scope):
clone_loss = _gather_clone_loss(clone, len(clones),
regularization_losses)
if clone_loss is not None:
clones_losses.append(clone_loss)
# Only use regularization_losses for the first clone
regularization_losses = None
if clones_losses:
total_loss = tf.add_n(clones_losses, name='total_loss')
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES,
first_clone.scope))
if total_loss is not None:
# Add total_loss to summary.
summaries.add(tf.summary.scalar('total_loss', total_loss))
if summaries:
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries), name='summary_op')
else:
summary_op = None
return DeployedModel(train_op, summary_op, total_loss, clones)
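# Illustrative sketch (not part of the original module): deploying a model on
# two clones for local training. `model_fn`, `images` and `labels` are
# hypothetical placeholders; `model_fn` builds the network and adds its losses.
def _example_deploy(model_fn, images, labels):
  config = DeploymentConfig(num_clones=2)
  optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
  model = deploy(config, model_fn, args=[images, labels],
                 optimizer=optimizer, summarize_gradients=True)
  # model.train_op runs one update step; model.total_loss is the averaged
  # clone loss plus the regularization loss.
  return model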
def _sum_clones_gradients(clone_grads):
"""Calculate the sum gradient for each shared variable across all clones.
This function assumes that the clone_grads has been scaled appropriately by
1 / num_clones.
Args:
clone_grads: A List of List of tuples (gradient, variable), one list per
`Clone`.
Returns:
List of tuples of (gradient, variable) where the gradient has been summed
across all clones.
"""
sum_grads = []
for grad_and_vars in zip(*clone_grads):
# Note that each grad_and_vars looks like the following:
# ((grad_var0_clone0, var0), (grad_var0_clone1, var0), ... (grad_var0_cloneN, var0))
grads = []
var = grad_and_vars[0][1]
for g, v in grad_and_vars:
assert v == var
if g is not None:
grads.append(g)
if grads:
if len(grads) > 1:
sum_grad = tf.add_n(grads, name=var.op.name + '/sum_grads')
else:
sum_grad = grads[0]
sum_grads.append((sum_grad, var))
return sum_grads
def _add_gradients_summaries(grads_and_vars):
"""Add histogram summaries to gradients.
Note: The summaries are also added to the SUMMARIES collection.
Args:
grads_and_vars: A list of gradient to variable pairs (tuples).
Returns:
The _list_ of the added summaries for grads_and_vars.
"""
summaries = []
for grad, var in grads_and_vars:
if grad is not None:
if isinstance(grad, tf.IndexedSlices):
grad_values = grad.values
else:
grad_values = grad
summaries.append(tf.summary.histogram(var.op.name + ':gradient',
grad_values))
summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
tf.global_norm([grad_values])))
else:
tf.logging.info('Var %s has no gradient', var.op.name)
return summaries
class DeploymentConfig(object):
"""Configuration for deploying a model with `deploy()`.
You can pass an instance of this class to `deploy()` to specify exactly
how the model is to be deployed. If you do not pass one, an instance built
from the default deployment_hparams will be used.
"""
def __init__(self,
num_clones=1,
clone_on_cpu=False,
replica_id=0,
num_replicas=1,
num_ps_tasks=0,
worker_job_name='worker',
ps_job_name='ps'):
"""Create a DeploymentConfig.
The config describes how to deploy a model across multiple clones and
replicas. The model will be replicated `num_clones` times in each replica.
If `clone_on_cpu` is True, each clone will be placed on CPU.
If `num_replicas` is 1, the model is deployed via a single process. In that
case `worker_device`, `num_ps_tasks`, and `ps_device` are ignored.
If `num_replicas` is greater than 1, then `worker_device` and `ps_device`
must specify TensorFlow devices for the `worker` and `ps` jobs and
`num_ps_tasks` must be positive.
Args:
num_clones: Number of model clones to deploy in each replica.
clone_on_cpu: If True, clones are placed on CPU.
replica_id: Integer. Index of the replica for which the model is
deployed. Usually 0 for the chief replica.
num_replicas: Number of replicas to use.
num_ps_tasks: Number of tasks for the `ps` job. 0 to not use parameter servers.
worker_job_name: A name for the worker job.
ps_job_name: A name for the parameter server job.
Raises:
ValueError: If the arguments are invalid.
"""
if num_replicas > 1:
if num_ps_tasks < 1:
raise ValueError('When using replicas num_ps_tasks must be positive')
if num_replicas > 1 or num_ps_tasks > 0:
if not worker_job_name:
raise ValueError('Must specify worker_job_name when using replicas')
if not ps_job_name:
raise ValueError('Must specify ps_job_name when using parameter server')
if replica_id >= num_replicas:
raise ValueError('replica_id must be less than num_replicas')
self._num_clones = num_clones
self._clone_on_cpu = clone_on_cpu
self._replica_id = replica_id
self._num_replicas = num_replicas
self._num_ps_tasks = num_ps_tasks
self._ps_device = '/job:' + ps_job_name if num_ps_tasks > 0 else ''
self._worker_device = '/job:' + worker_job_name if num_ps_tasks > 0 else ''
@property
def num_clones(self):
return self._num_clones
@property
def clone_on_cpu(self):
return self._clone_on_cpu
@property
def replica_id(self):
return self._replica_id
@property
def num_replicas(self):
return self._num_replicas
@property
def num_ps_tasks(self):
return self._num_ps_tasks
@property
def ps_device(self):
return self._ps_device
@property
def worker_device(self):
return self._worker_device
def caching_device(self):
"""Returns the device to use for caching variables.
Variables are cached on the worker CPU when using replicas.
Returns:
A device string or None if the variables do not need to be cached.
"""
if self._num_ps_tasks > 0:
return lambda op: op.device
else:
return None
def clone_device(self, clone_index):
"""Device used to create the clone and all the ops inside the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A value suitable for `tf.device()`.
Raises:
ValueError: if `clone_index` is greater than or equal to the number of clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
if self._clone_on_cpu:
device += '/device:CPU:0'
else:
device += '/device:GPU:%d' % clone_index
return device
def clone_scope(self, clone_index):
"""Name scope to create the clone.
Args:
clone_index: Int, representing the clone_index.
Returns:
A name_scope suitable for `tf.name_scope()`.
Raises:
ValueError: if `clone_index` is greater than or equal to the number of clones.
"""
if clone_index >= self._num_clones:
raise ValueError('clone_index must be less than num_clones')
scope = ''
if self._num_clones > 1:
scope = 'clone_%d' % clone_index
return scope
def optimizer_device(self):
"""Device to use with the optimizer.
Returns:
A value suitable for `tf.device()`.
"""
if self._num_ps_tasks > 0 or self._num_clones > 0:
return self._worker_device + '/device:CPU:0'
else:
return ''
def inputs_device(self):
"""Device to use to build the inputs.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._worker_device
device += '/device:CPU:0'
return device
def variables_device(self):
"""Returns the device to use for variables created inside the clone.
Returns:
A value suitable for `tf.device()`.
"""
device = ''
if self._num_ps_tasks > 0:
device += self._ps_device
device += '/device:CPU:0'
class _PSDeviceChooser(object):
"""Slim device chooser for variables when using PS."""
def __init__(self, device, tasks):
self._device = device
self._tasks = tasks
self._task = 0
def choose(self, op):
if op.device:
return op.device
node_def = op if isinstance(op, tf.NodeDef) else op.node_def
if node_def.op.startswith('Variable'):
t = self._task
self._task = (self._task + 1) % self._tasks
d = '%s/task:%d' % (self._device, t)
return d
else:
return op.device
if not self._num_ps_tasks:
return device
else:
chooser = _PSDeviceChooser(device, self._num_ps_tasks)
return chooser.choose
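# Illustrative sketch (comments only, not part of the original module): with
# two `ps` tasks, `variables_device()` returns a chooser that places Variable
# ops on the parameter servers in round-robin order, e.g.:
#
#   config = DeploymentConfig(num_ps_tasks=2)
#   with tf.device(config.variables_device()):
#     a = tf.Variable(0)  # placed on /job:ps/task:0/device:CPU:0
#     b = tf.Variable(0)  # placed on /job:ps/task:1/device:CPU:0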
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/deployment/model_deploy.py |
DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/deployment/__init__.py |
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for model_deploy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from deployment import model_deploy
slim = tf.contrib.slim
class DeploymentConfigTest(tf.test.TestCase):
def testDefaults(self):
deploy_config = model_deploy.DeploymentConfig()
self.assertEqual(slim.get_variables(), [])
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testCPUonly(self):
deploy_config = model_deploy.DeploymentConfig(clone_on_cpu=True)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'CPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testMultiGPU(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
self.assertEqual(deploy_config.caching_device(), None)
self.assertDeviceEqual(deploy_config.clone_device(0), 'GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1), 'GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(), 'CPU:0')
self.assertDeviceEqual(deploy_config.variables_device(), 'CPU:0')
def testPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=1, num_ps_tasks=1)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def testMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_clones=2, num_ps_tasks=1)
self.assertEqual(deploy_config.caching_device()(tf.no_op()), '')
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertEqual(deploy_config.clone_scope(0), '')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testReplicasMultiGPUPS(self):
deploy_config = model_deploy.DeploymentConfig(num_replicas=2,
num_clones=2,
num_ps_tasks=2)
self.assertDeviceEqual(deploy_config.clone_device(0),
'/job:worker/device:GPU:0')
self.assertDeviceEqual(deploy_config.clone_device(1),
'/job:worker/device:GPU:1')
self.assertEqual(deploy_config.clone_scope(0), 'clone_0')
self.assertEqual(deploy_config.clone_scope(1), 'clone_1')
self.assertDeviceEqual(deploy_config.optimizer_device(),
'/job:worker/device:CPU:0')
self.assertDeviceEqual(deploy_config.inputs_device(),
'/job:worker/device:CPU:0')
def testVariablesPS(self):
deploy_config = model_deploy.DeploymentConfig(num_ps_tasks=2)
with tf.device(deploy_config.variables_device()):
a = tf.Variable(0)
b = tf.Variable(0)
c = tf.no_op()
d = slim.variable('a', [],
caching_device=deploy_config.caching_device())
self.assertDeviceEqual(a.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(a.device, a.value().device)
self.assertDeviceEqual(b.device, '/job:ps/task:1/device:CPU:0')
self.assertDeviceEqual(b.device, b.value().device)
self.assertDeviceEqual(c.device, '')
self.assertDeviceEqual(d.device, '/job:ps/task:0/device:CPU:0')
self.assertDeviceEqual(d.value().device, '')
def LogisticClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'LogisticClassifier', [inputs, labels],
reuse=reuse):
predictions = slim.fully_connected(inputs, 1, activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
def BatchNormClassifier(inputs, labels, scope=None, reuse=None):
with tf.variable_scope(scope, 'BatchNormClassifier', [inputs, labels],
reuse=reuse):
inputs = slim.batch_norm(inputs, decay=0.1, fused=True)
predictions = slim.fully_connected(inputs, 1,
activation_fn=tf.sigmoid,
scope='fully_connected')
slim.losses.log_loss(predictions, labels)
return predictions
class CreatecloneTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 2)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'LogisticClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
clone = clones[0]
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertEqual(clone.scope, '')
self.assertDeviceEqual(clone.device, 'GPU:0')
self.assertEqual(len(slim.losses.get_losses()), 1)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, 'CPU:0')
self.assertDeviceEqual(v.value().device, 'CPU:0')
self.assertEqual(len(clones), num_clones)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, clone.scope)
self.assertEqual(len(update_ops), 2)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, 'GPU:%d' % i)
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(clones), 1)
clone = clones[0]
self.assertEqual(clone.outputs.op.name,
'BatchNormClassifier/fully_connected/Sigmoid')
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:0')
self.assertEqual(clone.scope, '')
self.assertEqual(len(slim.get_variables()), 5)
for v in slim.get_variables():
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
self.assertDeviceEqual(v.device, v.value().device)
def testCreateMulticloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
num_ps_tasks=2)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
for i, v in enumerate(slim.get_variables()):
t = i % 2
self.assertDeviceEqual(v.device, '/job:ps/task:%d/device:CPU:0' % t)
self.assertDeviceEqual(v.device, v.value().device)
self.assertEqual(len(clones), 2)
for i, clone in enumerate(clones):
self.assertEqual(
clone.outputs.op.name,
'clone_%d/BatchNormClassifier/fully_connected/Sigmoid' % i)
self.assertEqual(clone.scope, 'clone_%d/' % i)
self.assertDeviceEqual(clone.device, '/job:worker/device:GPU:%d' % i)
class OptimizeclonesTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def testCreateLogisticClassifier(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = LogisticClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 2)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(update_ops, [])
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateSingleclone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, 'GPU:0')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticlone(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
clone_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, clone_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateMulticloneCPU(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
num_clones = 4
deploy_config = model_deploy.DeploymentConfig(num_clones=num_clones,
clone_on_cpu=True)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), num_clones * 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '')
self.assertDeviceEqual(v.device, 'CPU:0')
def testCreateOnecloneWithPS(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=1,
num_ps_tasks=1)
self.assertEqual(slim.get_variables(), [])
clones = model_deploy.create_clones(deploy_config, model_fn, model_args)
self.assertEqual(len(slim.get_variables()), 5)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 2)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
total_loss, grads_and_vars = model_deploy.optimize_clones(clones,
optimizer)
self.assertEqual(len(grads_and_vars), len(tf.trainable_variables()))
self.assertEqual(total_loss.op.name, 'total_loss')
for g, v in grads_and_vars:
self.assertDeviceEqual(g.device, '/job:worker/device:GPU:0')
self.assertDeviceEqual(v.device, '/job:ps/task:0/CPU:0')
class DeployTest(tf.test.TestCase):
def setUp(self):
# Create an easy training set:
np.random.seed(0)
self._inputs = np.zeros((16, 4))
self._labels = np.random.randint(0, 2, size=(16, 1)).astype(np.float32)
self._logdir = self.get_temp_dir()
for i in range(16):
j = int(2 * self._labels[i] + np.random.randint(0, 2))
self._inputs[i, j] = 1
def _addBesselsCorrection(self, sample_size, expected_var):
correction_factor = sample_size / (sample_size - 1)
expected_var *= correction_factor
return expected_var
def testLocalTrainOp(self):
g = tf.Graph()
with g.as_default():
tf.set_random_seed(0)
tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
tf_labels = tf.constant(self._labels, dtype=tf.float32)
model_fn = BatchNormClassifier
model_args = (tf_inputs, tf_labels)
deploy_config = model_deploy.DeploymentConfig(num_clones=2,
clone_on_cpu=True)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
self.assertEqual(slim.get_variables(), [])
model = model_deploy.deploy(deploy_config, model_fn, model_args,
optimizer=optimizer)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
self.assertEqual(len(update_ops), 4)
self.assertEqual(len(model.clones), 2)
self.assertEqual(model.total_loss.op.name, 'total_loss')
self.assertEqual(model.summary_op.op.name, 'summary_op/summary_op')
self.assertEqual(model.train_op.op.name, 'train_op')
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
moving_mean = tf.contrib.framework.get_variables_by_name(
'moving_mean')[0]
moving_variance = tf.contrib.framework.get_variables_by_name(
'moving_variance')[0]
initial_loss = sess.run(model.total_loss)
initial_mean, initial_variance = sess.run([moving_mean,
moving_variance])
self.assertAllClose(initial_mean, [0.0, 0.0, 0.0, 0.0])
self.assertAllClose(initial_variance, [1.0, 1.0, 1.0, 1.0])
for _ in range(10):
sess.run(model.train_op)
final_loss = sess.run(model.total_loss)
self.assertLess(final_loss, initial_loss / 5.0)
final_mean, final_variance = sess.run([moving_mean,
moving_variance])
expected_mean = np.array([0.125, 0.25, 0.375, 0.25])
expected_var = np.array([0.109375, 0.1875, 0.234375, 0.1875])
expected_var = self._addBesselsCorrection(16, expected_var)
self.assertAllClose(final_mean, expected_mean)
self.assertAllClose(final_variance, expected_var)
def testNoSummariesOnGPU(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = tf.contrib.layers.l2_regularizer(0.001)
tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
model = model_deploy.deploy(
deploy_config, ModelFn,
optimizer=tf.train.GradientDescentOptimizer(1.0))
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
def testNoSummariesOnGPUForEvals(self):
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=2)
# clone function creates a fully_connected layer with a regularizer loss.
def ModelFn():
inputs = tf.constant(1.0, shape=(10, 20), dtype=tf.float32)
reg = tf.contrib.layers.l2_regularizer(0.001)
tf.contrib.layers.fully_connected(inputs, 30, weights_regularizer=reg)
# No optimizer here, it's an eval.
model = model_deploy.deploy(deploy_config, ModelFn)
# The model summary op should have a few summary inputs and all of them
# should be on the CPU.
self.assertTrue(model.summary_op.op.inputs)
for inp in model.summary_op.op.inputs:
self.assertEqual('/device:CPU:0', inp.device)
if __name__ == '__main__':
tf.test.main()
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/deployment/model_deploy_test.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains a factory for building various models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
preprocessing_fn: A function that preprocesses a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'mobilenet_v1': inception_preprocessing,
'mobilenet_v2': inception_preprocessing,
'mobilenet_v2_035': inception_preprocessing,
'mobilenet_v2_140': inception_preprocessing,
'nasnet_mobile': inception_preprocessing,
'nasnet_large': inception_preprocessing,
'pnasnet_mobile': inception_preprocessing,
'pnasnet_large': inception_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v1_200': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'resnet_v2_200': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image, output_height, output_width, is_training=is_training, **kwargs)
return preprocessing_fn
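# Illustrative sketch (not part of the original module): selecting and applying
# a preprocessing function by model name. `raw_image` is a hypothetical uint8
# image tensor; 299x299 is the usual input size for Inception V3.
def _example_get_preprocessing(raw_image):
  preprocess_fn = get_preprocessing('inception_v3', is_training=True)
  return preprocess_fn(raw_image, 299, 299)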
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/preprocessing/preprocessing_factory.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images in CIFAR-10.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
_PADDING = 4
slim = tf.contrib.slim
def preprocess_for_train(image,
output_height,
output_width,
padding=_PADDING,
add_image_summaries=True):
"""Preprocesses the given image for training.
Note that the image is padded with `padding` pixels on each side and then
randomly cropped back to [`output_height`, `output_width`].
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
padding: The amount of padding before and after each dimension of the image.
add_image_summaries: Enable image summaries.
Returns:
A preprocessed image.
"""
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
if padding > 0:
image = tf.pad(image, [[padding, padding], [padding, padding], [0, 0]])
# Randomly crop a [height, width] section of the image.
distorted_image = tf.random_crop(image,
[output_height, output_width, 3])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
if add_image_summaries:
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
# Because these operations are not commutative, consider randomizing
# the order of their operation.
distorted_image = tf.image.random_brightness(distorted_image,
max_delta=63)
distorted_image = tf.image.random_contrast(distorted_image,
lower=0.2, upper=1.8)
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(distorted_image)
def preprocess_for_eval(image, output_height, output_width,
add_image_summaries=True):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
add_image_summaries: Enable image summaries.
Returns:
A preprocessed image.
"""
if add_image_summaries:
tf.summary.image('image', tf.expand_dims(image, 0))
# Transform the image to floats.
image = tf.to_float(image)
# Resize and crop if needed.
resized_image = tf.image.resize_image_with_crop_or_pad(image,
                                                        output_height,
                                                        output_width)
if add_image_summaries:
tf.summary.image('resized_image', tf.expand_dims(resized_image, 0))
# Subtract off the mean and divide by the variance of the pixels.
return tf.image.per_image_standardization(resized_image)
def preprocess_image(image, output_height, output_width, is_training=False,
add_image_summaries=True):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
add_image_summaries: Enable image summaries.
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(
image, output_height, output_width,
add_image_summaries=add_image_summaries)
else:
return preprocess_for_eval(
image, output_height, output_width,
add_image_summaries=add_image_summaries)
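# Illustrative sketch (not part of the original module): CIFAR-10 images are
# 32x32, so a typical training call pads to 40x40 (with the default padding of
# 4) and randomly crops back to 32x32. `raw_image` is a hypothetical uint8
# tensor of shape [32, 32, 3].
def _example_cifarnet_preprocessing(raw_image):
  return preprocess_image(raw_image, 32, 32, is_training=True)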
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/preprocessing/cifarnet_preprocessing.py |
DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/preprocessing/__init__.py |
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images.
The preprocessing steps for VGG were introduced in the following technical
report:
Very Deep Convolutional Networks For Large-Scale Image Recognition
Karen Simonyan and Andrew Zisserman
arXiv technical report, 2015
PDF: http://arxiv.org/pdf/1409.1556.pdf
ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf
CC-BY-4.0
More information can be obtained from the VGG website:
www.robots.ox.ac.uk/~vgg/research/very_deep/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
"""Crops the given image using the provided offsets and sizes.
Note that the method doesn't assume we know the input image size but it does
assume we know the input image rank.
Args:
image: an image of shape [height, width, channels].
offset_height: a scalar tensor indicating the height offset.
offset_width: a scalar tensor indicating the width offset.
crop_height: the height of the cropped image.
crop_width: the width of the cropped image.
Returns:
the cropped (and resized) image.
Raises:
InvalidArgumentError: if the rank is not 3 or if the image dimensions are
less than the crop size.
"""
original_shape = tf.shape(image)
rank_assertion = tf.Assert(
tf.equal(tf.rank(image), 3),
['Rank of image must be equal to 3.'])
with tf.control_dependencies([rank_assertion]):
cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
size_assertion = tf.Assert(
tf.logical_and(
tf.greater_equal(original_shape[0], crop_height),
tf.greater_equal(original_shape[1], crop_width)),
['Crop size greater than the image size.'])
offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))
# Use tf.slice instead of crop_to_bounding box as it accepts tensors to
# define the crop size.
with tf.control_dependencies([size_assertion]):
image = tf.slice(image, offsets, cropped_shape)
return tf.reshape(image, cropped_shape)
def _random_crop(image_list, crop_height, crop_width):
"""Crops the given list of images.
The function applies the same crop to each image in the list. This can be
effectively applied when there are multiple image inputs of the same
dimension such as:
image, depths, normals = _random_crop([image, depths, normals], 120, 150)
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the new height.
crop_width: the new width.
Returns:
the image_list with cropped images.
Raises:
ValueError: if there are multiple image inputs provided with different size
or the images are smaller than the crop dimensions.
"""
if not image_list:
raise ValueError('Empty image_list.')
# Compute the rank assertions.
rank_assertions = []
for i in range(len(image_list)):
image_rank = tf.rank(image_list[i])
rank_assert = tf.Assert(
tf.equal(image_rank, 3),
['Wrong rank for tensor %s [expected] [actual]',
image_list[i].name, 3, image_rank])
rank_assertions.append(rank_assert)
with tf.control_dependencies([rank_assertions[0]]):
image_shape = tf.shape(image_list[0])
image_height = image_shape[0]
image_width = image_shape[1]
crop_size_assert = tf.Assert(
tf.logical_and(
tf.greater_equal(image_height, crop_height),
tf.greater_equal(image_width, crop_width)),
['Crop size greater than the image size.'])
asserts = [rank_assertions[0], crop_size_assert]
for i in range(1, len(image_list)):
image = image_list[i]
asserts.append(rank_assertions[i])
with tf.control_dependencies([rank_assertions[i]]):
shape = tf.shape(image)
height = shape[0]
width = shape[1]
height_assert = tf.Assert(
tf.equal(height, image_height),
['Wrong height for tensor %s [expected][actual]',
image.name, height, image_height])
width_assert = tf.Assert(
tf.equal(width, image_width),
['Wrong width for tensor %s [expected][actual]',
image.name, width, image_width])
asserts.extend([height_assert, width_assert])
# Create a random bounding box.
#
# Use tf.random_uniform and not numpy.random.rand as doing the former would
# generate random numbers at graph eval time, unlike the latter which
# generates random numbers at graph definition time.
with tf.control_dependencies(asserts):
max_offset_height = tf.reshape(image_height - crop_height + 1, [])
with tf.control_dependencies(asserts):
max_offset_width = tf.reshape(image_width - crop_width + 1, [])
offset_height = tf.random_uniform(
[], maxval=max_offset_height, dtype=tf.int32)
offset_width = tf.random_uniform(
[], maxval=max_offset_width, dtype=tf.int32)
return [_crop(image, offset_height, offset_width,
crop_height, crop_width) for image in image_list]
def _central_crop(image_list, crop_height, crop_width):
"""Performs central crops of the given image list.
Args:
image_list: a list of image tensors of the same dimension but possibly
varying channel.
crop_height: the height of the image following the crop.
crop_width: the width of the image following the crop.
Returns:
the list of cropped images.
"""
outputs = []
for image in image_list:
image_height = tf.shape(image)[0]
image_width = tf.shape(image)[1]
offset_height = (image_height - crop_height) / 2
offset_width = (image_width - crop_width) / 2
outputs.append(_crop(image, offset_height, offset_width,
crop_height, crop_width))
return outputs
def _mean_image_subtraction(image, means):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = _mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
num_channels = image.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=2, values=channels)
def _smallest_size_at_least(height, width, smallest_side):
"""Computes new shape with the smallest side equal to `smallest_side`.
Computes new shape with the smallest side equal to `smallest_side` while
preserving the original aspect ratio.
Args:
height: an int32 scalar tensor indicating the current height.
width: an int32 scalar tensor indicating the current width.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
new_height: an int32 scalar tensor indicating the new height.
new_width: an int32 scalar tensor indicating the new width.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
height = tf.to_float(height)
width = tf.to_float(width)
smallest_side = tf.to_float(smallest_side)
scale = tf.cond(tf.greater(height, width),
lambda: smallest_side / width,
lambda: smallest_side / height)
new_height = tf.to_int32(tf.rint(height * scale))
new_width = tf.to_int32(tf.rint(width * scale))
return new_height, new_width
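# Worked example (comments only, not part of the original module): for a
# 480x640 image with smallest_side=256, height <= width, so
# scale = 256 / 480 ~= 0.533, giving new_height = 256 and new_width = 341.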
def _aspect_preserving_resize(image, smallest_side):
"""Resize images preserving the original aspect ratio.
Args:
image: A 3-D image `Tensor`.
smallest_side: A python integer or scalar `Tensor` indicating the size of
the smallest side after resize.
Returns:
resized_image: A 3-D tensor containing the resized image.
"""
smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
shape = tf.shape(image)
height = shape[0]
width = shape[1]
new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
image = tf.expand_dims(image, 0)
resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
align_corners=False)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([None, None, 3])
return resized_image
def preprocess_for_train(image,
output_height,
output_width,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX):
"""Preprocesses the given image for training.
Note that the actual resizing scale is sampled from
[`resize_size_min`, `resize_size_max`].
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing.
Returns:
A preprocessed image.
"""
resize_side = tf.random_uniform(
[], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32)
image = _aspect_preserving_resize(image, resize_side)
image = _random_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
image = tf.image.random_flip_left_right(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_for_eval(image, output_height, output_width, resize_side):
"""Preprocesses the given image for evaluation.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
resize_side: The smallest side of the image for aspect-preserving resizing.
Returns:
A preprocessed image.
"""
image = _aspect_preserving_resize(image, resize_side)
image = _central_crop([image], output_height, output_width)[0]
image.set_shape([output_height, output_width, 3])
image = tf.to_float(image)
return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
def preprocess_image(image, output_height, output_width, is_training=False,
resize_side_min=_RESIZE_SIDE_MIN,
resize_side_max=_RESIZE_SIDE_MAX):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
resize_side_min: The lower bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, then this value
is used for rescaling.
resize_side_max: The upper bound for the smallest side of the image for
aspect-preserving resizing. If `is_training` is `False`, this value is
ignored. Otherwise, the resize side is sampled from
[resize_size_min, resize_size_max].
Returns:
A preprocessed image.
"""
if is_training:
return preprocess_for_train(image, output_height, output_width,
resize_side_min, resize_side_max)
else:
return preprocess_for_eval(image, output_height, output_width,
resize_side_min)
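# Illustrative sketch (not part of the original module): typical VGG-style
# evaluation preprocessing resizes the smallest image side to 256 and then
# centrally crops to 224x224. `raw_image` is a hypothetical uint8 image tensor.
def _example_vgg_eval_preprocessing(raw_image):
  return preprocess_image(raw_image, 224, 224, is_training=False,
                          resize_side_min=256)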
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/preprocessing/vgg_preprocessing.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.clip_by_value(image, 0.0, 1.0)
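# Illustrative sketch (not part of the original module): applying one of the
# four color-distortion orderings above, chosen randomly at graph-eval time.
# `image` is a hypothetical float image tensor with values in [0, 1].
def _example_random_color_distortion(image):
  return apply_with_random_selector(
      image,
      lambda x, ordering: distort_color(x, ordering, fast_mode=False),
      num_cases=4)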
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(0.75, 1.33),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None,
add_image_summaries=True):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not affect the label.
Additionally it creates image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it is converted to tf.float32 assuming that the range
is [0, MAX], where MAX is the largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
if add_image_summaries:
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
if add_image_summaries:
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
distorted_image = tf.subtract(distorted_image, 0.5)
distorted_image = tf.multiply(distorted_image, 2.0)
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
  If height and width are specified, the image is resized to that size with
  bilinear interpolation.
  If central_fraction is specified, the central fraction of the input image
  is cropped first.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it is converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is the largest positive representable number for
      the int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    # Crop the central region of the image, keeping central_fraction
    # (87.5% by default) of the original height and width.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
def preprocess_image(image, height, width,
is_training=False,
bbox=None,
fast_mode=True,
add_image_summaries=True):
"""Pre-process one image for training or evaluation.
Args:
    image: 3-D Tensor [height, width, channels] with the image. If dtype is
      tf.float32 then the range should be [0, 1], otherwise it is converted
      to tf.float32 assuming that the range is [0, MAX], where MAX is the
      largest positive representable number for the int(8/16/32) data type
      (see `tf.image.convert_image_dtype` for details).
    height: integer, image expected height.
    width: integer, image expected width.
    is_training: Boolean. If True, the image is transformed for training;
      otherwise, it is transformed for evaluation.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
return preprocess_for_train(image, height, width, bbox, fast_mode,
add_image_summaries=add_image_summaries)
else:
return preprocess_for_eval(image, height, width)
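# A minimal usage sketch for this module (the 299x299 target size and the
# decode_jpeg input are illustrative assumptions, not requirements of the
# code above):
#   image = tf.image.decode_jpeg(encoded_jpeg, channels=3)
#   train_image = preprocess_image(image, 299, 299, is_training=True,
#                                  bbox=None, fast_mode=True)
#   eval_image = preprocess_image(image, 299, 299, is_training=False)
# Both outputs are float32 tensors scaled to the [-1, 1] range.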
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/preprocessing/inception_preprocessing.py |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities for preprocessing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
def preprocess_image(image, output_height, output_width, is_training):
"""Preprocesses the given image.
Args:
image: A `Tensor` representing an image of arbitrary size.
output_height: The height of the image after preprocessing.
output_width: The width of the image after preprocessing.
is_training: `True` if we're preprocessing the image for training and
`False` otherwise.
Returns:
A preprocessed image.
"""
image = tf.to_float(image)
image = tf.image.resize_image_with_crop_or_pad(
image, output_width, output_height)
image = tf.subtract(image, 128.0)
image = tf.div(image, 128.0)
return image
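# With uint8 inputs in [0, 255], the normalization above maps pixel values
# approximately into [-1, 1]: 0 -> -1.0, 128 -> 0.0, 255 -> ~0.992.
# A minimal usage sketch (the 28x28 target shape is an illustrative assumption):
#   image = tf.random_uniform((32, 32, 1), maxval=255)
#   processed = preprocess_image(image, output_height=28, output_width=28,
#                                is_training=False)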
| DeepLearningExamples-master | TensorFlow/Detection/SSD/models/research/slim/preprocessing/lenet_preprocessing.py |
#
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from absl import flags
from time import time
import tensorflow as tf
import dllogger
from object_detection import model_hparams
from object_detection import model_lib
from object_detection.utils.exp_utils import setup_dllogger
import numpy as np
flags.DEFINE_string('checkpoint_dir', None, 'Path to directory holding a checkpoint. If '
                    '`checkpoint_dir` is not provided, the benchmark runs on a randomly initialized model')
flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config file.')
flags.DEFINE_string("raport_file", default="summary.json",
help="Path to dlloger json")
flags.DEFINE_integer('warmup_iters', 100, 'Number of iterations skipped during benchmark')
flags.DEFINE_integer('benchmark_iters', 300, 'Number of iterations measured by benchmark')
flags.DEFINE_integer('batch_size', 1, 'Number of inputs processed in parallel')
flags.DEFINE_list("percentiles", default=['90', '95', '99'],
help="percentiles for latency confidence intervals")
FLAGS = flags.FLAGS
flags.mark_flag_as_required('pipeline_config_path')
def build_estimator():
session_config = tf.ConfigProto()
config = tf.estimator.RunConfig(session_config=session_config)
train_and_eval_dict = model_lib.create_estimator_and_inputs(
run_config=config,
hparams=model_hparams.create_hparams(None),
pipeline_config_path=FLAGS.pipeline_config_path)
estimator = train_and_eval_dict['estimator']
eval_input_fns = train_and_eval_dict['eval_input_fns']
return estimator, eval_input_fns[0]
def build_benchmark_input_fn(input_fn):
def benchmark_input_fn(params={}):
params['batch_size'] = FLAGS.batch_size
return input_fn(params).repeat().take(FLAGS.warmup_iters + FLAGS.benchmark_iters)
return benchmark_input_fn
class TimingHook(tf.train.SessionRunHook):
def __init__(self):
super(TimingHook, self).__init__()
setup_dllogger(enabled=True, filename=FLAGS.raport_file)
self.times = []
def before_run(self, *args, **kwargs):
super(TimingHook, self).before_run(*args, **kwargs)
self.start_time = time()
def log_progress(self):
if sys.stdout.isatty():
print(len(self.times) - FLAGS.warmup_iters, '/', FLAGS.benchmark_iters, ' '*10, end='\r')
def after_run(self, *args, **kwargs):
super(TimingHook, self).after_run(*args, **kwargs)
self.times.append(time() - self.start_time)
self.log_progress()
def end(self, *args, **kwargs):
super(TimingHook, self).end(*args, **kwargs)
throughput = sum([1/x for x in self.times[FLAGS.warmup_iters:]]) * FLAGS.batch_size / FLAGS.benchmark_iters
latency_avg = 1000 * sum(self.times[FLAGS.warmup_iters:]) / FLAGS.benchmark_iters
latency_data = 1000 * np.array(self.times[FLAGS.warmup_iters:])
summary = {
'infer_throughput': throughput,
'eval_avg_latency': latency_avg
}
print()
print('Benchmark result:', throughput, 'img/s')
for p in FLAGS.percentiles:
p = int(p)
tf.logging.info("Latency {}%: {:>4.2f} ms".format(
p, np.percentile(latency_data, p)))
summary[f'eval_{p}%_latency'] = np.percentile(latency_data, p)
dllogger.log(step=tuple(), data=summary)
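# Worked example of the statistics computed in TimingHook.end (numbers are
# illustrative): with batch_size=8, warmup_iters=100, benchmark_iters=300 and a
# constant per-step time of 0.02 s,
#   throughput  = sum(1 / 0.02 for _ in range(300)) * 8 / 300 = 400 img/s
#   latency_avg = 1000 * (300 * 0.02) / 300 = 20 ms
# and every reported latency percentile is also 20 ms.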
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
estimator, eval_input_fn = build_estimator()
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_dir) \
if FLAGS.checkpoint_dir \
else None
results = estimator.predict(
input_fn=build_benchmark_input_fn(eval_input_fn),
checkpoint_path=checkpoint_path,
hooks=[ TimingHook() ],
yield_single_examples=False
)
list(results)
if __name__ == '__main__':
tf.app.run()
| DeepLearningExamples-master | TensorFlow/Detection/SSD/examples/SSD320_inference.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup
setup(
name='moflow_pyt',
packages=[
'moflow',
'moflow.data',
'moflow.model',
'moflow.runtime'
],
version='0.0.1',
description='MoFlow: an invertible flow model for generating molecular graphs',
)
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/setup.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import asdict, dataclass, field
import json
from typing import Dict, List, Optional
from rdkit import Chem
_VALID_IDX_FILE = 'valid_idx_{}.json'
_CSV_FILE = '{}.csv'
_DATASET_FILE = '{}_relgcn_kekulized_ggnp.npz'
DUMMY_CODE = 0
CODE_TO_BOND = dict(enumerate([
'DUMMY',
Chem.rdchem.BondType.SINGLE,
Chem.rdchem.BondType.DOUBLE,
Chem.rdchem.BondType.TRIPLE,
]))
BOND_TO_CODE = {v: k for k, v in CODE_TO_BOND.items()}
ATOM_VALENCY = {6: 4, 7: 3, 8: 2, 9: 1, 15: 3, 16: 2, 17: 1, 35: 1, 53: 1}
@dataclass
class DatasetConfig:
dataset_name: str
atomic_num_list: List[int]
max_num_atoms: int
labels: List[str]
smiles_col: str
code_to_atomic: Dict[int, int] = field(init=False)
atomic_to_code: Dict[int, int] = field(init=False)
valid_idx_file: str = field(init=False)
csv_file: str = field(init=False)
dataset_file: str = field(init=False)
def __post_init__(self):
self.valid_idx_file = _VALID_IDX_FILE.format(self.dataset_name)
self.csv_file = _CSV_FILE.format(self.dataset_name)
self.dataset_file = _DATASET_FILE.format(self.dataset_name)
self.code_to_atomic = dict(enumerate(sorted([DUMMY_CODE] + self.atomic_num_list)))
self.atomic_to_code = {v: k for k, v in self.code_to_atomic.items()}
@dataclass
class AtomFlowConfig:
n_flow: int
hidden_gnn: List[int]
hidden_lin: List[int]
n_block: int = 1
mask_row_size_list: List[int] = field(default_factory=lambda: [1])
mask_row_stride_list: List[int] = field(default_factory=lambda: [1])
@dataclass
class BondFlowConfig:
hidden_ch: List[int]
conv_lu: int
n_squeeze: int
n_block: int = 1
n_flow: int = 10
@dataclass
class ModelConfig:
atom_config: AtomFlowConfig
bond_config: BondFlowConfig
noise_scale: float = 0.6
learn_dist: bool = True
@dataclass
class Config:
dataset_config: DatasetConfig
model_config: ModelConfig
max_num_nodes: Optional[int] = None
num_node_features: Optional[int] = None
num_edge_features: int = len(CODE_TO_BOND)
z_dim: int = field(init=False)
def __post_init__(self):
if self.max_num_nodes is None:
self.max_num_nodes = self.dataset_config.max_num_atoms
if self.num_node_features is None:
self.num_node_features = len(self.dataset_config.code_to_atomic)
bonds_dim = self.max_num_nodes * self.max_num_nodes * self.num_edge_features
atoms_dim = self.max_num_nodes * self.num_node_features
self.z_dim = bonds_dim + atoms_dim
def save(self, path):
self.path = path
with open(path, 'w') as f:
json.dump(asdict(self), f, indent=4, sort_keys=True)
@classmethod
def load(cls, path):
with open(path, 'r') as f:
data = json.load(f)
return cls(**data)
def __repr__(self) -> str:
return json.dumps(asdict(self), indent=4, separators=(',', ': '))
ZINC250K_CONFIG = Config(
max_num_nodes=40,
dataset_config=DatasetConfig(
dataset_name='zinc250k',
atomic_num_list=[6, 7, 8, 9, 15, 16, 17, 35, 53],
max_num_atoms=38,
labels=['logP', 'qed', 'SAS'],
smiles_col='smiles',
),
model_config=ModelConfig(
AtomFlowConfig(
n_flow=38,
hidden_gnn=[256],
hidden_lin=[512, 64],
),
BondFlowConfig(
n_squeeze=20,
hidden_ch=[512, 512],
conv_lu=2
),
)
)
CONFIGS = {'zinc250k': ZINC250K_CONFIG}
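# Worked example of the z_dim computed in Config.__post_init__ for
# ZINC250K_CONFIG above: max_num_nodes=40, num_edge_features=4 (dummy + 3 bond
# types) and num_node_features=10 (dummy code + 9 atom types), so
#   bonds_dim = 40 * 40 * 4 = 6400
#   atoms_dim = 40 * 10 = 400
#   z_dim = 6400 + 400 = 6800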
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/config.py |
DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/__init__.py |
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import re
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from rdkit import Chem
import torch
from moflow.config import Config, ATOM_VALENCY, CODE_TO_BOND, DUMMY_CODE
def postprocess_predictions(x: Union[torch.Tensor, np.ndarray], adj: Union[torch.Tensor, np.ndarray], config: Config) -> Tuple[np.ndarray, np.ndarray]:
assert x.ndim == 3 and adj.ndim == 4, 'expected batched predictions'
n = config.dataset_config.max_num_atoms
adj = adj[:, :, :n, :n]
x = x[:, :n]
atoms = torch.argmax(x, dim=2)
atoms = _to_numpy_array(atoms)
adj = torch.argmax(adj, dim=1)
adj = _to_numpy_array(adj)
decoded = np.zeros_like(atoms)
for code, atomic_num in config.dataset_config.code_to_atomic.items():
decoded[atoms == code] = atomic_num
return decoded, adj
def convert_predictions_to_mols(adj: np.ndarray, x: np.ndarray, correct_validity: bool = False) -> List[Chem.Mol]:
molecules = [construct_mol(x_elem, adj_elem) for x_elem, adj_elem in zip(x, adj)]
if correct_validity:
molecules = [correct_mol(mol) for mol in molecules]
return molecules
def construct_mol(atoms: np.ndarray, adj: np.ndarray) -> Chem.Mol:
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
atoms_exist = (atoms != 0)
atoms = atoms[atoms_exist]
adj = adj[atoms_exist][:, atoms_exist]
mol = Chem.RWMol()
for atom in atoms:
mol.AddAtom(Chem.Atom(int(atom)))
for start, end in zip(*np.where(adj != DUMMY_CODE)):
if start > end:
mol.AddBond(int(start), int(end), CODE_TO_BOND[int(adj[start, end])])
            # Add a formal charge to the atom: e.g. [O+], [N+], [S+].
            # [O-], [N-], [S-], [NH+] etc. are not supported.
flag, atomid_valence = check_valency(mol)
if flag:
continue
else:
assert len(atomid_valence) == 2
idx = atomid_valence[0]
v = atomid_valence[1]
an = mol.GetAtomWithIdx(idx).GetAtomicNum()
if an in (7, 8, 16) and (v - ATOM_VALENCY[an]) == 1:
mol.GetAtomWithIdx(idx).SetFormalCharge(1)
return mol
def valid_mol(x: Optional[Chem.Mol]) -> Optional[Chem.Mol]:
if x is None:
# RDKit wasn't able to create the mol
return None
smi = Chem.MolToSmiles(x, isomericSmiles=True)
if len(smi) == 0 or '.' in smi:
# Mol is empty or fragmented
return None
reloaded = Chem.MolFromSmiles(smi)
# if smiles is invalid - it will be None, otherwise mol is valid
return reloaded
def check_valency(mol: Chem.Mol) -> Tuple[bool, List[int]]:
"""Checks that no atoms in the mol have exceeded their possible
valency. Returns True if no valency issues, False otherwise
plus information about problematic atom.
"""
try:
Chem.SanitizeMol(mol, sanitizeOps=Chem.SanitizeFlags.SANITIZE_PROPERTIES)
return True, None
except ValueError as e:
e = str(e)
p = e.find('#')
e_sub = e[p:]
atomid_valence = list(map(int, re.findall(r'\d+', e_sub)))
return False, atomid_valence
def correct_mol(mol: Chem.Mol) -> Chem.Mol:
flag, atomid_valence = check_valency(mol)
while not flag:
assert len(atomid_valence) == 2
idx = atomid_valence[0]
v = atomid_valence[1]
queue = []
for b in mol.GetAtomWithIdx(idx).GetBonds():
queue.append(
(b.GetIdx(), int(b.GetBondType()), b.GetBeginAtomIdx(), b.GetEndAtomIdx())
)
queue.sort(key=lambda tup: tup[1], reverse=True)
if len(queue) > 0:
start = queue[0][2]
end = queue[0][3]
t = queue[0][1] - 1
mol.RemoveBond(start, end)
if t >= 1:
mol.AddBond(start, end, CODE_TO_BOND[t])
flag, atomid_valence = check_valency(mol)
# if mol is fragmented, select the largest fragment
mols = Chem.GetMolFrags(mol, asMols=True)
mol = max(mols, key=lambda m: m.GetNumAtoms())
return mol
def predictions_to_smiles(adj: torch.Tensor, x: torch.Tensor, config: Config) -> List[str]:
x, adj = postprocess_predictions(x, adj, config=config)
valid = [Chem.MolToSmiles(construct_mol(x_elem, adj_elem), isomericSmiles=True)
for x_elem, adj_elem in zip(x, adj)]
return valid
def check_validity(molecules: List[Chem.Mol]) -> dict:
valid = [valid_mol(mol) for mol in molecules]
valid = [mol for mol in valid if mol is not None]
n_mols = len(molecules)
valid_ratio = len(valid) / n_mols
valid_smiles = [Chem.MolToSmiles(mol, isomericSmiles=False) for mol in valid]
unique_smiles = list(set(valid_smiles))
unique_ratio = 0.
if len(valid) > 0:
unique_ratio = len(unique_smiles) / len(valid)
valid_mols = [Chem.MolFromSmiles(s) for s in valid_smiles]
abs_unique_ratio = len(unique_smiles) / n_mols
results = dict()
results['valid_mols'] = valid_mols
results['valid_smiles'] = valid_smiles
results['valid_ratio'] = valid_ratio * 100
results['unique_ratio'] = unique_ratio * 100
results['abs_unique_ratio'] = abs_unique_ratio * 100
return results
def check_novelty(gen_smiles: List[str], train_smiles: List[str], n_generated_mols: int):
if len(gen_smiles) == 0:
novel_ratio = 0.
abs_novel_ratio = 0.
else:
duplicates = [1 for mol in gen_smiles if mol in train_smiles]
novel = len(gen_smiles) - sum(duplicates)
novel_ratio = novel * 100. / len(gen_smiles)
abs_novel_ratio = novel * 100. / n_generated_mols
return novel_ratio, abs_novel_ratio
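# Worked example for check_novelty (numbers are illustrative): with 90 valid
# SMILES obtained from 100 generated molecules, 10 of which also occur in the
# training set, novel = 90 - 10 = 80, so
#   novel_ratio     = 80 * 100 / 90  ~ 88.9
#   abs_novel_ratio = 80 * 100 / 100 = 80.0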
def _to_numpy_array(a):
if isinstance(a, torch.Tensor):
a = a.cpu().detach().numpy()
elif isinstance(a, np.ndarray):
pass
else:
        raise TypeError("a ({}) is not a torch.Tensor or np.ndarray".format(type(a)))
return a
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple
import numpy as np
from torch.cuda.amp import autocast
import torch
from moflow.config import CONFIGS, Config
from moflow.model.model import MoFlow
from moflow.utils import convert_predictions_to_mols, postprocess_predictions
from moflow.runtime.arguments import PARSER
from moflow.runtime.common import get_newest_checkpoint, load_state
from moflow.runtime.distributed_utils import get_device
from moflow.runtime.logger import PerformanceLogger, setup_logging
def infer(model: MoFlow, config: Config, device: torch.device, *,
ln_var: float = 0, temp: float = 0.6, mu: Optional[torch.Tensor] = None,
batch_size: int = 20) -> Tuple[np.ndarray, np.ndarray]:
if mu is None:
mu = torch.zeros(config.z_dim, dtype=torch.float32, device=device)
sigma = temp * np.sqrt(np.exp(ln_var))
with torch.no_grad():
z = torch.normal(mu.reshape(-1, config.z_dim).repeat((batch_size, 1)), sigma)
adj, x = model.reverse(z)
x, adj = postprocess_predictions(x, adj, config=config)
return adj, x
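# A minimal usage sketch for infer() under assumed arguments (the __main__
# block below shows the full CLI flow):
#   config = CONFIGS['zinc250k']
#   device = get_device(0)
#   model = MoFlow(config).to(device).eval()
#   adj, x = infer(model, config, device=device, ln_var=0.0, temp=0.6, batch_size=20)
#   mols = convert_predictions_to_mols(adj, x, correct_validity=True)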
if __name__ == '__main__':
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
args = PARSER.parse_args()
logger = setup_logging(args)
perf_logger = PerformanceLogger(logger, args.batch_size, args.warmup_steps, mode='generate')
if args.predictions_path:
from rdkit.Chem import SmilesWriter
smiles_writer = SmilesWriter(args.predictions_path)
snapshot_path = get_newest_checkpoint(args.results_dir)
config = CONFIGS[args.config_name]
model = MoFlow(config)
device = get_device(args.local_rank)
if snapshot_path is not None:
epoch, ln_var = load_state(snapshot_path, model, device=device)
elif args.allow_untrained:
epoch, ln_var = 0, 0
else:
raise RuntimeError('Generating molecules from an untrained network! '
'If this was intentional, pass --allow_untrained flag.')
model.to(device=device, memory_format=torch.channels_last)
model.eval()
if args.jit:
model.atom_model = torch.jit.script(model.atom_model)
model.bond_model = torch.jit.script(model.bond_model)
if args.steps == -1:
args.steps = 1
with autocast(enabled=args.amp):
for i in range(args.steps):
perf_logger.update()
results = infer(
model, config, ln_var=ln_var, temp=args.temperature, batch_size=args.batch_size,
device=device)
if (i + 1) % args.log_interval == 0:
perf_logger.summarize(step=(0, i, i))
if args.predictions_path:
mols_batch = convert_predictions_to_mols(*results, correct_validity=args.correct_validity)
for mol in mols_batch:
smiles_writer.write(mol)
perf_logger.summarize(step=tuple())
if args.predictions_path:
smiles_writer.close()
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/generate.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from moflow.config import CONFIGS
from moflow.runtime.logger import LOGGING_LEVELS
PARSER = argparse.ArgumentParser()
PARSER.add_argument('--data_dir', type=str, default='/data', help='Location for the dataset.')
PARSER.add_argument('--config_name', type=str, default='zinc250k', choices=list(CONFIGS),
help='The config to choose. This parameter allows one to switch between different datasets '
'and their dedicated configurations of the neural network. By default, a pre-defined "zinc250k" config is used.')
PARSER.add_argument('--results_dir', type=str, default='/results', help='Directory where checkpoints are stored.')
PARSER.add_argument('--predictions_path', type=str, default='/results/predictions.smi',
help='Path to store generated molecules. If an empty string is provided, predictions will not be '
'saved (useful for benchmarking and debugging).')
PARSER.add_argument('--log_path', type=str, default=None,
help='Path for DLLogger log. This file will contain information about the speed and '
'accuracy of the model during training and inference. Note that if the file '
'already exists, new logs will be added at the end.')
PARSER.add_argument('--log_interval', type=int, default=20, help='Frequency for writing logs, expressed in steps.')
PARSER.add_argument('--warmup_steps', type=int, default=20,
help='Number of warmup steps. This value is used for benchmarking and for CUDA graph capture.')
PARSER.add_argument('--steps', type=int, default=-1,
help='Number of steps used for training/inference. This parameter allows finishing '
'training earlier than the specified number of epochs. If used with inference, '
'it allows generating more molecules (by default only a single batch of molecules is generated).')
PARSER.add_argument('--save_epochs', type=int, default=5,
help='Frequency for saving checkpoints, expressed in epochs. If -1 is provided, checkpoints will not be saved.')
PARSER.add_argument('--eval_epochs', type=int, default=5,
help='Evaluation frequency, expressed in epochs. If -1 is provided, an evaluation will not be performed.')
PARSER.add_argument('--learning_rate', type=float, default=0.0005, help='Base learning rate.')
PARSER.add_argument('--beta1', type=float, default=0.9, help='beta1 parameter for the optimizer.')
PARSER.add_argument('--beta2', type=float, default=0.99, help='beta2 parameter for the optimizer.')
PARSER.add_argument('--clip', type=float, default=1, help='Gradient clipping norm.')
PARSER.add_argument('--epochs', type=int, default=300,
help='Number of training epochs. Note that you can finish training mid-epoch by using "--steps" flag.')
PARSER.add_argument('--batch_size', type=int, default=512, help='Batch size per GPU.')
PARSER.add_argument('--num_workers', type=int, default=4, help='Number of workers in the data loader.')
PARSER.add_argument('--seed', type=int, default=1, help='Random seed used to initialize the distributed loaders.')
PARSER.add_argument('--local_rank', default=os.environ.get('LOCAL_RANK', 0), type=int,
help='rank of the GPU, used to launch distributed training. This argument is specified '
'automatically by `torchrun` and does not have to be provided by the user.')
PARSER.add_argument('--temperature', type=float, default=0.3, help='Temperature used for sampling.')
PARSER.add_argument('--val_batch_size', type=int, default=100, help='Number of molecules to generate during validation step.')
PARSER.add_argument('--allow_untrained', action='store_true',
help='Allow sampling molecules from an untrained network. Useful for performance benchmarking or debugging purposes.')
PARSER.add_argument('--correct_validity', action='store_true', help='Apply validity correction after the generation of the molecules.')
PARSER.add_argument('--amp', action='store_true', help='Use Automatic Mixed Precision.')
PARSER.add_argument('--cuda_graph', action='store_true', help='Capture GPU kernels with CUDA graphs. This option allows to speed up training.')
PARSER.add_argument('--jit', action='store_true', help='Compile the model with `torch.jit.script`. Can be used to speed up training or inference.')
PARSER.add_argument('--verbosity', type=int, default=1, choices=list(LOGGING_LEVELS),
help='Verbosity level. Specify the following values: 0, 1, 2, 3, where 0 means minimal '
'verbosity (errors only) and 3 - maximal (debugging).')
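# Example invocations using the flags defined above (paths and flag values are
# illustrative assumptions):
#   python moflow/runtime/train.py --config_name zinc250k --amp --batch_size 512
#   python moflow/runtime/generate.py --allow_untrained \
#       --predictions_path /results/predictions.smi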
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/arguments.py |
DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/__init__.py |
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABC, abstractmethod
import logging
import time
import dllogger
from dllogger import JSONStreamBackend, StdOutBackend, Verbosity
import numpy as np
LOGGING_LEVELS = dict(enumerate([logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG]))
def get_dllogger(args):
backends = []
if args.local_rank == 0:
backends.append(StdOutBackend(Verbosity.VERBOSE))
if args.log_path is not None:
backends.append(JSONStreamBackend(Verbosity.VERBOSE, args.log_path, append=True))
dllogger.init(backends=backends)
return dllogger
def setup_logging(args):
logging.basicConfig(
format='%(asctime)s %(levelname)s:\t%(message)s', datefmt='%H:%M:%S', level=LOGGING_LEVELS[args.verbosity], force=True
)
return get_dllogger(args)
class BaseLogger(ABC):
@abstractmethod
def update(self, **kwargs) -> None:
pass
@abstractmethod
def process_stats(self) -> dict:
return {}
@abstractmethod
def reset(self) -> None:
pass
def summarize(self, step: tuple) -> None:
stats = self.process_stats()
if len(stats) == 0:
logging.warn('Empty stats for logging, skipping')
return
self.logger.log(step=step, data=stats)
self.logger.flush()
class PerformanceLogger(BaseLogger):
def __init__(self, logger, batch_size: int, warmup_steps: int = 100, mode: str = 'train'):
self.logger = logger
self.batch_size = batch_size
self.warmup_steps = warmup_steps
self._step = 0
self._timestamps = []
self.mode = mode
def update(self, **kwargs) -> None:
self._step += 1
if self._step >= self.warmup_steps:
self._timestamps.append(time.time())
def reset(self) -> None:
self._step = 0
self._timestamps = []
def process_stats(self) -> dict:
if len(self._timestamps) < 2:
logging.warn('Cannot process performance stats - less than 2 measurements collected')
return {}
timestamps = np.asarray(self._timestamps)
deltas = np.diff(timestamps)
throughput = (self.batch_size / deltas).mean()
stats = {
f'throughput_{self.mode}': throughput,
f'latency_{self.mode}_mean': deltas.mean(),
f'total_time_{self.mode}': timestamps[-1] - timestamps[0],
}
for level in [90, 95, 99]:
stats.update({f'latency_{self.mode}_{level}': np.percentile(deltas, level)})
return stats
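# Worked example for PerformanceLogger.process_stats (illustrative numbers):
# with batch_size=512 and timestamps spaced 0.25 s apart, every delta is 0.25 s,
# so throughput_train = 512 / 0.25 = 2048 samples/s, latency_train_mean = 0.25 s
# and each latency percentile is 0.25 s as well. Steps recorded during the first
# `warmup_steps` calls to update() are excluded from the statistics.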
class MetricsLogger(BaseLogger):
def __init__(self, logger, mode: str = 'train'):
self.logger = logger
self.mode = mode
self._metrics_dict = {}
def update(self, metrics: dict, **kwargs) -> None:
for metrics_name, metric_val in metrics.items():
if metrics_name not in self._metrics_dict:
self._metrics_dict[metrics_name] = []
self._metrics_dict[metrics_name].append(float(metric_val))
def reset(self) -> None:
self._metrics_dict = {}
def process_stats(self) -> dict:
stats = {}
for metric_name, metric_val in self._metrics_dict.items():
stats[metric_name] = np.mean(metric_val)
return stats
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/logger.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from glob import glob
import logging
import os
from typing import List, Optional, Tuple
import torch
from moflow.model.model import MoFlow
CHECKPOINT_PATTERN = 'model_snapshot_epoch_%s'
def _sort_checkpoints(paths: List[str]) -> List[str]:
return sorted(paths, key=lambda x: int(x.split('_')[-1]))
def save_state(dir: str, model: MoFlow, optimizer: torch.optim.Optimizer, ln_var: float, epoch: int, keep: int = 1) -> None:
"""Save training state in a given dir. This checkpoint can be used to resume training or run inference
with the trained model. This function will keep up to <keep> newest checkpoints and remove the oldest ones.
"""
save_path = os.path.join(dir, CHECKPOINT_PATTERN % (epoch + 1))
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'ln_var': ln_var,
'epoch': epoch,
}
torch.save(state, save_path)
if keep > 0:
filenames = glob(os.path.join(dir, CHECKPOINT_PATTERN % '*'))
if len(filenames) <= keep:
return
to_del = _sort_checkpoints(filenames)[:-keep]
for path in to_del:
os.remove(path)
def load_state(path: str, model: MoFlow, device: torch.device, optimizer: Optional[torch.optim.Optimizer] = None) -> Tuple[int, float]:
"""Load model's and optimizer's state from a given file.
This function returns the number of epochs the model was trained for and natural logarithm of variance
the for the distribution of the latent space.
"""
state = torch.load(path, map_location=device)
model.load_state_dict(state['model'])
if optimizer is not None:
optimizer.load_state_dict(state['optimizer'])
return state['epoch'], state['ln_var']
def get_newest_checkpoint(model_dir: str, validate: bool = True) -> str:
"""Find newest checkpoint in a given directory.
If validate is set to True, this function will also verify that the file can be loaded and
select older checkpoint if neccessary.
"""
filenames = glob(os.path.join(model_dir, CHECKPOINT_PATTERN % '*'))
if len(filenames) == 0:
logging.info(f'No checkpoints available')
return None
paths = _sort_checkpoints(filenames)
if validate:
for latest_path in paths[::-1]:
try:
torch.load(latest_path, map_location='cpu')
break
except:
logging.info(f'Checkpoint {latest_path} is corrupted')
else:
logging.info(f'All available checkpoints were corrupted')
return None
else:
latest_path = paths[-1]
logging.info(f'Found checkpoint {latest_path}')
return latest_path
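# A minimal usage sketch for the checkpoint helpers above (the directory path
# is an illustrative assumption):
#   save_state('/results', model, optimizer, ln_var=0.0, epoch=4, keep=5)
#   path = get_newest_checkpoint('/results')
#   if path is not None:
#       epoch, ln_var = load_state(path, model, device=torch.device('cuda'))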
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/common.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import torch
import torch.distributed as dist
def get_device(local_rank: int) -> torch.device:
if torch.cuda.is_available():
torch.cuda.set_device(local_rank % torch.cuda.device_count())
device = torch.device("cuda")
else:
device = torch.device("cpu")
logging.warning("not using a(ny) GPU(s)!")
return device
def get_world_size() -> int:
return int(os.environ.get("WORLD_SIZE", 1))
def reduce_tensor(tensor: torch.Tensor, num_gpus: int) -> torch.Tensor:
if num_gpus > 1:
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
if rt.is_floating_point():
rt = rt / num_gpus
else:
rt = rt // num_gpus
return rt
return tensor
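# Worked example for reduce_tensor (illustrative numbers): with num_gpus=4 and
# a loss tensor holding 1.0, 2.0, 3.0 and 4.0 on the four ranks, all_reduce sums
# the values to 10.0 and the floating-point branch divides by 4, so every rank
# receives 2.5. Integer tensors are summed and floor-divided instead.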
def init_distributed() -> bool:
world_size = int(os.environ.get("WORLD_SIZE", 1))
distributed = world_size > 1
if distributed:
backend = "nccl" if torch.cuda.is_available() else "gloo"
os.environ["NCCL_ASYNC_ERROR_HANDLING"] = "0" # Needed for CUDA graphs
dist.init_process_group(backend=backend, init_method="env://")
assert dist.is_initialized()
if get_rank() == 0:
logging.info(f"Distributed initialized. World size: {world_size}")
return distributed
def get_rank() -> int:
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/distributed_utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import argparse
import functools
import json
import logging
import os
import signal
from typing import Dict
from apex.contrib.clip_grad import clip_grad_norm_
from apex.optimizers import FusedAdam as Adam
import torch
from torch.cuda.amp import autocast, GradScaler
from torch.utils.data.distributed import DistributedSampler
from moflow.config import CONFIGS, Config
from moflow.data.data_loader import NumpyTupleDataset
from moflow.data import transform
from moflow.model.model import MoFlow, MoFlowLoss
from moflow.model.utils import initialize
from moflow.runtime.logger import MetricsLogger, PerformanceLogger, setup_logging
from moflow.runtime.arguments import PARSER
from moflow.runtime.common import get_newest_checkpoint, load_state, save_state
from moflow.runtime.distributed_utils import (
get_device, get_rank, get_world_size, init_distributed, reduce_tensor
)
from moflow.runtime.generate import infer
from moflow.utils import check_validity, convert_predictions_to_mols
torch._C._jit_set_autocast_mode(True)
def run_validation(model: MoFlow, config: Config, ln_var: float, args: argparse.Namespace,
is_distributed: bool, world_size: int, device: torch.device) -> Dict[str, float]:
model.eval()
if is_distributed:
model_callable = model.module
else:
model_callable = model
result = infer(model_callable, config, device=device, ln_var=ln_var, batch_size=args.val_batch_size,
temp=args.temperature)
mols = convert_predictions_to_mols(*result, correct_validity=args.correct_validity)
validity_info = check_validity(mols)
valid_ratio = torch.tensor(validity_info['valid_ratio'], dtype=torch.float32, device=device)
unique_ratio = torch.tensor(validity_info['unique_ratio'], dtype=torch.float32, device=device)
valid_value = reduce_tensor(valid_ratio, world_size).detach().cpu().numpy()
unique_value = reduce_tensor(unique_ratio, world_size).detach().cpu().numpy()
model.train()
return {'valid': valid_value, 'unique': unique_value}
def train(args: argparse.Namespace) -> None:
os.makedirs(args.results_dir, exist_ok=True)
# Device configuration
device = get_device(args.local_rank)
torch.cuda.set_stream(torch.cuda.Stream())
is_distributed = init_distributed()
world_size = get_world_size()
local_rank = get_rank()
logger = setup_logging(args)
if local_rank == 0:
perf_logger = PerformanceLogger(logger, args.batch_size * world_size, args.warmup_steps)
acc_logger = MetricsLogger(logger)
if local_rank == 0:
logging.info('Input args:')
logging.info(json.dumps(vars(args), indent=4, separators=(',', ':')))
# Model configuration
assert args.config_name in CONFIGS
config = CONFIGS[args.config_name]
data_file = config.dataset_config.dataset_file
transform_fn = functools.partial(transform.transform_fn, config=config)
valid_idx = transform.get_val_ids(config, args.data_dir)
if local_rank == 0:
logging.info('Config:')
logging.info(str(config))
model = MoFlow(config)
model.to(device)
loss_module = MoFlowLoss(config)
loss_module.to(device)
# Datasets:
dataset = NumpyTupleDataset.load(
os.path.join(args.data_dir, data_file),
transform=transform_fn,
)
if len(valid_idx) == 0:
raise ValueError('Empty validation set!')
train_idx = [t for t in range(len(dataset)) if t not in valid_idx]
train = torch.utils.data.Subset(dataset, train_idx)
test = torch.utils.data.Subset(dataset, valid_idx)
if world_size > 1:
sampler = DistributedSampler(train, seed=args.seed, drop_last=False)
else:
sampler = None
train_dataloader = torch.utils.data.DataLoader(
train,
batch_size=args.batch_size,
shuffle=sampler is None,
sampler=sampler,
num_workers=args.num_workers,
drop_last=True,
)
if local_rank == 0:
logging.info(f'Using {world_size} GPUs')
logging.info(f'Num training samples: {len(train)}')
logging.info(f'Minibatch-size: {args.batch_size}')
logging.info(f'Num Iter/Epoch: {len(train_dataloader)}')
logging.info(f'Num epoch: {args.epochs}')
if is_distributed:
train_dataloader.sampler.set_epoch(-1)
x, adj, *_ = next(iter(train_dataloader))
x = x.to(device)
adj = adj.to(device)
with autocast(enabled=args.amp):
initialize(model, (adj, x))
model.to(memory_format=torch.channels_last)
    # Tensor.to() is not in-place; assign the result so the channels_last layout is kept.
    adj = adj.to(memory_format=torch.channels_last)
if args.jit:
model.bond_model = torch.jit.script(model.bond_model)
model.atom_model = torch.jit.script(model.atom_model)
# make one pass in both directions to make sure that model works
with torch.no_grad():
_ = model(adj, x)
_ = model.reverse(torch.randn(args.batch_size, config.z_dim, device=device))
if is_distributed:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[local_rank],
output_device=local_rank,
)
loss_module = torch.nn.parallel.DistributedDataParallel(
loss_module,
device_ids=[local_rank],
output_device=local_rank,
)
model_callable = model.module
loss_callable = loss_module.module
else:
model_callable = model
loss_callable = loss_module
# Loss and optimizer
optimizer = Adam((*model.parameters(), *loss_module.parameters()), lr=args.learning_rate, betas=(args.beta1, args.beta2))
scaler = GradScaler()
if args.save_epochs == -1:
args.save_epochs = args.epochs
if args.eval_epochs == -1:
args.eval_epochs = args.epochs
if args.steps == -1:
args.steps = args.epochs * len(train_dataloader)
snapshot_path = get_newest_checkpoint(args.results_dir)
if snapshot_path is not None:
snapshot_epoch, ln_var = load_state(snapshot_path, model_callable, optimizer=optimizer, device=device)
loss_callable.ln_var = torch.nn.Parameter(torch.tensor(ln_var))
first_epoch = snapshot_epoch + 1
step = first_epoch * len(train_dataloader)
else:
first_epoch = 0
step = 0
if first_epoch >= args.epochs:
logging.info(f'Model was already trained for {first_epoch} epochs')
exit(0)
for epoch in range(first_epoch, args.epochs):
if local_rank == 0:
acc_logger.reset()
if is_distributed:
train_dataloader.sampler.set_epoch(epoch)
for i, batch in enumerate(train_dataloader):
if local_rank == 0:
perf_logger.update()
step += 1
optimizer.zero_grad()
x = batch[0].to(device)
        adj = batch[1].to(device=device, memory_format=torch.channels_last)
# Forward, backward and optimize
with_cuda_graph = (
args.cuda_graph
and step >= args.warmup_steps
and x.size(0) == args.batch_size
)
with autocast(enabled=args.amp, cache_enabled=not with_cuda_graph):
output = model(adj, x, with_cuda_graph=with_cuda_graph)
nll_x, nll_adj = loss_module(*output)
loss = nll_x + nll_adj
if args.amp:
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
clip_grad_norm_(model.parameters(), args.clip)
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
# Print log info
if (i + 1) % args.log_interval == 0:
nll_x_value = reduce_tensor(nll_x, world_size).item()
nll_adj_value = reduce_tensor(nll_adj, world_size).item()
loss_value = nll_x_value + nll_adj_value
if local_rank == 0:
acc_logger.update({
'loglik': loss_value,
'nll_x': nll_x_value,
'nll_adj': nll_adj_value
})
acc_logger.summarize(step=(epoch, i, i))
perf_logger.summarize(step=(epoch, i, i))
if step >= args.steps:
break
if (epoch + 1) % args.eval_epochs == 0:
with autocast(enabled=args.amp):
metrics = run_validation(model, config, loss_callable.ln_var.item(), args, is_distributed, world_size, device)
if local_rank == 0:
acc_logger.update(metrics)
# The same report for each epoch
if local_rank == 0:
acc_logger.summarize(step=(epoch,))
perf_logger.summarize(step=(epoch,))
# Save the model checkpoints
if (epoch + 1) % args.save_epochs == 0:
if local_rank == 0 or not is_distributed:
save_state(args.results_dir, model_callable, optimizer, loss_callable.ln_var.item(), epoch, keep=5)
if step >= args.steps:
break
if local_rank == 0:
acc_logger.summarize(step=tuple())
perf_logger.summarize(step=tuple())
if __name__ == '__main__':
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
args = PARSER.parse_args()
train(args)
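# Example launch commands for this script (paths and flag values are
# illustrative assumptions). Single GPU:
#   python moflow/runtime/train.py --config_name zinc250k --amp
# Multi-GPU via torchrun, which sets the LOCAL_RANK used by --local_rank:
#   torchrun --nproc_per_node=8 moflow/runtime/train.py \
#       --config_name zinc250k --amp --cuda_graph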
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/train.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import os
import numpy as np
import torch
from torch.cuda.amp import autocast
from moflow.config import CONFIGS
from moflow.data import transform
from moflow.data.data_loader import NumpyTupleDataset
from moflow.model.model import MoFlow
from moflow.utils import check_validity, convert_predictions_to_mols, predictions_to_smiles, check_novelty
from moflow.runtime.arguments import PARSER
from moflow.runtime.common import get_newest_checkpoint, load_state
from moflow.runtime.distributed_utils import get_device
from moflow.runtime.generate import infer
from moflow.runtime.logger import MetricsLogger, setup_logging
if __name__ == '__main__':
from rdkit import RDLogger
RDLogger.DisableLog('rdApp.*')
args = PARSER.parse_args()
logger = setup_logging(args)
snapshot_path = get_newest_checkpoint(args.results_dir)
config = CONFIGS[args.config_name]
model = MoFlow(config)
device = get_device(args.local_rank)
if snapshot_path is not None:
epoch, ln_var = load_state(snapshot_path, model, device=device)
elif args.allow_untrained:
epoch, ln_var = 0, 0
else:
raise RuntimeError('Generating molecules from an untrained network! '
'If this was intentional, pass --allow_untrained flag.')
model.to(device)
model.eval()
if args.steps == -1:
args.steps = 1
acc_logger = MetricsLogger(logger)
valid_idx = transform.get_val_ids(config, args.data_dir)
dataset = NumpyTupleDataset.load(
os.path.join(args.data_dir, config.dataset_config.dataset_file),
transform=partial(transform.transform_fn, config=config),
)
train_idx = [t for t in range(len(dataset)) if t not in valid_idx]
n_train = len(train_idx)
train_dataset = torch.utils.data.Subset(dataset, train_idx)
train_x = torch.Tensor(np.array([a[0] for a in train_dataset]))
train_adj = torch.Tensor(np.array([a[1] for a in train_dataset]))
train_smiles = set(predictions_to_smiles(train_adj, train_x, config))
with autocast(enabled=args.amp):
for i in range(args.steps):
results = infer(
model, config, ln_var=ln_var, temp=args.temperature, batch_size=args.batch_size,
device=device)
mols_batch = convert_predictions_to_mols(*results, correct_validity=args.correct_validity)
validity_info = check_validity(mols_batch)
novel_r, abs_novel_r = check_novelty(validity_info['valid_smiles'], train_smiles, len(mols_batch))
_, nuv = check_novelty(list(set(validity_info['valid_smiles'])), train_smiles, len(mols_batch))
metrics = {
'validity': validity_info['valid_ratio'],
'novelty': novel_r,
'uniqueness': validity_info['unique_ratio'],
'abs_novelty': abs_novel_r,
'abs_uniqueness': validity_info['abs_unique_ratio'],
'nuv': nuv,
}
acc_logger.update(metrics)
acc_logger.summarize(step=tuple())
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/runtime/evaluate.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from typing import Tuple
import torch
import torch.nn as nn
from moflow.model.basic import ActNorm, InvConv2dLU, InvConv2d
from moflow.model.coupling import AffineCoupling, GraphAffineCoupling
class Flow(nn.Module):
def __init__(self, in_channel, hidden_channels, conv_lu=2, mask_swap=False):
super(Flow, self).__init__()
# More stable to support more flows
self.actnorm = ActNorm(num_channels=in_channel, num_dims=4)
if conv_lu == 0:
self.invconv = InvConv2d(in_channel)
elif conv_lu == 1:
self.invconv = InvConv2dLU(in_channel)
elif conv_lu == 2:
self.invconv = None
else:
raise ValueError("conv_lu in {0,1,2}, 0:InvConv2d, 1:InvConv2dLU, 2:none-just swap to update in coupling")
self.coupling = AffineCoupling(in_channel, hidden_channels, mask_swap=mask_swap)
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out, logdet = self.actnorm(input)
if self.invconv is not None:
out, det1 = self.invconv(out)
else:
det1 = 0
out, det2 = self.coupling(out)
logdet = logdet + det1
if det2 is not None:
logdet = logdet + det2
return out, logdet
@torch.jit.export
def reverse(self, output: torch.Tensor) -> torch.Tensor:
input = self.coupling.reverse(output)
if self.invconv is not None:
input = self.invconv.reverse(input)
input = self.actnorm.reverse(input)
return input
class FlowOnGraph(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row):
super(FlowOnGraph, self).__init__()
self.n_node = n_node
self.in_dim = in_dim
self.hidden_dim_dict = hidden_dim_dict
self.masked_row = masked_row
self.actnorm = ActNorm(num_channels=n_node, num_dims=3)
self.coupling = GraphAffineCoupling(n_node, in_dim, hidden_dim_dict, masked_row)
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
adj, input = graph
out, logdet = self.actnorm(input)
det1 = 0
out, det2 = self.coupling((adj, out))
logdet = logdet + det1
if det2 is not None:
logdet = logdet + det2
return out, logdet
@torch.jit.export
def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
adj, output = graph
input = self.coupling.reverse((adj, output))
input = self.actnorm.reverse(input)
return input
class Block(nn.Module):
def __init__(self, in_channel, n_flow, squeeze_fold, hidden_channels, conv_lu=2):
super(Block, self).__init__()
self.squeeze_fold = squeeze_fold
squeeze_dim = in_channel * self.squeeze_fold * self.squeeze_fold
self.flows = nn.ModuleList()
for i in range(n_flow):
if conv_lu in (0, 1):
self.flows.append(Flow(squeeze_dim, hidden_channels,
conv_lu=conv_lu, mask_swap=False))
else:
self.flows.append(Flow(squeeze_dim, hidden_channels,
conv_lu=2, mask_swap=bool(i % 2)))
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
out = self._squeeze(input)
logdet = 0
for flow in self.flows:
out, det = flow(out)
logdet = logdet + det
out = self._unsqueeze(out)
return out, logdet
@torch.jit.export
def reverse(self, output: torch.Tensor) -> torch.Tensor:
input = self._squeeze(output)
for flow in self.flows[::-1]:
input = flow.reverse(input)
unsqueezed = self._unsqueeze(input)
return unsqueezed
def _squeeze(self, x: torch.Tensor) -> torch.Tensor:
"""Trade spatial extent for channels. In forward direction, convert each
1x4x4 volume of input into a 4x1x1 volume of output.
Args:
x (torch.Tensor): Input to squeeze or unsqueeze.
reverse (bool): Reverse the operation, i.e., unsqueeze.
Returns:
x (torch.Tensor): Squeezed or unsqueezed tensor.
"""
assert len(x.shape) == 4
b_size, n_channel, height, width = x.shape
fold = self.squeeze_fold
squeezed = x.view(b_size, n_channel, height // fold, fold, width // fold, fold)
squeezed = squeezed.permute(0, 1, 3, 5, 2, 4).contiguous()
out = squeezed.view(b_size, n_channel * fold * fold, height // fold, width // fold)
return out
def _unsqueeze(self, x: torch.Tensor) -> torch.Tensor:
assert len(x.shape) == 4
b_size, n_channel, height, width = x.shape
fold = self.squeeze_fold
unsqueezed = x.view(b_size, n_channel // (fold * fold), fold, fold, height, width)
unsqueezed = unsqueezed.permute(0, 1, 4, 2, 5, 3).contiguous()
out = unsqueezed.view(b_size, n_channel // (fold * fold), height * fold, width * fold)
return out
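# Shape walk-through for Block._squeeze / _unsqueeze (illustrative numbers):
# with squeeze_fold=2, _squeeze maps a (B, 4, 20, 20) tensor to (B, 16, 10, 10)
# and _unsqueeze inverts it back to (B, 4, 20, 20), so the flows above operate
# on the channel-rich squeezed representation.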
class BlockOnGraph(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, n_flow, mask_row_size=1, mask_row_stride=1):
super(BlockOnGraph, self).__init__()
assert 0 < mask_row_size < n_node
self.flows = nn.ModuleList()
for i in range(n_flow):
start = i * mask_row_stride
            masked_row = [r % n_node for r in range(start, start + mask_row_size)]
self.flows.append(FlowOnGraph(n_node, in_dim, hidden_dim_dict, masked_row=masked_row))
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
adj, input = graph
out = input
logdet = 0
for flow in self.flows:
out, det = flow((adj, out))
logdet = logdet + det
return out, logdet
@torch.jit.export
def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
adj, output = graph
input = output
for flow in self.flows[::-1]:
input = flow.reverse((adj, input))
return input
class Glow(nn.Module):
def __init__(self, in_channel, n_flow, n_block, squeeze_fold, hidden_channel, conv_lu=2):
super(Glow, self).__init__()
self.blocks = nn.ModuleList()
n_channel = in_channel
for i in range(n_block):
self.blocks.append(Block(n_channel, n_flow, squeeze_fold, hidden_channel, conv_lu=conv_lu))
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
logdet = 0
out = input
for block in self.blocks:
out, det = block(out)
logdet = logdet + det
return out, logdet
@torch.jit.export
def reverse(self, z: torch.Tensor) -> torch.Tensor:
h = z
for i, block in enumerate(self.blocks[::-1]):
h = block.reverse(h)
return h
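# Illustrative round-trip sketch (assumption: the Flow sub-modules composing
# each Block are exactly invertible and the spatial dims are divisible by
# squeeze_fold), e.g. for an adjacency-like tensor with 4 edge types over 9 nodes:
#
#   >>> glow = Glow(in_channel=4, n_flow=2, n_block=1, squeeze_fold=3,
#   ...             hidden_channel=(128, 128), conv_lu=2).eval()
#   >>> adj = torch.randn(2, 4, 9, 9)
#   >>> z, logdet = glow(adj)
#   >>> torch.allclose(glow.reverse(z), adj, atol=1e-4)
#   True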
class GlowOnGraph(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, n_flow, n_block,
mask_row_size_list=(2,), mask_row_stride_list=(1,)):
super(GlowOnGraph, self).__init__()
assert len(mask_row_size_list) == n_block or len(mask_row_size_list) == 1
assert len(mask_row_stride_list) == n_block or len(mask_row_stride_list) == 1
if len(mask_row_size_list) == 1:
mask_row_size_list = mask_row_size_list * n_block
if len(mask_row_stride_list) == 1:
mask_row_stride_list = mask_row_stride_list * n_block
self.blocks = nn.ModuleList()
for i in range(n_block):
mask_row_size = mask_row_size_list[i]
mask_row_stride = mask_row_stride_list[i]
self.blocks.append(BlockOnGraph(n_node, in_dim, hidden_dim_dict, n_flow, mask_row_size, mask_row_stride))
def forward(self, adj: torch.Tensor, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
logdet = 0
out = x
for block in self.blocks:
out, det = block((adj, out))
logdet = logdet + det
return out, logdet
@torch.jit.export
def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
adj, z = graph
input = z
for i, block in enumerate(self.blocks[::-1]):
input = block.reverse((adj, input))
return input
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/model/glow.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from typing import Tuple
import torch
import torch.nn as nn
from torch.nn.functional import logsigmoid
from moflow.model.basic import GraphConv
def sigmoid_inverse(x):
"""Calculates 1/sigmoid(x) in a more numerically stable way"""
return 1 + torch.exp(-x)
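# Note: 1 / sigmoid(x) == 1 + exp(-x), so the coupling reverse passes below can
# undo the sigmoid-gated scaling without an explicit division. Illustrative
# check (not part of the original module):
#
#   >>> sigmoid_inverse(torch.tensor(0.0))
#   tensor(2.)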
class AffineCoupling(nn.Module):
    def __init__(self, in_channel, hidden_channels, mask_swap=False):
        # hidden_channels is a sequence of intermediate channel widths, e.g. (512, 512)
        super(AffineCoupling, self).__init__()
        self.mask_swap = mask_swap
last_h = in_channel // 2
vh = tuple(hidden_channels)
layers = []
for h in vh:
layers.append(nn.Conv2d(last_h, h, kernel_size=3, padding=1))
layers.append(nn.BatchNorm2d(h))
layers.append(nn.ReLU(inplace=True))
last_h = h
layers.append(nn.Conv2d(last_h, in_channel, kernel_size=3, padding=1))
self.layers = nn.Sequential(*layers)
def forward(self, input: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
in_a, in_b = input.chunk(2, 1) # (2,12,32,32) --> (2,6,32,32), (2,6,32,32)
if self.mask_swap:
in_a, in_b = in_b, in_a
s_logits, t = self._s_t_function(in_a)
s = torch.sigmoid(s_logits)
out_b = (in_b + t) * s
logdet = torch.sum(logsigmoid(s_logits).reshape(input.shape[0], -1), 1)
if self.mask_swap:
result = torch.cat([out_b, in_a], 1)
else:
result = torch.cat([in_a, out_b], 1)
return result, logdet
@torch.jit.export
def reverse(self, output: torch.Tensor) -> torch.Tensor:
out_a, out_b = output.chunk(2, 1)
if self.mask_swap:
out_a, out_b = out_b, out_a
s_logits, t = self._s_t_function(out_a)
s_inverse = sigmoid_inverse(s_logits)
in_b = out_b * s_inverse - t
if self.mask_swap:
result = torch.cat([in_b, out_a], 1)
else:
result = torch.cat([out_a, in_b], 1)
return result
def _s_t_function(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
h = self.layers(x)
s_logits, t = h.chunk(2, 1)
return s_logits, t
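# Illustrative round-trip sketch (not part of the original module): the affine
# coupling is exactly invertible, so reverse(forward(x)) recovers x up to
# floating-point error. The channel count must be even.
#
#   >>> coupling = AffineCoupling(in_channel=8, hidden_channels=(16, 16)).eval()
#   >>> x = torch.randn(2, 8, 4, 4)
#   >>> y, logdet = coupling(x)
#   >>> torch.allclose(coupling.reverse(y), x, atol=1e-5)
#   True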
class ConvCouplingBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, n_node: int) -> None:
super().__init__()
self.graph_conv = GraphConv(in_dim, out_dim, n_node)
self.bn = nn.BatchNorm2d(n_node)
self.relu = nn.ReLU(inplace=True)
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
adj, nodes = graph
h = self.graph_conv(graph)
h = h.to(memory_format=torch.channels_last)
h = self.bn(h)
h = self.relu(h)
return adj, h
class LinCouplingBlock(nn.Module):
def __init__(self, in_dim: int, out_dim: int, n_node: int) -> None:
super().__init__()
self.lin = nn.Linear(in_dim, out_dim)
self.bn = nn.BatchNorm2d(n_node)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self.lin(x)
h = h.to(memory_format=torch.channels_last)
h = self.bn(h)
h = self.relu(h)
return h
class GraphAffineCoupling(nn.Module):
def __init__(self, n_node, in_dim, hidden_dim_dict, masked_row):
super(GraphAffineCoupling, self).__init__()
self.n_node = n_node
self.in_dim = in_dim
self.hidden_dim_dict = hidden_dim_dict
self.masked_row = masked_row
self.hidden_dim_gnn = hidden_dim_dict['gnn']
self.hidden_dim_linear = hidden_dim_dict['linear']
conv_layers = []
last_dim = in_dim
for out_dim in self.hidden_dim_gnn:
conv_layers.append(ConvCouplingBlock(last_dim, out_dim, n_node))
last_dim = out_dim
self.net_conv = nn.ModuleList(conv_layers)
lin_layers = []
for out_dim in self.hidden_dim_linear:
lin_layers.append(LinCouplingBlock(last_dim, out_dim, n_node))
last_dim = out_dim
lin_layers.append(nn.Linear(last_dim, in_dim*2))
self.net_lin = nn.Sequential(*lin_layers)
mask = torch.ones(n_node, in_dim)
        mask[masked_row, :] = 0  # rows listed in masked_row are left unchanged and fed to _s_t_function to update the remaining rows
self.register_buffer('mask', mask)
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
adj, input = graph
masked_x = self.mask * input
masked_x_sq = masked_x.unsqueeze(2)
s_logits, t = self._s_t_function((adj, masked_x_sq))
s = torch.sigmoid(s_logits)
out = masked_x + (1-self.mask) * (input + t) * s
logdet = torch.sum(logsigmoid(s_logits).reshape(input.shape[0], -1), 1)
return out, logdet
@torch.jit.export
def reverse(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
adj, output = graph
masked_y = self.mask * output
masked_y_sq = masked_y.unsqueeze(2)
s_logits, t = self._s_t_function((adj, masked_y_sq))
s_inverse = sigmoid_inverse(s_logits)
input = masked_y + (1 - self.mask) * (output * s_inverse - t)
return input
def _s_t_function(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:
for l in self.net_conv:
graph = l(graph)
adj, h = graph
h = self.net_lin(h)
h = h.squeeze(2)
s_logits, t = h.chunk(2, dim=-1)
return s_logits, t
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/model/coupling.py |
DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/model/__init__.py |
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import math
import torch
import torch.nn as nn
from moflow.config import Config
from moflow.model.glow import Glow, GlowOnGraph
def gaussian_nll(x, mean, ln_var):
"""Computes the negative log-likelihood of a Gaussian distribution.
Given two variable ``mean`` representing :math:`\\mu` and ``ln_var``
    representing :math:`\\log(\\sigma^2)`, this function computes, in an
    elementwise manner, the negative log-likelihood of :math:`x` under a
Gaussian distribution :math:`N(\\mu, S)`,
.. math::
-\\log N(x; \\mu, \\sigma^2) =
\\log\\left(\\sqrt{(2\\pi)^D |S|}\\right) +
\\frac{1}{2}(x - \\mu)^\\top S^{-1}(x - \\mu),
where :math:`D` is a dimension of :math:`x` and :math:`S` is a diagonal
matrix where :math:`S_{ii} = \\sigma_i^2`.
Args:
x: Input variable.
mean: Mean of a Gaussian distribution, :math:`\\mu`.
ln_var: Logarithm of variance of a Gaussian distribution,
:math:`\\log(\\sigma^2)`.
Returns:
torch.Tensor:
Negative log-likelihood.
"""
x_prec = torch.exp(-ln_var)
x_diff = x - mean
x_power = (x_diff * x_diff) * x_prec * -0.5
loss = (ln_var + math.log(2 * (math.pi))) / 2 - x_power
return loss
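# Illustrative sanity check (not part of the original module): for a standard
# normal (mean=0, ln_var=0) evaluated at x=0, the elementwise NLL equals
# 0.5 * log(2 * pi) ~= 0.9189.
#
#   >>> gaussian_nll(torch.tensor(0.), torch.tensor(0.), torch.tensor(0.))
#   tensor(0.9189)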
class MoFlowLoss(nn.Module):
def __init__(self, config: Config) -> None:
super().__init__()
self.b_n_type = config.num_edge_features
self.a_n_node = config.max_num_nodes
self.a_n_type = config.num_node_features
self.b_size = self.a_n_node * self.a_n_node * self.b_n_type
self.a_size = self.a_n_node * self.a_n_type
if config.model_config.learn_dist:
self.ln_var = nn.Parameter(torch.zeros(1))
else:
self.register_buffer('ln_var', torch.zeros(1))
def forward(self, h, adj_h, sum_log_det_jacs_x, sum_log_det_jacs_adj):
z = [h, adj_h]
logdet = [sum_log_det_jacs_x, sum_log_det_jacs_adj]
device = z[0].device
dtype = z[0].dtype
z[0] = z[0].reshape(z[0].shape[0],-1)
z[1] = z[1].reshape(z[1].shape[0], -1)
logdet[0] = logdet[0] - self.a_size * math.log(2.)
logdet[1] = logdet[1] - self.b_size * math.log(2.)
ln_var_adj = self.ln_var * torch.ones([self.b_size], device=device, dtype=dtype)
ln_var_x = self.ln_var * torch.ones([self.a_size], device=device, dtype=dtype)
nll_adj = torch.mean(
torch.sum(gaussian_nll(z[1], torch.zeros(self.b_size, device=device, dtype=dtype), ln_var_adj), dim=1)
- logdet[1])
nll_adj = nll_adj / (self.b_size * math.log(2.)) # the negative log likelihood per dim with log base 2
nll_x = torch.mean(torch.sum(
gaussian_nll(z[0], torch.zeros(self.a_size, device=device, dtype=dtype), ln_var_x),
dim=1) - logdet[0])
nll_x = nll_x / (self.a_size * math.log(2.)) # the negative log likelihood per dim with log base 2
return nll_x, nll_adj
class MoFlow(nn.Module):
def __init__(self, config: Config):
super(MoFlow, self).__init__()
self.config = config
self.b_n_type = config.num_edge_features
self.a_n_node = config.max_num_nodes
self.a_n_type = config.num_node_features
self.b_size = self.a_n_node * self.a_n_node * self.b_n_type
self.a_size = self.a_n_node * self.a_n_type
self.noise_scale = config.model_config.noise_scale
self.bond_model = Glow(
in_channel=self.b_n_type,
n_flow=config.model_config.bond_config.n_flow,
n_block=config.model_config.bond_config.n_block,
squeeze_fold=config.model_config.bond_config.n_squeeze,
hidden_channel=config.model_config.bond_config.hidden_ch,
conv_lu=config.model_config.bond_config.conv_lu
)
self.atom_model = GlowOnGraph(
n_node=self.a_n_node,
in_dim=self.a_n_type,
hidden_dim_dict={
'gnn': config.model_config.atom_config.hidden_gnn,
'linear': config.model_config.atom_config.hidden_lin
},
n_flow=config.model_config.atom_config.n_flow,
n_block=config.model_config.atom_config.n_block,
mask_row_size_list=config.model_config.atom_config.mask_row_size_list,
mask_row_stride_list=config.model_config.atom_config.mask_row_stride_list,
)
self._cuda_graphs = dict()
self.atom_stream = None
self.bond_stream = None
@torch.jit.ignore
def forward(self, adj: torch.Tensor, x: torch.Tensor, with_cuda_graph: bool = False):
"""
:param adj: (256,4,9,9)
:param x: (256,9,5)
:return:
"""
if with_cuda_graph and self.atom_stream is None:
self.atom_stream = torch.cuda.Stream()
self.bond_stream = torch.cuda.Stream()
h = x
# add uniform noise to node feature matrices
if self.training:
if self.noise_scale == 0:
h = h/2.0 - 0.5 + torch.rand_like(x) * 0.4
else:
h = h + torch.rand_like(x) * self.noise_scale
if with_cuda_graph:
if self.atom_model not in self._cuda_graphs:
h, sum_log_det_jacs_x = self._forward_graph(self.atom_model, adj, h)
else:
self.atom_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.atom_stream):
h, sum_log_det_jacs_x = self._forward_graph(self.atom_model, adj, h)
else:
h, sum_log_det_jacs_x = self.atom_model(adj, h)
# add uniform noise to adjacency tensors
if self.training:
if self.noise_scale == 0:
adj_bond = adj/2.0 - 0.5 + torch.rand_like(adj) * 0.4
else:
adj_bond = adj + torch.rand_like(adj) * self.noise_scale
else:
adj_bond = adj
if with_cuda_graph:
if self.bond_model not in self._cuda_graphs:
adj_h, sum_log_det_jacs_adj = self._forward_graph(self.bond_model, adj_bond)
else:
self.bond_stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.bond_stream):
adj_h, sum_log_det_jacs_adj = self._forward_graph(self.bond_model, adj_bond)
else:
adj_h, sum_log_det_jacs_adj = self.bond_model(adj_bond)
if with_cuda_graph:
torch.cuda.current_stream().wait_stream(self.atom_stream)
torch.cuda.current_stream().wait_stream(self.bond_stream)
return h, adj_h, sum_log_det_jacs_x, sum_log_det_jacs_adj
@torch.jit.export
def reverse(self, z):
"""
Returns a molecule, given its latent vector.
:param z: latent vector. Shape: [B, N*N*M + N*T]
B = Batch size, N = number of atoms, M = number of bond types,
T = number of atom types (Carbon, Oxygen etc.)
:return: adjacency matrix and feature matrix of a molecule
"""
batch_size = z.shape[0]
z_x = z[:, :self.a_size]
z_adj = z[:, self.a_size:]
h_adj = z_adj.reshape(batch_size, self.b_n_type, self.a_n_node, self.a_n_node)
h_adj = h_adj.to(memory_format=torch.channels_last)
h_adj = self.bond_model.reverse(h_adj)
if self.noise_scale == 0:
h_adj = (h_adj + 0.5) * 2
adj = h_adj
adj = adj + adj.permute(0, 1, 3, 2)
adj = adj / 2
adj = adj.softmax(dim=1)
max_bond = adj.max(dim=1).values.reshape(batch_size, -1, self.a_n_node, self.a_n_node)
adj = torch.floor(adj / max_bond)
adj = adj.to(memory_format=torch.channels_last)
h_x = z_x.reshape(batch_size, self.a_n_node, self.a_n_type)
h_x = self.atom_model.reverse((adj, h_x))
if self.noise_scale == 0:
h_x = (h_x + 0.5) * 2
return adj, h_x
@torch.jit.ignore
def _forward_graph(self, model, *args):
if model not in self._cuda_graphs:
if torch.distributed.is_initialized():
torch.distributed.barrier()
torch.cuda.synchronize()
self._cuda_graphs[model] = torch.cuda.make_graphed_callables(
model,
args,
)
torch.cuda.synchronize()
if torch.distributed.is_initialized():
torch.distributed.barrier()
return self._cuda_graphs[model](*args)
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/model/model.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import math
from typing import Tuple
import numpy as np
from scipy import linalg as la
import torch
from torch import nn
from torch.nn import functional as F
from moflow.runtime.distributed_utils import get_world_size, reduce_tensor
class ActNorm(nn.Module):
def __init__(self, num_channels, num_dims, channels_dim=1):
super().__init__()
self.num_channels = num_channels
self.num_dims = num_dims
self.channels_dim = channels_dim
self.shape = [1] * num_dims
self.shape[channels_dim] = num_channels
self.loc = nn.Parameter(torch.zeros(*self.shape))
self.scale = nn.Parameter(torch.ones(*self.shape))
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
self.register_buffer('num_elements', torch.tensor(0, dtype=torch.uint8))
@torch.jit.ignore
def initialize(self, input):
if self.initialized.item() == 1:
return
dims = list(input.shape[1:])
        del dims[self.channels_dim - 1]
num_elems = math.prod(dims)
permutation = [self.channels_dim] + [i for i in range(self.num_dims) if i != self.channels_dim]
with torch.no_grad():
flatten = input.permute(*permutation).contiguous().view(self.num_channels, -1)
mean = flatten.mean(1).view(self.shape)
std = flatten.std(1).view(self.shape)
num_gpus = get_world_size()
mean = reduce_tensor(mean, num_gpus)
std = reduce_tensor(std, num_gpus)
self.loc.data.copy_(-mean)
self.scale.data.copy_(1 / (std + 1e-6))
self.initialized.fill_(1)
self.num_elements.fill_(num_elems)
def forward(self, input):
log_abs = torch.log(torch.abs(self.scale))
logdet = self.num_elements * torch.sum(log_abs)
return self.scale * (input + self.loc), logdet
@torch.jit.export
def reverse(self, output):
return output / self.scale - self.loc
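# Illustrative usage sketch (assumption): forward() does not trigger the
# data-dependent initialization; it is run externally, e.g. through the forward
# pre-hooks registered by moflow.model.utils.initialize, or manually in a
# single-GPU, non-distributed run:
#
#   >>> norm = ActNorm(num_channels=9, num_dims=3)   # e.g. node tensors of shape (B, 9, 5)
#   >>> x = torch.randn(16, 9, 5)
#   >>> norm.initialize(x)
#   >>> out, logdet = norm(x)
#   >>> torch.allclose(norm.reverse(out), x, atol=1e-5)
#   True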
class InvConv2d(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = torch.randn(in_channel, in_channel)
q, _ = torch.qr(weight)
weight = q.unsqueeze(2).unsqueeze(3)
self.weight = nn.Parameter(weight)
def forward(self, input):
_, _, height, width = input.shape
out = F.conv2d(input, self.weight)
logdet = (
height * width * torch.slogdet(self.weight.squeeze().double())[1].float()
)
return out, logdet
def reverse(self, output):
return F.conv2d(
output, self.weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
)
class InvConv2dLU(nn.Module):
def __init__(self, in_channel):
super().__init__()
weight = np.random.randn(in_channel, in_channel)
q, _ = la.qr(weight)
w_p, w_l, w_u = la.lu(q.astype(np.float32))
w_s = np.diag(w_u)
w_u = np.triu(w_u, 1)
u_mask = np.triu(np.ones_like(w_u), 1)
l_mask = u_mask.T
w_p = torch.from_numpy(w_p)
w_l = torch.from_numpy(w_l).contiguous()
w_s = torch.from_numpy(w_s)
w_u = torch.from_numpy(w_u)
self.register_buffer('w_p', w_p)
self.register_buffer('u_mask', torch.from_numpy(u_mask))
self.register_buffer('l_mask', torch.from_numpy(l_mask))
self.register_buffer('s_sign', torch.sign(w_s))
self.register_buffer('l_eye', torch.eye(l_mask.shape[0]))
self.w_l = nn.Parameter(w_l)
self.w_s = nn.Parameter(torch.log(torch.abs(w_s)))
self.w_u = nn.Parameter(w_u)
def forward(self, input):
_, _, height, width = input.shape
weight = self.calc_weight()
out = F.conv2d(input, weight)
logdet = height * width * torch.sum(self.w_s)
return out, logdet
def calc_weight(self):
weight = (
self.w_p
@ (self.w_l * self.l_mask + self.l_eye)
@ ((self.w_u * self.u_mask) + torch.diag(self.s_sign * torch.exp(self.w_s)))
)
return weight.unsqueeze(2).unsqueeze(3)
def reverse(self, output):
weight = self.calc_weight()
dtype = weight.dtype
weight = weight.float()
weight_inv = weight.squeeze().inverse().unsqueeze(2).unsqueeze(3)
weight_inv = weight_inv.to(dtype=dtype)
return F.conv2d(output, weight_inv)
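# Illustrative invertibility sketch (not part of the original module): the
# LU-parametrized 1x1 convolution and its reverse are exact inverses up to
# numerical error, and the log-determinant is height * width * sum(w_s).
#
#   >>> conv = InvConv2dLU(in_channel=4)
#   >>> x = torch.randn(2, 4, 9, 9)
#   >>> y, logdet = conv(x)
#   >>> torch.allclose(conv.reverse(y), x, atol=1e-4)
#   True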
class GraphConv(nn.Module):
def __init__(self, in_channels, out_channels, num_atoms, num_edge_type=4):
super(GraphConv, self).__init__()
self.graph_linear_self = nn.Linear(in_channels, out_channels)
self.graph_linear_edge = nn.Linear(in_channels, out_channels * num_edge_type)
self.num_edge_type = num_edge_type
self.in_ch = in_channels
self.out_ch = out_channels
self.num_atoms = num_atoms
def forward(self, graph: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
adj, nodes = graph
hs = self.graph_linear_self(nodes)
m = self.graph_linear_edge(nodes)
m = m.view(-1, self.num_atoms, self.out_ch, self.num_edge_type)
hr = torch.einsum('bemn,bnce->bmc', adj, m)
hr = hr.unsqueeze(2)
return hs + hr
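# Illustrative shape sketch (assumption, mirroring how GraphAffineCoupling
# calls this layer): node features carry a singleton axis at dim 2.
#
#   >>> conv = GraphConv(in_channels=5, out_channels=16, num_atoms=9)
#   >>> adj = torch.rand(2, 4, 9, 9)      # (batch, num_edge_type, N, N)
#   >>> nodes = torch.rand(2, 9, 1, 5)    # (batch, N, 1, in_channels)
#   >>> conv((adj, nodes)).shape
#   torch.Size([2, 9, 1, 16])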
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/model/basic.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import Iterable
import torch
def initialize_module(module: torch.nn.Module, inputs: Iterable[torch.Tensor]) -> None:
"""Use given sample input to initialize the module.
Module must implement method called `initialize` which takes list of input tensors
"""
assert hasattr(module, 'initialize')
assert len(inputs) == 1, f'{len(inputs)} inputs'
assert module.initialized.item() == 0, 'initialized'
module.initialize(*inputs)
assert module.initialized.item() == 1, 'not initialized'
def initialize(model: torch.nn.Module, single_batch: Iterable[torch.Tensor]) -> None:
"""Initialize all sub-modules in the model given the sample input batch."""
hooks = []
for name, module in model.named_modules():
if hasattr(module, 'initialize'):
logging.info(f'marking {name} for initialization')
hook = module.register_forward_pre_hook(initialize_module)
hooks.append(hook)
_ = model(*single_batch)
logging.info('all modules initialized, removing hooks')
for hook in hooks:
hook.remove()
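# Illustrative usage sketch (assumption): run a single real batch through the
# model once so that every sub-module exposing an `initialize` method (e.g.
# ActNorm) computes its data-dependent initialization before training starts.
# The config and batch names below are hypothetical.
#
#   >>> model = MoFlow(config)
#   >>> initialize(model, (adj_batch, x_batch))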
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/model/utils.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from typing import Tuple
import numpy as np
from rdkit import Chem
from moflow.config import BOND_TO_CODE, DUMMY_CODE
class MolEncoder:
"""Encodes atoms and adjecency matrix.
Args:
out_size (int): It specifies the size of array returned by
`get_input_features`.
If the number of atoms in the molecule is less than this value,
the returned arrays is padded to have fixed size.
"""
def __init__(self, out_size: int):
super(MolEncoder, self).__init__()
self.out_size = out_size
def encode_mol(self, mol: Chem.Mol) -> Tuple[np.ndarray, np.ndarray]:
"""get input features
Args:
mol (Mol):
Returns:
"""
mol = self._standardize_mol(mol)
self._check_num_atoms(mol)
atom_array = self.construct_atomic_number_array(mol)
adj_array = self.construct_discrete_edge_matrix(mol)
return atom_array, adj_array
def _standardize_mol(self, mol: Chem.Mol) -> Chem.Mol:
canonical_smiles = Chem.MolToSmiles(mol, isomericSmiles=False,
canonical=True)
mol = Chem.MolFromSmiles(canonical_smiles)
Chem.Kekulize(mol)
return mol
def _check_num_atoms(self, mol: Chem.Mol) -> None:
"""Check number of atoms in `mol` does not exceed `out_size`"""
num_atoms = mol.GetNumAtoms()
if num_atoms > self.out_size:
raise EncodingError(f'Number of atoms in mol {num_atoms} exceeds num_max_atoms {self.out_size}')
def construct_atomic_number_array(self, mol: Chem.Mol) -> np.ndarray:
"""Returns atomic numbers of atoms consisting a molecule.
Args:
mol (rdkit.Chem.Mol): Input molecule.
Returns:
numpy.ndarray: an array consisting of atomic numbers
of atoms in the molecule.
"""
atom_list = [a.GetAtomicNum() for a in mol.GetAtoms()]
n_atom = len(atom_list)
if self.out_size < n_atom:
raise EncodingError(f'out_size {self.out_size} is smaller than number of atoms in mol {n_atom}')
atom_array = np.full(self.out_size, DUMMY_CODE, dtype=np.uint8)
atom_array[:n_atom] = atom_list
return atom_array
def construct_discrete_edge_matrix(self, mol: Chem.Mol) -> np.ndarray:
"""Returns the edge-type dependent adjacency matrix of the given molecule.
Args:
mol (rdkit.Chem.Mol): Input molecule.
Returns:
            adj_array (numpy.ndarray): The adjacency matrix of the input molecule.
                It is a symmetric 2-dimensional array with shape (out_size, out_size),
                filled with integers representing bond types. If two atoms are not
                connected, DUMMY_CODE is used instead.
"""
if mol is None:
raise EncodingError('mol is None')
n_atom = mol.GetNumAtoms()
if self.out_size < n_atom:
raise EncodingError(f'out_size {self.out_size} is smaller than number of atoms in mol {n_atom}')
adjs = np.full((self.out_size, self.out_size), DUMMY_CODE, dtype=np.uint8)
for bond in mol.GetBonds():
bond_type = bond.GetBondType()
# we need to use code here - bond types are rdkit objects
code = BOND_TO_CODE[bond_type]
i = bond.GetBeginAtomIdx()
j = bond.GetEndAtomIdx()
adjs[[i, j], [j, i]] = code
return adjs
class EncodingError(Exception):
pass
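# Illustrative usage sketch (not part of the original module):
#
#   >>> from rdkit import Chem
#   >>> encoder = MolEncoder(out_size=9)
#   >>> atoms, adj = encoder.encode_mol(Chem.MolFromSmiles('CCO'))
#   >>> atoms.shape, adj.shape
#   ((9,), (9, 9))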
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/data/encoding.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import logging
from typing import Any, Callable, Iterable, Optional, Tuple
import numpy as np
from torch.utils.data import Dataset
class NumpyTupleDataset(Dataset):
"""Dataset of a tuple of datasets.
It combines multiple datasets into one dataset. Each example is represented
by a tuple whose ``i``-th item corresponds to the i-th dataset.
    Each dataset is expected to be an instance of numpy.ndarray.
Args:
datasets: Underlying datasets. The ``i``-th one is used for the
``i``-th item of each example. All datasets must have the same
length.
        transform: An optional function applied to each item before it is returned.
"""
def __init__(self, datasets: Iterable[np.ndarray], transform: Optional[Callable] = None) -> None:
if not datasets:
raise ValueError('no datasets are given')
length = len(datasets[0])
for i, dataset in enumerate(datasets):
if len(dataset) != length:
raise ValueError(
'dataset of the index {} has a wrong length'.format(i))
self._datasets = datasets
self._length = length
self.transform = transform
def __len__(self) -> int:
return self._length
def __getitem__(self, index: int) -> Tuple[Any]:
item = [dataset[index] for dataset in self._datasets]
if self.transform:
item = self.transform(item)
return item
def get_datasets(self) -> Tuple[np.ndarray]:
return self._datasets
def save(self, filepath: str) -> None:
"""save the dataset to filepath in npz format
Args:
filepath (str): filepath to save dataset. It is recommended to end
with '.npz' extension.
"""
np.savez(filepath, *self._datasets)
logging.info('Save {} done.'.format(filepath))
@classmethod
def load(cls, filepath: str, transform: Optional[Callable] = None):
logging.info('Loading file {}'.format(filepath))
if not os.path.exists(filepath):
raise ValueError('Invalid filepath {} for dataset'.format(filepath))
load_data = np.load(filepath)
result = []
i = 0
while True:
key = 'arr_{}'.format(i)
if key in load_data.keys():
result.append(load_data[key])
i += 1
else:
break
return cls(result, transform)
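# Illustrative usage sketch (not part of the original module); the file path
# below is hypothetical:
#
#   >>> nodes = np.zeros((10, 9), dtype=np.uint8)
#   >>> adjs = np.zeros((10, 9, 9), dtype=np.uint8)
#   >>> ds = NumpyTupleDataset([nodes, adjs])
#   >>> len(ds), len(ds[0])
#   (10, 2)
#   >>> ds.save('/tmp/example_dataset.npz')
#   >>> ds2 = NumpyTupleDataset.load('/tmp/example_dataset.npz')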
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/data/data_loader.py |
DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/data/__init__.py |
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import json
import logging
import numpy as np
import os
from typing import Dict, Tuple
from moflow.config import CODE_TO_BOND, DUMMY_CODE, Config
def _onehot(data: np.ndarray, codes_dict: Dict[int, int], dtype=np.float32) -> np.ndarray:
shape = [len(codes_dict), *data.shape]
encoded = np.zeros(shape, dtype=dtype)
for obj_key, code in codes_dict.items():
encoded[code, data == obj_key] = 1
return encoded
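# Illustrative example (not part of the original module): codes_dict maps raw
# values to channel indices; the result has one leading channel per code.
#
#   >>> _onehot(np.array([0, 2, 1]), {0: 0, 1: 1, 2: 2}).shape
#   (3, 3)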
def encode_nodes(atomic_nums: np.ndarray, config: Config) -> np.ndarray:
padded_data = np.full(config.max_num_nodes, DUMMY_CODE, dtype=np.uint8)
padded_data[:len(atomic_nums)] = atomic_nums
encoded = _onehot(padded_data, config.dataset_config.atomic_to_code).T
return encoded
def encode_edges(adj: np.ndarray, config: Config) -> np.ndarray:
padded_data = np.full((config.max_num_nodes, config.max_num_nodes), DUMMY_CODE, dtype=np.uint8)
n, m = adj.shape
    assert n == m, 'adjacency matrix should be square'
padded_data[:n, :n] = adj
# we already store codes in the file - bond types are rdkit objects
encoded = _onehot(padded_data, {k:k for k in CODE_TO_BOND})
return encoded
def transform_fn(data: Tuple[np.ndarray], config: Config) -> Tuple[np.ndarray]:
node, adj, *labels = data
node = encode_nodes(node, config)
adj = encode_edges(adj, config)
return (node, adj, *labels)
def get_val_ids(config: Config, data_dir: str):
file_path = os.path.join(data_dir, config.dataset_config.valid_idx_file)
logging.info('loading train/valid split information from: {}'.format(file_path))
with open(file_path) as json_data:
data = json.load(json_data)
val_ids = [int(idx)-1 for idx in data]
return val_ids
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/data/transform.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from logging import getLogger
import traceback
from typing import List
import numpy as np
import pandas as pd
from rdkit import Chem
from tqdm import tqdm
from moflow.data.encoding import MolEncoder, EncodingError
from moflow.data.data_loader import NumpyTupleDataset
class DataFrameParser:
"""
    This DataFrameParser parses a pandas DataFrame containing SMILES and, optionally, some additional features.
    Args:
        encoder (MolEncoder): encoder instance
        labels (list): label columns that should be loaded
        smiles_col (str): name of the column containing SMILES strings
"""
def __init__(self, encoder: MolEncoder,
labels: List[str],
smiles_col: str = 'smiles'):
super(DataFrameParser, self).__init__()
self.labels = labels
self.smiles_col = smiles_col
self.logger = getLogger(__name__)
self.encoder = encoder
def parse(self, df: pd.DataFrame) -> NumpyTupleDataset:
"""Parse DataFrame using `encoder` and prepare a dataset instance
Labels are extracted from `labels` columns and input features are
extracted from smiles information in `smiles` column.
"""
all_nodes = []
all_edges = []
total_count = df.shape[0]
fail_count = 0
success_count = 0
for smiles in tqdm(df[self.smiles_col], total=df.shape[0]):
try:
mol = Chem.MolFromSmiles(smiles)
if mol is None:
fail_count += 1
continue
# Note that smiles expression is not unique.
# we obtain canonical smiles
nodes, edges = self.encoder.encode_mol(mol)
except EncodingError as e:
fail_count += 1
continue
except Exception as e:
self.logger.warning('parse(), type: {}, {}'
.format(type(e).__name__, e.args))
self.logger.info(traceback.format_exc())
fail_count += 1
continue
all_nodes.append(nodes)
all_edges.append(edges)
success_count += 1
result = [np.array(all_nodes), np.array(all_edges), *(df[label_col].values for label_col in self.labels)]
self.logger.info('Preprocess finished. FAIL {}, SUCCESS {}, TOTAL {}'
.format(fail_count, success_count, total_count))
dataset = NumpyTupleDataset(result)
return dataset
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/moflow/data/data_frame_parser.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2020 Chengxi Zang
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import pandas as pd
import argparse
import time
from moflow.config import CONFIGS
from moflow.data.data_frame_parser import DataFrameParser
from moflow.data.encoding import MolEncoder
def parse_args():
parser = argparse.ArgumentParser(description='')
parser.add_argument('--data_name', type=str,
choices=list(CONFIGS),
help='dataset to be downloaded')
parser.add_argument('--data_dir', type=str, default='/data')
args = parser.parse_args()
return args
def main(args):
start_time = time.time()
args = parse_args()
print('args', vars(args))
assert args.data_name in CONFIGS
dataset_config = CONFIGS[args.data_name].dataset_config
preprocessor = MolEncoder(out_size=dataset_config.max_num_atoms)
input_path = os.path.join(args.data_dir, dataset_config.csv_file)
output_path = os.path.join(args.data_dir, dataset_config.dataset_file)
print(f'Preprocessing {args.data_name} data:')
df = pd.read_csv(input_path, index_col=0)
parser = DataFrameParser(preprocessor, labels=dataset_config.labels, smiles_col=dataset_config.smiles_col)
dataset = parser.parse(df)
dataset.save(output_path)
print('Total time:', time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time)))
if __name__ == '__main__':
args = parse_args()
main(args)
| DeepLearningExamples-master | PyTorch/DrugDiscovery/MoFlow/scripts/data_preprocess.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
from pathlib import Path
import torch
import tqdm
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from torch.utils.data import DataLoader
from fastpitch.data_function import TTSCollate, TTSDataset
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('--wav-text-filelists', required=True, nargs='+',
type=str, help='Files with audio paths and text')
parser.add_argument('--extract-mels', action='store_true',
help='Calculate spectrograms from .wav files')
parser.add_argument('--extract-pitch', action='store_true',
help='Extract pitch')
parser.add_argument('--log-file', type=str, default='preproc_log.json',
help='Filename for logging')
parser.add_argument('--n-speakers', type=int, default=1)
# Mel extraction
parser.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
parser.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
parser.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
parser.add_argument('--win-length', default=1024, type=int,
help='Window length')
parser.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
parser.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
parser.add_argument('--n-mel-channels', type=int, default=80)
# Pitch extraction
parser.add_argument('--f0-method', default='pyin', type=str,
choices=('pyin',), help='F0 estimation method')
# Performance
parser.add_argument('-b', '--batch-size', default=1, type=int)
parser.add_argument('--n-workers', type=int, default=16)
return parser
def main():
parser = argparse.ArgumentParser(description='TTS Data Pre-processing')
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
DLLogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT,
Path(args.dataset_path, args.log_file)),
StdOutBackend(Verbosity.VERBOSE)])
for k, v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k: v})
DLLogger.flush()
if args.extract_mels:
Path(args.dataset_path, 'mels').mkdir(parents=False, exist_ok=True)
if args.extract_pitch:
Path(args.dataset_path, 'pitch').mkdir(parents=False, exist_ok=True)
for filelist in args.wav_text_filelists:
print(f'Processing {filelist}...')
dataset = TTSDataset(
args.dataset_path,
filelist,
text_cleaners=['english_cleaners_v2'],
n_mel_channels=args.n_mel_channels,
p_arpabet=0.0,
n_speakers=args.n_speakers,
load_mel_from_disk=False,
load_pitch_from_disk=False,
pitch_mean=None,
pitch_std=None,
max_wav_value=args.max_wav_value,
sampling_rate=args.sampling_rate,
filter_length=args.filter_length,
hop_length=args.hop_length,
win_length=args.win_length,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
betabinomial_online_dir=None,
pitch_online_dir=None,
pitch_online_method=args.f0_method if args.extract_pitch else None)
data_loader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=None,
num_workers=args.n_workers,
collate_fn=TTSCollate(),
pin_memory=False,
drop_last=False)
all_filenames = set()
for i, batch in enumerate(tqdm.tqdm(data_loader)):
_, input_lens, mels, mel_lens, _, pitch, _, _, attn_prior, fpaths = batch
# Ensure filenames are unique
for p in fpaths:
fname = Path(p).name
if fname in all_filenames:
raise ValueError(f'Filename is not unique: {fname}')
all_filenames.add(fname)
if args.extract_mels:
for j, mel in enumerate(mels):
fname = Path(fpaths[j]).with_suffix('.pt').name
fpath = Path(args.dataset_path, 'mels', fname)
torch.save(mel[:, :mel_lens[j]], fpath)
if args.extract_pitch:
for j, p in enumerate(pitch):
fname = Path(fpaths[j]).with_suffix('.pt').name
fpath = Path(args.dataset_path, 'pitch', fname)
torch.save(p[:mel_lens[j]], fpath)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/prepare_dataset.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
import sys
import torch
from common.text.symbols import get_symbols, get_pad_idx
from common.utils import DefaultAttrDict, AttrDict
from fastpitch.model import FastPitch
from fastpitch.model_jit import FastPitchJIT
from hifigan.models import Generator
try:
from waveglow.model import WaveGlow
from waveglow import model as glow
from waveglow.denoiser import Denoiser
sys.modules['glow'] = glow
except ImportError:
print("WARNING: Couldn't import WaveGlow")
def parse_model_args(model_name, parser, add_help=False):
if model_name == 'FastPitch':
from fastpitch import arg_parser
return arg_parser.parse_fastpitch_args(parser, add_help)
elif model_name == 'HiFi-GAN':
from hifigan import arg_parser
return arg_parser.parse_hifigan_args(parser, add_help)
elif model_name == 'WaveGlow':
from waveglow.arg_parser import parse_waveglow_args
return parse_waveglow_args(parser, add_help)
else:
raise NotImplementedError(model_name)
def get_model(model_name, model_config, device, bn_uniform_init=False,
forward_is_infer=False, jitable=False):
"""Chooses a model based on name"""
del bn_uniform_init # unused (old name: uniform_initialize_bn_weight)
if model_name == 'FastPitch':
if jitable:
model = FastPitchJIT(**model_config)
else:
model = FastPitch(**model_config)
elif model_name == 'HiFi-GAN':
model = Generator(model_config)
elif model_name == 'WaveGlow':
model = WaveGlow(**model_config)
else:
raise NotImplementedError(model_name)
if forward_is_infer and hasattr(model, 'infer'):
model.forward = model.infer
return model.to(device)
def get_model_config(model_name, args, ckpt_config=None):
""" Get config needed to instantiate the model """
# Mark keys missing in `args` with an object (None is ambiguous)
_missing = object()
args = DefaultAttrDict(lambda: _missing, vars(args))
# `ckpt_config` is loaded from the checkpoint and has the priority
# `model_config` is based on args and fills empty slots in `ckpt_config`
if model_name == 'FastPitch':
model_config = dict(
# io
n_mel_channels=args.n_mel_channels,
# symbols
n_symbols=(len(get_symbols(args.symbol_set))
if args.symbol_set is not _missing else _missing),
padding_idx=(get_pad_idx(args.symbol_set)
if args.symbol_set is not _missing else _missing),
symbols_embedding_dim=args.symbols_embedding_dim,
# input FFT
in_fft_n_layers=args.in_fft_n_layers,
in_fft_n_heads=args.in_fft_n_heads,
in_fft_d_head=args.in_fft_d_head,
in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,
in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,
in_fft_output_size=args.in_fft_output_size,
p_in_fft_dropout=args.p_in_fft_dropout,
p_in_fft_dropatt=args.p_in_fft_dropatt,
p_in_fft_dropemb=args.p_in_fft_dropemb,
# output FFT
out_fft_n_layers=args.out_fft_n_layers,
out_fft_n_heads=args.out_fft_n_heads,
out_fft_d_head=args.out_fft_d_head,
out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,
out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,
out_fft_output_size=args.out_fft_output_size,
p_out_fft_dropout=args.p_out_fft_dropout,
p_out_fft_dropatt=args.p_out_fft_dropatt,
p_out_fft_dropemb=args.p_out_fft_dropemb,
# duration predictor
dur_predictor_kernel_size=args.dur_predictor_kernel_size,
dur_predictor_filter_size=args.dur_predictor_filter_size,
p_dur_predictor_dropout=args.p_dur_predictor_dropout,
dur_predictor_n_layers=args.dur_predictor_n_layers,
# pitch predictor
pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,
pitch_predictor_filter_size=args.pitch_predictor_filter_size,
p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,
pitch_predictor_n_layers=args.pitch_predictor_n_layers,
# pitch conditioning
pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,
# speakers parameters
n_speakers=args.n_speakers,
speaker_emb_weight=args.speaker_emb_weight,
# energy predictor
energy_predictor_kernel_size=args.energy_predictor_kernel_size,
energy_predictor_filter_size=args.energy_predictor_filter_size,
p_energy_predictor_dropout=args.p_energy_predictor_dropout,
energy_predictor_n_layers=args.energy_predictor_n_layers,
# energy conditioning
energy_conditioning=args.energy_conditioning,
energy_embedding_kernel_size=args.energy_embedding_kernel_size,
)
elif model_name == 'HiFi-GAN':
if args.hifigan_config is not None:
assert ckpt_config is None, (
"Supplied --hifigan-config, but the checkpoint has a config. "
"Drop the flag or remove the config from the checkpoint file.")
print(f'HiFi-GAN: Reading model config from {args.hifigan_config}')
with open(args.hifigan_config) as f:
args = AttrDict(json.load(f))
model_config = dict(
# generator architecture
upsample_rates=args.upsample_rates,
upsample_kernel_sizes=args.upsample_kernel_sizes,
upsample_initial_channel=args.upsample_initial_channel,
resblock=args.resblock,
resblock_kernel_sizes=args.resblock_kernel_sizes,
resblock_dilation_sizes=args.resblock_dilation_sizes,
)
elif model_name == 'WaveGlow':
model_config = dict(
n_mel_channels=args.n_mel_channels,
n_flows=args.flows,
n_group=args.groups,
n_early_every=args.early_every,
n_early_size=args.early_size,
WN_config=dict(
n_layers=args.wn_layers,
kernel_size=args.wn_kernel_size,
n_channels=args.wn_channels
)
)
else:
raise NotImplementedError(model_name)
# Start with ckpt_config, and fill missing keys from model_config
final_config = {} if ckpt_config is None else ckpt_config.copy()
missing_keys = set(model_config.keys()) - set(final_config.keys())
final_config.update({k: model_config[k] for k in missing_keys})
# If there was a ckpt_config, it should have had all args
if ckpt_config is not None and len(missing_keys) > 0:
print(f'WARNING: Keys {missing_keys} missing from the loaded config; '
'using args instead.')
assert all(v is not _missing for v in final_config.values())
return final_config
def get_model_train_setup(model_name, args):
""" Dump train setup for documentation purposes """
if model_name == 'FastPitch':
return dict()
elif model_name == 'HiFi-GAN':
return dict(
# audio
segment_size=args.segment_size,
filter_length=args.filter_length,
num_mels=args.num_mels,
hop_length=args.hop_length,
win_length=args.win_length,
sampling_rate=args.sampling_rate,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
mel_fmax_loss=args.mel_fmax_loss,
max_wav_value=args.max_wav_value,
# other
seed=args.seed,
# optimization
base_lr=args.learning_rate,
lr_decay=args.lr_decay,
epochs_all=args.epochs,
)
elif model_name == 'WaveGlow':
return dict()
else:
raise NotImplementedError(model_name)
def load_model_from_ckpt(checkpoint_data, model, key='state_dict'):
if key is None:
return checkpoint_data['model'], None
sd = checkpoint_data[key]
# strip the 'module.' prefix added by DataParallel/DDP wrappers
sd = {re.sub(r'^module\.', '', k): v for k, v in sd.items()}
status = model.load_state_dict(sd, strict=False)
return model, status
def load_and_setup_model(model_name, parser, checkpoint, amp, device,
unk_args=[], forward_is_infer=False, jitable=False):
if checkpoint is not None:
ckpt_data = torch.load(checkpoint)
print(f'{model_name}: Loading {checkpoint}...')
ckpt_config = ckpt_data.get('config')
if ckpt_config is None:
print(f'{model_name}: No model config in the checkpoint; using args.')
else:
print(f'{model_name}: Found model config saved in the checkpoint.')
else:
ckpt_config = None
ckpt_data = {}
model_parser = parse_model_args(model_name, parser, add_help=False)
model_args, model_unk_args = model_parser.parse_known_args()
unk_args[:] = list(set(unk_args) & set(model_unk_args))
model_config = get_model_config(model_name, model_args, ckpt_config)
model = get_model(model_name, model_config, device,
forward_is_infer=forward_is_infer,
jitable=jitable)
if checkpoint is not None:
key = 'generator' if model_name == 'HiFi-GAN' else 'state_dict'
model, status = load_model_from_ckpt(ckpt_data, model, key)
missing = [] if status is None else status.missing_keys
unexpected = [] if status is None else status.unexpected_keys
# Attention modules are used only during training; safe to skip at inference
if model_name == 'FastPitch':
missing = [k for k in missing if not k.startswith('attention.')]
unexpected = [k for k in unexpected if not k.startswith('attention.')]
assert len(missing) == 0 and len(unexpected) == 0, (
f'Mismatched keys when loading parameters. Missing: {missing}, '
f'unexpected: {unexpected}.')
if model_name == "WaveGlow":
for k, m in model.named_modules():
m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
model = model.remove_weightnorm(model)
elif model_name == 'HiFi-GAN':
assert model_args.hifigan_config is not None or ckpt_config is not None, (
'Use a HiFi-GAN checkpoint from NVIDIA DeepLearningExamples with '
'saved config or supply --hifigan-config <json_file>.')
model.remove_weight_norm()
if amp:
model.half()
model.eval()
return model.to(device), model_config, ckpt_data.get('train_setup', {})
def load_and_setup_ts_model(model_name, checkpoint, amp, device=None):
print(f'{model_name}: Loading TorchScript checkpoint {checkpoint}...')
model = torch.jit.load(checkpoint).eval()
if device is not None:
model = model.to(device)
if amp:
model.half()
elif next(model.parameters()).dtype == torch.float16:
raise ValueError('Trying to load the model in FP32, but the '
'TS checkpoint is in FP16 precision.')
return model
def convert_ts_to_trt(model_name, ts_model, parser, amp, unk_args=[]):
trt_parser = _parse_trt_compilation_args(model_name, parser, add_help=False)
trt_args, trt_unk_args = trt_parser.parse_known_args()
unk_args[:] = list(set(unk_args) & set(trt_unk_args))
if model_name == 'HiFi-GAN':
return _convert_ts_to_trt_hifigan(
ts_model, amp, trt_args.trt_min_opt_max_batch,
trt_args.trt_min_opt_max_hifigan_length)
else:
raise NotImplementedError
def _parse_trt_compilation_args(model_name, parent, add_help=False):
"""
Parse Torch-TensorRT compilation-specific commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
trt = parser.add_argument_group(f'{model_name} Torch-TensorRT compilation parameters')
trt.add_argument('--trt-min-opt-max-batch', nargs=3, type=int,
default=(1, 8, 16),
help='Torch-TensorRT min, optimal and max batch size')
if model_name == 'HiFi-GAN':
trt.add_argument('--trt-min-opt-max-hifigan-length', nargs=3, type=int,
default=(100, 800, 1200),
help='Torch-TensorRT min, optimal and max audio length (in frames)')
return parser
def _convert_ts_to_trt_hifigan(ts_model, amp, trt_min_opt_max_batch,
trt_min_opt_max_hifigan_length, num_mels=80):
import torch_tensorrt
trt_dtype = torch.half if amp else torch.float
print(f'Torch TensorRT: compiling HiFi-GAN for dtype {trt_dtype}.')
min_shp, opt_shp, max_shp = zip(trt_min_opt_max_batch,
(num_mels,) * 3,
trt_min_opt_max_hifigan_length)
compile_settings = {
"inputs": [torch_tensorrt.Input(
min_shape=min_shp,
opt_shape=opt_shp,
max_shape=max_shp,
dtype=trt_dtype,
)],
"enabled_precisions": {trt_dtype},
"require_full_compilation": True,
}
trt_model = torch_tensorrt.compile(ts_model, **compile_settings)
print('Torch TensorRT: compilation successful.')
return trt_model
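# Illustrative usage sketch (paths and shape ranges below are hypothetical):
# compile a scripted HiFi-GAN generator for FP16 with dynamic batch size and
# mel length, then save the resulting Torch-TensorRT module.
#   ts_gen = torch.jit.load('hifigan_ts.pt').eval().cuda()
#   trt_gen = _convert_ts_to_trt_hifigan(
#       ts_gen, amp=True,
#       trt_min_opt_max_batch=(1, 8, 16),
#       trt_min_opt_max_hifigan_length=(100, 800, 1200))
#   torch.jit.save(trt_gen, 'hifigan_trt.pt')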
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/models.py |
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import torch
import models
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('model_name', type=str,
choices=['HiFi-GAN', 'FastPitch'],
help='Name of the converted model')
parser.add_argument('input_ckpt', type=str,
help='Path to the input checkpoint')
parser.add_argument('output_ckpt', default=None,
help='Path to save the output checkpoint to')
parser.add_argument('--cuda', action='store_true',
help='Move model weights to GPU before export')
parser.add_argument('--amp', action='store_true',
help='Convert model to FP16 prior to saving')
parser.add_argument('--load-from', type=str, default='pyt',
choices=['pyt', 'ts'],
help='Source checkpoint format')
parser.add_argument('--convert-to', type=str, default='ts',
choices=['ts', 'ttrt'],
help='Output checkpoint format')
return parser
def main():
"""
Exports PyT or TorchScript checkpoint to TorchScript or Torch-TensorRT.
"""
parser = argparse.ArgumentParser(description='PyTorch model export',
allow_abbrev=False)
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
device = torch.device('cuda' if args.cuda else 'cpu')
assert args.load_from != args.convert_to, \
'Load and convert formats must be different'
print(f'Converting {args.model_name} from "{args.load_from}"'
f' to "{args.convert_to}" ({device}).')
if args.load_from == 'ts':
ts_model = models.load_and_setup_ts_model(args.model_name,
args.input_ckpt, args.amp,
device)
else:
assert args.load_from == 'pyt'
pyt_model, _ = models.load_pyt_model_for_infer(
args.model_name, parser, args.input_ckpt, args.amp, device,
unk_args=unk_args, jitable=True)
ts_model = torch.jit.script(pyt_model)
if args.convert_to == 'ts':
torch.jit.save(ts_model, args.output_ckpt)
else:
assert args.convert_to == 'ttrt'
trt_model = models.convert_ts_to_trt('HiFi-GAN', ts_model, parser,
args.amp, unk_args)
torch.jit.save(trt_model, args.output_ckpt)
print(f'{args.model_name}: checkpoint saved to {args.output_ckpt}.')
if unk_args:
print(f'Warning: encountered unknown program options: {unk_args}')
if __name__ == '__main__':
main()
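# Illustrative invocations (checkpoint file names are hypothetical):
#   python export.py HiFi-GAN hifigan_gen.pt hifigan_ts.pt --cuda --amp
#   python export.py HiFi-GAN hifigan_ts.pt hifigan_trt.pt --cuda --amp \
#       --load-from ts --convert-to ttrt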
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/export.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import os
from functools import partial
from itertools import islice
import numpy as np
import torch
import torch.nn.functional as F
from torch.cuda import amp
from torch.cuda.amp import autocast
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim import AdamW
from torch.optim.lr_scheduler import ExponentialLR
from apex.optimizers import FusedAdam, FusedLAMB
import models
from common import tb_dllogger as logger, utils, gpu_affinity
from common.utils import (Checkpointer, freeze, init_distributed, print_once,
reduce_tensor, unfreeze, l2_promote)
from hifigan.data_function import get_data_loader, mel_spectrogram
from hifigan.logging import init_logger, Metrics
from hifigan.models import (MultiPeriodDiscriminator, MultiScaleDiscriminator,
feature_loss, generator_loss, discriminator_loss)
def parse_args(parser):
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('--log_file', type=str, default=None,
help='Path to a DLLogger log file')
train = parser.add_argument_group('training setup')
train.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
train.add_argument('--epochs_this_job', type=int, default=None,
help='Number of epochs in partial training run')
train.add_argument('--keep_milestones', type=int, nargs='+',
default=[1000, 2000, 3000, 4000, 5000, 6000],
help='Milestone checkpoints to keep from removing')
train.add_argument('--checkpoint_interval', type=int, default=50,
help='Saving checkpoints frequency (in epochs)')
train.add_argument('--step_logs_interval', default=1, type=int,
help='Step logs dumping frequency (in steps)')
train.add_argument('--validation_interval', default=10, type=int,
help='Validation frequency (in epochs)')
train.add_argument('--samples_interval', default=100, type=int,
help='Dumping audio samples frequency (in epochs)')
train.add_argument('--resume', action='store_true',
help='Resume training from the last checkpoint')
train.add_argument('--checkpoint_path_gen', type=str, default=None,
help='Resume training from a selected checkpoint')
train.add_argument('--checkpoint_path_discrim', type=str, default=None,
help='Resume training from a selected checkpoint')
train.add_argument('--seed', type=int, default=1234,
help='Seed for PyTorch random number generators')
train.add_argument('--amp', action='store_true',
help='Enable AMP')
train.add_argument('--autocast_spectrogram', action='store_true',
help='Enable autocast while computing spectrograms')
train.add_argument('--cuda', action='store_true',
help='Run on GPU using CUDA')
train.add_argument('--disable_cudnn_benchmark', action='store_true',
help='Disable cudnn benchmark mode')
train.add_argument('--ema_decay', type=float, default=0,
help='Discounting factor for training weights EMA')
train.add_argument('--grad_accumulation', type=int, default=1,
help='Training steps to accumulate gradients for')
train.add_argument('--num_workers', type=int, default=1,
help='Data loader workers number')
train.add_argument('--fine_tuning', action='store_true',
help='Enable fine-tuning')
train.add_argument('--input_mels_dir', type=str, default=None,
help='Directory with mels for fine-tuning')
train.add_argument('--benchmark_epochs_num', type=int, default=5)
train.add_argument('--no_amp_grouped_conv', action='store_true',
help='Disable AMP on certain convs for better perf')
opt = parser.add_argument_group('optimization setup')
opt.add_argument('--optimizer', type=str, default='adamw',
help='Optimization algorithm')
opt.add_argument('--lr_decay', type=float, default=0.9998,
help='Learning rate decay')
opt.add_argument('-lr', '--learning_rate', type=float, required=True,
help='Learning rate')
opt.add_argument('--fine_tune_lr_factor', type=float, default=1.,
help='Learning rate multiplier for fine-tuning')
opt.add_argument('--adam_betas', type=float, nargs=2, default=(0.8, 0.99),
help='Adam Beta coefficients')
opt.add_argument('--grad_clip_thresh', default=1000.0, type=float,
help='Clip threshold for gradients')
opt.add_argument('-bs', '--batch_size', type=int, required=True,
help=('Batch size per training iter. '
'May be split into grad accumulation steps.'))
opt.add_argument('--warmup_steps', type=int, default=1000,
help='Number of steps for lr warmup')
data = parser.add_argument_group('dataset parameters')
data.add_argument('-d', '--dataset_path', default='data/LJSpeech-1.1',
help='Path to dataset', type=str)
data.add_argument('--training_files', type=str, required=True, nargs='+',
help='Paths to training filelists.')
data.add_argument('--validation_files', type=str, required=True, nargs='+',
help='Paths to validation filelists.')
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max_wav_value', default=32768.0, type=float,
help='Maximum audio waveform value')
audio.add_argument('--sampling_rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter_length', default=1024, type=int,
help='Filter length')
audio.add_argument('--num_mels', default=80, type=int,
help='number of Mel bands')
audio.add_argument('--hop_length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win_length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel_fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel_fmax', default=8000.0, type=float,
help='Maximum mel frequency')
audio.add_argument('--mel_fmax_loss', default=None, type=float,
help='Maximum mel frequency used for computing loss')
audio.add_argument('--segment_size', default=8192, type=int,
help='Training segment size')
dist = parser.add_argument_group('distributed setup')
dist.add_argument(
'--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0),
help='Rank of the process for multiproc. Do not set manually.')
dist.add_argument(
'--world_size', type=int, default=os.getenv('WORLD_SIZE', 1),
help='Number of processes for multiproc. Do not set manually.')
dist.add_argument('--affinity', type=str,
default='socket_unique_interleaved',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
return parser
def validate(args, gen, mel_spec, mpd, msd, val_loader, val_metrics):
gen.eval()
val_metrics.start_val()
with torch.no_grad():
for i, batch in enumerate(val_loader):
x, y, _, y_mel = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).unsqueeze(1)
y_mel = y_mel.cuda(non_blocking=True)
with autocast(enabled=args.amp):
y_g_hat = gen(x)
with autocast(enabled=args.amp and args.autocast_spectrogram):
y_g_hat_mel = mel_spec(y_g_hat.float().squeeze(1),
fmax=args.mel_fmax_loss)
with autocast(enabled=args.amp):
# val_err_tot += F.l1_loss(y_mel, y_g_hat_mel).item() * 45
# NOTE: Scale by 45.0 to match train loss magnitude
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
val_metrics['loss_discrim'] = reduce_tensor(
loss_disc_s + loss_disc_f, args.world_size)
val_metrics['loss_gen'] = reduce_tensor(loss_gen_all,
args.world_size)
val_metrics['loss_mel'] = reduce_tensor(loss_mel, args.world_size)
val_metrics['frames'] = x.size(0) * x.size(1) * args.world_size
val_metrics.accumulate(scopes=['val'])
val_metrics.finish_val()
gen.train()
def main():
parser = argparse.ArgumentParser(description='PyTorch HiFi-GAN Training',
allow_abbrev=False)
parser = models.parse_model_args('HiFi-GAN', parse_args(parser))
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
print(nproc_per_node)
affinity = gpu_affinity.set_affinity(
args.local_rank,
nproc_per_node,
args.affinity
)
print(f'{args.local_rank}: thread affinity: {affinity}')
# seeds, distributed init, logging, cuDNN
distributed_run = args.world_size > 1
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
if distributed_run:
init_distributed(args, args.world_size, args.local_rank)
metrics = Metrics(scopes=['train', 'train_avg'],
benchmark_epochs=args.benchmark_epochs_num,
cuda=args.cuda)
val_metrics = Metrics(scopes=['val'], cuda=args.cuda)
init_logger(args.output, args.log_file, args.ema_decay)
logger.parameters(vars(args), tb_subset='train')
l2_promote()
torch.backends.cudnn.benchmark = not args.disable_cudnn_benchmark
train_setup = models.get_model_train_setup('HiFi-GAN', args)
gen_config = models.get_model_config('HiFi-GAN', args)
gen = models.get_model('HiFi-GAN', gen_config, 'cuda')
mpd = MultiPeriodDiscriminator(periods=args.mpd_periods,
concat_fwd=args.concat_fwd).cuda()
assert args.amp or not args.no_amp_grouped_conv, \
"--no-amp-grouped-conv is applicable only when AMP is enabled"
msd = MultiScaleDiscriminator(concat_fwd=args.concat_fwd,
no_amp_grouped_conv=args.no_amp_grouped_conv)
msd = msd.cuda()
mel_spec = partial(mel_spectrogram, n_fft=args.filter_length,
num_mels=args.num_mels,
sampling_rate=args.sampling_rate,
hop_size=args.hop_length, win_size=args.win_length,
fmin=args.mel_fmin)
kw = {'lr': args.learning_rate, 'betas': args.adam_betas}
proto = {'adam': FusedAdam, 'lamb': FusedLAMB, 'adamw': AdamW
}[args.optimizer]
optim_g = proto(gen.parameters(), **kw)
optim_d = proto(itertools.chain(msd.parameters(), mpd.parameters()), **kw)
scaler_g = amp.GradScaler(enabled=args.amp)
scaler_d = amp.GradScaler(enabled=args.amp)
# setup EMA
if args.ema_decay > 0:
# buried import; requires apex
from common.ema_utils import (apply_multi_tensor_ema,
init_multi_tensor_ema)
gen_ema = models.get_model('HiFi-GAN', gen_config, 'cuda').cuda()
mpd_ema = MultiPeriodDiscriminator(
periods=args.mpd_periods,
concat_fwd=args.concat_fwd).cuda()
msd_ema = MultiScaleDiscriminator(
concat_fwd=args.concat_fwd,
no_amp_grouped_conv=args.no_amp_grouped_conv).cuda()
else:
gen_ema, mpd_ema, msd_ema = None, None, None
# setup DDP
if distributed_run:
kw = {'device_ids': [args.local_rank],
'output_device': args.local_rank}
gen = DDP(gen, **kw)
msd = DDP(msd, **kw)
# DDP requires a module with at least one parameter
mpd = DDP(mpd, **kw) if len(args.mpd_periods) else mpd
# resume from last / load a checkpoint
train_state = {}
checkpointer = Checkpointer(args.output, args.keep_milestones)
checkpointer.maybe_load(
gen, mpd, msd, optim_g, optim_d, scaler_g, scaler_d, train_state, args,
gen_ema=None, mpd_ema=None, msd_ema=None)
iters_all = train_state.get('iters_all', 0)
last_epoch = train_state['epoch'] + 1 if 'epoch' in train_state else -1
sched_g = ExponentialLR(optim_g, gamma=args.lr_decay, last_epoch=last_epoch)
sched_d = ExponentialLR(optim_d, gamma=args.lr_decay, last_epoch=last_epoch)
if args.fine_tuning:
print_once('Doing fine-tuning')
train_loader = get_data_loader(args, distributed_run, train=True)
val_loader = get_data_loader(args, distributed_run, train=False,
val_kwargs=dict(repeat=5, split=True))
val_samples_loader = get_data_loader(args, False, train=False,
val_kwargs=dict(split=False),
batch_size=1)
if args.ema_decay > 0.0:
gen_ema_params = init_multi_tensor_ema(gen, gen_ema)
mpd_ema_params = init_multi_tensor_ema(mpd, mpd_ema)
msd_ema_params = init_multi_tensor_ema(msd, msd_ema)
epochs_done = 0
for epoch in range(max(1, last_epoch), args.epochs + 1):
metrics.start_epoch(epoch)
if distributed_run:
train_loader.sampler.set_epoch(epoch)
gen.train()
mpd.train()
msd.train()
iter_ = 0
iters_num = len(train_loader) // args.grad_accumulation
for step, batch in enumerate(train_loader):
if step // args.grad_accumulation >= iters_num:
break # only full effective batches
is_first_accum_step = step % args.grad_accumulation == 0
is_last_accum_step = (step + 1) % args.grad_accumulation == 0
assert (args.grad_accumulation > 1
or (is_first_accum_step and is_last_accum_step))
if is_first_accum_step:
iter_ += 1
iters_all += 1
metrics.start_iter(iter_)
accum_batches = []
optim_d.zero_grad(set_to_none=True)
optim_g.zero_grad(set_to_none=True)
x, y, _, y_mel = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).unsqueeze(1)
y_mel = y_mel.cuda(non_blocking=True)
accum_batches.append((x, y, y_mel))
with torch.set_grad_enabled(is_last_accum_step), \
autocast(enabled=args.amp):
y_g_hat = gen(x)
unfreeze(mpd)
unfreeze(msd)
with autocast(enabled=args.amp):
# MPD
y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach())
loss_disc_f = discriminator_loss(y_df_hat_r, y_df_hat_g)
# MSD
y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach())
loss_disc_s = discriminator_loss(y_ds_hat_r, y_ds_hat_g)
loss_disc_all = loss_disc_s + loss_disc_f
metrics['loss_discrim'] = reduce_tensor(loss_disc_all, args.world_size)
metrics['frames'] = x.size(0) * x.size(1) * args.world_size
metrics.accumulate()
loss_disc_all /= args.grad_accumulation
scaler_d.scale(loss_disc_all).backward()
if not is_last_accum_step:
continue
scaler_d.step(optim_d)
scaler_d.update()
# generator
freeze(mpd)
freeze(msd)
for _i, (x, y, y_mel) in enumerate(reversed(accum_batches)):
if _i != 0: # first `y_g_hat` can be reused
with autocast(enabled=args.amp):
y_g_hat = gen(x)
with autocast(enabled=args.amp and args.autocast_spectrogram):
y_g_hat_mel = mel_spec(y_g_hat.float().squeeze(1),
fmax=args.mel_fmax_loss)
# L1 mel-spectrogram Loss
with autocast(enabled=args.amp):
loss_mel = F.l1_loss(y_mel, y_g_hat_mel) * 45
y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat)
y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat)
loss_fm_f = feature_loss(fmap_f_r, fmap_f_g)
loss_fm_s = feature_loss(fmap_s_r, fmap_s_g)
loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g)
loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g)
loss_gen_all = loss_gen_s + loss_gen_f + loss_fm_s + loss_fm_f + loss_mel
metrics['loss_gen'] = reduce_tensor(loss_gen_all, args.world_size)
metrics['loss_mel'] = reduce_tensor(loss_mel, args.world_size)
metrics.accumulate()
loss_gen_all /= args.grad_accumulation
scaler_g.scale(loss_gen_all).backward()
scaler_g.step(optim_g)
scaler_g.update()
metrics['lrate_gen'] = optim_g.param_groups[0]['lr']
metrics['lrate_discrim'] = optim_d.param_groups[0]['lr']
metrics.accumulate()
if args.ema_decay > 0.0:
apply_multi_tensor_ema(args.ema_decay, *gen_ema_params)
apply_multi_tensor_ema(args.ema_decay, *mpd_ema_params)
apply_multi_tensor_ema(args.ema_decay, *msd_ema_params)
metrics.finish_iter() # done accumulating
if iters_all % args.step_logs_interval == 0:
logger.log((epoch, iter_, iters_num), metrics, scope='train',
tb_iter=iters_all, flush_log=True)
assert is_last_accum_step
metrics.finish_epoch()
logger.log((epoch,), metrics, scope='train_avg', flush_log=True)
if epoch % args.validation_interval == 0:
validate(args, gen, mel_spec, mpd, msd, val_loader, val_metrics)
logger.log((epoch,), val_metrics, scope='val', tb_iter=iters_all,
flush_log=True)
# validation samples
if epoch % args.samples_interval == 0 and args.local_rank == 0:
gen.eval()
with torch.no_grad():
for i, batch in enumerate(islice(val_samples_loader, 5)):
x, y, _, _ = batch
x = x.cuda(non_blocking=True)
y = y.cuda(non_blocking=True).unsqueeze(1)
with autocast(enabled=args.amp):
y_g_hat = gen(x)
with autocast(enabled=args.amp and args.autocast_spectrogram):
# use args.mel_fmax here rather than args.mel_fmax_loss
y_hat_spec = mel_spec(y_g_hat.float().squeeze(1),
fmax=args.mel_fmax)
logger.log_samples_tb(iters_all, i, y_g_hat, y_hat_spec,
args.sampling_rate)
if epoch == args.samples_interval: # ground truth
logger.log_samples_tb(0, i, y, x, args.sampling_rate)
gen.train()
train_state.update({'epoch': epoch, 'iters_all': iters_all})
# save before calling sched.step() so the LR is restored correctly on resume
checkpointer.maybe_save(
gen, mpd, msd, optim_g, optim_d, scaler_g, scaler_d, epoch,
train_state, args, gen_config, train_setup,
gen_ema=gen_ema, mpd_ema=mpd_ema, msd_ema=msd_ema)
logger.flush()
sched_g.step()
sched_d.step()
epochs_done += 1
if (args.epochs_this_job is not None
and epochs_done == args.epochs_this_job):
break
# finished training
if epochs_done > 0:
logger.log((), metrics, scope='train_benchmark', flush_log=True)
if epoch % args.validation_interval != 0: # val metrics are not up-to-date
validate(args, gen, mel_spec, mpd, msd, val_loader, val_metrics)
logger.log((), val_metrics, scope='val', flush_log=True)
else:
print_once(f'Finished without training after epoch {args.epochs}.')
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/train.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import sys
import time
import warnings
from pathlib import Path
from tqdm import tqdm
import torch
import numpy as np
from scipy.stats import norm
from scipy.io.wavfile import write
from torch.nn.functional import l1_loss
from torch.nn.utils.rnn import pad_sequence
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import models
from common import gpu_affinity
from common.tb_dllogger import stdout_metric_format, unique_log_fpath
from common.text import cmudict
from common.text.text_processing import TextProcessing
from common.utils import l2_promote
from fastpitch.pitch_transform import pitch_transform_custom
from hifigan.data_function import MAX_WAV_VALUE, mel_spectrogram
from hifigan.logging import init_inference_metadata
from hifigan.models import Denoiser
CHECKPOINT_SPECIFIC_ARGS = [
'sampling_rate', 'hop_length', 'win_length', 'p_arpabet', 'text_cleaners',
'symbol_set', 'max_wav_value', 'prepend_space_to_text',
'append_space_to_text']
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, required=True,
help='Full path to the input text '
'(phrases separated by newlines)')
parser.add_argument('-o', '--output', default=None,
help='Output folder to save audio (file per phrase)')
parser.add_argument('--log-file', type=str, default=None,
help='Path to a DLLogger log file')
parser.add_argument('--save-mels', action='store_true',
help='Save generator outputs to disk')
parser.add_argument('--cuda', action='store_true',
help='Run inference on a GPU using CUDA')
parser.add_argument('--cudnn-benchmark', action='store_true',
help='Enable cudnn benchmark mode')
parser.add_argument('--l2-promote', action='store_true',
help='Increase max fetch granularity of GPU L2 cache')
parser.add_argument('--fastpitch', type=str, default=None, required=False,
help='Full path to the spectrogram generator .pt file '
'(skip to synthesize from ground truth mels)')
parser.add_argument('--waveglow', type=str, default=None, required=False,
help='Full path to a WaveGlow model .pt file')
parser.add_argument('-s', '--waveglow-sigma-infer', default=0.9, type=float,
help='WaveGlow sigma')
parser.add_argument('--hifigan', type=str, default=None, required=False,
help='Full path to a HiFi-GAN model .pt file')
parser.add_argument('-d', '--denoising-strength', default=0.0, type=float,
help='Capture and subtract model bias to enhance audio')
parser.add_argument('--hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--win-length', type=int, default=1024,
help='STFT win length for denoiser and mel loss')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
choices=[22050, 44100], help='Sampling rate')
parser.add_argument('--max_wav_value', default=32768.0, type=float,
help='Maximum audio waveform value')
parser.add_argument('--amp', action='store_true',
help='Inference with AMP')
parser.add_argument('-bs', '--batch-size', type=int, default=64)
parser.add_argument('--warmup-steps', type=int, default=0,
help='Warmup iterations before measuring performance')
parser.add_argument('--repeats', type=int, default=1,
help='Repeat inference for benchmarking')
parser.add_argument('--torchscript', action='store_true',
help='Run inference with TorchScript model (convert to TS if needed)')
parser.add_argument('--checkpoint-format', type=str,
choices=['pyt', 'ts'], default='pyt',
help='Input checkpoint format (PyT or TorchScript)')
parser.add_argument('--torch-tensorrt', action='store_true',
help='Run inference with Torch-TensorRT model (compile beforehand)')
parser.add_argument('--report-mel-loss', action='store_true',
help='Report mel loss in metrics')
parser.add_argument('--ema', action='store_true',
help='Use EMA averaged model (if saved in checkpoints)')
parser.add_argument('--dataset-path', type=str,
help='Path to dataset (for loading extra data fields)')
parser.add_argument('--speaker', type=int, default=0,
help='Speaker ID for a multi-speaker model')
parser.add_argument('--affinity', type=str, default='single',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
transf = parser.add_argument_group('transform')
transf.add_argument('--fade-out', type=int, default=6,
help='Number of fadeout frames at the end')
transf.add_argument('--pace', type=float, default=1.0,
help='Adjust the pace of speech')
transf.add_argument('--pitch-transform-flatten', action='store_true',
help='Flatten the pitch')
transf.add_argument('--pitch-transform-invert', action='store_true',
help='Invert the pitch wrt mean value')
transf.add_argument('--pitch-transform-amplify', type=float, default=1.0,
help='Multiplicative amplification of pitch variability. '
'Typical values are in the range (1.0, 3.0).')
transf.add_argument('--pitch-transform-shift', type=float, default=0.0,
help='Raise/lower the pitch by <hz>')
transf.add_argument('--pitch-transform-custom', action='store_true',
help='Apply the transform from pitch_transform.py')
txt = parser.add_argument_group('Text processing parameters')
txt.add_argument('--text-cleaners', type=str, nargs='*',
default=['english_cleaners_v2'],
help='Type of text cleaners for input text')
txt.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
txt.add_argument('--p-arpabet', type=float, default=0.0, help='')
txt.add_argument('--heteronyms-path', type=str,
default='data/cmudict/heteronyms', help='')
txt.add_argument('--cmudict-path', type=str,
default='data/cmudict/cmudict-0.7b', help='')
return parser
def load_fields(fpath):
lines = [l.strip() for l in open(fpath, encoding='utf-8')]
if fpath.endswith('.tsv'):
columns = lines[0].split('\t')
fields = list(zip(*[t.split('\t') for t in lines[1:]]))
else:
columns = ['text']
fields = [lines]
return {c: f for c, f in zip(columns, fields)}
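# Illustrative input layouts accepted by load_fields (values are hypothetical):
# a plain text file holds one phrase per line, while a .tsv file starts with a
# header row naming the columns, e.g.
#   text<TAB>output<TAB>mel
#   Hello world.<TAB>hello.wav<TAB>mels/hello.pt
# yielding {'text': (...), 'output': (...), 'mel': (...)}.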
def prepare_input_sequence(fields, device, symbol_set, text_cleaners,
batch_size=128, dataset=None, load_mels=False,
load_pitch=False, p_arpabet=0.0):
tp = TextProcessing(symbol_set, text_cleaners, p_arpabet=p_arpabet)
fields['text'] = [torch.LongTensor(tp.encode_text(text))
for text in fields['text']]
order = np.argsort([-t.size(0) for t in fields['text']])
fields['text'] = [fields['text'][i] for i in order]
fields['text_lens'] = torch.LongTensor([t.size(0) for t in fields['text']])
for t in fields['text']:
print(tp.sequence_to_text(t.numpy()))
if load_mels:
assert 'mel' in fields
assert dataset is not None
fields['mel'] = [
torch.load(Path(dataset, fields['mel'][i])).t() for i in order]
fields['mel_lens'] = torch.LongTensor([t.size(0) for t in fields['mel']])
if load_pitch:
assert 'pitch' in fields
fields['pitch'] = [
torch.load(Path(dataset, fields['pitch'][i])) for i in order]
fields['pitch_lens'] = torch.LongTensor([t.size(0) for t in fields['pitch']])
if 'output' in fields:
fields['output'] = [fields['output'][i] for i in order]
# cut into batches & pad
batches = []
for b in range(0, len(order), batch_size):
batch = {f: values[b:b+batch_size] for f, values in fields.items()}
for f in batch:
if f == 'text':
batch[f] = pad_sequence(batch[f], batch_first=True)
elif f == 'mel' and load_mels:
batch[f] = pad_sequence(batch[f], batch_first=True).permute(0, 2, 1)
elif f == 'pitch' and load_pitch:
batch[f] = pad_sequence(batch[f], batch_first=True)
if type(batch[f]) is torch.Tensor:
batch[f] = batch[f].to(device)
batches.append(batch)
return batches
def build_pitch_transformation(args):
if args.pitch_transform_custom:
def custom_(pitch, pitch_lens, mean, std):
return (pitch_transform_custom(pitch * std + mean, pitch_lens)
- mean) / std
return custom_
fun = 'pitch'
if args.pitch_transform_flatten:
fun = f'({fun}) * 0.0'
if args.pitch_transform_invert:
fun = f'({fun}) * -1.0'
if args.pitch_transform_amplify != 1.0:
ampl = args.pitch_transform_amplify
fun = f'({fun}) * {ampl}'
if args.pitch_transform_shift != 0.0:
hz = args.pitch_transform_shift
fun = f'({fun}) + {hz} / std'
if fun == 'pitch':
return None
return eval(f'lambda pitch, pitch_lens, mean, std: {fun}')
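# Illustrative result (hypothetical flag values): with
# --pitch-transform-amplify 2.0 --pitch-transform-shift 50, the string built
# above is '((pitch) * 2.0) + 50.0 / std', so eval() returns
#   lambda pitch, pitch_lens, mean, std: ((pitch) * 2.0) + 50.0 / std
# i.e. the transform runs on normalized pitch and the Hz shift is divided by std.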
def setup_mel_loss_reporting(args, voc_train_setup):
if args.denoising_strength > 0.0:
print('WARNING: denoising will be included in vocoder mel loss')
num_mels = voc_train_setup.get('num_mels', 80)
fmin = voc_train_setup.get('mel_fmin', 0)
fmax = voc_train_setup.get('mel_fmax', 8000) # not mel_fmax_loss
def compute_audio_mel_loss(gen_audios, gt_mels, mel_lens):
gen_audios /= MAX_WAV_VALUE
total_loss = 0
for gen_audio, gt_mel, mel_len in zip(gen_audios, gt_mels, mel_lens):
mel_len = mel_len.item()
gen_audio = gen_audio[None, :mel_len * args.hop_length]
gen_mel = mel_spectrogram(gen_audio, args.win_length, num_mels,
args.sampling_rate, args.hop_length,
args.win_length, fmin, fmax)[0]
total_loss += l1_loss(gen_mel, gt_mel[:, :mel_len])
return total_loss.item()
return compute_audio_mel_loss
def compute_mel_loss(mels, lens, gt_mels, gt_lens):
total_loss = 0
for mel, len_, gt_mel, gt_len in zip(mels, lens, gt_mels, gt_lens):
min_len = min(len_, gt_len)
total_loss += l1_loss(gt_mel[:, :min_len], mel[:, :min_len])
return total_loss.item()
class MeasureTime(list):
def __init__(self, *args, cuda=True, **kwargs):
super(MeasureTime, self).__init__(*args, **kwargs)
self.cuda = cuda
def __enter__(self):
if self.cuda:
torch.cuda.synchronize()
self.t0 = time.time()
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.cuda:
torch.cuda.synchronize()
self.append(time.time() - self.t0)
def __add__(self, other):
assert len(self) == len(other)
return MeasureTime((sum(ab) for ab in zip(self, other)), cuda=self.cuda)
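# Illustrative usage sketch (the forward pass below is hypothetical):
#   measures = MeasureTime(cuda=False)
#   for batch in batches:
#       with measures:
#           _ = model(batch)
#   print(f'mean latency: {sum(measures) / len(measures):.3f} s')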
def main():
"""
Launches text-to-speech inference on a single GPU.
"""
parser = argparse.ArgumentParser(description='PyTorch FastPitch Inference',
allow_abbrev=False)
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
# print(nproc_per_node)
affinity = gpu_affinity.set_affinity(
0,
nproc_per_node,
args.affinity
)
print(f'Thread affinity: {affinity}')
if args.l2_promote:
l2_promote()
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.output is not None:
Path(args.output).mkdir(parents=False, exist_ok=True)
log_fpath = args.log_file or str(Path(args.output, 'nvlog_infer.json'))
DLLogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
init_inference_metadata(args.batch_size)
[DLLogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()]
device = torch.device('cuda' if args.cuda else 'cpu')
gen_train_setup = {}
voc_train_setup = {}
generator = None
vocoder = None
denoiser = None
is_ts_based_infer = args.torch_tensorrt or args.torchscript
assert args.checkpoint_format == 'pyt' or is_ts_based_infer, \
'TorchScript checkpoint can be used only for TS or Torch-TRT' \
' inference. Please set --torchscript or --torch-tensorrt flag.'
assert args.waveglow is None or args.hifigan is None, \
"Specify a single vocoder model"
def _load_pyt_or_ts_model(model_name, ckpt_path):
if args.checkpoint_format == 'ts':
model = models.load_and_setup_ts_model(model_name, ckpt_path,
args.amp, device)
model_train_setup = {}
return model, model_train_setup
model, _, model_train_setup = models.load_and_setup_model(
model_name, parser, ckpt_path, args.amp, device,
unk_args=unk_args, forward_is_infer=True, jitable=is_ts_based_infer)
if is_ts_based_infer:
model = torch.jit.script(model)
return model, model_train_setup
if args.fastpitch is not None:
gen_name = 'fastpitch'
generator, gen_train_setup = _load_pyt_or_ts_model('FastPitch',
args.fastpitch)
if args.waveglow is not None:
voc_name = 'waveglow'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
vocoder, _, voc_train_setup = models.load_and_setup_model(
'WaveGlow', parser, args.waveglow, args.amp, device,
unk_args=unk_args, forward_is_infer=True, jitable=False)
if args.denoising_strength > 0.0:
denoiser = Denoiser(vocoder, sigma=0.0,
win_length=args.win_length).to(device)
# if args.torchscript:
# vocoder = torch.jit.script(vocoder)
def generate_audio(mel):
audios = vocoder(mel, sigma=args.waveglow_sigma_infer)
if denoiser is not None:
audios = denoiser(audios.float(), args.denoising_strength).squeeze(1)
return audios
elif args.hifigan is not None:
voc_name = 'hifigan'
vocoder, voc_train_setup = _load_pyt_or_ts_model('HiFi-GAN',
args.hifigan)
if args.denoising_strength > 0.0:
denoiser = Denoiser(vocoder, win_length=args.win_length).to(device)
if args.torch_tensorrt:
vocoder = models.convert_ts_to_trt('HiFi-GAN', vocoder, parser,
args.amp, unk_args)
def generate_audio(mel):
audios = vocoder(mel).float()
if denoiser is not None:
audios = denoiser(audios.squeeze(1), args.denoising_strength)
return audios.squeeze(1) * args.max_wav_value
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
for k in CHECKPOINT_SPECIFIC_ARGS:
v1 = gen_train_setup.get(k, None)
v2 = voc_train_setup.get(k, None)
assert v1 is None or v2 is None or v1 == v2, \
f'{k} mismatch in spectrogram generator and vocoder'
val = v1 or v2
if val and getattr(args, k) != val:
src = 'generator' if v2 is None else 'vocoder'
print(f'Overwriting args.{k}={getattr(args, k)} with {val} '
f'from {src} checkpoint.')
setattr(args, k, val)
gen_kw = {'pace': args.pace,
'speaker': args.speaker,
'pitch_tgt': None,
'pitch_transform': build_pitch_transformation(args)}
if is_ts_based_infer and generator is not None:
gen_kw.pop('pitch_transform')
print('Note: --pitch-transform-* args are disabled with TorchScript. '
'To condition on pitch, pass pitch_tgt as input.')
if args.p_arpabet > 0.0:
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
if args.report_mel_loss:
mel_loss_fn = setup_mel_loss_reporting(args, voc_train_setup)
fields = load_fields(args.input)
batches = prepare_input_sequence(
fields, device, args.symbol_set, args.text_cleaners, args.batch_size,
args.dataset_path, load_mels=(generator is None or args.report_mel_loss),
p_arpabet=args.p_arpabet)
cycle = itertools.cycle(batches)
# Use real data rather than synthetic inputs - FastPitch predicts output lengths
for _ in tqdm(range(args.warmup_steps), 'Warmup'):
with torch.no_grad():
b = next(cycle)
if generator is not None:
mel, *_ = generator(b['text'])
else:
mel, mel_lens = b['mel'], b['mel_lens']
if args.amp:
mel = mel.half()
if vocoder is not None:
audios = generate_audio(mel)
gen_measures = MeasureTime(cuda=args.cuda)
vocoder_measures = MeasureTime(cuda=args.cuda)
all_utterances = 0
all_samples = 0
all_batches = 0
all_letters = 0
all_frames = 0
gen_mel_loss_sum = 0
voc_mel_loss_sum = 0
reps = args.repeats
log_enabled = reps == 1
log = lambda s, d: DLLogger.log(step=s, data=d) if log_enabled else None
for rep in (tqdm(range(reps), 'Inference') if reps > 1 else range(reps)):
for b in batches:
if generator is None:
mel, mel_lens = b['mel'], b['mel_lens']
if args.amp:
mel = mel.half()
else:
with torch.no_grad(), gen_measures:
mel, mel_lens, *_ = generator(b['text'], **gen_kw)
if args.report_mel_loss:
gen_mel_loss_sum += compute_mel_loss(
mel, mel_lens, b['mel'], b['mel_lens'])
gen_infer_perf = mel.size(0) * mel.size(2) / gen_measures[-1]
all_letters += b['text_lens'].sum().item()
all_frames += mel.size(0) * mel.size(2)
log(rep, {f"{gen_name}_frames/s": gen_infer_perf})
log(rep, {f"{gen_name}_latency": gen_measures[-1]})
if args.save_mels:
for i, mel_ in enumerate(mel):
m = mel_[:, :mel_lens[i].item()].permute(1, 0)
fname = b['output'][i] if 'output' in b else f'mel_{i}.npy'
mel_path = Path(args.output, Path(fname).stem + '.npy')
np.save(mel_path, m.cpu().numpy())
if vocoder is not None:
with torch.no_grad(), vocoder_measures:
audios = generate_audio(mel)
vocoder_infer_perf = (
audios.size(0) * audios.size(1) / vocoder_measures[-1])
log(rep, {f"{voc_name}_samples/s": vocoder_infer_perf})
log(rep, {f"{voc_name}_latency": vocoder_measures[-1]})
if args.report_mel_loss:
voc_mel_loss_sum += mel_loss_fn(audios, mel, mel_lens)
if args.output is not None and reps == 1:
for i, audio in enumerate(audios):
audio = audio[:mel_lens[i].item() * args.hop_length]
if args.fade_out:
fade_len = args.fade_out * args.hop_length
fade_w = torch.linspace(1.0, 0.0, fade_len)
audio[-fade_len:] *= fade_w.to(audio.device)
audio = audio / torch.max(torch.abs(audio))
fname = b['output'][i] if 'output' in b else f'audio_{i}.wav'
audio_path = Path(args.output, fname)
write(audio_path, args.sampling_rate, audio.cpu().numpy())
if generator is not None:
log(rep, {"latency": (gen_measures[-1] + vocoder_measures[-1])})
all_utterances += mel.size(0)
all_samples += mel_lens.sum().item() * args.hop_length
all_batches += 1
log_enabled = True
if generator is not None:
gm = np.sort(np.asarray(gen_measures))
rtf = all_samples / (all_utterances * gm.mean() * args.sampling_rate)
rtf_at = all_samples / (all_batches * gm.mean() * args.sampling_rate)
log((), {f"avg_{gen_name}_tokens/s": all_letters / gm.sum()})
log((), {f"avg_{gen_name}_frames/s": all_frames / gm.sum()})
log((), {f"avg_{gen_name}_latency": gm.mean()})
log((), {f"avg_{gen_name}_RTF": rtf})
log((), {f"avg_{gen_name}_RTF@{args.batch_size}": rtf_at})
log((), {f"90%_{gen_name}_latency": gm.mean() + norm.ppf((1.0 + 0.90) / 2) * gm.std()})
log((), {f"95%_{gen_name}_latency": gm.mean() + norm.ppf((1.0 + 0.95) / 2) * gm.std()})
log((), {f"99%_{gen_name}_latency": gm.mean() + norm.ppf((1.0 + 0.99) / 2) * gm.std()})
if args.report_mel_loss:
log((), {f"avg_{gen_name}_mel-loss": gen_mel_loss_sum / all_utterances})
if vocoder is not None:
vm = np.sort(np.asarray(vocoder_measures))
rtf = all_samples / (all_utterances * vm.mean() * args.sampling_rate)
rtf_at = all_samples / (all_batches * vm.mean() * args.sampling_rate)
log((), {f"avg_{voc_name}_samples/s": all_samples / vm.sum()})
log((), {f"avg_{voc_name}_latency": vm.mean()})
log((), {f"avg_{voc_name}_RTF": rtf})
log((), {f"avg_{voc_name}_RTF@{args.batch_size}": rtf_at})
log((), {f"90%_{voc_name}_latency": vm.mean() + norm.ppf((1.0 + 0.90) / 2) * vm.std()})
log((), {f"95%_{voc_name}_latency": vm.mean() + norm.ppf((1.0 + 0.95) / 2) * vm.std()})
log((), {f"99%_{voc_name}_latency": vm.mean() + norm.ppf((1.0 + 0.99) / 2) * vm.std()})
if args.report_mel_loss:
log((), {f"avg_{voc_name}_mel-loss": voc_mel_loss_sum / all_utterances})
if generator is not None and vocoder is not None:
m = gm + vm
rtf = all_samples / (all_utterances * m.mean() * args.sampling_rate)
rtf_at = all_samples / (all_batches * m.mean() * args.sampling_rate)
log((), {"avg_samples/s": all_samples / m.sum()})
log((), {"avg_letters/s": all_letters / m.sum()})
log((), {"avg_latency": m.mean()})
log((), {"avg_RTF": rtf})
log((), {f"avg_RTF@{args.batch_size}": rtf_at})
log((), {"90%_latency": m.mean() + norm.ppf((1.0 + 0.90) / 2) * m.std()})
log((), {"95%_latency": m.mean() + norm.ppf((1.0 + 0.95) / 2) * m.std()})
log((), {"99%_latency": m.mean() + norm.ppf((1.0 + 0.99) / 2) * m.std()})
DLLogger.flush()
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/inference.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import OrderedDict
from copy import copy
from itertools import product
from pathlib import Path
import dllogger
import numpy as np
import torch.distributed as dist
import torch
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from common import tb_dllogger
from common.tb_dllogger import (stdout_metric_format, stdout_step_format,
unique_log_fpath, TBLogger)
def init_logger(output_dir, log_file, ema_decay=0.0):
local_rank = 0 if not dist.is_initialized() else dist.get_rank()
if local_rank == 0:
Path(output_dir).mkdir(parents=False, exist_ok=True)
log_fpath = log_file or Path(output_dir, 'nvlog.json')
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)
])
init_train_metadata()
else:
dllogger.init(backends=[])
tb_train = ['train']
tb_val = ['val']
tb_ema = [k + '_ema' for k in tb_val] if ema_decay > 0.0 else []
tb_dllogger.tb_loggers = {
s: TBLogger(enabled=(local_rank == 0), log_dir=output_dir, name=s)
for s in tb_train + tb_val + tb_ema}
def init_train_metadata():
dllogger.metadata("train_lrate_gen",
{"name": "g lr", "unit": None, "format": ":>3.2e"})
dllogger.metadata("train_lrate_discrim",
{"name": "d lr", "unit": None, "format": ":>3.2e"})
dllogger.metadata("train_avg_lrate_gen",
{"name": "avg g lr", "unit": None, "format": ":>3.2e"})
dllogger.metadata("train_avg_lrate_discrim",
{"name": "avg d lr", "unit": None, "format": ":>3.2e"})
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('val', ' avg val '), ('val_ema', ' EMA val ')]:
dllogger.metadata(f"{id_}_loss_gen",
{"name": f"{pref}g loss", "unit": None, "format": ":>6.3f"})
dllogger.metadata(f"{id_}_loss_discrim",
{"name": f"{pref}d loss", "unit": None, "format": ":>6.3f"})
dllogger.metadata(f"{id_}_loss_mel",
{"name": f"{pref}mel loss", "unit": None, "format": ":>6.3f"})
dllogger.metadata(f"{id_}_frames/s",
{"name": None, "unit": "frames/s", "format": ":>8.2f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>3.2f"})
def init_inference_metadata(batch_size=None):
modalities = [('latency', 's', ':>10.5f'), ('RTF', 'x', ':>10.2f'),
('frames/s', 'frames/s', ':>10.2f'), ('samples/s', 'samples/s', ':>10.2f'),
('letters/s', 'letters/s', ':>10.2f'), ('tokens/s', 'tokens/s', ':>10.2f'),
('mel-loss', None, ':>10.5f')]
if batch_size is not None:
modalities.append((f'RTF@{batch_size}', 'x', ':>10.2f'))
percs = ['', 'avg', '90%', '95%', '99%']
models = ['', 'fastpitch', 'waveglow', 'hifigan']
for perc, model, (mod, unit, fmt) in product(percs, models, modalities):
name = f'{perc} {model} {mod}'.strip().replace('  ', ' ')
dllogger.metadata(name.replace(' ', '_'),
{'name': f'{name: <26}', 'unit': unit, 'format': fmt})
class defaultdict(OrderedDict):
"""A simple, ordered defaultdict."""
def __init__(self, type_, *args, **kwargs):
self.type_ = type_
super().__init__(*args, **kwargs)
def __getitem__(self, key):
if key not in self:
self.__setitem__(key, self.type_())
return super().__getitem__(key)
def __copy__(self):
return defaultdict(self.type_, self)
class Metrics(dict):
def __init__(self, scopes=['train', 'train_avg'],
dll_keys=['loss_gen', 'loss_discrim', 'loss_mel',
'frames/s', 'took', 'lrate_gen', 'lrate_discrim'],
benchmark_epochs=0, cuda=True):
super().__init__()
self.dll_keys = dll_keys
self.metrics = {scope: defaultdict(float) for scope in scopes}
self.metric_counts = {scope: defaultdict(int) for scope in scopes}
self.start_time = {scope: None for scope in scopes}
self.benchmark_epochs = benchmark_epochs
if benchmark_epochs > 0:
self.metrics['train_benchmark'] = defaultdict(list)
self.cuda = cuda
def __setitem__(self, key, val):
if type(val) is dict:
for k, v in val.items():
super().__setitem__(k, v)
else:
super().__setitem__(key, val)
def __getitem__(self, key):
if key not in self:
self.__setitem__(key, 0.0)
return super().__getitem__(key)
def start_accumulating(self, step, start_timer=True, scope='train'):
del step # unused
self.clear()
self.metrics[scope].clear()
self.metric_counts[scope].clear()
if start_timer:
self.start_time[scope] = time.time()
def accumulate(self, scopes=['train', 'train_avg']):
for scope in scopes:
for k, v in self.items():
self.metrics[scope][k] += v
self.metric_counts[scope][k] += 1
self.clear()
def finish_accumulating(self, stop_timer=True, scope='train'):
metr = self.metrics[scope]
counts = self.metric_counts[scope]
for k, v in metr.items():
if type(v) is torch.Tensor:
v = v.item()
metr[k] = v / counts[k]
if stop_timer:
took = time.time() - self.start_time[scope]
if 'frames' in metr:
metr['frames/s'] = metr.pop('frames') * counts['frames'] / took
metr['took'] = took
def start_iter(self, iter, start_timer=True):
self.start_accumulating(iter, start_timer, 'train')
def start_epoch(self, epoch, start_timer=True):
if self.cuda:
torch.cuda.synchronize()
self.start_accumulating(epoch, start_timer, 'train_avg')
def start_val(self, start_timer=True):
if self.cuda:
torch.cuda.synchronize()
self.start_accumulating(None, start_timer, 'val')
def finish_iter(self, stop_timer=True):
self.finish_accumulating(stop_timer, 'train')
def finish_epoch(self, stop_timer=True):
if self.cuda:
torch.cuda.synchronize()
self.finish_accumulating(stop_timer, 'train_avg')
metr = self.metrics['train_benchmark']
for k in ('took', 'frames/s', 'loss_gen', 'loss_discrim', 'loss_mel'):
metr[k].append(self.metrics['train_avg'][k])
if len(metr[k]) > self.benchmark_epochs:
metr[k].pop(0)
def finish_val(self, stop_timer=True):
if self.cuda:
torch.cuda.synchronize()
self.finish_accumulating(stop_timer, 'val')
def get_metrics(self, scope='train', target='dll'):
if scope == 'train_benchmark':
metr = self.metrics[scope]
ret = {'train_' + k: np.mean(v) for k, v in metr.items()}
ret['benchmark_epochs_num'] = len(list(metr.values())[0])
return ret
ret = copy(self.metrics[scope])
if scope == 'train':
ret.update(self)
if target == 'dll':
ret = {f'{scope}_{k}': v
for k, v in ret.items() if k in self.dll_keys}
elif target == 'tb':
# Rename keys so they would group nicely inside TensorBoard
def split_key(k):
pos = k.rfind('_')
return k[:pos] + '/' + k[pos+1:] if pos >= 0 else k
ret = {split_key(k): v for k, v in ret.items()}
return ret
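# Illustrative usage sketch with hypothetical values:
#   metrics = Metrics(scopes=['train', 'train_avg'], benchmark_epochs=1,
#                     cuda=False)
#   metrics.start_epoch(1)
#   metrics.start_iter(1)
#   metrics['loss_gen'] = 1.23
#   metrics['frames'] = 8192
#   metrics.accumulate()
#   metrics.finish_iter()
#   metrics.finish_epoch()
#   print(metrics.get_metrics('train_avg', target='dll'))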
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/hifigan/logging.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# ResBlock1, ResBlock2, Generator, DiscriminatorP, DiscriminatorS, MultiScaleDiscriminator,
# MultiPeriodDiscriminator, feature_loss, discriminator_loss, generator_loss,
# init_weights, get_padding
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from common import filter_warnings
from common.stft import STFT
from common.utils import AttrDict, init_weights, get_padding
LRELU_SLOPE = 0.1
class NoAMPConv1d(Conv1d):
def __init__(self, *args, no_amp=False, **kwargs):
super().__init__(*args, **kwargs)
self.no_amp = no_amp
def _cast(self, x, dtype):
if isinstance(x, (list, tuple)):
return [self._cast(t, dtype) for t in x]
else:
return x.to(dtype)
def forward(self, *args):
if not self.no_amp:
return super().forward(*args)
with torch.cuda.amp.autocast(enabled=False):
return self._cast(
super().forward(*self._cast(args, torch.float)), args[0].dtype)
class ResBlock1(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
self.conf = conf
self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs1 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[1]), dilation[1])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[2]), dilation[2])),
])
self.convs2 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
])
self.convs1.apply(init_weights)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c1(xt)
xt = F.leaky_relu(xt, self.lrelu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3)):
super().__init__()
self.conf = conf
self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs = nn.ModuleList([
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[1]), dilation[1])),
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(nn.Module):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']
def __init__(self, conf):
super().__init__()
conf = AttrDict(conf)
self.conf = conf
self.num_kernels = len(conf.resblock_kernel_sizes)
self.num_upsamples = len(conf.upsample_rates)
self.conv_pre = weight_norm(
Conv1d(80, conf.upsample_initial_channel, 7, 1, padding=3))
self.lrelu_slope = LRELU_SLOPE
resblock = ResBlock1 if conf.resblock == '1' else ResBlock2
self.ups = []
for i, (u, k) in enumerate(zip(conf.upsample_rates,
conf.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(conf.upsample_initial_channel // (2 ** i),
conf.upsample_initial_channel // (2 ** (i + 1)),
k, u, padding=(k-u)//2)))
self.ups = nn.Sequential(*self.ups)
self.resblocks = []
for i in range(len(self.ups)):
resblock_list = []
ch = conf.upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(conf.resblock_kernel_sizes,
conf.resblock_dilation_sizes)):
resblock_list.append(resblock(conf, ch, k, d))
resblock_list = nn.Sequential(*resblock_list)
self.resblocks.append(resblock_list)
self.resblocks = nn.Sequential(*self.resblocks)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def load_state_dict(self, state_dict, strict=True):
# Fallback for old checkpoints (pre-ONNX fix)
new_sd = {}
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only remap if the checkpoint uses the older flat resblock indexing
if len(parts) == 5:
layer = int(parts[1])
new_layer = f"{layer//3}.{layer%3}"
new_k = f"resblocks.{new_layer}.{'.'.join(parts[2:])}"
new_sd[new_k] = v
# Fix for conv1d/conv2d/NHWC
curr_sd = self.state_dict()
for key in new_sd:
len_diff = len(new_sd[key].size()) - len(curr_sd[key].size())
if len_diff == -1:
new_sd[key] = new_sd[key].unsqueeze(-1)
elif len_diff == 1:
new_sd[key] = new_sd[key].squeeze(-1)
super().load_state_dict(new_sd, strict=strict)
def forward(self, x):
x = self.conv_pre(x)
for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
x = F.leaky_relu(x, self.lrelu_slope)
x = upsample_layer(x)
xs = 0
for resblock in resblock_group:
xs += resblock(x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('HiFi-GAN: Removing weight norm.')
for l in self.ups:
remove_weight_norm(l)
for group in self.resblocks:
for block in group:
block.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
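# A minimal usage sketch for the Generator (mel-spectrogram -> waveform vocoder).
# The config below mirrors the LJSpeech defaults used in hifigan/entrypoints.py and
# hifigan/arg_parser.py; other values (batch size, frame count) are illustrative only.
#
#   config = {'upsample_rates': [8, 8, 2, 2], 'upsample_kernel_sizes': [16, 16, 4, 4],
#             'upsample_initial_channel': 512, 'resblock': '1',
#             'resblock_kernel_sizes': [3, 7, 11],
#             'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]]}
#   gen = Generator(config).eval()
#   mel = torch.randn(1, 80, 200)            # [batch, n_mel_channels, frames]
#   with torch.no_grad():
#       audio = gen(mel)                     # [1, 1, 200 * 256] for 256x total upsampling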
class Denoiser(nn.Module):
""" Removes model bias from audio produced with hifigan """
def __init__(self, hifigan, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros', **infer_kw):
super().__init__()
w = next(p for name, p in hifigan.named_parameters()
if name.endswith('.weight'))
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).to(w.device)
mel_init = {'zeros': torch.zeros, 'normal': torch.randn}[mode]
mel_input = mel_init((1, 80, 88), dtype=w.dtype, device=w.device)
with torch.no_grad():
bias_audio = hifigan(mel_input, **infer_kw).float()
if len(bias_audio.size()) > 2:
bias_audio = bias_audio.squeeze(0)
elif len(bias_audio.size()) < 2:
bias_audio = bias_audio.unsqueeze(0)
assert len(bias_audio.size()) == 2
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
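# A minimal sketch of bias removal, following the pretrained path in
# hifigan/entrypoints.py; the denoising strength of 0.005 is illustrative.
# remove_weight_norm() restores plain '.weight' parameters, which the Denoiser
# constructor looks up to infer dtype and device.
#
#   hifigan.load_state_dict(state_dict)
#   hifigan.remove_weight_norm()
#   denoiser = Denoiser(hifigan, win_length=1024)
#   audio = hifigan(mel)                               # [B, 1, n_samples]
#   audio = denoiser(audio.squeeze(1), strength=0.005) # denoised waveform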
class DiscriminatorP(nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super().__init__()
self.period = period
norm_f = spectral_norm if use_spectral_norm else weight_norm
ks = kernel_size
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (ks, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class MultiPeriodDiscriminator(nn.Module):
def __init__(self, periods, concat_fwd=False):
super().__init__()
layers = [DiscriminatorP(p) for p in periods]
self.discriminators = nn.ModuleList(layers)
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
y_ds, fmaps = d(concat_discr_input(y, y_hat))
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(nn.Module):
def __init__(self, use_spectral_norm=False, no_amp_grouped_conv=False):
super().__init__()
norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(NoAMPConv1d(128, 256, 41, 2, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(256, 512, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(512, 1024, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(1024, 1024, 41, 1, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
# x = l(x.unsqueeze(-1)).squeeze(-1)
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(nn.Module):
def __init__(self, no_amp_grouped_conv=False, concat_fwd=False):
super().__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=1),
AvgPool1d(4, 2, padding=1)
])
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
ys = concat_discr_input(y, y_hat)
if i != 0:
ys = self.meanpools[i-1](ys)
y_ds, fmaps = d(ys)
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def concat_discr_input(y, y_hat):
return torch.cat((y, y_hat), dim=0)
def split_discr_output(y_ds, fmaps):
y_d_r, y_d_g = torch.chunk(y_ds, 2, dim=0)
fmap_r, fmap_g = zip(*(torch.chunk(f, 2, dim=0) for f in fmaps))
return y_d_r, y_d_g, fmap_r, fmap_g
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
return loss
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
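# A minimal sketch of how the discriminators and losses above combine during
# HiFi-GAN training; variable names are illustrative, y is real audio [B, 1, T]
# and y_hat is the generator output. The full recipe typically also adds a
# mel-spectrogram L1 term on the generator side.
#
#   mpd = MultiPeriodDiscriminator(periods=[2, 3, 5, 7, 11])
#   msd = MultiScaleDiscriminator()
#   # discriminator step
#   y_dp_r, y_dp_g, _, _ = mpd(y, y_hat.detach())
#   y_ds_r, y_ds_g, _, _ = msd(y, y_hat.detach())
#   loss_d = discriminator_loss(y_dp_r, y_dp_g) + discriminator_loss(y_ds_r, y_ds_g)
#   # generator step
#   y_dp_r, y_dp_g, fmap_p_r, fmap_p_g = mpd(y, y_hat)
#   y_ds_r, y_ds_g, fmap_s_r, fmap_s_g = msd(y, y_hat)
#   loss_g = generator_loss(y_dp_g)[0] + generator_loss(y_ds_g)[0]
#   loss_g = loss_g + feature_loss(fmap_p_r, fmap_p_g) + feature_loss(fmap_s_r, fmap_s_g)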
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/hifigan/models.py |
from .entrypoints import nvidia_hifigan
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/hifigan/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# mel_spectrogram, MelDataset
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
from librosa.util import normalize
from numpy import random
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from common.audio_processing import dynamic_range_compression
from common.utils import load_filepaths_and_text, load_wav
MAX_WAV_VALUE = 32768.0
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size,
fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
fmax_key = f'{fmax}_{y.device}'
if fmax_key not in mel_basis:
mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels,
fmin=fmin, fmax=fmax)
mel_basis[fmax_key] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
pad = int((n_fft-hop_size)/2)
y = F.pad(y.unsqueeze(1), (pad, pad), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size,
window=hann_window[str(y.device)], center=center,
pad_mode='reflect', normalized=False, onesided=True,
return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
spec = torch.matmul(mel_basis[str(fmax)+'_'+str(y.device)], spec)
spec = dynamic_range_compression(spec) # spectral normalize
return spec
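# A minimal usage sketch with the LJSpeech-style parameters used elsewhere in this
# repository (filter_length=1024, hop_length=256, win_length=1024, fmin=0, fmax=8000);
# the path is illustrative.
#
#   audio, sr = load_wav(path)                                      # int16-scale samples
#   audio = torch.FloatTensor(audio / MAX_WAV_VALUE).unsqueeze(0)   # [1, n_samples] in [-1, 1]
#   mel = mel_spectrogram(audio, n_fft=1024, num_mels=80, sampling_rate=sr,
#                         hop_size=256, win_size=1024, fmin=0, fmax=8000)
#   # mel: [1, 80, ~n_samples // 256], log-compressed mel spectrogram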
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True,
device=None, fmax_loss=None, fine_tuning=False,
base_mels_path=None, repeat=1, deterministic=False,
max_wav_value=MAX_WAV_VALUE):
self.audio_files = training_files
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.max_wav_value = max_wav_value
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
self.repeat = repeat
self.deterministic = deterministic
self.rng = random.default_rng()
def __getitem__(self, index):
if index >= len(self):
raise IndexError('Dataset index out of range')
rng = random.default_rng(index) if self.deterministic else self.rng
index = index % len(self.audio_files) # collapse **after** setting seed
filename = self.audio_files[index]
audio, sampling_rate = load_wav(filename)
audio = audio / self.max_wav_value
if not self.fine_tuning:
audio = normalize(audio) * 0.95
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = rng.integers(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path,
os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel).float()
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = rng.integers(0, mel.size(2) - frames_per_seg)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
a = mel_start * self.hop_size
b = (mel_start + frames_per_seg) * self.hop_size
audio = audio[:, a:b]
else:
mel = F.pad(mel, (0, frames_per_seg - mel.size(2)))
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files) * self.repeat
def get_data_loader(args, distributed_run, train=True, batch_size=None,
val_kwargs=None):
filelists = args.training_files if train else args.validation_files
files = load_filepaths_and_text(args.dataset_path, filelists)
files = list(zip(*files))[0]
dataset_kw = {
'segment_size': args.segment_size,
'n_fft': args.filter_length,
'num_mels': args.num_mels,
'hop_size': args.hop_length,
'win_size': args.win_length,
'sampling_rate': args.sampling_rate,
'fmin': args.mel_fmin,
'fmax': args.mel_fmax,
'fmax_loss': args.mel_fmax_loss,
'max_wav_value': args.max_wav_value,
'fine_tuning': args.fine_tuning,
'base_mels_path': args.input_mels_dir,
'deterministic': not train
}
if train:
dataset = MelDataset(files, **dataset_kw)
sampler = DistributedSampler(dataset) if distributed_run else None
else:
dataset_kw.update(val_kwargs or {})
dataset = MelDataset(files, **dataset_kw)
sampler = (DistributedSampler(dataset, shuffle=False)
if distributed_run else None)
loader = DataLoader(dataset,
num_workers=args.num_workers if train else 1,
shuffle=(train and not distributed_run),
sampler=sampler,
batch_size=batch_size or args.batch_size,
pin_memory=True,
persistent_workers=True,
drop_last=train)
return loader
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/hifigan/data_function.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ast import literal_eval
def parse_hifigan_args(parent, add_help=False):
"""Parse model specific commandline arguments."""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
hfg = parser.add_argument_group('HiFi-GAN generator parameters')
hfg.add_argument('--upsample_rates', default=[8, 8, 2, 2],
type=literal_eval_arg,
help='Upsample rates')
hfg.add_argument('--upsample_kernel_sizes', default=[16, 16, 4, 4],
type=literal_eval_arg,
help='Upsample kernel sizes')
hfg.add_argument('--upsample_initial_channel', default=512, type=int,
help='Upsample initial channel')
hfg.add_argument('--resblock', default='1', type=str,
help='Resblock module version')
hfg.add_argument('--resblock_kernel_sizes', default=[11, 7, 3],
type=literal_eval_arg,
help='Resblock kernel sizes')
hfg.add_argument('--resblock_dilation_sizes', type=literal_eval_arg,
default=[[1, 3, 5], [1, 3, 5], [1, 3, 5]],
                     help='Resblock dilation sizes')
hfg = parser.add_argument_group('HiFi-GAN discriminator parameters')
hfg.add_argument('--mpd_periods', default=[2, 3, 5, 7, 11],
type=literal_eval_arg,
help='Periods of MultiPeriodDiscriminator')
hfg.add_argument('--concat_fwd', action='store_true',
help='Faster Discriminators (requires more GPU memory)')
hfg.add_argument('--hifigan-config', type=str, default=None, required=False,
help='Path to a HiFi-GAN config .json'
' (if provided, overrides model architecture flags)')
return parser
def literal_eval_arg(val):
try:
return literal_eval(val)
except SyntaxError as e: # Argparse does not handle SyntaxError
raise ValueError(str(e)) from e
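# literal_eval_arg lets list-valued flags be passed as Python literals on the
# command line, for example:
#
#   --upsample_rates "[8, 8, 2, 2]"
#   --resblock_dilation_sizes "[[1, 3, 5], [1, 3, 5], [1, 3, 5]]"
#
#   literal_eval_arg("[8, 8, 2, 2]")  ->  [8, 8, 2, 2]
#
# A malformed literal raises ValueError so that argparse reports it as an invalid
# argument value instead of crashing on an unhandled SyntaxError.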
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/hifigan/arg_parser.py |
# *****************************************************************************
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import urllib.request
import torch
import os
import sys
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether checkpoint was generated by DistributedDataParallel. DDP
wraps model in additional "module.", it needs to be unwrapped for single
GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps model from DistributedDataParallel.
DDP wraps model in additional "module.", it needs to be removed for single
GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
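# Example of the key rewriting performed above (keys are illustrative):
#
#   {'module.conv_pre.weight': w, 'module.ups.0.bias': b}
#     -> {'conv_pre.weight': w, 'ups.0.bias': b}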
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_hifigan(pretrained=True, **kwargs):
"""TODO
"""
from hifigan import models as vocoder
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
if pretrained:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/hifigan__pyt_ckpt_mode-finetune_ds-ljs22khz/versions/21.08.0_amp/files/hifigan_gen_checkpoint_10000_ft.pt'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
state_dict = ckpt['generator']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
config = ckpt['config']
train_setup = ckpt.get('train_setup', {})
else:
config = {'upsample_rates': [8, 8, 2, 2], 'upsample_kernel_sizes': [16, 16, 4, 4],
'upsample_initial_channel': 512, 'resblock': '1', 'resblock_kernel_sizes': [3, 7, 11],
'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]]}
for k,v in kwargs.items():
if k in config.keys():
config[k] = v
train_setup = {}
hifigan = vocoder.Generator(config)
denoiser = None
if pretrained:
hifigan.load_state_dict(state_dict)
hifigan.remove_weight_norm()
denoiser = vocoder.Denoiser(hifigan, win_length=1024)
if fp16:
hifigan.half()
denoiser.half()
return hifigan, train_setup, denoiser | DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/hifigan/entrypoints.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class Invertible1x1ConvLUS(torch.nn.Module):
def __init__(self, c):
super(Invertible1x1ConvLUS, self).__init__()
# Sample a random orthonormal matrix to initialize weights
W, _ = torch.linalg.qr(torch.randn(c, c))
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1*W[:, 0]
p, lower, upper = torch.lu_unpack(*torch.lu(W))
self.register_buffer('p', p)
# diagonals of lower will always be 1s anyway
lower = torch.tril(lower, -1)
lower_diag = torch.diag(torch.eye(c, c))
self.register_buffer('lower_diag', lower_diag)
self.lower = nn.Parameter(lower)
self.upper_diag = nn.Parameter(torch.diag(upper))
self.upper = nn.Parameter(torch.triu(upper, 1))
def forward(self, z, reverse=False):
U = torch.triu(self.upper, 1) + torch.diag(self.upper_diag)
L = torch.tril(self.lower, -1) + torch.diag(self.lower_diag)
W = torch.mm(self.p, torch.mm(L, U))
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse[..., None]
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
W = W[..., None]
z = F.conv1d(z, W, bias=None, stride=1, padding=0)
log_det_W = torch.sum(torch.log(torch.abs(self.upper_diag)))
return z, log_det_W
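# Note on the decomposition above: the 1x1 convolution weight is kept factored as
# W = P @ L @ U, where P is a fixed permutation, L is lower-triangular with unit
# diagonal, and U is upper-triangular with a learned diagonal. Since |det(P)| = 1
# and det(L) = 1, log|det W| reduces to sum(log|diag(U)|), which is exactly the
# log_det_W returned by the forward pass.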
class ConvAttention(torch.nn.Module):
def __init__(self, n_mel_channels=80, n_speaker_dim=128,
n_text_channels=512, n_att_channels=80, temperature=1.0,
n_mel_convs=2, align_query_enc_type='3xconv',
use_query_proj=True):
super(ConvAttention, self).__init__()
self.temperature = temperature
self.att_scaling_factor = np.sqrt(n_att_channels)
self.softmax = torch.nn.Softmax(dim=3)
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
self.attn_proj = torch.nn.Conv2d(n_att_channels, 1, kernel_size=1)
self.align_query_enc_type = align_query_enc_type
self.use_query_proj = bool(use_query_proj)
self.key_proj = nn.Sequential(
ConvNorm(n_text_channels,
n_text_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_text_channels * 2,
n_att_channels,
kernel_size=1,
bias=True))
self.align_query_enc_type = align_query_enc_type
if align_query_enc_type == "inv_conv":
self.query_proj = Invertible1x1ConvLUS(n_mel_channels)
elif align_query_enc_type == "3xconv":
self.query_proj = nn.Sequential(
ConvNorm(n_mel_channels,
n_mel_channels * 2,
kernel_size=3,
bias=True,
w_init_gain='relu'),
torch.nn.ReLU(),
ConvNorm(n_mel_channels * 2,
n_mel_channels,
kernel_size=1,
bias=True),
torch.nn.ReLU(),
ConvNorm(n_mel_channels,
n_att_channels,
kernel_size=1,
bias=True))
else:
raise ValueError("Unknown query encoder type specified")
def run_padded_sequence(self, sorted_idx, unsort_idx, lens, padded_data,
recurrent_model):
"""Sorts input data by previded ordering (and un-ordering) and runs the
packed data through the recurrent model
Args:
sorted_idx (torch.tensor): 1D sorting index
unsort_idx (torch.tensor): 1D unsorting index (inverse of sorted_idx)
lens: lengths of input data (sorted in descending order)
padded_data (torch.tensor): input sequences (padded)
recurrent_model (nn.Module): recurrent model to run data through
Returns:
hidden_vectors (torch.tensor): outputs of the RNN, in the original,
unsorted, ordering
"""
# sort the data by decreasing length using provided index
# we assume batch index is in dim=1
padded_data = padded_data[:, sorted_idx]
padded_data = nn.utils.rnn.pack_padded_sequence(padded_data, lens)
hidden_vectors = recurrent_model(padded_data)[0]
hidden_vectors, _ = nn.utils.rnn.pad_packed_sequence(hidden_vectors)
# unsort the results at dim=1 and return
hidden_vectors = hidden_vectors[:, unsort_idx]
return hidden_vectors
def encode_query(self, query, query_lens):
query = query.permute(2, 0, 1) # seq_len, batch, feature dim
lens, ids = torch.sort(query_lens, descending=True)
original_ids = [0] * lens.size(0)
for i in range(len(ids)):
original_ids[ids[i]] = i
query_encoded = self.run_padded_sequence(ids, original_ids, lens,
query, self.query_lstm)
query_encoded = query_encoded.permute(1, 2, 0)
return query_encoded
def forward(self, queries, keys, query_lens, mask=None, key_lens=None,
keys_encoded=None, attn_prior=None):
"""Attention mechanism for flowtron parallel
Unlike in Flowtron, we have no restrictions such as causality etc,
since we only need this during training.
Args:
queries (torch.tensor): B x C x T1 tensor
(probably going to be mel data)
keys (torch.tensor): B x C2 x T2 tensor (text data)
query_lens: lengths for sorting the queries in descending order
mask (torch.tensor): uint8 binary mask for variable length entries
(should be in the T2 domain)
Output:
attn (torch.tensor): B x 1 x T1 x T2 attention mask.
Final dim T2 should sum to 1
"""
keys_enc = self.key_proj(keys) # B x n_attn_dims x T2
# Beware can only do this since query_dim = attn_dim = n_mel_channels
if self.use_query_proj:
if self.align_query_enc_type == "inv_conv":
queries_enc, log_det_W = self.query_proj(queries)
elif self.align_query_enc_type == "3xconv":
queries_enc = self.query_proj(queries)
log_det_W = 0.0
else:
queries_enc, log_det_W = self.query_proj(queries)
else:
queries_enc, log_det_W = queries, 0.0
# different ways of computing attn,
        # one is isotropic Gaussians (per phoneme)
        # Simplistic Gaussian Isotropic Attention
# B x n_attn_dims x T1 x T2
attn = (queries_enc[:, :, :, None] - keys_enc[:, :, None]) ** 2
# compute log likelihood from a gaussian
attn = -0.0005 * attn.sum(1, keepdim=True)
if attn_prior is not None:
attn = self.log_softmax(attn) + torch.log(attn_prior[:, None]+1e-8)
attn_logprob = attn.clone()
if mask is not None:
attn.data.masked_fill_(mask.permute(0, 2, 1).unsqueeze(2),
-float("inf"))
attn = self.softmax(attn) # Softmax along T2
return attn, attn_logprob
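# A minimal shape sketch for ConvAttention.forward; sizes and tensors are
# illustrative and follow the docstring above.
#
#   attn_layer = ConvAttention(n_mel_channels=80, n_text_channels=512, n_att_channels=80)
#   queries = torch.randn(B, 80, T1)     # mel frames
#   keys    = torch.randn(B, 512, T2)    # encoded text
#   attn, attn_logprob = attn_layer(queries, keys, query_lens=mel_lens)
#   # attn: [B, 1, T1, T2], softmax-normalized over the text axis (T2)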
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/attention.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Optional
import torch
from torch import nn as nn
from fastpitch.model import TemporalPredictor
from fastpitch.transformer_jit import FFTransformer
def regulate_len(durations, enc_out, pace: float = 1.0,
mel_max_len: Optional[int] = None):
"""If target=None, then predicted durations are applied"""
reps = torch.round(durations.float() / pace).long()
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
bsz, _, hid = enc_out.size()
reps_padded = torch.cat([reps, (max_len - dec_lens)[:, None]], dim=1)
pad_vec = torch.zeros(bsz, 1, hid, dtype=enc_out.dtype,
device=enc_out.device)
enc_rep = torch.cat([enc_out, pad_vec], dim=1)
enc_rep = torch.repeat_interleave(
enc_rep.view(-1, hid), reps_padded.view(-1), dim=0
).view(bsz, -1, hid)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
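# A small worked example of regulate_len (values are illustrative):
#
#   durations = torch.tensor([[2, 1, 3]])    # frames assigned to each input symbol
#   enc_out   = torch.randn(1, 3, 384)       # [batch, text_len, hidden]
#   out, dec_lens = regulate_len(durations, enc_out, pace=1.0)
#   # out: [1, 6, 384]  (symbol 0 repeated twice, symbol 1 once, symbol 2 three times)
#   # dec_lens: tensor([6])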
class FastPitchJIT(nn.Module):
__constants__ = ['energy_conditioning']
def __init__(self, n_mel_channels, n_symbols, padding_idx,
symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads,
in_fft_d_head,
in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size,
in_fft_output_size,
p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb,
out_fft_n_layers, out_fft_n_heads, out_fft_d_head,
out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size,
out_fft_output_size,
p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb,
dur_predictor_kernel_size, dur_predictor_filter_size,
p_dur_predictor_dropout, dur_predictor_n_layers,
pitch_predictor_kernel_size, pitch_predictor_filter_size,
p_pitch_predictor_dropout, pitch_predictor_n_layers,
pitch_embedding_kernel_size,
energy_conditioning,
energy_predictor_kernel_size, energy_predictor_filter_size,
p_energy_predictor_dropout, energy_predictor_n_layers,
energy_embedding_kernel_size,
n_speakers, speaker_emb_weight, pitch_conditioning_formants=1):
super(FastPitchJIT, self).__init__()
self.encoder = FFTransformer(
n_layer=in_fft_n_layers, n_head=in_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=in_fft_d_head,
d_inner=in_fft_conv1d_filter_size,
kernel_size=in_fft_conv1d_kernel_size,
dropout=p_in_fft_dropout,
dropatt=p_in_fft_dropatt,
dropemb=p_in_fft_dropemb,
embed_input=True,
d_embed=symbols_embedding_dim,
n_embed=n_symbols,
padding_idx=padding_idx)
if n_speakers > 1:
self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.speaker_emb_weight = speaker_emb_weight
self.duration_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=dur_predictor_filter_size,
kernel_size=dur_predictor_kernel_size,
dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers
)
self.decoder = FFTransformer(
n_layer=out_fft_n_layers, n_head=out_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=out_fft_d_head,
d_inner=out_fft_conv1d_filter_size,
kernel_size=out_fft_conv1d_kernel_size,
dropout=p_out_fft_dropout,
dropatt=p_out_fft_dropatt,
dropemb=p_out_fft_dropemb,
embed_input=False,
d_embed=symbols_embedding_dim
)
self.pitch_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=pitch_predictor_filter_size,
kernel_size=pitch_predictor_kernel_size,
dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers,
n_predictions=pitch_conditioning_formants
)
self.pitch_emb = nn.Conv1d(
pitch_conditioning_formants, symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2))
# Store values precomputed for training data within the model
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.energy_conditioning = energy_conditioning
if energy_conditioning:
self.energy_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=energy_predictor_filter_size,
kernel_size=energy_predictor_kernel_size,
dropout=p_energy_predictor_dropout,
n_layers=energy_predictor_n_layers,
n_predictions=1
)
self.energy_emb = nn.Conv1d(
1, symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2))
self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True)
# skip self.attention (used only in training)
def infer(self, inputs, pace: float = 1.0,
dur_tgt: Optional[torch.Tensor] = None,
pitch_tgt: Optional[torch.Tensor] = None,
energy_tgt: Optional[torch.Tensor] = None,
speaker: int = 0):
if self.speaker_emb is None:
spk_emb = None
else:
speaker = (torch.ones(inputs.size(0)).long().to(inputs.device)
* speaker)
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, 100.0)
# Pitch over chars
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
if pitch_tgt is None:
pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2)
else:
pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2)
enc_out = enc_out + pitch_emb
# Predict energy
if self.energy_conditioning:
if energy_tgt is None:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2)
else:
energy_pred = None
energy_emb = self.energy_emb(energy_tgt).transpose(1, 2)
enc_out = enc_out + energy_emb
else:
energy_pred = None
len_regulated, dec_lens = regulate_len(
dur_pred if dur_tgt is None else dur_tgt,
enc_out, pace, mel_max_len=None)
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
# mel_lens = dec_mask.squeeze(2).sum(axis=1).long()
mel_out = mel_out.permute(0, 2, 1) # For inference.py
return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/model_jit.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numba import jit, prange
@jit(nopython=True)
def mas(attn_map, width=1):
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_j = np.arange(max(0, j-width), j+1)
prev_log = np.array([log_p[i-1, prev_idx] for prev_idx in prev_j])
ind = np.argmax(prev_log)
log_p[i, j] = attn_map[i, j] + prev_log[ind]
prev_ind[i, j] = prev_j[ind]
# now backtrack
curr_text_idx = attn_map.shape[1]-1
for i in range(attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True)
def mas_width1(attn_map):
"""mas with hardcoded width=1"""
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]): # for each text dim
prev_log = log_p[i-1, j]
prev_j = j
if j-1 >= 0 and log_p[i-1, j-1] >= log_p[i-1, j]:
prev_log = log_p[i-1, j-1]
prev_j = j-1
log_p[i, j] = attn_map[i, j] + prev_log
prev_ind[i, j] = prev_j
# now backtrack
curr_text_idx = attn_map.shape[1]-1
for i in range(attn_map.shape[0]-1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
return opt
@jit(nopython=True, parallel=True)
def b_mas(b_attn_map, in_lens, out_lens, width=1):
assert width == 1
attn_out = np.zeros_like(b_attn_map)
for b in prange(b_attn_map.shape[0]):
out = mas_width1(b_attn_map[b, 0, :out_lens[b], :in_lens[b]])
attn_out[b, 0, :out_lens[b], :in_lens[b]] = out
return attn_out
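# A minimal usage sketch: binarize a soft alignment (e.g. from ConvAttention) into
# hard monotonic alignments and per-symbol durations; shapes are illustrative.
#
#   soft = attn.data.cpu().numpy()                   # [B, 1, max_mel_len, max_text_len]
#   hard = b_mas(soft, in_lens.cpu().numpy(), out_lens.cpu().numpy(), width=1)
#   durations = hard.sum(axis=2)                     # [B, 1, max_text_len] frames per symbol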
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/alignment.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
def pitch_transform_custom(pitch, pitch_lens):
"""Apply a custom pitch transformation to predicted pitch values.
This sample modification linearly increases the pitch throughout
the utterance from 0.5 of predicted pitch to 1.5 of predicted pitch.
In other words, it starts low and ends high.
PARAMS
------
pitch: torch.Tensor (bs, max_len)
Predicted pitch values for each lexical unit, padded to max_len (in Hz).
    pitch_lens: torch.Tensor (bs,)
        Number of lexical units in each utterance.
RETURNS
-------
pitch: torch.Tensor
Modified pitch (in Hz).
"""
weights = torch.arange(pitch.size(1), dtype=torch.float32, device=pitch.device)
# The weights increase linearly from 0.0 to 1.0 in every i-th row
# in the range (0, pitch_lens[i])
weights = weights.unsqueeze(0) / pitch_lens.unsqueeze(1)
# Shift the range from (0.0, 1.0) to (0.5, 1.5)
weights += 0.5
return pitch * weights
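# A small worked example: with four units all predicted at 200 Hz,
#
#   pitch = torch.full((1, 4), 200.0)
#   lens = torch.tensor([4])
#   pitch_transform_custom(pitch, lens)
#   # -> tensor([[100., 150., 200., 250.]])  (weights 0.5, 0.75, 1.0, 1.25)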
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/pitch_transform.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.utils import mask_from_lens
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz: Optional[int] = None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
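# For a position p and channel pair index i, inv_freq[i] = 1 / 10000**(2i / demb);
# the embedding concatenates sin(p * inv_freq) and cos(p * inv_freq), giving a
# [1, seq_len, demb] tensor, optionally expanded over the batch dimension.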
class PositionwiseFF(nn.Module):
def __init__(self, d_model, d_inner, dropout, pre_lnorm=False):
super(PositionwiseFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Linear(d_model, d_inner), nn.ReLU(),
nn.Dropout(dropout),
nn.Linear(d_inner, d_model),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = self.CoreNet(self.layer_norm(inp))
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = self.CoreNet(inp)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.dropout = dropout
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask: Optional[torch.Tensor] = None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=-1)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask, -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
output = residual + attn_out
else:
# residual connection + layer normalization
# XXX Running TorchScript on 20.02 and 20.03 containers crashes here
# XXX Works well with 20.01-py3 container.
# XXX dirty fix is:
# XXX output = self.layer_norm(residual + attn_out).half()
output = self.layer_norm(residual + attn_out)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
**kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, mask):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
output *= mask
output = self.pos_ff(output)
output *= mask
return output
class FFTransformer(nn.Module):
def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,
dropout, dropatt, dropemb=0.0, embed_input=True,
n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False):
super(FFTransformer, self).__init__()
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.padding_idx = padding_idx
self.n_embed = n_embed
self.embed_input = embed_input
if embed_input:
self.word_emb = nn.Embedding(n_embed, d_embed or d_model,
padding_idx=self.padding_idx)
else:
self.word_emb = nn.Identity()
self.pos_emb = PositionalEmbedding(self.d_model)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layer):
self.layers.append(
TransformerLayer(
n_head, d_model, d_head, d_inner, kernel_size, dropout,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
def forward(self, dec_inp, seq_lens: Optional[torch.Tensor] = None,
conditioning: Optional[torch.Tensor] = None):
if not self.embed_input:
inp = dec_inp
assert seq_lens is not None
mask = mask_from_lens(seq_lens).unsqueeze(2)
else:
inp = self.word_emb(dec_inp)
# [bsz x L x 1]
mask = (dec_inp != self.padding_idx).unsqueeze(2)
pos_seq = torch.arange(inp.size(1), device=inp.device, dtype=inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
if conditioning is not None:
out = self.drop(inp + pos_emb + conditioning)
else:
out = self.drop(inp + pos_emb)
for layer in self.layers:
out = layer(out, mask=mask)
# out = self.drop(out)
return out, mask
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/transformer_jit.py |
from .entrypoints import nvidia_fastpitch, nvidia_textprocessing_utils | DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/__init__.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from pathlib import Path
import numpy as np
import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm
from common.text import cmudict
from common.utils import init_distributed, prepare_tmp
from fastpitch.data_function import batch_to_gpu, TTSCollate, TTSDataset
from inference import CHECKPOINT_SPECIFIC_ARGS
from models import load_and_setup_model
def parse_args(parser):
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str, default='./',
help='Path to dataset')
general = parser.add_argument_group('general setup')
general.add_argument('--checkpoint-path', type=str, required=True,
help='Checkpoint path to fastpitch model')
general.add_argument('--resume', action='store_true',
help='Load last checkpoint from training')
general.add_argument('--amp', action='store_true',
help='Enable AMP')
general.add_argument('--cuda', action='store_true',
help='Run on GPU using CUDA')
general.add_argument('--cudnn-benchmark', action='store_true',
help='Enable cudnn benchmark mode')
general.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
data = parser.add_argument_group('dataset parameters')
data.add_argument('--dataset-files', type=str, nargs='*', required=True,
help='Paths to dataset filelists.')
data.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
data.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
data.add_argument('--p-arpabet', type=float, default=0.0,
help='Probability of using arpabets instead of graphemes '
'for each word; set 0 for pure grapheme training')
data.add_argument('--heteronyms-path', type=str, default='data/cmudict/heteronyms',
help='Path to the list of heteronyms')
data.add_argument('--cmudict-path', type=str, default='data/cmudict/cmudict-0.7b',
help='Path to the pronouncing dictionary')
data.add_argument('--prepend-space-to-text', action='store_true',
help='Capture leading silence with a space token')
data.add_argument('--append-space-to-text', action='store_true',
help='Capture trailing silence with a space token')
cond = parser.add_argument_group('data for conditioning')
cond.add_argument('--load-pitch-from-disk', action='store_true',
help='Use pitch cached on disk with prepare_dataset.py')
cond.add_argument('--pitch-online-method', default='pyin', choices=['pyin'],
                      help='Calculate pitch on the fly during training')
cond.add_argument('--pitch-online-dir', type=str, default=None,
help='A directory for storing pitch calculated on-line')
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
dist = parser.add_argument_group('distributed setup')
dist.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0),
help='Rank of the process for multiproc; do not set manually')
dist.add_argument('--world_size', type=int, default=os.getenv('WORLD_SIZE', 1),
help='Number of processes for multiproc; do not set manually')
return parser
def main():
parser = argparse.ArgumentParser(
description='FastPitch spectrogram extraction', allow_abbrev=False)
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
torch.backends.cudnn.benchmark = args.cudnn_benchmark
model, model_config, train_setup = load_and_setup_model(
'FastPitch', parser, args.checkpoint_path, args.amp, unk_args=unk_args,
device=torch.device('cuda' if args.cuda else 'cpu'))
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
# use train_setup loaded from the checkpoint (sampling_rate, symbol_set, etc.)
for k in CHECKPOINT_SPECIFIC_ARGS:
if k in train_setup and getattr(args, k) != train_setup[k]:
v = train_setup[k]
print(f'Overwriting args.{k}={getattr(args, k)} with {v} '
f'from {args.checkpoint_path} checkpoint')
setattr(args, k, v)
if args.p_arpabet > 0.0:
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
distributed_run = args.world_size > 1
if distributed_run:
init_distributed(args, args.world_size, args.local_rank)
model = DDP(model, device_ids=[args.local_rank],
output_device=args.local_rank, find_unused_parameters=True)
if args.local_rank == 0:
Path(args.output).mkdir(exist_ok=True, parents=True)
prepare_tmp(args.pitch_online_dir)
args.n_speakers = model_config['n_speakers']
args.n_mel_channels = model_config['n_mel_channels']
trainset = TTSDataset(audiopaths_and_text=args.dataset_files,
load_mel_from_disk=False, **vars(args))
dataset_loader = DataLoader(
trainset, num_workers=16, shuffle=False, batch_size=args.batch_size,
sampler=(DistributedSampler(trainset) if distributed_run else None),
pin_memory=True, drop_last=False, collate_fn=TTSCollate())
with torch.no_grad():
for batch in tqdm(dataset_loader, 'Extracting mels'):
x, y, num_frames = batch_to_gpu(batch)
_, _, _, mel_lens, *_, audiopaths = x
with torch.cuda.amp.autocast(enabled=args.amp):
mel_out, *_ = model(x, use_gt_pitch=True)
mel_out = mel_out.transpose(1, 2)
assert mel_out.size(1) == args.n_mel_channels, mel_out.shape
for apath, mel, len_ in zip(audiopaths, mel_out, mel_lens):
np.save(Path(args.output, Path(apath).stem + '.npy'),
mel[:, :len_.item()].cpu().numpy())
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/extract_mels.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import functools
import json
import re
from pathlib import Path
import librosa
import numpy as np
import torch
import torch.nn.functional as F
from scipy import ndimage
from scipy.stats import betabinom
import common.layers as layers
from common.text.text_processing import TextProcessing
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class BetaBinomialInterpolator:
"""Interpolates alignment prior matrices to save computation.
Calculating beta-binomial priors is costly. Instead, cache matrices for common
rounded sizes and use image interpolation to obtain priors faster.
"""
def __init__(self, round_mel_len_to=100, round_text_len_to=20):
self.round_mel_len_to = round_mel_len_to
self.round_text_len_to = round_text_len_to
self.bank = functools.lru_cache(beta_binomial_prior_distribution)
def round(self, val, to):
return max(1, int(np.round((val + 1) / to))) * to
def __call__(self, w, h):
bw = self.round(w, to=self.round_mel_len_to)
bh = self.round(h, to=self.round_text_len_to)
ret = ndimage.zoom(self.bank(bw, bh).T, zoom=(w / bw, h / bh), order=1)
assert ret.shape[0] == w, ret.shape
assert ret.shape[1] == h, ret.shape
return ret
def beta_binomial_prior_distribution(phoneme_count, mel_count, scaling=1.0):
P = phoneme_count
M = mel_count
x = np.arange(0, P)
mel_text_probs = []
for i in range(1, M+1):
a, b = scaling * i, scaling * (M + 1 - i)
rv = betabinom(P, a, b)
mel_i_prob = rv.pmf(x)
mel_text_probs.append(mel_i_prob)
return torch.tensor(np.array(mel_text_probs))
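# A minimal usage sketch with hypothetical lengths: the interpolator rounds the
# requested sizes, caches the exact prior for the rounded size (via lru_cache),
# and rescales it with first-order image interpolation.
#
#   interp = BetaBinomialInterpolator()
#   prior = interp(500, 120)    # (mel_len, text_len) -> np.ndarray of shape (500, 120)
#   exact = beta_binomial_prior_distribution(120, 500)  # torch.Tensor of shape (500, 120)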
def estimate_pitch(wav, mel_len, method='pyin', normalize_mean=None,
normalize_std=None, n_formants=1):
if type(normalize_mean) is float or type(normalize_mean) is list:
normalize_mean = torch.tensor(normalize_mean)
if type(normalize_std) is float or type(normalize_std) is list:
normalize_std = torch.tensor(normalize_std)
if method == 'pyin':
snd, sr = librosa.load(wav)
pitch_mel, voiced_flag, voiced_probs = librosa.pyin(
snd, fmin=librosa.note_to_hz('C2'),
fmax=librosa.note_to_hz('C7'), frame_length=1024)
assert np.abs(mel_len - pitch_mel.shape[0]) <= 1.0
pitch_mel = np.where(np.isnan(pitch_mel), 0.0, pitch_mel)
pitch_mel = torch.from_numpy(pitch_mel).unsqueeze(0)
pitch_mel = F.pad(pitch_mel, (0, mel_len - pitch_mel.size(1)))
if n_formants > 1:
raise NotImplementedError
else:
raise ValueError(f'Unknown pitch estimation method: {method}')
pitch_mel = pitch_mel.float()
if normalize_mean is not None:
assert normalize_std is not None
pitch_mel = normalize_pitch(pitch_mel, normalize_mean, normalize_std)
return pitch_mel
def normalize_pitch(pitch, mean, std):
zeros = (pitch == 0.0)
pitch -= mean[:, None]
pitch /= std[:, None]
pitch[zeros] = 0.0
return pitch
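# A small worked example with hypothetical values: unvoiced frames (pitch == 0)
# stay exactly zero after mean/std normalization (note the in-place update).
#
#   pitch = torch.tensor([[0.0, 220.0, 230.0]])   # (n_formants=1, n_frames=3)
#   mean, std = torch.tensor([214.7]), torch.tensor([65.7])
#   normalize_pitch(pitch.clone(), mean, std)
#   # -> tensor([[0.0000, 0.0807, 0.2329]])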
class TTSDataset(torch.utils.data.Dataset):
"""
1) loads audio, text pairs
2) normalizes text and converts it to a sequence of symbol IDs
3) computes mel-spectrograms from audio files.
"""
def __init__(self,
dataset_path,
audiopaths_and_text,
text_cleaners,
n_mel_channels,
symbol_set='english_basic',
p_arpabet=1.0,
n_speakers=1,
load_mel_from_disk=True,
load_pitch_from_disk=True,
pitch_mean=214.72203, # LJSpeech defaults
pitch_std=65.72038,
max_wav_value=None,
sampling_rate=None,
filter_length=None,
hop_length=None,
win_length=None,
mel_fmin=None,
mel_fmax=None,
prepend_space_to_text=False,
append_space_to_text=False,
pitch_online_dir=None,
betabinomial_online_dir=None,
use_betabinomial_interpolator=True,
pitch_online_method='pyin',
**ignored):
# Expect a list of filenames
if type(audiopaths_and_text) is str:
audiopaths_and_text = [audiopaths_and_text]
self.dataset_path = dataset_path
self.audiopaths_and_text = load_filepaths_and_text(
dataset_path, audiopaths_and_text,
has_speakers=(n_speakers > 1))
self.load_mel_from_disk = load_mel_from_disk
if not load_mel_from_disk:
self.max_wav_value = max_wav_value
self.sampling_rate = sampling_rate
self.stft = layers.TacotronSTFT(
filter_length, hop_length, win_length,
n_mel_channels, sampling_rate, mel_fmin, mel_fmax)
self.load_pitch_from_disk = load_pitch_from_disk
self.prepend_space_to_text = prepend_space_to_text
self.append_space_to_text = append_space_to_text
assert p_arpabet == 0.0 or p_arpabet == 1.0, (
'Only 0.0 and 1.0 p_arpabet is currently supported. '
'Variable probability breaks caching of betabinomial matrices.')
self.tp = TextProcessing(symbol_set, text_cleaners, p_arpabet=p_arpabet)
self.n_speakers = n_speakers
self.pitch_tmp_dir = pitch_online_dir
self.f0_method = pitch_online_method
self.betabinomial_tmp_dir = betabinomial_online_dir
self.use_betabinomial_interpolator = use_betabinomial_interpolator
if use_betabinomial_interpolator:
self.betabinomial_interpolator = BetaBinomialInterpolator()
expected_columns = (2 + int(load_pitch_from_disk) + (n_speakers > 1))
assert not (load_pitch_from_disk and self.pitch_tmp_dir is not None)
if len(self.audiopaths_and_text[0]) < expected_columns:
raise ValueError(f'Expected {expected_columns} columns in audiopaths file. '
'The format is <mel_or_wav>|[<pitch>|]<text>[|<speaker_id>]')
if len(self.audiopaths_and_text[0]) > expected_columns:
print('WARNING: Audiopaths file has more columns than expected')
to_tensor = lambda x: torch.Tensor([x]) if type(x) is float else x
self.pitch_mean = to_tensor(pitch_mean)
self.pitch_std = to_tensor(pitch_std)
def __getitem__(self, index):
# Separate filename and text
if self.n_speakers > 1:
audiopath, *extra, text, speaker = self.audiopaths_and_text[index]
speaker = int(speaker)
else:
audiopath, *extra, text = self.audiopaths_and_text[index]
speaker = None
pitch = None
mel = self.get_mel(audiopath)
text = self.get_text(text)
energy = torch.norm(mel.float(), dim=0, p=2)
attn_prior = self.get_prior(index, mel.shape[1], text.shape[0])
if self.f0_method is not None:
pitch = self.get_pitch(index, mel.size(-1))
assert pitch.size(-1) == mel.size(-1)
# No higher formants?
if len(pitch.size()) == 1:
pitch = pitch[None, :]
return (text, mel, len(text), pitch, energy, speaker, attn_prior,
audiopath)
def __len__(self):
return len(self.audiopaths_and_text)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm,
requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.load(filename)
# assert melspec.size(0) == self.stft.n_mel_channels, (
# 'Mel dimension mismatch: given {}, expected {}'.format(
# melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text = self.tp.encode_text(text)
space = [self.tp.encode_text("A A")[1]]
if self.prepend_space_to_text:
text = space + text
if self.append_space_to_text:
text = text + space
return torch.LongTensor(text)
def get_prior(self, index, mel_len, text_len):
if self.use_betabinomial_interpolator:
return torch.from_numpy(self.betabinomial_interpolator(mel_len,
text_len))
if self.betabinomial_tmp_dir is not None:
audiopath, *_ = self.audiopaths_and_text[index]
fname = Path(audiopath).relative_to(self.dataset_path)
fname = fname.with_suffix('.pt')
cached_fpath = Path(self.betabinomial_tmp_dir, fname)
if cached_fpath.is_file():
return torch.load(cached_fpath)
attn_prior = beta_binomial_prior_distribution(text_len, mel_len)
if self.betabinomial_tmp_dir is not None:
cached_fpath.parent.mkdir(parents=True, exist_ok=True)
torch.save(attn_prior, cached_fpath)
return attn_prior
def get_pitch(self, index, mel_len=None):
audiopath, *fields = self.audiopaths_and_text[index]
if self.n_speakers > 1:
spk = int(fields[-1])
else:
spk = 0
if self.load_pitch_from_disk:
pitchpath = fields[0]
pitch = torch.load(pitchpath)
if self.pitch_mean is not None:
assert self.pitch_std is not None
pitch = normalize_pitch(pitch, self.pitch_mean, self.pitch_std)
return pitch
if self.pitch_tmp_dir is not None:
fname = Path(audiopath).relative_to(self.dataset_path)
fname_method = fname.with_suffix('.pt')
cached_fpath = Path(self.pitch_tmp_dir, fname_method)
if cached_fpath.is_file():
return torch.load(cached_fpath)
# No luck so far - calculate
wav = audiopath
if not wav.endswith('.wav'):
wav = re.sub('/mels/', '/wavs/', wav)
wav = re.sub('.pt$', '.wav', wav)
pitch_mel = estimate_pitch(wav, mel_len, self.f0_method,
self.pitch_mean, self.pitch_std)
if self.pitch_tmp_dir is not None and not cached_fpath.is_file():
cached_fpath.parent.mkdir(parents=True, exist_ok=True)
torch.save(pitch_mel, cached_fpath)
return pitch_mel
class TTSCollate:
"""Zero-pads model inputs and targets based on number of frames per step"""
def __call__(self, batch):
"""Collate training batch from normalized text and mel-spec"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
# Include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
output_lengths[i] = mel.size(1)
has_pitch = batch[0][3] is not None
if has_pitch:
n_formants = batch[0][3].shape[0]
pitch_padded = torch.zeros(mel_padded.size(0), n_formants,
mel_padded.size(2), dtype=batch[0][3].dtype)
else:
pitch_padded = None
energy_padded = torch.zeros(mel_padded.size(0),
mel_padded.size(2), dtype=mel_padded.dtype)
for i in range(len(ids_sorted_decreasing)):
if has_pitch:
pitch = batch[ids_sorted_decreasing[i]][3]
pitch_padded[i, :, :pitch.shape[1]] = pitch
energy = batch[ids_sorted_decreasing[i]][4]
energy_padded[i, :energy.shape[0]] = energy
if batch[0][5] is not None:
speaker = torch.zeros_like(input_lengths)
for i in range(len(ids_sorted_decreasing)):
speaker[i] = batch[ids_sorted_decreasing[i]][5]
else:
speaker = None
attn_prior_padded = torch.zeros(len(batch), max_target_len,
max_input_len)
attn_prior_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
prior = batch[ids_sorted_decreasing[i]][6]
attn_prior_padded[i, :prior.size(0), :prior.size(1)] = prior
# Count number of items - characters in text
len_x = [x[2] for x in batch]
len_x = torch.Tensor(len_x)
audiopaths = [batch[i][7] for i in ids_sorted_decreasing]
return (text_padded, input_lengths, mel_padded, output_lengths, len_x,
pitch_padded, energy_padded, speaker, attn_prior_padded,
audiopaths)
def batch_to_gpu(batch):
(text_padded, input_lengths, mel_padded, output_lengths, len_x,
pitch_padded, energy_padded, speaker, attn_prior, audiopaths) = batch
text_padded = to_gpu(text_padded).long()
input_lengths = to_gpu(input_lengths).long()
mel_padded = to_gpu(mel_padded).float()
output_lengths = to_gpu(output_lengths).long()
if pitch_padded is not None:
pitch_padded = to_gpu(pitch_padded).float()
energy_padded = to_gpu(energy_padded).float()
attn_prior = to_gpu(attn_prior).float()
if speaker is not None:
speaker = to_gpu(speaker).long()
# Alignments act as both inputs and targets - pass shallow copies
x = [text_padded, input_lengths, mel_padded, output_lengths,
pitch_padded, energy_padded, speaker, attn_prior, audiopaths]
y = [mel_padded, input_lengths, output_lengths]
len_x = torch.sum(output_lengths)
return (x, y, len_x)
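# For reference, the returned `x` is ordered as
#   [text, text_lens, mels, mel_lens, pitch, energy, speaker, attn_prior, audiopaths],
# which is what FastPitch.forward unpacks and what extract_mels.py relies on
# when it does `_, _, _, mel_lens, *_, audiopaths = x`.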
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/data_function.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.layers import ConvReLUNorm
from common.utils import mask_from_lens
from fastpitch.alignment import b_mas, mas_width1
from fastpitch.attention import ConvAttention
from fastpitch.transformer import FFTransformer
def regulate_len(durations, enc_out, pace: float = 1.0,
mel_max_len: Optional[int] = None):
"""If target=None, then predicted durations are applied"""
dtype = enc_out.dtype
reps = durations.float() / pace
reps = (reps + 0.5).long()
dec_lens = reps.sum(dim=1)
max_len = dec_lens.max()
reps_cumsum = torch.cumsum(F.pad(reps, (1, 0, 0, 0), value=0.0),
dim=1)[:, None, :]
reps_cumsum = reps_cumsum.to(dtype)
range_ = torch.arange(max_len).to(enc_out.device)[None, :, None]
mult = ((reps_cumsum[:, :, :-1] <= range_) &
(reps_cumsum[:, :, 1:] > range_))
mult = mult.to(dtype)
enc_rep = torch.matmul(mult, enc_out)
if mel_max_len is not None:
enc_rep = enc_rep[:, :mel_max_len]
dec_lens = torch.clamp_max(dec_lens, mel_max_len)
return enc_rep, dec_lens
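# A small worked example with hypothetical durations: each encoder frame is
# repeated round(duration / pace) times along the time axis.
#
#   durs = torch.tensor([[2, 1, 3]])        # (B=1, T_enc=3)
#   enc = torch.randn(1, 3, 384)            # (B, T_enc, d_model)
#   rep, lens = regulate_len(durs, enc)     # rep: (1, 6, 384), lens: tensor([6])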
def average_pitch(pitch, durs):
durs_cums_ends = torch.cumsum(durs, dim=1).long()
durs_cums_starts = F.pad(durs_cums_ends[:, :-1], (1, 0))
pitch_nonzero_cums = F.pad(torch.cumsum(pitch != 0.0, dim=2), (1, 0))
pitch_cums = F.pad(torch.cumsum(pitch, dim=2), (1, 0))
bs, l = durs_cums_ends.size()
n_formants = pitch.size(1)
dcs = durs_cums_starts[:, None, :].expand(bs, n_formants, l)
dce = durs_cums_ends[:, None, :].expand(bs, n_formants, l)
pitch_sums = (torch.gather(pitch_cums, 2, dce)
- torch.gather(pitch_cums, 2, dcs)).float()
pitch_nelems = (torch.gather(pitch_nonzero_cums, 2, dce)
- torch.gather(pitch_nonzero_cums, 2, dcs)).float()
pitch_avg = torch.where(pitch_nelems == 0.0, pitch_nelems,
pitch_sums / pitch_nelems)
return pitch_avg
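# A small worked example with hypothetical values: pitch is averaged per symbol
# over its duration, counting only voiced (non-zero) frames.
#
#   pitch = torch.tensor([[[100., 0., 300.]]])   # (B=1, n_formants=1, T_mel=3)
#   durs = torch.tensor([[2, 1]])                # symbol durations in frames
#   average_pitch(pitch, durs)                   # -> tensor([[[100., 300.]]])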
class TemporalPredictor(nn.Module):
"""Predicts a single float per each temporal location"""
def __init__(self, input_size, filter_size, kernel_size, dropout,
n_layers=2, n_predictions=1):
super(TemporalPredictor, self).__init__()
self.layers = nn.Sequential(*[
ConvReLUNorm(input_size if i == 0 else filter_size, filter_size,
kernel_size=kernel_size, dropout=dropout)
for i in range(n_layers)]
)
self.n_predictions = n_predictions
self.fc = nn.Linear(filter_size, self.n_predictions, bias=True)
def forward(self, enc_out, enc_out_mask):
out = enc_out * enc_out_mask
out = self.layers(out.transpose(1, 2)).transpose(1, 2)
out = self.fc(out) * enc_out_mask
return out
class FastPitch(nn.Module):
def __init__(self, n_mel_channels, n_symbols, padding_idx,
symbols_embedding_dim, in_fft_n_layers, in_fft_n_heads,
in_fft_d_head,
in_fft_conv1d_kernel_size, in_fft_conv1d_filter_size,
in_fft_output_size,
p_in_fft_dropout, p_in_fft_dropatt, p_in_fft_dropemb,
out_fft_n_layers, out_fft_n_heads, out_fft_d_head,
out_fft_conv1d_kernel_size, out_fft_conv1d_filter_size,
out_fft_output_size,
p_out_fft_dropout, p_out_fft_dropatt, p_out_fft_dropemb,
dur_predictor_kernel_size, dur_predictor_filter_size,
p_dur_predictor_dropout, dur_predictor_n_layers,
pitch_predictor_kernel_size, pitch_predictor_filter_size,
p_pitch_predictor_dropout, pitch_predictor_n_layers,
pitch_embedding_kernel_size,
energy_conditioning,
energy_predictor_kernel_size, energy_predictor_filter_size,
p_energy_predictor_dropout, energy_predictor_n_layers,
energy_embedding_kernel_size,
n_speakers, speaker_emb_weight, pitch_conditioning_formants=1):
super(FastPitch, self).__init__()
self.encoder = FFTransformer(
n_layer=in_fft_n_layers, n_head=in_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=in_fft_d_head,
d_inner=in_fft_conv1d_filter_size,
kernel_size=in_fft_conv1d_kernel_size,
dropout=p_in_fft_dropout,
dropatt=p_in_fft_dropatt,
dropemb=p_in_fft_dropemb,
embed_input=True,
d_embed=symbols_embedding_dim,
n_embed=n_symbols,
padding_idx=padding_idx)
if n_speakers > 1:
self.speaker_emb = nn.Embedding(n_speakers, symbols_embedding_dim)
else:
self.speaker_emb = None
self.speaker_emb_weight = speaker_emb_weight
self.duration_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=dur_predictor_filter_size,
kernel_size=dur_predictor_kernel_size,
dropout=p_dur_predictor_dropout, n_layers=dur_predictor_n_layers
)
self.decoder = FFTransformer(
n_layer=out_fft_n_layers, n_head=out_fft_n_heads,
d_model=symbols_embedding_dim,
d_head=out_fft_d_head,
d_inner=out_fft_conv1d_filter_size,
kernel_size=out_fft_conv1d_kernel_size,
dropout=p_out_fft_dropout,
dropatt=p_out_fft_dropatt,
dropemb=p_out_fft_dropemb,
embed_input=False,
d_embed=symbols_embedding_dim
)
self.pitch_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=pitch_predictor_filter_size,
kernel_size=pitch_predictor_kernel_size,
dropout=p_pitch_predictor_dropout, n_layers=pitch_predictor_n_layers,
n_predictions=pitch_conditioning_formants
)
self.pitch_emb = nn.Conv1d(
pitch_conditioning_formants, symbols_embedding_dim,
kernel_size=pitch_embedding_kernel_size,
padding=int((pitch_embedding_kernel_size - 1) / 2))
# Store values precomputed for training data within the model
self.register_buffer('pitch_mean', torch.zeros(1))
self.register_buffer('pitch_std', torch.zeros(1))
self.energy_conditioning = energy_conditioning
if energy_conditioning:
self.energy_predictor = TemporalPredictor(
in_fft_output_size,
filter_size=energy_predictor_filter_size,
kernel_size=energy_predictor_kernel_size,
dropout=p_energy_predictor_dropout,
n_layers=energy_predictor_n_layers,
n_predictions=1
)
self.energy_emb = nn.Conv1d(
1, symbols_embedding_dim,
kernel_size=energy_embedding_kernel_size,
padding=int((energy_embedding_kernel_size - 1) / 2))
self.proj = nn.Linear(out_fft_output_size, n_mel_channels, bias=True)
self.attention = ConvAttention(
n_mel_channels, 0, symbols_embedding_dim,
use_query_proj=True, align_query_enc_type='3xconv')
def binarize_attention(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
b_size = attn.shape[0]
with torch.no_grad():
attn_cpu = attn.data.cpu().numpy()
attn_out = torch.zeros_like(attn)
for ind in range(b_size):
hard_attn = mas_width1(
attn_cpu[ind, 0, :out_lens[ind], :in_lens[ind]])
attn_out[ind, 0, :out_lens[ind], :in_lens[ind]] = torch.tensor(
hard_attn, device=attn.get_device())
return attn_out
def binarize_attention_parallel(self, attn, in_lens, out_lens):
"""For training purposes only. Binarizes attention with MAS.
These will no longer receive a gradient.
Args:
attn: B x 1 x max_mel_len x max_text_len
"""
with torch.no_grad():
attn_cpu = attn.data.cpu().numpy()
attn_out = b_mas(attn_cpu, in_lens.cpu().numpy(),
out_lens.cpu().numpy(), width=1)
return torch.from_numpy(attn_out).to(attn.get_device())
def forward(self, inputs, use_gt_pitch=True, pace=1.0, max_duration=75):
(inputs, input_lens, mel_tgt, mel_lens, pitch_dense, energy_dense,
speaker, attn_prior, audiopaths) = inputs
mel_max_len = mel_tgt.size(2)
# Calculate speaker embedding
if self.speaker_emb is None:
spk_emb = 0
else:
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Alignment
text_emb = self.encoder.word_emb(inputs)
# make sure to do the alignments before folding
attn_mask = mask_from_lens(input_lens)[..., None] == 0
# attn_mask should be 1 for unused timesteps in the text_enc_w_spkvec tensor
attn_soft, attn_logprob = self.attention(
mel_tgt, text_emb.permute(0, 2, 1), mel_lens, attn_mask,
key_lens=input_lens, keys_encoded=enc_out, attn_prior=attn_prior)
attn_hard = self.binarize_attention_parallel(
attn_soft, input_lens, mel_lens)
# Viterbi --> durations
attn_hard_dur = attn_hard.sum(2)[:, 0, :]
dur_tgt = attn_hard_dur
assert torch.all(torch.eq(dur_tgt.sum(dim=1), mel_lens))
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration)
# Predict pitch
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
# Average pitch over characters
pitch_tgt = average_pitch(pitch_dense, dur_tgt)
if use_gt_pitch and pitch_tgt is not None:
pitch_emb = self.pitch_emb(pitch_tgt)
else:
pitch_emb = self.pitch_emb(pitch_pred)
enc_out = enc_out + pitch_emb.transpose(1, 2)
# Predict energy
if self.energy_conditioning:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
# Average energy over characters
energy_tgt = average_pitch(energy_dense.unsqueeze(1), dur_tgt)
energy_tgt = torch.log(1.0 + energy_tgt)
energy_emb = self.energy_emb(energy_tgt)
energy_tgt = energy_tgt.squeeze(1)
enc_out = enc_out + energy_emb.transpose(1, 2)
else:
energy_pred = None
energy_tgt = None
len_regulated, dec_lens = regulate_len(
dur_tgt, enc_out, pace, mel_max_len)
# Output FFT
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
return (mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred,
pitch_tgt, energy_pred, energy_tgt, attn_soft, attn_hard,
attn_hard_dur, attn_logprob)
def infer(self, inputs, pace=1.0, dur_tgt=None, pitch_tgt=None,
energy_tgt=None, pitch_transform=None, max_duration=75,
speaker=0):
if self.speaker_emb is None:
spk_emb = 0
else:
speaker = (torch.ones(inputs.size(0)).long().to(inputs.device)
* speaker)
spk_emb = self.speaker_emb(speaker).unsqueeze(1)
spk_emb.mul_(self.speaker_emb_weight)
# Input FFT
enc_out, enc_mask = self.encoder(inputs, conditioning=spk_emb)
# Predict durations
log_dur_pred = self.duration_predictor(enc_out, enc_mask).squeeze(-1)
dur_pred = torch.clamp(torch.exp(log_dur_pred) - 1, 0, max_duration)
# Pitch over chars
pitch_pred = self.pitch_predictor(enc_out, enc_mask).permute(0, 2, 1)
if pitch_transform is not None:
if self.pitch_std[0] == 0.0:
# XXX LJSpeech-1.1 defaults
mean, std = 218.14, 67.24
else:
mean, std = self.pitch_mean[0], self.pitch_std[0]
pitch_pred = pitch_transform(pitch_pred, enc_mask.sum(dim=(1,2)),
mean, std)
if pitch_tgt is None:
pitch_emb = self.pitch_emb(pitch_pred).transpose(1, 2)
else:
pitch_emb = self.pitch_emb(pitch_tgt).transpose(1, 2)
enc_out = enc_out + pitch_emb
# Predict energy
if self.energy_conditioning:
if energy_tgt is None:
energy_pred = self.energy_predictor(enc_out, enc_mask).squeeze(-1)
energy_emb = self.energy_emb(energy_pred.unsqueeze(1)).transpose(1, 2)
else:
energy_emb = self.energy_emb(energy_tgt).transpose(1, 2)
enc_out = enc_out + energy_emb
else:
energy_pred = None
len_regulated, dec_lens = regulate_len(
dur_pred if dur_tgt is None else dur_tgt,
enc_out, pace, mel_max_len=None)
dec_out, dec_mask = self.decoder(len_regulated, dec_lens)
mel_out = self.proj(dec_out)
# mel_lens = dec_mask.squeeze(2).sum(axis=1).long()
mel_out = mel_out.permute(0, 2, 1) # For inference.py
return mel_out, dec_lens, dur_pred, pitch_pred, energy_pred
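# A minimal inference sketch (hypothetical `config` dict and `token_ids`; see
# fastpitch/entrypoints.py for a full config and common/text for tokenization):
#
#   model = FastPitch(**config).eval()
#   with torch.no_grad():
#       mel, mel_lens, dur, pitch, energy = model.infer(token_ids)
#   # mel: (B, n_mel_channels, T_mel), ready for a vocoder such as HiFi-GAN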
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/model.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from torch import nn
from common.utils import mask_from_lens
from fastpitch.attn_loss_function import AttentionCTCLoss
class FastPitchLoss(nn.Module):
def __init__(self, dur_predictor_loss_scale=1.0,
pitch_predictor_loss_scale=1.0, attn_loss_scale=1.0,
energy_predictor_loss_scale=0.1):
super(FastPitchLoss, self).__init__()
self.dur_predictor_loss_scale = dur_predictor_loss_scale
self.pitch_predictor_loss_scale = pitch_predictor_loss_scale
self.energy_predictor_loss_scale = energy_predictor_loss_scale
self.attn_loss_scale = attn_loss_scale
self.attn_ctc_loss = AttentionCTCLoss()
def forward(self, model_out, targets, is_training=True, meta_agg='mean'):
(mel_out, dec_mask, dur_pred, log_dur_pred, pitch_pred, pitch_tgt,
energy_pred, energy_tgt, attn_soft, attn_hard, attn_dur,
attn_logprob) = model_out
(mel_tgt, in_lens, out_lens) = targets
dur_tgt = attn_dur
dur_lens = in_lens
mel_tgt.requires_grad = False
# (B,H,T) => (B,T,H)
mel_tgt = mel_tgt.transpose(1, 2)
dur_mask = mask_from_lens(dur_lens, max_len=dur_tgt.size(1))
log_dur_tgt = torch.log(dur_tgt.float() + 1)
loss_fn = F.mse_loss
dur_pred_loss = loss_fn(log_dur_pred, log_dur_tgt, reduction='none')
dur_pred_loss = (dur_pred_loss * dur_mask).sum() / dur_mask.sum()
ldiff = mel_tgt.size(1) - mel_out.size(1)
mel_out = F.pad(mel_out, (0, 0, 0, ldiff, 0, 0), value=0.0)
mel_mask = mel_tgt.ne(0).float()
loss_fn = F.mse_loss
mel_loss = loss_fn(mel_out, mel_tgt, reduction='none')
mel_loss = (mel_loss * mel_mask).sum() / mel_mask.sum()
ldiff = pitch_tgt.size(2) - pitch_pred.size(2)
pitch_pred = F.pad(pitch_pred, (0, ldiff, 0, 0, 0, 0), value=0.0)
pitch_loss = F.mse_loss(pitch_tgt, pitch_pred, reduction='none')
pitch_loss = (pitch_loss * dur_mask.unsqueeze(1)).sum() / dur_mask.sum()
if energy_pred is not None:
energy_pred = F.pad(energy_pred, (0, ldiff, 0, 0), value=0.0)
energy_loss = F.mse_loss(energy_tgt, energy_pred, reduction='none')
energy_loss = (energy_loss * dur_mask).sum() / dur_mask.sum()
else:
energy_loss = 0
# Attention loss
attn_loss = self.attn_ctc_loss(attn_logprob, in_lens, out_lens)
loss = (mel_loss
+ dur_pred_loss * self.dur_predictor_loss_scale
+ pitch_loss * self.pitch_predictor_loss_scale
+ energy_loss * self.energy_predictor_loss_scale
+ attn_loss * self.attn_loss_scale)
meta = {
'loss': loss.clone().detach(),
'mel_loss': mel_loss.clone().detach(),
'duration_predictor_loss': dur_pred_loss.clone().detach(),
'pitch_loss': pitch_loss.clone().detach(),
'attn_loss': attn_loss.clone().detach(),
'dur_error': (torch.abs(dur_pred - dur_tgt).sum()
/ dur_mask.sum()).detach(),
}
if energy_pred is not None:
meta['energy_loss'] = energy_loss.clone().detach()
assert meta_agg in ('sum', 'mean')
if meta_agg == 'sum':
bsz = mel_out.size(0)
meta = {k: v * bsz for k, v in meta.items()}
return loss, meta
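# A usage sketch (hypothetical loss scales; `model_out` is the tuple returned by
# FastPitch.forward and the targets come from batch_to_gpu):
#
#   criterion = FastPitchLoss(dur_predictor_loss_scale=0.1,
#                             pitch_predictor_loss_scale=0.1, attn_loss_scale=1.0)
#   loss, meta = criterion(model_out, (mel_tgt, in_lens, out_lens))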
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/loss_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def parse_fastpitch_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
io = parser.add_argument_group('io parameters')
io.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
io.add_argument('--max-seq-len', default=2048, type=int,
help='')
symbols = parser.add_argument_group('symbols parameters')
symbols.add_argument('--n-symbols', default=148, type=int,
help='Number of symbols in dictionary')
symbols.add_argument('--padding-idx', default=0, type=int,
help='Index of padding symbol in dictionary')
symbols.add_argument('--symbols-embedding-dim', default=384, type=int,
help='Input embedding dimension')
in_fft = parser.add_argument_group('input FFT parameters')
in_fft.add_argument('--in-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
in_fft.add_argument('--in-fft-n-heads', default=1, type=int,
help='Number of attention heads')
in_fft.add_argument('--in-fft-d-head', default=64, type=int,
help='Dim of attention heads')
in_fft.add_argument('--in-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
in_fft.add_argument('--in-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
in_fft.add_argument('--in-fft-output-size', default=384, type=int,
help='Output dim')
in_fft.add_argument('--p-in-fft-dropout', default=0.1, type=float,
help='Dropout probability')
in_fft.add_argument('--p-in-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
in_fft.add_argument('--p-in-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
out_fft = parser.add_argument_group('output FFT parameters')
out_fft.add_argument('--out-fft-n-layers', default=6, type=int,
help='Number of FFT blocks')
out_fft.add_argument('--out-fft-n-heads', default=1, type=int,
help='Number of attention heads')
out_fft.add_argument('--out-fft-d-head', default=64, type=int,
help='Dim of attention head')
out_fft.add_argument('--out-fft-conv1d-kernel-size', default=3, type=int,
help='Conv-1D kernel size')
out_fft.add_argument('--out-fft-conv1d-filter-size', default=1536, type=int,
help='Conv-1D filter size')
out_fft.add_argument('--out-fft-output-size', default=384, type=int,
help='Output dim')
out_fft.add_argument('--p-out-fft-dropout', default=0.1, type=float,
help='Dropout probability for out_fft')
out_fft.add_argument('--p-out-fft-dropatt', default=0.1, type=float,
help='Multi-head attention dropout')
out_fft.add_argument('--p-out-fft-dropemb', default=0.0, type=float,
help='Dropout added to word+positional embeddings')
dur_pred = parser.add_argument_group('duration predictor parameters')
dur_pred.add_argument('--dur-predictor-kernel-size', default=3, type=int,
help='Duration predictor conv-1D kernel size')
dur_pred.add_argument('--dur-predictor-filter-size', default=256, type=int,
help='Duration predictor conv-1D filter size')
dur_pred.add_argument('--p-dur-predictor-dropout', default=0.1, type=float,
help='Dropout probability for duration predictor')
dur_pred.add_argument('--dur-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
pitch_pred = parser.add_argument_group('pitch predictor parameters')
pitch_pred.add_argument('--pitch-predictor-kernel-size', default=3, type=int,
help='Pitch predictor conv-1D kernel size')
pitch_pred.add_argument('--pitch-predictor-filter-size', default=256, type=int,
help='Pitch predictor conv-1D filter size')
pitch_pred.add_argument('--p-pitch-predictor-dropout', default=0.1, type=float,
help='Dropout probability for pitch predictor')
pitch_pred.add_argument('--pitch-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
energy_pred = parser.add_argument_group('energy predictor parameters')
energy_pred.add_argument('--energy-conditioning', action='store_true')
energy_pred.add_argument('--energy-predictor-kernel-size', default=3, type=int,
help='Energy predictor conv-1D kernel size')
energy_pred.add_argument('--energy-predictor-filter-size', default=256, type=int,
help='Energy predictor conv-1D filter size')
energy_pred.add_argument('--p-energy-predictor-dropout', default=0.1, type=float,
help='Dropout probability for energy predictor')
energy_pred.add_argument('--energy-predictor-n-layers', default=2, type=int,
help='Number of conv-1D layers')
cond = parser.add_argument_group('conditioning parameters')
cond.add_argument('--pitch-embedding-kernel-size', default=3, type=int,
help='Pitch embedding conv-1D kernel size')
cond.add_argument('--energy-embedding-kernel-size', default=3, type=int,
help='Energy embedding conv-1D kernel size')
cond.add_argument('--speaker-emb-weight', type=float, default=1.0,
help='Scale speaker embedding')
return parser
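# A usage sketch: the function extends a parent parser, so model flags can be
# combined with the caller's own arguments (all model flags have defaults).
#
#   parent = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
#   args = parse_fastpitch_args(parent).parse_args(['--energy-conditioning'])
#   assert args.n_mel_channels == 80 and args.energy_conditioning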
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/arg_parser.py |
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from common.utils import mask_from_lens
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.matmul(torch.unsqueeze(pos_seq, -1),
torch.unsqueeze(self.inv_freq, 0))
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1)
if bsz is not None:
return pos_emb[None, :, :].expand(bsz, -1, -1)
else:
return pos_emb[None, :, :]
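# For example, for a length-50 sequence:
#   pos_emb = PositionalEmbedding(384)(torch.arange(50.0))   # shape (1, 50, 384)
# with sines in the first half of the last dimension and cosines in the second half.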
class PositionwiseConvFF(nn.Module):
def __init__(self, d_model, d_inner, kernel_size, dropout, pre_lnorm=False):
super(PositionwiseConvFF, self).__init__()
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.CoreNet = nn.Sequential(
nn.Conv1d(d_model, d_inner, kernel_size, 1, (kernel_size // 2)),
nn.ReLU(),
# nn.Dropout(dropout), # worse convergence
nn.Conv1d(d_inner, d_model, kernel_size, 1, (kernel_size // 2)),
nn.Dropout(dropout),
)
self.layer_norm = nn.LayerNorm(d_model)
self.pre_lnorm = pre_lnorm
def forward(self, inp):
return self._forward(inp)
def _forward(self, inp):
if self.pre_lnorm:
# layer normalization + positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(self.layer_norm(core_out).to(inp.dtype))
core_out = core_out.transpose(1, 2)
# residual connection
output = core_out + inp
else:
# positionwise feed-forward
core_out = inp.transpose(1, 2)
core_out = self.CoreNet(core_out)
core_out = core_out.transpose(1, 2)
# residual connection + layer normalization
output = self.layer_norm(inp + core_out).to(inp.dtype)
return output
class MultiHeadAttn(nn.Module):
def __init__(self, n_head, d_model, d_head, dropout, dropatt=0.1,
pre_lnorm=False):
super(MultiHeadAttn, self).__init__()
self.n_head = n_head
self.d_model = d_model
self.d_head = d_head
self.scale = 1 / (d_head ** 0.5)
self.pre_lnorm = pre_lnorm
self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head)
self.drop = nn.Dropout(dropout)
self.dropatt = nn.Dropout(dropatt)
self.o_net = nn.Linear(n_head * d_head, d_model, bias=False)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, inp, attn_mask=None):
return self._forward(inp, attn_mask)
def _forward(self, inp, attn_mask=None):
residual = inp
if self.pre_lnorm:
# layer normalization
inp = self.layer_norm(inp)
n_head, d_head = self.n_head, self.d_head
head_q, head_k, head_v = torch.chunk(self.qkv_net(inp), 3, dim=2)
head_q = head_q.view(inp.size(0), inp.size(1), n_head, d_head)
head_k = head_k.view(inp.size(0), inp.size(1), n_head, d_head)
head_v = head_v.view(inp.size(0), inp.size(1), n_head, d_head)
q = head_q.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
k = head_k.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
v = head_v.permute(0, 2, 1, 3).reshape(-1, inp.size(1), d_head)
attn_score = torch.bmm(q, k.transpose(1, 2))
attn_score.mul_(self.scale)
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(1).to(attn_score.dtype)
attn_mask = attn_mask.repeat(n_head, attn_mask.size(2), 1)
attn_score.masked_fill_(attn_mask.to(torch.bool), -float('inf'))
attn_prob = F.softmax(attn_score, dim=2)
attn_prob = self.dropatt(attn_prob)
attn_vec = torch.bmm(attn_prob, v)
attn_vec = attn_vec.view(n_head, inp.size(0), inp.size(1), d_head)
attn_vec = attn_vec.permute(1, 2, 0, 3).contiguous().view(
inp.size(0), inp.size(1), n_head * d_head)
# linear projection
attn_out = self.o_net(attn_vec)
attn_out = self.drop(attn_out)
if self.pre_lnorm:
# residual connection
output = residual + attn_out
else:
# residual connection + layer normalization
output = self.layer_norm(residual + attn_out)
output = output.to(attn_out.dtype)
return output
class TransformerLayer(nn.Module):
def __init__(self, n_head, d_model, d_head, d_inner, kernel_size, dropout,
**kwargs):
super(TransformerLayer, self).__init__()
self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs)
self.pos_ff = PositionwiseConvFF(d_model, d_inner, kernel_size, dropout,
pre_lnorm=kwargs.get('pre_lnorm'))
def forward(self, dec_inp, mask=None):
output = self.dec_attn(dec_inp, attn_mask=~mask.squeeze(2))
output *= mask
output = self.pos_ff(output)
output *= mask
return output
class FFTransformer(nn.Module):
def __init__(self, n_layer, n_head, d_model, d_head, d_inner, kernel_size,
dropout, dropatt, dropemb=0.0, embed_input=True,
n_embed=None, d_embed=None, padding_idx=0, pre_lnorm=False):
super(FFTransformer, self).__init__()
self.d_model = d_model
self.n_head = n_head
self.d_head = d_head
self.padding_idx = padding_idx
if embed_input:
self.word_emb = nn.Embedding(n_embed, d_embed or d_model,
padding_idx=self.padding_idx)
else:
self.word_emb = None
self.pos_emb = PositionalEmbedding(self.d_model)
self.drop = nn.Dropout(dropemb)
self.layers = nn.ModuleList()
for _ in range(n_layer):
self.layers.append(
TransformerLayer(
n_head, d_model, d_head, d_inner, kernel_size, dropout,
dropatt=dropatt, pre_lnorm=pre_lnorm)
)
def forward(self, dec_inp, seq_lens=None, conditioning=0):
if self.word_emb is None:
inp = dec_inp
mask = mask_from_lens(seq_lens).unsqueeze(2)
else:
inp = self.word_emb(dec_inp)
# [bsz x L x 1]
mask = (dec_inp != self.padding_idx).unsqueeze(2)
pos_seq = torch.arange(inp.size(1), device=inp.device).to(inp.dtype)
pos_emb = self.pos_emb(pos_seq) * mask
out = self.drop(inp + pos_emb + conditioning)
for layer in self.layers:
out = layer(out, mask=mask)
# out = self.drop(out)
return out, mask
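# A usage sketch with the default FastPitch encoder sizes (see fastpitch/arg_parser.py):
#
#   enc = FFTransformer(n_layer=6, n_head=1, d_model=384, d_head=64, d_inner=1536,
#                       kernel_size=3, dropout=0.1, dropatt=0.1,
#                       embed_input=True, n_embed=148, d_embed=384)
#   out, mask = enc(torch.randint(1, 148, (2, 50)))   # out: (2, 50, 384), mask: (2, 50, 1)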
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/transformer.py |
# *****************************************************************************
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import urllib.request
import torch
import os
import sys
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def checkpoint_from_distributed(state_dict):
"""
Checks whether a checkpoint was generated by DistributedDataParallel. DDP
wraps the model in an additional "module." prefix, which needs to be
removed for single-GPU inference.
:param state_dict: model's state dict
"""
ret = False
for key, _ in state_dict.items():
if key.find('module.') != -1:
ret = True
break
return ret
# from https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/SpeechSynthesis/Tacotron2/inference.py
def unwrap_distributed(state_dict):
"""
Unwraps a model state dict saved from DistributedDataParallel.
DDP wraps the model in an additional "module." prefix, which needs to be
removed for single-GPU inference.
:param state_dict: model's state dict
"""
new_state_dict = {}
for key, value in state_dict.items():
new_key = key.replace('module.1.', '')
new_key = new_key.replace('module.', '')
new_state_dict[new_key] = value
return new_state_dict
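# For example, {'module.encoder.weight': w} becomes {'encoder.weight': w}; keys with
# the extra wrapper prefix, e.g. 'module.1.encoder.weight', are stripped the same way.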
def _download_checkpoint(checkpoint, force_reload):
model_dir = os.path.join(torch.hub._get_torch_home(), 'checkpoints')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
ckpt_file = os.path.join(model_dir, os.path.basename(checkpoint))
if not os.path.exists(ckpt_file) or force_reload:
sys.stderr.write('Downloading checkpoint from {}\n'.format(checkpoint))
urllib.request.urlretrieve(checkpoint, ckpt_file)
return ckpt_file
def nvidia_fastpitch(pretrained=True, **kwargs):
"""TODO
"""
from fastpitch import model as fastpitch
force_reload = "force_reload" in kwargs and kwargs["force_reload"]
fp16 = "model_math" in kwargs and kwargs["model_math"] == "fp16"
if pretrained:
checkpoint = 'https://api.ngc.nvidia.com/v2/models/nvidia/dle/fastpitch__pyt_ckpt/versions/21.12.1_amp/files/nvidia_fastpitch_210824+cfg.pt'
ckpt_file = _download_checkpoint(checkpoint, force_reload)
ckpt = torch.load(ckpt_file)
state_dict = ckpt['state_dict']
if checkpoint_from_distributed(state_dict):
state_dict = unwrap_distributed(state_dict)
config = ckpt['config']
train_setup = ckpt.get('train_setup', {})
else:
config = {'n_mel_channels': 80, 'n_symbols': 148, 'padding_idx': 0, 'symbols_embedding_dim': 384,
'in_fft_n_layers': 6, 'in_fft_n_heads': 1, 'in_fft_d_head': 64, 'in_fft_conv1d_kernel_size': 3,
'in_fft_conv1d_filter_size': 1536, 'in_fft_output_size': 384, 'p_in_fft_dropout': 0.1,
'p_in_fft_dropatt': 0.1, 'p_in_fft_dropemb': 0.0, 'out_fft_n_layers': 6, 'out_fft_n_heads': 1,
'out_fft_d_head': 64, 'out_fft_conv1d_kernel_size': 3, 'out_fft_conv1d_filter_size': 1536,
'out_fft_output_size': 384, 'p_out_fft_dropout': 0.1, 'p_out_fft_dropatt': 0.1, 'p_out_fft_dropemb': 0.0,
'dur_predictor_kernel_size': 3, 'dur_predictor_filter_size': 256, 'p_dur_predictor_dropout': 0.1,
'dur_predictor_n_layers': 2, 'pitch_predictor_kernel_size': 3, 'pitch_predictor_filter_size': 256,
'p_pitch_predictor_dropout': 0.1, 'pitch_predictor_n_layers': 2, 'pitch_embedding_kernel_size': 3,
'n_speakers': 1, 'speaker_emb_weight': 1.0, 'energy_predictor_kernel_size': 3,
'energy_predictor_filter_size': 256, 'p_energy_predictor_dropout': 0.1, 'energy_predictor_n_layers': 2,
'energy_conditioning': True, 'energy_embedding_kernel_size': 3}
for k,v in kwargs.items():
if k in config.keys():
config[k] = v
train_setup = {}
model = fastpitch.FastPitch(**config)
if pretrained:
model.load_state_dict(state_dict)
if fp16:
model.half()
model.forward = model.infer
return model, train_setup
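# A usage sketch (downloads the NGC checkpoint on first call when pretrained=True;
# note that model.forward is aliased to model.infer before returning):
#
#   model, train_setup = nvidia_fastpitch(pretrained=True, model_math='fp32')
#   model.eval()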
def nvidia_textprocessing_utils(cmudict_path, heteronyms_path, **kwargs):
from common.text.text_processing import TextProcessing
import numpy as np
from torch.nn.utils.rnn import pad_sequence
from common.text import cmudict
class TextPreProcessing:
@staticmethod
def prepare_input_sequence(texts, batch_size=1, device='cpu'):
cmudict.initialize(cmudict_path, heteronyms_path)
tp = TextProcessing(symbol_set='english_basic', cleaner_names=['english_cleaners_v2'], p_arpabet=1.0)
fields={}
fields['text'] = [torch.LongTensor(tp.encode_text(text))
for text in texts]
order = np.argsort([-t.size(0) for t in fields['text']])
fields['text'] = [fields['text'][i] for i in order]
fields['text_lens'] = torch.LongTensor([t.size(0) for t in fields['text']])
for t in fields['text']:
print(tp.sequence_to_text(t.numpy()))
# cut into batches & pad
batches = []
for b in range(0, len(order), batch_size):
batch = {f: values[b:b+batch_size] for f, values in fields.items()}
for f in batch:
if f == 'text':
batch[f] = pad_sequence(batch[f], batch_first=True)
if type(batch[f]) is torch.Tensor:
batch[f] = batch[f].to(device)
batches.append(batch)
return batches
return TextPreProcessing()
# # from tacotron2.text import text_to_sequence
# @staticmethod
# def pad_sequences(batch):
# # Right zero-pad all one-hot text sequences to max input length
# input_lengths, ids_sorted_decreasing = torch.sort(
# torch.LongTensor([len(x) for x in batch]),
# dim=0, descending=True)
# max_input_len = input_lengths[0]
# text_padded = torch.LongTensor(len(batch), max_input_len)
# text_padded.zero_()
# for i in range(len(ids_sorted_decreasing)):
# text = batch[ids_sorted_decreasing[i]]
# text_padded[i, :text.size(0)] = text
# return text_padded, input_lengths
# @staticmethod
# def prepare_input_sequence(texts, cpu_run=False):
# d = []
# # for i,text in enumerate(texts):
# # d.append(torch.IntTensor(
# # Processing.text_to_sequence(text, ['english_cleaners'])[:]))
# text_padded, input_lengths = Processing.pad_sequences(d)
# if not cpu_run:
# text_padded = text_padded.cuda().long()
# input_lengths = input_lengths.cuda().long()
# else:
# text_padded = text_padded.long()
# input_lengths = input_lengths.long()
# return text_padded, input_lengths
# return Processing()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/entrypoints.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionCTCLoss(torch.nn.Module):
def __init__(self, blank_logprob=-1):
super(AttentionCTCLoss, self).__init__()
self.log_softmax = torch.nn.LogSoftmax(dim=3)
self.blank_logprob = blank_logprob
self.CTCLoss = nn.CTCLoss(zero_infinity=True)
def forward(self, attn_logprob, in_lens, out_lens):
key_lens = in_lens
query_lens = out_lens
attn_logprob_padded = F.pad(input=attn_logprob,
pad=(1, 0, 0, 0, 0, 0, 0, 0),
value=self.blank_logprob)
cost_total = 0.0
for bid in range(attn_logprob.shape[0]):
target_seq = torch.arange(1, key_lens[bid]+1).unsqueeze(0)
curr_logprob = attn_logprob_padded[bid].permute(1, 0, 2)
curr_logprob = curr_logprob[:query_lens[bid], :, :key_lens[bid]+1]
curr_logprob = self.log_softmax(curr_logprob[None])[0]
ctc_cost = self.CTCLoss(
curr_logprob, target_seq, input_lengths=query_lens[bid:bid+1],
target_lengths=key_lens[bid:bid+1])
cost_total += ctc_cost
cost = cost_total/attn_logprob.shape[0]
return cost
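# Expected shapes match fastpitch/model.py: attn_logprob is
# (B, 1, max_mel_len, max_text_len); in_lens and out_lens hold the per-sample text
# and mel lengths. The prepended column filled with blank_logprob acts as the CTC
# blank class (index 0).
#
#   loss = AttentionCTCLoss()(attn_logprob, in_lens, out_lens)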
class AttentionBinarizationLoss(torch.nn.Module):
def __init__(self):
super(AttentionBinarizationLoss, self).__init__()
def forward(self, hard_attention, soft_attention, eps=1e-12):
log_sum = torch.log(torch.clamp(soft_attention[hard_attention == 1],
min=eps)).sum()
return -log_sum / hard_attention.sum()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/fastpitch/attn_loss_function.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
import glob
import re
from pathlib import Path
import numpy as np
import torch
from torch.utils.tensorboard import SummaryWriter
import dllogger
from common.utils import plot_spectrogram
tb_loggers = {}
class TBLogger:
"""
dummies: add empty placeholder plots that stretch the screen so the legend
always fits for the other plots
"""
def __init__(self, enabled, log_dir, name, interval=1, dummies=True):
self.enabled = enabled
self.interval = interval
self.cache = {}
if self.enabled:
self.summary_writer = SummaryWriter(
log_dir=Path(log_dir, name), flush_secs=120, max_queue=200)
atexit.register(self.summary_writer.close)
if dummies:
for key in ('_', '✕'):
self.summary_writer.add_scalar(key, 0.0, 1)
def log(self, step, data):
for k, v in data.items():
self.log_value(step, k, v.item() if type(v) is torch.Tensor else v)
def log_value(self, step, key, val, stat='mean'):
if self.enabled:
if key not in self.cache:
self.cache[key] = []
self.cache[key].append(val)
if len(self.cache[key]) == self.interval:
agg_val = getattr(np, stat)(self.cache[key])
self.summary_writer.add_scalar(key, agg_val, step)
del self.cache[key]
def log_grads(self, step, model):
if self.enabled:
norms = [p.grad.norm().item() for p in model.parameters()
if p.grad is not None]
for stat in ('max', 'min', 'mean'):
self.log_value(step, f'grad_{stat}', getattr(np, stat)(norms),
stat=stat)
def log_samples(self, step, sample_ind, audio, spec, rate):
if self.enabled:
log_prefix = 'gt/y' if step == 0 else 'generated/y_hat'
self.summary_writer.add_audio(
f'{log_prefix}_{sample_ind}', audio[0], step, rate)
self.summary_writer.add_figure(
f'{log_prefix}_spec_{sample_ind}',
plot_spectrogram(spec[0].cpu().numpy()),
step)
def unique_log_fpath(fpath):
"""Have a unique log filename for every separate run"""
log_num = max([0] + [int(re.search(r"\.(\d+)", Path(f).suffix).group(1))
for f in glob.glob(f"{fpath}.*")])
return f"{fpath}.{log_num + 1}"
def stdout_step_format(step):
if isinstance(step, str):
return step
fields = []
if len(step) > 0:
fields.append("epoch {:>4}".format(step[0]))
if len(step) > 1:
fields.append("iter {:>3}".format(step[1]))
if len(step) > 2:
fields[-1] += "/{}".format(step[2])
return " | ".join(fields)
def stdout_metric_format(metric, metadata, value):
name = metadata.get("name", metric + " : ")
unit = metadata.get("unit", None)
format = f'{{{metadata.get("format", "")}}}'
fields = [name, format.format(value) if value is not None else value, unit]
fields = [f for f in fields if f is not None]
return "| " + " ".join(fields)
def log(when, metrics={}, scope='train', flush_log=False, tb_iter=None):
dllogger.log(when, data=metrics.get_metrics(scope, 'dll'))
if tb_iter is not None:
tb_loggers[scope].log(tb_iter, metrics.get_metrics(scope, 'tb'))
if flush_log:
flush()
def log_grads_tb(tb_total_steps, grads, tb_subset='train'):
tb_loggers[tb_subset].log_grads(tb_total_steps, grads)
def log_samples_tb(tb_total_steps, sample_i, y, y_spec, rate, tb_subset='val',):
tb_loggers[tb_subset].log_samples(tb_total_steps, sample_i, y, y_spec, rate)
def parameters(data, verbosity=0, tb_subset=None):
for k, v in data.items():
dllogger.log(step="PARAMETER", data={k: v}, verbosity=verbosity)
if tb_subset is not None and tb_loggers[tb_subset].enabled:
tb_data = {k: v for k, v in data.items()
if type(v) in (str, bool, int, float)}
tb_loggers[tb_subset].summary_writer.add_hparams(tb_data, {})
def flush():
dllogger.flush()
for tbl in tb_loggers.values():
if tbl.enabled:
tbl.summary_writer.flush()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/tb_dllogger.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from csv import QUOTE_NONE
from pathlib import Path
import pandas as pd
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-d', '--metadata-path', type=str,
default='./metadata.csv',
help='Path to LJSpeech dataset metadata')
parser.add_argument('--filelists-path', default='data/filelists', type=str,
help='Directory to generate filelists to')
parser.add_argument('--log-file', type=str, default='split_log.json',
help='Filename for logging')
parser.add_argument('--subsets', type=str, nargs='+',
choices=['all', 'train', 'val', 'test'],
default=['all', 'train', 'val'],
help='Subsets to generate')
parser.add_argument('--add-transcript', action='store_true',
help='Add text columns to filelists')
parser.add_argument('--add-pitch', action='store_true',
help='Add pitch columns to filelists')
parser.add_argument('-v', '--verbose', action='store_true',
help='Increase verbosity')
parser.add_argument('--dry-run', action='store_true',
help='Do not create actual files')
return parser
def generate_filelist(subset, meta, base_fname, cols_to_dump, args):
subset_suffix = f'_{subset}' if subset != 'all' else ''
fpath = Path(args.filelists_path) / f'{base_fname}{subset_suffix}.txt'
if subset == 'all':
subset_meta = meta[meta.index.map(
lambda fname: fname not in discard_train_ids_v3)]
elif subset == 'val':
subset_meta = meta[meta.index.map(lambda fname: fname in val_ids)]
elif subset == 'test':
subset_meta = meta[meta.index.map(lambda fname: fname in test_ids)]
elif subset == 'train':
subset_meta = meta[meta.index.map(
lambda fname: (fname not in val_ids and fname not in test_ids
and fname not in discard_train_ids_v3)
)]
else:
raise ValueError(f'Unknown subset: {subset}')
print(f'Writing {len(subset_meta)} rows to {fpath}')
if args.verbose:
print(subset_meta.reset_index()[cols_to_dump].head())
if not args.dry_run:
fpath.parent.mkdir(parents=True, exist_ok=True)
subset_meta.to_csv(fpath, sep='|', header=None, quoting=QUOTE_NONE,
index=None, columns=cols_to_dump)
def main():
parser = argparse.ArgumentParser(
description='LJSpeech filelists generation')
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
meta = pd.read_csv(args.metadata_path, sep='|', index_col='fname',
names=['fname', 'txt', 'norm_txt'], quoting=QUOTE_NONE)
meta['wav_fname'] = meta.index.map(lambda stem: f'wavs/{stem}.wav')
meta['pitch_fname'] = meta.index.map(lambda stem: f'pitch/{stem}.pt')
cols_to_dump = ['wav_fname']
filelist_base_name = 'ljs_audio'
if args.add_pitch:
cols_to_dump.append('pitch_fname')
filelist_base_name += '_pitch'
if args.add_transcript:
cols_to_dump.append('norm_txt')
filelist_base_name += '_text'
    # Fix incorrect transcripts
if args.add_transcript:
for lj_id, txt in corrected_transcripts.items():
print('Trying to correct', lj_id)
meta.loc[meta['wav_fname'].str.contains(lj_id), 'norm_txt'] = txt
for subset in args.subsets:
generate_filelist(subset, meta, filelist_base_name, cols_to_dump, args)
corrected_transcripts = {
'LJ031-0175': "O'Donnell tried to persuade Mrs. Kennedy to leave the area, but she refused. She said that she intended to stay with her husband.",
'LJ034-0138': "they saw and heard Brennan describing what he had seen. Norman stated, quote,",
}
# ASR-recognized French words that could hinder generalization
discard_train_ids_v3 = {
'LJ011-0058', 'LJ012-0205', 'LJ016-0257', 'LJ018-0396',
}
val_ids = {
'LJ001-0110', 'LJ002-0018', 'LJ002-0043', 'LJ003-0111', 'LJ003-0345',
'LJ004-0045', 'LJ004-0096', 'LJ004-0152', 'LJ005-0014', 'LJ005-0079',
'LJ005-0201', 'LJ007-0154', 'LJ008-0111', 'LJ008-0258', 'LJ008-0278',
'LJ008-0294', 'LJ008-0307', 'LJ009-0076', 'LJ009-0114', 'LJ009-0238',
'LJ011-0096', 'LJ012-0035', 'LJ012-0042', 'LJ012-0161', 'LJ012-0235',
'LJ012-0250', 'LJ013-0164', 'LJ014-0010', 'LJ014-0020', 'LJ014-0030',
'LJ014-0076', 'LJ014-0110', 'LJ014-0263', 'LJ015-0203', 'LJ016-0020',
'LJ016-0138', 'LJ016-0179', 'LJ016-0288', 'LJ016-0318', 'LJ017-0044',
'LJ017-0070', 'LJ017-0131', 'LJ018-0081', 'LJ018-0098', 'LJ018-0239',
'LJ019-0186', 'LJ019-0257', 'LJ019-0273', 'LJ021-0066', 'LJ021-0145',
'LJ022-0023', 'LJ024-0083', 'LJ026-0068', 'LJ027-0052', 'LJ027-0141',
'LJ028-0008', 'LJ028-0081', 'LJ028-0093', 'LJ028-0275', 'LJ028-0307',
'LJ028-0335', 'LJ028-0506', 'LJ029-0022', 'LJ029-0032', 'LJ031-0038',
'LJ031-0070', 'LJ031-0134', 'LJ031-0202', 'LJ033-0047', 'LJ034-0042',
'LJ034-0053', 'LJ034-0160', 'LJ034-0198', 'LJ035-0019', 'LJ035-0129',
'LJ036-0077', 'LJ036-0103', 'LJ036-0174', 'LJ037-0234', 'LJ038-0199',
'LJ039-0075', 'LJ039-0223', 'LJ040-0002', 'LJ040-0027', 'LJ042-0096',
'LJ042-0129', 'LJ043-0002', 'LJ043-0030', 'LJ045-0140', 'LJ045-0230',
'LJ046-0058', 'LJ046-0146', 'LJ046-0184', 'LJ047-0044', 'LJ047-0148',
'LJ048-0194', 'LJ048-0228', 'LJ049-0026', 'LJ049-0050', 'LJ050-0168'
}
test_ids = {
'LJ001-0015', 'LJ001-0051', 'LJ001-0063', 'LJ001-0072', 'LJ001-0079',
'LJ001-0094', 'LJ001-0096', 'LJ001-0102', 'LJ001-0153', 'LJ001-0173',
'LJ001-0186', 'LJ002-0096', 'LJ002-0105', 'LJ002-0106', 'LJ002-0171',
'LJ002-0174', 'LJ002-0220', 'LJ002-0225', 'LJ002-0253', 'LJ002-0260',
'LJ002-0261', 'LJ002-0289', 'LJ002-0298', 'LJ002-0299', 'LJ003-0011',
'LJ003-0088', 'LJ003-0107', 'LJ003-0140', 'LJ003-0159', 'LJ003-0211',
'LJ003-0230', 'LJ003-0282', 'LJ003-0299', 'LJ003-0319', 'LJ003-0324',
'LJ004-0009', 'LJ004-0024', 'LJ004-0036', 'LJ004-0077', 'LJ004-0083',
'LJ004-0239', 'LJ005-0019', 'LJ005-0072', 'LJ005-0086', 'LJ005-0099',
'LJ005-0248', 'LJ005-0252', 'LJ005-0253', 'LJ005-0257', 'LJ005-0264',
'LJ005-0265', 'LJ005-0294', 'LJ006-0021', 'LJ006-0040', 'LJ006-0043',
'LJ006-0044', 'LJ006-0082', 'LJ006-0084', 'LJ006-0088', 'LJ006-0137',
'LJ006-0149', 'LJ006-0202', 'LJ006-0268', 'LJ007-0071', 'LJ007-0075',
'LJ007-0076', 'LJ007-0085', 'LJ007-0090', 'LJ007-0112', 'LJ007-0125',
'LJ007-0130', 'LJ007-0150', 'LJ007-0158', 'LJ007-0170', 'LJ007-0233',
'LJ008-0054', 'LJ008-0085', 'LJ008-0098', 'LJ008-0121', 'LJ008-0181',
'LJ008-0182', 'LJ008-0206', 'LJ008-0215', 'LJ008-0228', 'LJ008-0266',
'LJ009-0037', 'LJ009-0041', 'LJ009-0061', 'LJ009-0074', 'LJ009-0084',
'LJ009-0106', 'LJ009-0124', 'LJ009-0126', 'LJ009-0172', 'LJ009-0184',
'LJ009-0192', 'LJ009-0194', 'LJ009-0276', 'LJ009-0280', 'LJ009-0286',
'LJ010-0027', 'LJ010-0030', 'LJ010-0038', 'LJ010-0062', 'LJ010-0065',
'LJ010-0083', 'LJ010-0157', 'LJ010-0158', 'LJ010-0219', 'LJ010-0228',
'LJ010-0257', 'LJ010-0281', 'LJ010-0297', 'LJ011-0041', 'LJ011-0048',
'LJ011-0118', 'LJ011-0141', 'LJ011-0245', 'LJ012-0015', 'LJ012-0021',
'LJ012-0049', 'LJ012-0054', 'LJ012-0067', 'LJ012-0188', 'LJ012-0189',
'LJ012-0194', 'LJ012-0219', 'LJ012-0230', 'LJ012-0257', 'LJ012-0271',
'LJ013-0005', 'LJ013-0045', 'LJ013-0055', 'LJ013-0091', 'LJ013-0098',
'LJ013-0104', 'LJ013-0109', 'LJ013-0213', 'LJ014-0029', 'LJ014-0094',
'LJ014-0121', 'LJ014-0128', 'LJ014-0142', 'LJ014-0146', 'LJ014-0171',
'LJ014-0186', 'LJ014-0194', 'LJ014-0199', 'LJ014-0224', 'LJ014-0233',
'LJ014-0265', 'LJ014-0306', 'LJ014-0326', 'LJ015-0001', 'LJ015-0005',
'LJ015-0007', 'LJ015-0025', 'LJ015-0027', 'LJ015-0036', 'LJ015-0043',
'LJ015-0052', 'LJ015-0144', 'LJ015-0194', 'LJ015-0218', 'LJ015-0231',
'LJ015-0266', 'LJ015-0289', 'LJ015-0308', 'LJ016-0007', 'LJ016-0049',
'LJ016-0054', 'LJ016-0077', 'LJ016-0089', 'LJ016-0117', 'LJ016-0125',
'LJ016-0137', 'LJ016-0192', 'LJ016-0205', 'LJ016-0233', 'LJ016-0238',
'LJ016-0241', 'LJ016-0248', 'LJ016-0264', 'LJ016-0274', 'LJ016-0277',
'LJ016-0283', 'LJ016-0314', 'LJ016-0347', 'LJ016-0367', 'LJ016-0380',
'LJ016-0417', 'LJ016-0426', 'LJ017-0035', 'LJ017-0050', 'LJ017-0059',
'LJ017-0102', 'LJ017-0108', 'LJ017-0133', 'LJ017-0134', 'LJ017-0164',
'LJ017-0183', 'LJ017-0189', 'LJ017-0190', 'LJ017-0226', 'LJ017-0231',
'LJ018-0031', 'LJ018-0129', 'LJ018-0130', 'LJ018-0159', 'LJ018-0206',
'LJ018-0211', 'LJ018-0215', 'LJ018-0218', 'LJ018-0231', 'LJ018-0244',
'LJ018-0262', 'LJ018-0276', 'LJ018-0278', 'LJ018-0288', 'LJ018-0309',
'LJ018-0349', 'LJ018-0354', 'LJ019-0042', 'LJ019-0052', 'LJ019-0055',
'LJ019-0129', 'LJ019-0145', 'LJ019-0161', 'LJ019-0169', 'LJ019-0179',
'LJ019-0180', 'LJ019-0201', 'LJ019-0202', 'LJ019-0221', 'LJ019-0241',
'LJ019-0248', 'LJ019-0270', 'LJ019-0289', 'LJ019-0317', 'LJ019-0318',
'LJ019-0335', 'LJ019-0344', 'LJ019-0348', 'LJ019-0355', 'LJ019-0368',
'LJ019-0371', 'LJ020-0085', 'LJ020-0092', 'LJ020-0093', 'LJ021-0012',
'LJ021-0025', 'LJ021-0026', 'LJ021-0040', 'LJ021-0078', 'LJ021-0091',
'LJ021-0110', 'LJ021-0115', 'LJ021-0139', 'LJ021-0140', 'LJ023-0016',
'LJ023-0033', 'LJ023-0047', 'LJ023-0056', 'LJ023-0089', 'LJ023-0122',
'LJ024-0018', 'LJ024-0019', 'LJ024-0034', 'LJ024-0054', 'LJ024-0102',
'LJ025-0081', 'LJ025-0098', 'LJ025-0118', 'LJ025-0129', 'LJ025-0157',
'LJ026-0034', 'LJ026-0052', 'LJ026-0054', 'LJ026-0108', 'LJ026-0140',
'LJ026-0148', 'LJ027-0006', 'LJ027-0176', 'LJ027-0178', 'LJ028-0023',
'LJ028-0136', 'LJ028-0138', 'LJ028-0145', 'LJ028-0168', 'LJ028-0212',
'LJ028-0226', 'LJ028-0278', 'LJ028-0289', 'LJ028-0340', 'LJ028-0349',
'LJ028-0357', 'LJ028-0410', 'LJ028-0416', 'LJ028-0421', 'LJ028-0459',
'LJ028-0462', 'LJ028-0494', 'LJ028-0502', 'LJ029-0004', 'LJ029-0052',
'LJ029-0060', 'LJ029-0096', 'LJ029-0114', 'LJ029-0197', 'LJ030-0006',
'LJ030-0014', 'LJ030-0021', 'LJ030-0032', 'LJ030-0035', 'LJ030-0063',
'LJ030-0084', 'LJ030-0105', 'LJ030-0125', 'LJ030-0162', 'LJ030-0196',
'LJ030-0197', 'LJ030-0238', 'LJ031-0008', 'LJ031-0014', 'LJ031-0041',
'LJ031-0058', 'LJ031-0109', 'LJ031-0122', 'LJ031-0165', 'LJ031-0185',
'LJ031-0189', 'LJ032-0012', 'LJ032-0025', 'LJ032-0027', 'LJ032-0045',
'LJ032-0085', 'LJ032-0103', 'LJ032-0164', 'LJ032-0180', 'LJ032-0204',
'LJ032-0206', 'LJ032-0261', 'LJ033-0042', 'LJ033-0055', 'LJ033-0056',
'LJ033-0072', 'LJ033-0093', 'LJ033-0120', 'LJ033-0152', 'LJ033-0159',
'LJ033-0174', 'LJ033-0183', 'LJ033-0205', 'LJ034-0035', 'LJ034-0041',
'LJ034-0072', 'LJ034-0097', 'LJ034-0117', 'LJ034-0123', 'LJ034-0134',
'LJ034-0166', 'LJ034-0197', 'LJ035-0014', 'LJ035-0082', 'LJ035-0155',
'LJ035-0164', 'LJ036-0067', 'LJ036-0104', 'LJ036-0169', 'LJ037-0001',
'LJ037-0002', 'LJ037-0007', 'LJ037-0053', 'LJ037-0061', 'LJ037-0081',
'LJ037-0208', 'LJ037-0248', 'LJ037-0249', 'LJ037-0252', 'LJ038-0035',
'LJ038-0047', 'LJ038-0264', 'LJ039-0027', 'LJ039-0059', 'LJ039-0076',
'LJ039-0088', 'LJ039-0096', 'LJ039-0118', 'LJ039-0125', 'LJ039-0139',
'LJ039-0148', 'LJ039-0154', 'LJ039-0192', 'LJ039-0207', 'LJ039-0227',
'LJ040-0018', 'LJ040-0052', 'LJ040-0097', 'LJ040-0110', 'LJ040-0176',
'LJ040-0201', 'LJ041-0022', 'LJ041-0070', 'LJ041-0195', 'LJ041-0199',
'LJ042-0097', 'LJ042-0130', 'LJ042-0133', 'LJ042-0135', 'LJ042-0194',
'LJ042-0198', 'LJ042-0219', 'LJ042-0221', 'LJ042-0230', 'LJ043-0010',
'LJ043-0016', 'LJ043-0047', 'LJ043-0107', 'LJ043-0140', 'LJ043-0188',
'LJ044-0004', 'LJ044-0013', 'LJ044-0047', 'LJ044-0105', 'LJ044-0125',
'LJ044-0135', 'LJ044-0137', 'LJ044-0139', 'LJ044-0158', 'LJ044-0224',
'LJ044-0237', 'LJ045-0015', 'LJ045-0033', 'LJ045-0045', 'LJ045-0082',
'LJ045-0090', 'LJ045-0092', 'LJ045-0096', 'LJ045-0177', 'LJ045-0178',
'LJ045-0190', 'LJ045-0194', 'LJ045-0216', 'LJ045-0228', 'LJ045-0234',
'LJ046-0016', 'LJ046-0033', 'LJ046-0055', 'LJ046-0092', 'LJ046-0105',
'LJ046-0111', 'LJ046-0113', 'LJ046-0179', 'LJ046-0191', 'LJ046-0226',
'LJ047-0015', 'LJ047-0022', 'LJ047-0049', 'LJ047-0056', 'LJ047-0073',
'LJ047-0075', 'LJ047-0093', 'LJ047-0097', 'LJ047-0126', 'LJ047-0158',
'LJ047-0197', 'LJ047-0202', 'LJ047-0240', 'LJ048-0033', 'LJ048-0053',
'LJ048-0069', 'LJ048-0112', 'LJ048-0143', 'LJ048-0197', 'LJ048-0200',
'LJ048-0222', 'LJ048-0252', 'LJ048-0288', 'LJ048-0289', 'LJ049-0022',
'LJ049-0115', 'LJ049-0154', 'LJ049-0196', 'LJ049-0202', 'LJ050-0004',
'LJ050-0022', 'LJ050-0029', 'LJ050-0031', 'LJ050-0056', 'LJ050-0069',
'LJ050-0084', 'LJ050-0090', 'LJ050-0118', 'LJ050-0137', 'LJ050-0161',
'LJ050-0162', 'LJ050-0188', 'LJ050-0209', 'LJ050-0223', 'LJ050-0235'
}
if __name__ == '__main__':
main()
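# Example invocation (paths are illustrative, not prescribed by this script):
#   python common/split_lj.py --metadata-path LJSpeech-1.1/metadata.csv \
#       --filelists-path data/filelists --add-transcript --subsets all train val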
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/split_lj.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import math
import os
import pathlib
import re
import pynvml
pynvml.nvmlInit()
def systemGetDriverVersion():
return pynvml.nvmlSystemGetDriverVersion()
def deviceGetCount():
return pynvml.nvmlDeviceGetCount()
class device:
# assume nvml returns list of 64 bit ints
_nvml_affinity_elements = math.ceil(os.cpu_count() / 64)
def __init__(self, device_idx):
super().__init__()
self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)
def getName(self):
return pynvml.nvmlDeviceGetName(self.handle)
def getCpuAffinity(self):
affinity_string = ''
for j in pynvml.nvmlDeviceGetCpuAffinity(
self.handle, device._nvml_affinity_elements
):
# assume nvml returns list of 64 bit ints
affinity_string = '{:064b}'.format(j) + affinity_string
affinity_list = [int(x) for x in affinity_string]
affinity_list.reverse() # so core 0 is in 0th element of list
ret = [i for i, e in enumerate(affinity_list) if e != 0]
return ret
def set_socket_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity)
def set_single_affinity(gpu_id):
dev = device(gpu_id)
affinity = dev.getCpuAffinity()
os.sched_setaffinity(0, affinity[:1])
def set_single_unique_affinity(gpu_id, nproc_per_node):
devices = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in devices]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
affinities = []
assigned = []
for socket_affinity in socket_affinities:
for core in socket_affinity:
if core not in assigned:
affinities.append([core])
assigned.append(core)
break
os.sched_setaffinity(0, affinities[gpu_id])
def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
device_ids = [device(i) for i in range(nproc_per_node)]
socket_affinities = [dev.getCpuAffinity() for dev in device_ids]
siblings_list = get_thread_siblings_list()
siblings_dict = dict(siblings_list)
# remove siblings
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))
socket_affinities_to_device_ids = collections.defaultdict(list)
for idx, socket_affinity in enumerate(socket_affinities):
socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)
for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
devices_per_group = len(device_ids)
cores_per_device = len(socket_affinity) // devices_per_group
for group_id, device_id in enumerate(device_ids):
if device_id == gpu_id:
if mode == 'interleaved':
affinity = list(socket_affinity[group_id::devices_per_group])
elif mode == 'continuous':
affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
else:
raise RuntimeError('Unknown set_socket_unique_affinity mode')
# reintroduce siblings
affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
os.sched_setaffinity(0, affinity)
def get_thread_siblings_list():
path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
thread_siblings_list = []
pattern = re.compile(r'(\d+)\D(\d+)')
for fname in pathlib.Path(path[0]).glob(path[1:]):
with open(fname) as f:
content = f.read().strip()
res = pattern.findall(content)
if res:
pair = tuple(map(int, res[0]))
thread_siblings_list.append(pair)
return thread_siblings_list
def set_affinity(gpu_id, nproc_per_node, mode='socket'):
if mode == 'socket':
set_socket_affinity(gpu_id)
elif mode == 'single':
set_single_affinity(gpu_id)
elif mode == 'single_unique':
set_single_unique_affinity(gpu_id, nproc_per_node)
elif mode == 'socket_unique_interleaved':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
elif mode == 'socket_unique_continuous':
set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
else:
raise RuntimeError('Unknown affinity mode')
affinity = os.sched_getaffinity(0)
return affinity
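# Usage sketch (illustrative, not part of the original file): in a
# one-process-per-GPU launcher, pin each worker to the CPUs local to its GPU
# before building DataLoaders, e.g.
#   affinity = set_affinity(local_rank, nproc_per_node,
#                           mode='socket_unique_interleaved')
# Requires NVIDIA drivers/NVML; `local_rank` and `nproc_per_node` are assumed
# to come from the launcher.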
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/gpu_affinity.py |
import amp_C
import torch
def apply_ema_decay(model, ema_model, decay):
if not decay:
return
st = model.state_dict()
add_module = hasattr(model, 'module') and not hasattr(ema_model, 'module')
for k, v in ema_model.state_dict().items():
if add_module and not k.startswith('module.'):
k = 'module.' + k
v.copy_(decay * v + (1 - decay) * st[k])
def init_multi_tensor_ema(model, ema_model):
model_weights = list(model.state_dict().values())
ema_model_weights = list(ema_model.state_dict().values())
ema_overflow_buf = torch.cuda.IntTensor([0])
return model_weights, ema_model_weights, ema_overflow_buf
def apply_multi_tensor_ema(decay, model_weights, ema_weights, overflow_buf):
amp_C.multi_tensor_axpby(
65536, overflow_buf, [ema_weights, model_weights, ema_weights],
decay, 1-decay, -1)
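
if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module):
    # maintain an exponential moving average copy of a tiny model's weights.
    # Note that importing this module requires NVIDIA Apex for `amp_C`.
    model = torch.nn.Linear(4, 4)
    ema_model = torch.nn.Linear(4, 4)
    ema_model.load_state_dict(model.state_dict())
    for _ in range(3):
        # ... an optimizer step on `model` would normally happen here ...
        apply_ema_decay(model, ema_model, decay=0.999)
    print(ema_model.weight.data.sum())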
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/ema_utils.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mutes known and unrelated PyTorch warnings.
The warnings module keeps a list of filters. Importing it as late as possible
prevents its filters from being overridden.
"""
import warnings
# NGC 22.04-py3 container (PyTorch 1.12.0a0+bd13bc6)
warnings.filterwarnings(
"ignore",
message='positional arguments and argument "destination" are deprecated.'
' nn.Module.state_dict will not accept them in the future.')
# 22.08-py3 container
warnings.filterwarnings(
"ignore",
message="is_namedtuple is deprecated, please use the python checks")
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/filter_warnings.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import librosa.util as librosa_util
import numpy as np
import torch
from scipy.signal import get_window
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, size=n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
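
if __name__ == '__main__':
    # Quick illustrative check (not part of the original file): compression
    # followed by decompression is the identity for inputs above clip_val.
    x = torch.tensor([1e-3, 0.5, 1.0, 4.0])
    y = dynamic_range_decompression(dynamic_range_compression(x))
    print(torch.allclose(x, y))  # True, since all values exceed the 1e-5 clip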
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/audio_processing.py |
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import numpy as np
import torch
import torch.nn.functional as F
from librosa.util import pad_center, tiny
from scipy.signal import get_window
from torch.autograd import Variable
from common.audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :].copy())
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, size=filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
with torch.no_grad():
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase, self.inverse_basis,
stride=self.hop_length, padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2):]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
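
if __name__ == '__main__':
    # Illustrative round-trip sketch (not part of the original file): analyze
    # and resynthesize a random batch of audio with the default settings.
    torch.manual_seed(0)
    stft = STFT(filter_length=800, hop_length=200, win_length=800)
    audio = torch.rand(2, 16000) * 2 - 1              # fake waveforms in [-1, 1)
    magnitude, phase = stft.transform(audio)          # (2, 401, n_frames)
    reconstruction = stft.inverse(magnitude, phase)   # (2, 1, n_samples)
    print(magnitude.shape, reconstruction.shape)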
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/stft.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# plot_spectrogram, init_weights, get_padding, AttrDict
import ctypes
import glob
import os
import re
import shutil
import warnings
from collections import defaultdict, OrderedDict
from pathlib import Path
from typing import Optional
import soundfile # flac
import matplotlib
import numpy as np
import torch
import torch.distributed as dist
def mask_from_lens(lens, max_len: Optional[int] = None):
if max_len is None:
max_len = lens.max()
ids = torch.arange(0, max_len, device=lens.device, dtype=lens.dtype)
mask = torch.lt(ids, lens.unsqueeze(1))
return mask
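# Illustrative example (not in the original file):
#   mask_from_lens(torch.tensor([2, 3]))
#   -> tensor([[ True,  True, False],
#              [ True,  True,  True]])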
def freeze(model):
for p in model.parameters():
p.requires_grad = False
def unfreeze(model):
for p in model.parameters():
p.requires_grad = True
def reduce_tensor(tensor, world_size):
if world_size == 1:
return tensor
rt = tensor.detach().clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt.true_divide(world_size)
def adjust_fine_tuning_lr(args, ckpt_d):
assert args.fine_tuning
if args.fine_tune_lr_factor == 1.:
return
for k in ['optim_d', 'optim_g']:
for param_group in ckpt_d[k]['param_groups']:
old_v = param_group['lr']
new_v = old_v * args.fine_tune_lr_factor
print(f'Init fine-tuning: changing {k} lr: {old_v} --> {new_v}')
param_group['lr'] = new_v
def init_distributed(args, world_size, rank):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print(f"{args.local_rank}: Initializing distributed training")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(backend=('nccl' if args.cuda else 'gloo'),
init_method='env://')
print(f"{args.local_rank}: Done initializing distributed training")
def load_wav(full_path, torch_tensor=False):
data, sampling_rate = soundfile.read(full_path, dtype='int16')
if torch_tensor:
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
else:
return data, sampling_rate
def load_wav_to_torch(full_path, force_sampling_rate=None):
if force_sampling_rate is not None:
raise NotImplementedError
return load_wav(full_path, True)
def load_filepaths_and_text(dataset_path, fnames, has_speakers=False, split="|"):
def split_line(root, line):
parts = line.strip().split(split)
if len(parts) == 1:
paths, non_paths = parts, []
else:
if has_speakers:
paths, non_paths = parts[:-2], parts[-2:]
else:
paths, non_paths = parts[:-1], parts[-1:]
return tuple(str(Path(root, p)) for p in paths) + tuple(non_paths)
fpaths_and_text = []
for fname in fnames:
with open(fname, encoding='utf-8') as f:
fpaths_and_text += [split_line(dataset_path, line) for line in f]
return fpaths_and_text
def to_gpu(x):
x = x.contiguous()
return x.cuda(non_blocking=True) if torch.cuda.is_available() else x
def l2_promote():
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def prepare_tmp(path):
if path is None:
return
p = Path(path)
if p.is_dir():
warnings.warn(f'{p} exists. Removing...')
shutil.rmtree(p, ignore_errors=True)
p.mkdir(parents=False, exist_ok=False)
def print_once(*msg):
if not dist.is_initialized() or dist.get_rank() == 0:
print(*msg)
def plot_spectrogram(spectrogram):
matplotlib.use("Agg")
import matplotlib.pylab as plt
fig, ax = plt.subplots(figsize=(10, 2))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
fig.canvas.draw()
plt.close()
return fig
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
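# Illustrative example (not in the original file): get_padding(7) == 3, the
# "same" padding for a stride-1 convolution with kernel size 7 and dilation 1.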
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class DefaultAttrDict(defaultdict):
def __init__(self, *args, **kwargs):
super(DefaultAttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def __getattr__(self, item):
return self[item]
class Checkpointer:
def __init__(self, save_dir,
keep_milestones=[1000, 2000, 3000, 4000, 5000, 6000]):
self.save_dir = save_dir
self.keep_milestones = keep_milestones
        find = lambda name: {int(re.search(r'_(\d+)\.pt', fn).group(1)): fn
                             for fn in glob.glob(f'{save_dir}/{name}_checkpoint_*.pt')}
saved_g = find('hifigan_gen')
saved_d = find('hifigan_discrim')
common_epochs = sorted(set(saved_g.keys()) & set(saved_d.keys()))
self.tracked = OrderedDict([(ep, (saved_g[ep], saved_d[ep]))
for ep in common_epochs])
def maybe_load(self, gen, mpd, msd, optim_g, optim_d, scaler_g, scaler_d,
train_state, args, gen_ema=None, mpd_ema=None, msd_ema=None):
fpath_g = args.checkpoint_path_gen
fpath_d = args.checkpoint_path_discrim
assert (fpath_g is None) == (fpath_d is None)
if fpath_g is not None:
ckpt_paths = [(fpath_g, fpath_d)]
self.tracked = OrderedDict() # Do not track/delete prev ckpts
elif args.resume:
ckpt_paths = list(reversed(self.tracked.values()))[:2]
else:
return
ckpt_g = None
ckpt_d = None
for fpath_g, fpath_d in ckpt_paths:
if args.local_rank == 0:
print(f'Loading models from {fpath_g} {fpath_d}')
try:
ckpt_g = torch.load(fpath_g, map_location='cpu')
ckpt_d = torch.load(fpath_d, map_location='cpu')
break
except:
print(f'WARNING: Cannot load {fpath_g} and {fpath_d}')
if ckpt_g is None or ckpt_d is None:
return
ep_g = ckpt_g.get('train_state', ckpt_g).get('epoch', None)
ep_d = ckpt_d.get('train_state', ckpt_d).get('epoch', None)
assert ep_g == ep_d, \
f'Mismatched epochs of gen and discrim ({ep_g} != {ep_d})'
train_state.update(ckpt_g['train_state'])
fine_tune_epoch_start = train_state.get('fine_tune_epoch_start')
if args.fine_tuning and fine_tune_epoch_start is None:
# Fine-tuning just began
train_state['fine_tune_epoch_start'] = train_state['epoch'] + 1
train_state['fine_tune_lr_factor'] = args.fine_tune_lr_factor
adjust_fine_tuning_lr(args, ckpt_d)
unwrap = lambda m: getattr(m, 'module', m)
unwrap(gen).load_state_dict(ckpt_g.get('gen', ckpt_g['generator']))
unwrap(mpd).load_state_dict(ckpt_d['mpd'])
unwrap(msd).load_state_dict(ckpt_d['msd'])
optim_g.load_state_dict(ckpt_d['optim_g'])
optim_d.load_state_dict(ckpt_d['optim_d'])
if 'scaler_g' in ckpt_d:
scaler_g.load_state_dict(ckpt_d['scaler_g'])
scaler_d.load_state_dict(ckpt_d['scaler_d'])
else:
warnings.warn('No grad scaler state found in the checkpoint.')
if gen_ema is not None:
gen_ema.load_state_dict(ckpt_g['gen_ema'])
if mpd_ema is not None:
mpd_ema.load_state_dict(ckpt_d['mpd_ema'])
if msd_ema is not None:
msd_ema.load_state_dict(ckpt_d['msd_ema'])
def maybe_save(self, gen, mpd, msd, optim_g, optim_d, scaler_g, scaler_d,
epoch, train_state, args, gen_config, train_setup,
gen_ema=None, mpd_ema=None, msd_ema=None):
rank = 0
if dist.is_initialized():
dist.barrier()
rank = dist.get_rank()
if rank != 0:
return
if epoch == 0:
return
if epoch < args.epochs and (args.checkpoint_interval == 0
or epoch % args.checkpoint_interval > 0):
return
unwrap = lambda m: getattr(m, 'module', m)
fpath_g = Path(self.save_dir, f'hifigan_gen_checkpoint_{epoch}.pt')
ckpt_g = {
'generator': unwrap(gen).state_dict(),
'gen_ema': gen_ema.state_dict() if gen_ema is not None else None,
'config': gen_config,
'train_setup': train_setup,
'train_state': train_state,
}
fpath_d = Path(self.save_dir, f'hifigan_discrim_checkpoint_{epoch}.pt')
ckpt_d = {
'mpd': unwrap(mpd).state_dict(),
'msd': unwrap(msd).state_dict(),
'mpd_ema': mpd_ema.state_dict() if mpd_ema is not None else None,
'msd_ema': msd_ema.state_dict() if msd_ema is not None else None,
'optim_g': optim_g.state_dict(),
'optim_d': optim_d.state_dict(),
'scaler_g': scaler_g.state_dict(),
'scaler_d': scaler_d.state_dict(),
'train_state': train_state,
# compat with original code
'steps': train_state['iters_all'],
'epoch': epoch,
}
print(f"Saving model and optimizer state to {fpath_g} and {fpath_d}")
torch.save(ckpt_g, fpath_g)
torch.save(ckpt_d, fpath_d)
# Remove old checkpoints; keep milestones and the last two
self.tracked[epoch] = (fpath_g, fpath_d)
for epoch in set(list(self.tracked)[:-2]) - set(self.keep_milestones):
try:
os.remove(self.tracked[epoch][0])
os.remove(self.tracked[epoch][1])
del self.tracked[epoch]
except:
pass
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/utils.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import torch.nn.functional as F
from librosa.filters import mel as librosa_mel_fn
from common.audio_processing import (dynamic_range_compression,
dynamic_range_decompression)
from common.stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear',
batch_norm=False):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
        self.norm = torch.nn.BatchNorm1d(out_channels) if batch_norm else None
torch.nn.init.xavier_uniform_(
self.conv.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
if self.norm is None:
return self.conv(signal)
else:
return self.norm(self.conv(signal))
class ConvReLUNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, dropout=0.0):
super(ConvReLUNorm, self).__init__()
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size,
padding=(kernel_size // 2))
self.norm = torch.nn.LayerNorm(out_channels)
self.dropout = torch.nn.Dropout(dropout)
def forward(self, signal):
out = F.relu(self.conv(signal))
out = self.norm(out.transpose(1, 2)).transpose(1, 2).to(signal.dtype)
return self.dropout(out)
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sr=sampling_rate,
n_fft=filter_length,
n_mels=n_mel_channels,
fmin=mel_fmin,
fmax=mel_fmax
)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
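
if __name__ == '__main__':
    # Illustrative sketch (not part of the original file): compute a
    # mel-spectrogram for one second of synthetic audio using the defaults.
    torch.manual_seed(0)
    taco_stft = TacotronSTFT()
    audio = torch.rand(1, 22050) * 2 - 1    # fake waveform in [-1, 1)
    mel = taco_stft.mel_spectrogram(audio)
    print(mel.shape)                        # torch.Size([1, 80, 87])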
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/layers.py |
import re
_letters_and_numbers_re = re.compile(
r"((?:[a-zA-Z]+[0-9]|[0-9]+[a-zA-Z])[a-zA-Z0-9']*)", re.IGNORECASE)
_hardware_re = re.compile(
    r'([0-9]+(?:[.,][0-9]+)?)(?:\s?)(tb|gb|mb|kb|ghz|mhz|khz|hz|mm)', re.IGNORECASE)
_hardware_key = {'tb': 'terabyte',
'gb': 'gigabyte',
'mb': 'megabyte',
'kb': 'kilobyte',
'ghz': 'gigahertz',
'mhz': 'megahertz',
'khz': 'kilohertz',
'hz': 'hertz',
'mm': 'millimeter',
'cm': 'centimeter',
'km': 'kilometer'}
_dimension_re = re.compile(
r'\b(\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?(?:in|inch|m)?)\b|\b(\d+(?:[,.]\d+)?\s*[xX]\s*\d+(?:[,.]\d+)?(?:in|inch|m)?)\b')
_dimension_key = {'m': 'meter',
'in': 'inch',
'inch': 'inch'}
def _expand_letters_and_numbers(m):
text = re.split(r'(\d+)', m.group(0))
# remove trailing space
if text[-1] == '':
text = text[:-1]
elif text[0] == '':
text = text[1:]
# if not like 1920s, or AK47's , 20th, 1st, 2nd, 3rd, etc...
if text[-1] in ("'s", "s", "th", "nd", "st", "rd") and text[-2].isdigit():
text[-2] = text[-2] + text[-1]
text = text[:-1]
# for combining digits 2 by 2
new_text = []
for i in range(len(text)):
string = text[i]
if string.isdigit() and len(string) < 5:
# heuristics
if len(string) > 2 and string[-2] == '0':
if string[-1] == '0':
string = [string]
else:
string = [string[:-2], string[-2], string[-1]]
elif len(string) % 2 == 0:
string = [string[i:i+2] for i in range(0, len(string), 2)]
elif len(string) > 2:
string = [string[0]] + [string[i:i+2] for i in range(1, len(string), 2)]
new_text.extend(string)
else:
new_text.append(string)
text = new_text
text = " ".join(text)
return text
def _expand_hardware(m):
quantity, measure = m.groups(0)
measure = _hardware_key[measure.lower()]
if measure[-1] != 'z' and float(quantity.replace(',', '')) > 1:
return "{} {}s".format(quantity, measure)
return "{} {}".format(quantity, measure)
def _expand_dimension(m):
text = "".join([x for x in m.groups(0) if x != 0])
text = text.replace(' x ', ' by ')
text = text.replace('x', ' by ')
if text.endswith(tuple(_dimension_key.keys())):
if text[-2].isdigit():
text = "{} {}".format(text[:-1], _dimension_key[text[-1:]])
elif text[-3].isdigit():
text = "{} {}".format(text[:-2], _dimension_key[text[-2:]])
return text
def normalize_letters_and_numbers(text):
text = re.sub(_hardware_re, _expand_hardware, text)
text = re.sub(_dimension_re, _expand_dimension, text)
text = re.sub(_letters_and_numbers_re, _expand_letters_and_numbers, text)
return text
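# Illustrative examples (not in the original file):
#   normalize_letters_and_numbers('2GB')   -> '2 gigabytes'
#   normalize_letters_and_numbers('WD40')  -> 'WD 40'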
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/letters_and_numbers.py |
""" from https://github.com/keithito/tacotron """
import re
import sys
import urllib.request
from pathlib import Path
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path=None, heteronyms_path=None, keep_ambiguous=True):
self._entries = {}
self.heteronyms = []
if file_or_path is not None:
self.initialize(file_or_path, heteronyms_path, keep_ambiguous)
def initialize(self, file_or_path, heteronyms_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
if not Path(file_or_path).exists():
print("CMUdict missing. Downloading to data/cmudict/.")
self.download()
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
if heteronyms_path is not None:
with open(heteronyms_path, encoding='utf-8') as f:
self.heteronyms = [l.rstrip() for l in f]
def __len__(self):
if len(self._entries) == 0:
raise ValueError("CMUDict not initialized")
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
if len(self._entries) == 0:
raise ValueError("CMUDict not initialized")
return self._entries.get(word.upper())
def download(self):
url = 'https://github.com/Alexir/CMUdict/raw/master/cmudict-0.7b'
try:
Path('data/cmudict').mkdir(parents=False, exist_ok=True)
urllib.request.urlretrieve(url, filename='data/cmudict/cmudict-0.7b')
except:
print("Automatic download of CMUdict failed. Try manually with:")
print()
print(" bash scripts/download_cmudict.sh")
print()
print("and re-run the script.")
sys.exit(0)
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
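# Illustrative usage (not in the original file; the dictionary file is
# downloaded to data/cmudict/ on first use):
#   d = CMUDict('data/cmudict/cmudict-0.7b')
#   d.lookup('hello')   # -> roughly ['HH AH0 L OW1']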
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/cmudict.py |
import re
_no_period_re = re.compile(r'(No[.])(?=[ ]?[0-9])')
_percent_re = re.compile(r'([ ]?[%])')
_half_re = re.compile('([0-9]½)|(½)')
_url_re = re.compile(r'([a-zA-Z])\.(com|gov|org)')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('ms', 'miss'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
('sen', 'senator'),
('etc', 'et cetera'),
]]
def _expand_no_period(m):
word = m.group(0)
if word[0] == 'N':
return 'Number'
return 'number'
def _expand_percent(m):
return ' percent'
def _expand_half(m):
word = m.group(1)
if word is None:
return 'half'
return word[0] + ' and a half'
def _expand_urls(m):
return f'{m.group(1)} dot {m.group(2)}'
def normalize_abbreviations(text):
text = re.sub(_no_period_re, _expand_no_period, text)
text = re.sub(_percent_re, _expand_percent, text)
text = re.sub(_half_re, _expand_half, text)
text = re.sub('&', ' and ', text)
text = re.sub('@', ' at ', text)
text = re.sub(_url_re, _expand_urls, text)
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
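# Illustrative example (not in the original file):
#   normalize_abbreviations('Mr. Brown met Dr. Smith')
#   -> 'mister Brown met doctor Smith'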
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/abbreviations.py |
""" adapted from https://github.com/keithito/tacotron """
import re
import numpy as np
from . import cleaners
from .symbols import get_symbols
from . import cmudict
from .numerical import _currency_re, _expand_currency
#########
# REGEX #
#########
# Regular expression matching text enclosed in curly braces for encoding
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# Regular expression matching words and not words
_words_re = re.compile(r"([a-zA-ZÀ-ž]+['][a-zA-ZÀ-ž]{1,2}|[a-zA-ZÀ-ž]+)|([{][^}]+[}]|[^a-zA-ZÀ-ž{}]+)")
# Regular expression separating words enclosed in curly braces for cleaning
_arpa_re = re.compile(r'{[^}]+}|\S+')
class TextProcessing(object):
def __init__(self, symbol_set, cleaner_names, p_arpabet=0.0,
handle_arpabet='word', handle_arpabet_ambiguous='ignore',
expand_currency=True):
self.symbols = get_symbols(symbol_set)
self.cleaner_names = cleaner_names
# Mappings from symbol to numeric ID and vice versa:
self.symbol_to_id = {s: i for i, s in enumerate(self.symbols)}
self.id_to_symbol = {i: s for i, s in enumerate(self.symbols)}
self.expand_currency = expand_currency
# cmudict
self.p_arpabet = p_arpabet
self.handle_arpabet = handle_arpabet
self.handle_arpabet_ambiguous = handle_arpabet_ambiguous
def text_to_sequence(self, text):
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += self.symbols_to_sequence(text)
break
sequence += self.symbols_to_sequence(m.group(1))
sequence += self.arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(self, sequence):
result = ''
for symbol_id in sequence:
if symbol_id in self.id_to_symbol:
s = self.id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def clean_text(self, text):
for name in self.cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def symbols_to_sequence(self, symbols):
return [self.symbol_to_id[s] for s in symbols if s in self.symbol_to_id]
def arpabet_to_sequence(self, text):
return self.symbols_to_sequence(['@' + s for s in text.split()])
def get_arpabet(self, word):
arpabet_suffix = ''
if word.lower() in cmudict.heteronyms:
return word
if len(word) > 2 and word.endswith("'s"):
arpabet = cmudict.lookup(word)
if arpabet is None:
arpabet = self.get_arpabet(word[:-2])
arpabet_suffix = ' Z'
elif len(word) > 1 and word.endswith("s"):
arpabet = cmudict.lookup(word)
if arpabet is None:
arpabet = self.get_arpabet(word[:-1])
arpabet_suffix = ' Z'
else:
arpabet = cmudict.lookup(word)
if arpabet is None:
return word
elif arpabet[0] == '{':
arpabet = [arpabet[1:-1]]
# XXX arpabet might not be a list here
if type(arpabet) is not list:
return word
if len(arpabet) > 1:
if self.handle_arpabet_ambiguous == 'first':
arpabet = arpabet[0]
elif self.handle_arpabet_ambiguous == 'random':
arpabet = np.random.choice(arpabet)
elif self.handle_arpabet_ambiguous == 'ignore':
return word
else:
arpabet = arpabet[0]
arpabet = "{" + arpabet + arpabet_suffix + "}"
return arpabet
def encode_text(self, text, return_all=False):
if self.expand_currency:
text = re.sub(_currency_re, _expand_currency, text)
text_clean = [self.clean_text(split) if split[0] != '{' else split
for split in _arpa_re.findall(text)]
text_clean = ' '.join(text_clean)
text_clean = cleaners.collapse_whitespace(text_clean)
text = text_clean
text_arpabet = ''
if self.p_arpabet > 0:
if self.handle_arpabet == 'sentence':
if np.random.uniform() < self.p_arpabet:
words = _words_re.findall(text)
text_arpabet = [
self.get_arpabet(word[0])
if (word[0] != '') else word[1]
for word in words]
text_arpabet = ''.join(text_arpabet)
text = text_arpabet
elif self.handle_arpabet == 'word':
words = _words_re.findall(text)
text_arpabet = [
word[1] if word[0] == '' else (
self.get_arpabet(word[0])
if np.random.uniform() < self.p_arpabet
else word[0])
for word in words]
text_arpabet = ''.join(text_arpabet)
text = text_arpabet
elif self.handle_arpabet != '':
raise Exception("{} handle_arpabet is not supported".format(
self.handle_arpabet))
text_encoded = self.text_to_sequence(text)
if return_all:
return text_encoded, text_clean, text_arpabet
return text_encoded
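# Illustrative usage (not in the original file; the cleaner name is an
# assumption and must exist in common/text/cleaners.py):
#   tp = TextProcessing('english_basic', ['english_cleaners_v2'], p_arpabet=0.0)
#   ids = tp.encode_text('Hello, world!')
#   tp.sequence_to_text(ids)   # -> a cleaned, symbol-by-symbol rendering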
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/text_processing.py |
from .cmudict import CMUDict
cmudict = CMUDict()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/__init__.py |
""" adapted from https://github.com/keithito/tacotron """
import inflect
import re
_magnitudes = ['trillion', 'billion', 'million', 'thousand', 'hundred', 'm', 'b', 't']
_magnitudes_key = {'m': 'million', 'b': 'billion', 't': 'trillion'}
_measurements = '(f|c|k|d|m)'
_measurements_key = {'f': 'fahrenheit',
'c': 'celsius',
'k': 'thousand',
'm': 'meters'}
_currency_key = {'$': 'dollar', '£': 'pound', '€': 'euro', '₩': 'won'}
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_currency_re = re.compile(r'([\$€£₩])([0-9\.\,]*[0-9]+)(?:[ ]?({})(?=[^a-zA-Z]|$))?'.format("|".join(_magnitudes)), re.IGNORECASE)
_measurement_re = re.compile(r'([0-9\.\,]*[0-9]+(\s)?{}\b)'.format(_measurements), re.IGNORECASE)
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
# _range_re = re.compile(r'(?<=[0-9])+(-)(?=[0-9])+.*?')
_roman_re = re.compile(r'\b(?=[MDCLXVI]+\b)M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{2,3})\b') # avoid I
_multiply_re = re.compile(r'(\b[0-9]+)(x)([0-9]+)')
_number_re = re.compile(r"[0-9]+'s|[0-9]+s|[0-9]+")
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_currency(m):
currency = _currency_key[m.group(1)]
quantity = m.group(2)
magnitude = m.group(3)
# remove commas from quantity to be able to convert to numerical
quantity = quantity.replace(',', '')
# check for million, billion, etc...
if magnitude is not None and magnitude.lower() in _magnitudes:
if len(magnitude) == 1:
magnitude = _magnitudes_key[magnitude.lower()]
return "{} {} {}".format(_expand_hundreds(quantity), magnitude, currency+'s')
parts = quantity.split('.')
if len(parts) > 2:
return quantity + " " + currency + "s" # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = currency if dollars == 1 else currency+'s'
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}, {} {}".format(
_expand_hundreds(dollars), dollar_unit,
_inflect.number_to_words(cents), cent_unit)
elif dollars:
dollar_unit = currency if dollars == 1 else currency+'s'
return "{} {}".format(_expand_hundreds(dollars), dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return "{} {}".format(_inflect.number_to_words(cents), cent_unit)
else:
return 'zero' + ' ' + currency + 's'
def _expand_hundreds(text):
number = float(text)
if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
return _inflect.number_to_words(int(number / 100)) + " hundred"
else:
return _inflect.number_to_words(text)
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_measurement(m):
_, number, measurement = re.split(r'(\d+(?:\.\d+)?)', m.group(0))
number = _inflect.number_to_words(number)
measurement = "".join(measurement.split())
measurement = _measurements_key[measurement.lower()]
return "{} {}".format(number, measurement)
def _expand_range(m):
return ' to '
def _expand_multiply(m):
left = m.group(1)
right = m.group(3)
return "{} by {}".format(left, right)
def _expand_roman(m):
# from https://stackoverflow.com/questions/19308177/converting-roman-numerals-to-integers-in-python
roman_numerals = {'I':1, 'V':5, 'X':10, 'L':50, 'C':100, 'D':500, 'M':1000}
result = 0
num = m.group(0)
for i, c in enumerate(num):
if (i+1) == len(num) or roman_numerals[c] >= roman_numerals[num[i+1]]:
result += roman_numerals[c]
else:
result -= roman_numerals[c]
return str(result)
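# Worked example for the subtractive rule above (illustrative): for 'XIV' the
# loop adds 10 for 'X' (10 >= 1), subtracts 1 for 'I' (it precedes the larger
# 'V'), then adds the final 5, so _expand_roman returns '14'.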
def _expand_number(m):
_, number, suffix = re.split(r"(\d+(?:'?\d+)?)", m.group(0))
number = int(number)
if 1000 < number < 10000 and (number % 100 == 0) and (number % 1000 != 0):
text = _inflect.number_to_words(number // 100) + " hundred"
elif number > 1000 and number < 3000:
if number == 2000:
text = 'two thousand'
elif number > 2000 and number < 2010:
text = 'two thousand ' + _inflect.number_to_words(number % 100)
elif number % 100 == 0:
text = _inflect.number_to_words(number // 100) + ' hundred'
else:
number = _inflect.number_to_words(number, andword='', zero='oh', group=2).replace(', ', ' ')
number = re.sub(r'-', ' ', number)
text = number
else:
number = _inflect.number_to_words(number, andword='and')
number = re.sub(r'-', ' ', number)
number = re.sub(r',', '', number)
text = number
if suffix in ("'s", "s"):
if text[-1] == 'y':
text = text[:-1] + 'ies'
else:
text = text + suffix
return text
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_currency_re, _expand_currency, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
# text = re.sub(_range_re, _expand_range, text)
# text = re.sub(_measurement_re, _expand_measurement, text)
text = re.sub(_roman_re, _expand_roman, text)
text = re.sub(_multiply_re, _expand_multiply, text)
text = re.sub(_number_re, _expand_number, text)
return text
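# Minimal usage sketch (not part of the original module). The exact wording
# depends on the installed `inflect` version, so the output shown below is
# indicative rather than guaranteed:
#
# >>> normalize_numbers('The company raised $2.5 million in 2019.')
# 'The company raised two point five million dollars in twenty nineteen.'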
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/numerical.py |
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details. '''
from .cmudict import valid_symbols
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in valid_symbols]
def get_symbols(symbol_set='english_basic'):
if symbol_set == 'english_basic':
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_basic_lowercase':
_pad = '_'
_punctuation = '!\'"(),.:;? '
_special = '-'
_letters = 'abcdefghijklmnopqrstuvwxyz'
symbols = list(_pad + _special + _punctuation + _letters) + _arpabet
elif symbol_set == 'english_expanded':
_punctuation = '!\'",.:;? '
_math = '#%&*+-/[]()'
_special = '_@©°½—₩€$'
_accented = 'áçéêëñöøćž'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
symbols = list(_punctuation + _math + _special + _accented + _letters) + _arpabet
else:
raise Exception("{} symbol set does not exist".format(symbol_set))
return symbols
def get_pad_idx(symbol_set='english_basic'):
if symbol_set in {'english_basic', 'english_basic_lowercase'}:
return 0
else:
raise Exception("{} symbol set not used yet".format(symbol_set))
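# Minimal sketch of how these helpers are typically combined by the text
# front-end elsewhere in this repo; `symbol_to_id` below is illustrative and
# not part of this module:
#
# >>> symbols = get_symbols('english_basic')
# >>> symbol_to_id = {s: i for i, s in enumerate(symbols)}
# >>> get_pad_idx('english_basic')  # index of the '_' padding symbol
# 0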
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/symbols.py |
""" adapted from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from .abbreviations import normalize_abbreviations
from .acronyms import normalize_acronyms, spell_acronyms
from .datestime import normalize_datestime
from .letters_and_numbers import normalize_letters_and_numbers
from .numerical import normalize_numbers
from .unidecoder import unidecoder
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
def expand_abbreviations(text):
return normalize_abbreviations(text)
def expand_numbers(text):
return normalize_numbers(text)
def expand_acronyms(text):
return normalize_acronyms(text)
def expand_datestime(text):
return normalize_datestime(text)
def expand_letters_and_numbers(text):
return normalize_letters_and_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def separate_acronyms(text):
text = re.sub(r"([0-9]+)([a-zA-Z]+)", r"\1 \2", text)
text = re.sub(r"([a-zA-Z]+)([0-9]+)", r"\1 \2", text)
return text
def convert_to_ascii(text):
return unidecoder(text)
def basic_cleaners(text):
'''Basic pipeline that collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, with number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
def english_cleaners_v2(text):
text = convert_to_ascii(text)
text = expand_datestime(text)
text = expand_letters_and_numbers(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = spell_acronyms(text)
text = lowercase(text)
text = collapse_whitespace(text)
# compatibility with basic_english symbol set
text = re.sub(r'/+', ' ', text)
return text
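# The module docstring above describes selecting cleaners by a comma-delimited
# list of names. The helper below is an illustrative sketch of that convention
# and is not part of the original file; it simply resolves each name to one of
# the functions defined above and applies them in order.
def apply_cleaners_by_name(text, cleaner_names='english_cleaners_v2'):
    # e.g. cleaner_names='convert_to_ascii,lowercase,collapse_whitespace'
    for name in cleaner_names.split(','):
        cleaner = globals().get(name.strip())
        if cleaner is None:
            raise ValueError('Unknown cleaner: {}'.format(name))
        text = cleaner(text)
    return text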
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/cleaners.py |
import re
_ampm_re = re.compile(
r'([0-9]|0[0-9]|1[0-9]|2[0-3]):?([0-5][0-9])?\s*([AaPp][Mm]\b)')
def _expand_ampm(m):
matches = list(m.groups(0))
txt = matches[0]
txt = txt if int(matches[1]) == 0 else txt + ' ' + matches[1]
if matches[2][0].lower() == 'a':
txt += ' a.m.'
elif matches[2][0].lower() == 'p':
txt += ' p.m.'
return txt
def normalize_datestime(text):
text = re.sub(_ampm_re, _expand_ampm, text)
#text = re.sub(r"([0-9]|0[0-9]|1[0-9]|2[0-3]):([0-5][0-9])?", r"\1 \2", text)
return text
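# Illustrative example: the regex above drops the colon and spells out the
# meridiem, so
#
# >>> normalize_datestime('Meet me at 5:30 PM')
# 'Meet me at 5 30 p.m.'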
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/datestime.py |
import re
from . import cmudict
_letter_to_arpabet = {
'A': 'EY1',
'B': 'B IY1',
'C': 'S IY1',
'D': 'D IY1',
'E': 'IY1',
'F': 'EH1 F',
'G': 'JH IY1',
'H': 'EY1 CH',
'I': 'AY1',
'J': 'JH EY1',
'K': 'K EY1',
'L': 'EH1 L',
'M': 'EH1 M',
'N': 'EH1 N',
'O': 'OW1',
'P': 'P IY1',
'Q': 'K Y UW1',
'R': 'AA1 R',
'S': 'EH1 S',
'T': 'T IY1',
'U': 'Y UW1',
'V': 'V IY1',
'X': 'EH1 K S',
'Y': 'W AY1',
'W': 'D AH1 B AH0 L Y UW0',
'Z': 'Z IY1',
's': 'Z'
}
# Acronyms that should not be expanded
hardcoded_acronyms = [
'BMW', 'MVD', 'WDSU', 'GOP', 'UK', 'AI', 'GPS', 'BP', 'FBI', 'HD',
'CES', 'LRA', 'PC', 'NBA', 'BBL', 'OS', 'IRS', 'SAC', 'UV', 'CEO', 'TV',
'CNN', 'MSS', 'GSA', 'USSR', 'DNA', 'PRS', 'TSA', 'US', 'GPU', 'USA',
'FPCC', 'CIA']
# Words and acronyms that should be read as regular words, e.g., NATO, HAPPY, etc.
uppercase_whitelist = []
acronyms_exceptions = {
'NVIDIA': 'N.VIDIA',
}
non_uppercase_exceptions = {
'email': 'e-mail',
}
# must ignore roman numerals
_acronym_re = re.compile(r'([a-z]*[A-Z][A-Z]+)s?\.?')
_non_uppercase_re = re.compile(r'\b({})\b'.format('|'.join(non_uppercase_exceptions.keys())), re.IGNORECASE)
def _expand_acronyms_to_arpa(m, add_spaces=True):
acronym = m.group(0)
# remove dots if they exist
acronym = re.sub(r'\.', '', acronym)
acronym = "".join(acronym.split())
arpabet = cmudict.lookup(acronym)
if arpabet is None:
acronym = list(acronym)
arpabet = ["{" + _letter_to_arpabet[letter] + "}" for letter in acronym]
# temporary fix
if arpabet[-1] == '{Z}' and len(arpabet) > 1:
arpabet[-2] = arpabet[-2][:-1] + ' ' + arpabet[-1][1:]
del arpabet[-1]
arpabet = ' '.join(arpabet)
elif len(arpabet) == 1:
arpabet = "{" + arpabet[0] + "}"
else:
arpabet = acronym
return arpabet
def normalize_acronyms(text):
text = re.sub(_acronym_re, _expand_acronyms_to_arpa, text)
return text
def expand_acronyms(m):
text = m.group(1)
if text in acronyms_exceptions:
text = acronyms_exceptions[text]
elif text in uppercase_whitelist:
text = text
else:
text = '.'.join(text) + '.'
if 's' in m.group(0):
text = text + '\'s'
if text[-1] != '.' and m.group(0)[-1] == '.':
return text + '.'
else:
return text
def spell_acronyms(text):
text = re.sub(_non_uppercase_re, lambda m: non_uppercase_exceptions[m.group(0).lower()], text)
text = re.sub(_acronym_re, expand_acronyms, text)
return text
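# Illustrative example (the exact behaviour depends on the tables above):
#
# >>> spell_acronyms('Send an email to the CIA and NVIDIA.')
# 'Send an e-mail to the C.I.A. and N.VIDIA.'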
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/acronyms.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) Sindre Sorhus <[email protected]> (https://sindresorhus.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/sindresorhus/transliterate/blob/main/replacements.js
#
replacements = [
# German umlauts
['ß', 'ss'],
['ẞ', 'Ss'],
['ä', 'ae'],
['Ä', 'Ae'],
['ö', 'oe'],
['Ö', 'Oe'],
['ü', 'ue'],
['Ü', 'Ue'],
# Latin
['À', 'A'],
['Á', 'A'],
['Â', 'A'],
['Ã', 'A'],
['Ä', 'Ae'],
['Å', 'A'],
['Æ', 'AE'],
['Ç', 'C'],
['È', 'E'],
['É', 'E'],
['Ê', 'E'],
['Ë', 'E'],
['Ì', 'I'],
['Í', 'I'],
['Î', 'I'],
['Ï', 'I'],
['Ð', 'D'],
['Ñ', 'N'],
['Ò', 'O'],
['Ó', 'O'],
['Ô', 'O'],
['Õ', 'O'],
['Ö', 'Oe'],
['Ő', 'O'],
['Ø', 'O'],
['Ù', 'U'],
['Ú', 'U'],
['Û', 'U'],
['Ü', 'Ue'],
['Ű', 'U'],
['Ý', 'Y'],
['Þ', 'TH'],
['ß', 'ss'],
['à', 'a'],
['á', 'a'],
['â', 'a'],
['ã', 'a'],
['ä', 'ae'],
['å', 'a'],
['æ', 'ae'],
['ç', 'c'],
['è', 'e'],
['é', 'e'],
['ê', 'e'],
['ë', 'e'],
['ì', 'i'],
['í', 'i'],
['î', 'i'],
['ï', 'i'],
['ð', 'd'],
['ñ', 'n'],
['ò', 'o'],
['ó', 'o'],
['ô', 'o'],
['õ', 'o'],
['ö', 'oe'],
['ő', 'o'],
['ø', 'o'],
['ù', 'u'],
['ú', 'u'],
['û', 'u'],
['ü', 'ue'],
['ű', 'u'],
['ý', 'y'],
['þ', 'th'],
['ÿ', 'y'],
['ẞ', 'SS'],
# Vietnamese
['à', 'a'],
['À', 'A'],
['á', 'a'],
['Á', 'A'],
['â', 'a'],
['Â', 'A'],
['ã', 'a'],
['Ã', 'A'],
['è', 'e'],
['È', 'E'],
['é', 'e'],
['É', 'E'],
['ê', 'e'],
['Ê', 'E'],
['ì', 'i'],
['Ì', 'I'],
['í', 'i'],
['Í', 'I'],
['ò', 'o'],
['Ò', 'O'],
['ó', 'o'],
['Ó', 'O'],
['ô', 'o'],
['Ô', 'O'],
['õ', 'o'],
['Õ', 'O'],
['ù', 'u'],
['Ù', 'U'],
['ú', 'u'],
['Ú', 'U'],
['ý', 'y'],
['Ý', 'Y'],
['ă', 'a'],
['Ă', 'A'],
['Đ', 'D'],
['đ', 'd'],
['ĩ', 'i'],
['Ĩ', 'I'],
['ũ', 'u'],
['Ũ', 'U'],
['ơ', 'o'],
['Ơ', 'O'],
['ư', 'u'],
['Ư', 'U'],
['ạ', 'a'],
['Ạ', 'A'],
['ả', 'a'],
['Ả', 'A'],
['ấ', 'a'],
['Ấ', 'A'],
['ầ', 'a'],
['Ầ', 'A'],
['ẩ', 'a'],
['Ẩ', 'A'],
['ẫ', 'a'],
['Ẫ', 'A'],
['ậ', 'a'],
['Ậ', 'A'],
['ắ', 'a'],
['Ắ', 'A'],
['ằ', 'a'],
['Ằ', 'A'],
['ẳ', 'a'],
['Ẳ', 'A'],
['ẵ', 'a'],
['Ẵ', 'A'],
['ặ', 'a'],
['Ặ', 'A'],
['ẹ', 'e'],
['Ẹ', 'E'],
['ẻ', 'e'],
['Ẻ', 'E'],
['ẽ', 'e'],
['Ẽ', 'E'],
['ế', 'e'],
['Ế', 'E'],
['ề', 'e'],
['Ề', 'E'],
['ể', 'e'],
['Ể', 'E'],
['ễ', 'e'],
['Ễ', 'E'],
['ệ', 'e'],
['Ệ', 'E'],
['ỉ', 'i'],
['Ỉ', 'I'],
['ị', 'i'],
['Ị', 'I'],
['ọ', 'o'],
['Ọ', 'O'],
['ỏ', 'o'],
['Ỏ', 'O'],
['ố', 'o'],
['Ố', 'O'],
['ồ', 'o'],
['Ồ', 'O'],
['ổ', 'o'],
['Ổ', 'O'],
['ỗ', 'o'],
['Ỗ', 'O'],
['ộ', 'o'],
['Ộ', 'O'],
['ớ', 'o'],
['Ớ', 'O'],
['ờ', 'o'],
['Ờ', 'O'],
['ở', 'o'],
['Ở', 'O'],
['ỡ', 'o'],
['Ỡ', 'O'],
['ợ', 'o'],
['Ợ', 'O'],
['ụ', 'u'],
['Ụ', 'U'],
['ủ', 'u'],
['Ủ', 'U'],
['ứ', 'u'],
['Ứ', 'U'],
['ừ', 'u'],
['Ừ', 'U'],
['ử', 'u'],
['Ử', 'U'],
['ữ', 'u'],
['Ữ', 'U'],
['ự', 'u'],
['Ự', 'U'],
['ỳ', 'y'],
['Ỳ', 'Y'],
['ỵ', 'y'],
['Ỵ', 'Y'],
['ỷ', 'y'],
['Ỷ', 'Y'],
['ỹ', 'y'],
['Ỹ', 'Y'],
# Arabic
['ء', 'e'],
['آ', 'a'],
['أ', 'a'],
['ؤ', 'w'],
['إ', 'i'],
['ئ', 'y'],
['ا', 'a'],
['ب', 'b'],
['ة', 't'],
['ت', 't'],
['ث', 'th'],
['ج', 'j'],
['ح', 'h'],
['خ', 'kh'],
['د', 'd'],
['ذ', 'dh'],
['ر', 'r'],
['ز', 'z'],
['س', 's'],
['ش', 'sh'],
['ص', 's'],
['ض', 'd'],
['ط', 't'],
['ظ', 'z'],
['ع', 'e'],
['غ', 'gh'],
['ـ', '_'],
['ف', 'f'],
['ق', 'q'],
['ك', 'k'],
['ل', 'l'],
['م', 'm'],
['ن', 'n'],
['ه', 'h'],
['و', 'w'],
['ى', 'a'],
['ي', 'y'],
['َ', 'a'],
['ُ', 'u'],
['ِ', 'i'],
['٠', '0'],
['١', '1'],
['٢', '2'],
['٣', '3'],
['٤', '4'],
['٥', '5'],
['٦', '6'],
['٧', '7'],
['٨', '8'],
['٩', '9'],
# Persian / Farsi
['چ', 'ch'],
['ک', 'k'],
['گ', 'g'],
['پ', 'p'],
['ژ', 'zh'],
['ی', 'y'],
['۰', '0'],
['۱', '1'],
['۲', '2'],
['۳', '3'],
['۴', '4'],
['۵', '5'],
['۶', '6'],
['۷', '7'],
['۸', '8'],
['۹', '9'],
# Pashto
['ټ', 'p'],
['ځ', 'z'],
['څ', 'c'],
['ډ', 'd'],
['ﺫ', 'd'],
['ﺭ', 'r'],
['ړ', 'r'],
['ﺯ', 'z'],
['ږ', 'g'],
['ښ', 'x'],
['ګ', 'g'],
['ڼ', 'n'],
['ۀ', 'e'],
['ې', 'e'],
['ۍ', 'ai'],
# Urdu
['ٹ', 't'],
['ڈ', 'd'],
['ڑ', 'r'],
['ں', 'n'],
['ہ', 'h'],
['ھ', 'h'],
['ے', 'e'],
# Russian
['А', 'A'],
['а', 'a'],
['Б', 'B'],
['б', 'b'],
['В', 'V'],
['в', 'v'],
['Г', 'G'],
['г', 'g'],
['Д', 'D'],
['д', 'd'],
['ъе', 'ye'],
['Ъе', 'Ye'],
['ъЕ', 'yE'],
['ЪЕ', 'YE'],
['Е', 'E'],
['е', 'e'],
['Ё', 'Yo'],
['ё', 'yo'],
['Ж', 'Zh'],
['ж', 'zh'],
['З', 'Z'],
['з', 'z'],
['И', 'I'],
['и', 'i'],
['ый', 'iy'],
['Ый', 'Iy'],
['ЫЙ', 'IY'],
['ыЙ', 'iY'],
['Й', 'Y'],
['й', 'y'],
['К', 'K'],
['к', 'k'],
['Л', 'L'],
['л', 'l'],
['М', 'M'],
['м', 'm'],
['Н', 'N'],
['н', 'n'],
['О', 'O'],
['о', 'o'],
['П', 'P'],
['п', 'p'],
['Р', 'R'],
['р', 'r'],
['С', 'S'],
['с', 's'],
['Т', 'T'],
['т', 't'],
['У', 'U'],
['у', 'u'],
['Ф', 'F'],
['ф', 'f'],
['Х', 'Kh'],
['х', 'kh'],
['Ц', 'Ts'],
['ц', 'ts'],
['Ч', 'Ch'],
['ч', 'ch'],
['Ш', 'Sh'],
['ш', 'sh'],
['Щ', 'Sch'],
['щ', 'sch'],
['Ъ', ''],
['ъ', ''],
['Ы', 'Y'],
['ы', 'y'],
['Ь', ''],
['ь', ''],
['Э', 'E'],
['э', 'e'],
['Ю', 'Yu'],
['ю', 'yu'],
['Я', 'Ya'],
['я', 'ya'],
# Romanian
['ă', 'a'],
['Ă', 'A'],
['ș', 's'],
['Ș', 'S'],
['ț', 't'],
['Ț', 'T'],
['ţ', 't'],
['Ţ', 'T'],
# Turkish
['ş', 's'],
['Ş', 'S'],
['ç', 'c'],
['Ç', 'C'],
['ğ', 'g'],
['Ğ', 'G'],
['ı', 'i'],
['İ', 'I'],
# Armenian
['ա', 'a'],
['Ա', 'A'],
['բ', 'b'],
['Բ', 'B'],
['գ', 'g'],
['Գ', 'G'],
['դ', 'd'],
['Դ', 'D'],
['ե', 'ye'],
['Ե', 'Ye'],
['զ', 'z'],
['Զ', 'Z'],
['է', 'e'],
['Է', 'E'],
['ը', 'y'],
['Ը', 'Y'],
['թ', 't'],
['Թ', 'T'],
['ժ', 'zh'],
['Ժ', 'Zh'],
['ի', 'i'],
['Ի', 'I'],
['լ', 'l'],
['Լ', 'L'],
['խ', 'kh'],
['Խ', 'Kh'],
['ծ', 'ts'],
['Ծ', 'Ts'],
['կ', 'k'],
['Կ', 'K'],
['հ', 'h'],
['Հ', 'H'],
['ձ', 'dz'],
['Ձ', 'Dz'],
['ղ', 'gh'],
['Ղ', 'Gh'],
['ճ', 'tch'],
['Ճ', 'Tch'],
['մ', 'm'],
['Մ', 'M'],
['յ', 'y'],
['Յ', 'Y'],
['ն', 'n'],
['Ն', 'N'],
['շ', 'sh'],
['Շ', 'Sh'],
['ո', 'vo'],
['Ո', 'Vo'],
['չ', 'ch'],
['Չ', 'Ch'],
['պ', 'p'],
['Պ', 'P'],
['ջ', 'j'],
['Ջ', 'J'],
['ռ', 'r'],
['Ռ', 'R'],
['ս', 's'],
['Ս', 'S'],
['վ', 'v'],
['Վ', 'V'],
['տ', 't'],
['Տ', 'T'],
['ր', 'r'],
['Ր', 'R'],
['ց', 'c'],
['Ց', 'C'],
['ու', 'u'],
['ՈՒ', 'U'],
['Ու', 'U'],
['փ', 'p'],
['Փ', 'P'],
['ք', 'q'],
['Ք', 'Q'],
['օ', 'o'],
['Օ', 'O'],
['ֆ', 'f'],
['Ֆ', 'F'],
['և', 'yev'],
# Georgian
['ა', 'a'],
['ბ', 'b'],
['გ', 'g'],
['დ', 'd'],
['ე', 'e'],
['ვ', 'v'],
['ზ', 'z'],
['თ', 't'],
['ი', 'i'],
['კ', 'k'],
['ლ', 'l'],
['მ', 'm'],
['ნ', 'n'],
['ო', 'o'],
['პ', 'p'],
['ჟ', 'zh'],
['რ', 'r'],
['ს', 's'],
['ტ', 't'],
['უ', 'u'],
['ფ', 'ph'],
['ქ', 'q'],
['ღ', 'gh'],
['ყ', 'k'],
['შ', 'sh'],
['ჩ', 'ch'],
['ც', 'ts'],
['ძ', 'dz'],
['წ', 'ts'],
['ჭ', 'tch'],
['ხ', 'kh'],
['ჯ', 'j'],
['ჰ', 'h'],
# Czech
['č', 'c'],
['ď', 'd'],
['ě', 'e'],
['ň', 'n'],
['ř', 'r'],
['š', 's'],
['ť', 't'],
['ů', 'u'],
['ž', 'z'],
['Č', 'C'],
['Ď', 'D'],
['Ě', 'E'],
['Ň', 'N'],
['Ř', 'R'],
['Š', 'S'],
['Ť', 'T'],
['Ů', 'U'],
['Ž', 'Z'],
# Dhivehi
['ހ', 'h'],
['ށ', 'sh'],
['ނ', 'n'],
['ރ', 'r'],
['ބ', 'b'],
['ޅ', 'lh'],
['ކ', 'k'],
['އ', 'a'],
['ވ', 'v'],
['މ', 'm'],
['ފ', 'f'],
['ދ', 'dh'],
['ތ', 'th'],
['ލ', 'l'],
['ގ', 'g'],
['ޏ', 'gn'],
['ސ', 's'],
['ޑ', 'd'],
['ޒ', 'z'],
['ޓ', 't'],
['ޔ', 'y'],
['ޕ', 'p'],
['ޖ', 'j'],
['ޗ', 'ch'],
['ޘ', 'tt'],
['ޙ', 'hh'],
['ޚ', 'kh'],
['ޛ', 'th'],
['ޜ', 'z'],
['ޝ', 'sh'],
['ޞ', 's'],
['ޟ', 'd'],
['ޠ', 't'],
['ޡ', 'z'],
['ޢ', 'a'],
['ޣ', 'gh'],
['ޤ', 'q'],
['ޥ', 'w'],
['ަ', 'a'],
['ާ', 'aa'],
['ި', 'i'],
['ީ', 'ee'],
['ު', 'u'],
['ޫ', 'oo'],
['ެ', 'e'],
['ޭ', 'ey'],
['ޮ', 'o'],
['ޯ', 'oa'],
['ް', ''],
# Greek
['α', 'a'],
['β', 'v'],
['γ', 'g'],
['δ', 'd'],
['ε', 'e'],
['ζ', 'z'],
['η', 'i'],
['θ', 'th'],
['ι', 'i'],
['κ', 'k'],
['λ', 'l'],
['μ', 'm'],
['ν', 'n'],
['ξ', 'ks'],
['ο', 'o'],
['π', 'p'],
['ρ', 'r'],
['σ', 's'],
['τ', 't'],
['υ', 'y'],
['φ', 'f'],
['χ', 'x'],
['ψ', 'ps'],
['ω', 'o'],
['ά', 'a'],
['έ', 'e'],
['ί', 'i'],
['ό', 'o'],
['ύ', 'y'],
['ή', 'i'],
['ώ', 'o'],
['ς', 's'],
['ϊ', 'i'],
['ΰ', 'y'],
['ϋ', 'y'],
['ΐ', 'i'],
['Α', 'A'],
['Β', 'B'],
['Γ', 'G'],
['Δ', 'D'],
['Ε', 'E'],
['Ζ', 'Z'],
['Η', 'I'],
['Θ', 'TH'],
['Ι', 'I'],
['Κ', 'K'],
['Λ', 'L'],
['Μ', 'M'],
['Ν', 'N'],
['Ξ', 'KS'],
['Ο', 'O'],
['Π', 'P'],
['Ρ', 'R'],
['Σ', 'S'],
['Τ', 'T'],
['Υ', 'Y'],
['Φ', 'F'],
['Χ', 'X'],
['Ψ', 'PS'],
['Ω', 'O'],
['Ά', 'A'],
['Έ', 'E'],
['Ί', 'I'],
['Ό', 'O'],
['Ύ', 'Y'],
['Ή', 'I'],
['Ώ', 'O'],
['Ϊ', 'I'],
['Ϋ', 'Y'],
# Disabled as it conflicts with German and Latin.
# Hungarian
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ö', 'o'],
# ['Ö', 'O'],
# ['ü', 'u'],
# ['Ü', 'U'],
# ['ű', 'u'],
# ['Ű', 'U'],
# Latvian
['ā', 'a'],
['ē', 'e'],
['ģ', 'g'],
['ī', 'i'],
['ķ', 'k'],
['ļ', 'l'],
['ņ', 'n'],
['ū', 'u'],
['Ā', 'A'],
['Ē', 'E'],
['Ģ', 'G'],
['Ī', 'I'],
['Ķ', 'K'],
['Ļ', 'L'],
['Ņ', 'N'],
['Ū', 'U'],
['č', 'c'],
['š', 's'],
['ž', 'z'],
['Č', 'C'],
['Š', 'S'],
['Ž', 'Z'],
# Lithuanian
['ą', 'a'],
['č', 'c'],
['ę', 'e'],
['ė', 'e'],
['į', 'i'],
['š', 's'],
['ų', 'u'],
['ū', 'u'],
['ž', 'z'],
['Ą', 'A'],
['Č', 'C'],
['Ę', 'E'],
['Ė', 'E'],
['Į', 'I'],
['Š', 'S'],
['Ų', 'U'],
['Ū', 'U'],
# Macedonian
['Ќ', 'Kj'],
['ќ', 'kj'],
['Љ', 'Lj'],
['љ', 'lj'],
['Њ', 'Nj'],
['њ', 'nj'],
['Тс', 'Ts'],
['тс', 'ts'],
# Polish
['ą', 'a'],
['ć', 'c'],
['ę', 'e'],
['ł', 'l'],
['ń', 'n'],
['ś', 's'],
['ź', 'z'],
['ż', 'z'],
['Ą', 'A'],
['Ć', 'C'],
['Ę', 'E'],
['Ł', 'L'],
['Ń', 'N'],
['Ś', 'S'],
['Ź', 'Z'],
['Ż', 'Z'],
# Disabled as it conflicts with Vietnamese.
# Serbian
# ['љ', 'lj'],
# ['њ', 'nj'],
# ['Љ', 'Lj'],
# ['Њ', 'Nj'],
# ['đ', 'dj'],
# ['Đ', 'Dj'],
# ['ђ', 'dj'],
# ['ј', 'j'],
# ['ћ', 'c'],
# ['џ', 'dz'],
# ['Ђ', 'Dj'],
# ['Ј', 'j'],
# ['Ћ', 'C'],
# ['Џ', 'Dz'],
# Disabled as it conflicts with German and Latin.
# Slovak
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ľ', 'l'],
# ['ĺ', 'l'],
# ['ŕ', 'r'],
# ['Ľ', 'L'],
# ['Ĺ', 'L'],
# ['Ŕ', 'R'],
# Disabled as it conflicts with German and Latin.
# Swedish
# ['å', 'o'],
# ['Å', 'o'],
# ['ä', 'a'],
# ['Ä', 'A'],
# ['ë', 'e'],
# ['Ë', 'E'],
# ['ö', 'o'],
# ['Ö', 'O'],
# Ukrainian
['Є', 'Ye'],
['І', 'I'],
['Ї', 'Yi'],
['Ґ', 'G'],
['є', 'ye'],
['і', 'i'],
['ї', 'yi'],
['ґ', 'g'],
# Dutch
['IJ', 'IJ'],
['ij', 'ij'],
# Danish
# ['Æ', 'Ae'],
# ['Ø', 'Oe'],
# ['Å', 'Aa'],
# ['æ', 'ae'],
# ['ø', 'oe'],
# ['å', 'aa']
# Currencies
['¢', 'c'],
['¥', 'Y'],
['߿', 'b'],
['৳', 't'],
['૱', 'Bo'],
['฿', 'B'],
['₠', 'CE'],
['₡', 'C'],
['₢', 'Cr'],
['₣', 'F'],
['₥', 'm'],
['₦', 'N'],
['₧', 'Pt'],
['₨', 'Rs'],
['₩', 'W'],
['₫', 's'],
['€', 'E'],
['₭', 'K'],
['₮', 'T'],
['₯', 'Dp'],
['₰', 'S'],
['₱', 'P'],
['₲', 'G'],
['₳', 'A'],
['₴', 'S'],
['₵', 'C'],
['₶', 'tt'],
['₷', 'S'],
['₸', 'T'],
['₹', 'R'],
['₺', 'L'],
['₽', 'P'],
['₿', 'B'],
['﹩', '$'],
['¢', 'c'],
['¥', 'Y'],
['₩', 'W'],
# Latin
['𝐀', 'A'],
['𝐁', 'B'],
['𝐂', 'C'],
['𝐃', 'D'],
['𝐄', 'E'],
['𝐅', 'F'],
['𝐆', 'G'],
['𝐇', 'H'],
['𝐈', 'I'],
['𝐉', 'J'],
['𝐊', 'K'],
['𝐋', 'L'],
['𝐌', 'M'],
['𝐍', 'N'],
['𝐎', 'O'],
['𝐏', 'P'],
['𝐐', 'Q'],
['𝐑', 'R'],
['𝐒', 'S'],
['𝐓', 'T'],
['𝐔', 'U'],
['𝐕', 'V'],
['𝐖', 'W'],
['𝐗', 'X'],
['𝐘', 'Y'],
['𝐙', 'Z'],
['𝐚', 'a'],
['𝐛', 'b'],
['𝐜', 'c'],
['𝐝', 'd'],
['𝐞', 'e'],
['𝐟', 'f'],
['𝐠', 'g'],
['𝐡', 'h'],
['𝐢', 'i'],
['𝐣', 'j'],
['𝐤', 'k'],
['𝐥', 'l'],
['𝐦', 'm'],
['𝐧', 'n'],
['𝐨', 'o'],
['𝐩', 'p'],
['𝐪', 'q'],
['𝐫', 'r'],
['𝐬', 's'],
['𝐭', 't'],
['𝐮', 'u'],
['𝐯', 'v'],
['𝐰', 'w'],
['𝐱', 'x'],
['𝐲', 'y'],
['𝐳', 'z'],
['𝐴', 'A'],
['𝐵', 'B'],
['𝐶', 'C'],
['𝐷', 'D'],
['𝐸', 'E'],
['𝐹', 'F'],
['𝐺', 'G'],
['𝐻', 'H'],
['𝐼', 'I'],
['𝐽', 'J'],
['𝐾', 'K'],
['𝐿', 'L'],
['𝑀', 'M'],
['𝑁', 'N'],
['𝑂', 'O'],
['𝑃', 'P'],
['𝑄', 'Q'],
['𝑅', 'R'],
['𝑆', 'S'],
['𝑇', 'T'],
['𝑈', 'U'],
['𝑉', 'V'],
['𝑊', 'W'],
['𝑋', 'X'],
['𝑌', 'Y'],
['𝑍', 'Z'],
['𝑎', 'a'],
['𝑏', 'b'],
['𝑐', 'c'],
['𝑑', 'd'],
['𝑒', 'e'],
['𝑓', 'f'],
['𝑔', 'g'],
['𝑖', 'i'],
['𝑗', 'j'],
['𝑘', 'k'],
['𝑙', 'l'],
['𝑚', 'm'],
['𝑛', 'n'],
['𝑜', 'o'],
['𝑝', 'p'],
['𝑞', 'q'],
['𝑟', 'r'],
['𝑠', 's'],
['𝑡', 't'],
['𝑢', 'u'],
['𝑣', 'v'],
['𝑤', 'w'],
['𝑥', 'x'],
['𝑦', 'y'],
['𝑧', 'z'],
['𝑨', 'A'],
['𝑩', 'B'],
['𝑪', 'C'],
['𝑫', 'D'],
['𝑬', 'E'],
['𝑭', 'F'],
['𝑮', 'G'],
['𝑯', 'H'],
['𝑰', 'I'],
['𝑱', 'J'],
['𝑲', 'K'],
['𝑳', 'L'],
['𝑴', 'M'],
['𝑵', 'N'],
['𝑶', 'O'],
['𝑷', 'P'],
['𝑸', 'Q'],
['𝑹', 'R'],
['𝑺', 'S'],
['𝑻', 'T'],
['𝑼', 'U'],
['𝑽', 'V'],
['𝑾', 'W'],
['𝑿', 'X'],
['𝒀', 'Y'],
['𝒁', 'Z'],
['𝒂', 'a'],
['𝒃', 'b'],
['𝒄', 'c'],
['𝒅', 'd'],
['𝒆', 'e'],
['𝒇', 'f'],
['𝒈', 'g'],
['𝒉', 'h'],
['𝒊', 'i'],
['𝒋', 'j'],
['𝒌', 'k'],
['𝒍', 'l'],
['𝒎', 'm'],
['𝒏', 'n'],
['𝒐', 'o'],
['𝒑', 'p'],
['𝒒', 'q'],
['𝒓', 'r'],
['𝒔', 's'],
['𝒕', 't'],
['𝒖', 'u'],
['𝒗', 'v'],
['𝒘', 'w'],
['𝒙', 'x'],
['𝒚', 'y'],
['𝒛', 'z'],
['𝒜', 'A'],
['𝒞', 'C'],
['𝒟', 'D'],
['𝒢', 'G'],
['𝒥', 'J'],
['𝒦', 'K'],
['𝒩', 'N'],
['𝒪', 'O'],
['𝒫', 'P'],
['𝒬', 'Q'],
['𝒮', 'S'],
['𝒯', 'T'],
['𝒰', 'U'],
['𝒱', 'V'],
['𝒲', 'W'],
['𝒳', 'X'],
['𝒴', 'Y'],
['𝒵', 'Z'],
['𝒶', 'a'],
['𝒷', 'b'],
['𝒸', 'c'],
['𝒹', 'd'],
['𝒻', 'f'],
['𝒽', 'h'],
['𝒾', 'i'],
['𝒿', 'j'],
['𝓀', 'k'],
['𝓁', 'l'],
['𝓂', 'm'],
['𝓃', 'n'],
['𝓅', 'p'],
['𝓆', 'q'],
['𝓇', 'r'],
['𝓈', 's'],
['𝓉', 't'],
['𝓊', 'u'],
['𝓋', 'v'],
['𝓌', 'w'],
['𝓍', 'x'],
['𝓎', 'y'],
['𝓏', 'z'],
['𝓐', 'A'],
['𝓑', 'B'],
['𝓒', 'C'],
['𝓓', 'D'],
['𝓔', 'E'],
['𝓕', 'F'],
['𝓖', 'G'],
['𝓗', 'H'],
['𝓘', 'I'],
['𝓙', 'J'],
['𝓚', 'K'],
['𝓛', 'L'],
['𝓜', 'M'],
['𝓝', 'N'],
['𝓞', 'O'],
['𝓟', 'P'],
['𝓠', 'Q'],
['𝓡', 'R'],
['𝓢', 'S'],
['𝓣', 'T'],
['𝓤', 'U'],
['𝓥', 'V'],
['𝓦', 'W'],
['𝓧', 'X'],
['𝓨', 'Y'],
['𝓩', 'Z'],
['𝓪', 'a'],
['𝓫', 'b'],
['𝓬', 'c'],
['𝓭', 'd'],
['𝓮', 'e'],
['𝓯', 'f'],
['𝓰', 'g'],
['𝓱', 'h'],
['𝓲', 'i'],
['𝓳', 'j'],
['𝓴', 'k'],
['𝓵', 'l'],
['𝓶', 'm'],
['𝓷', 'n'],
['𝓸', 'o'],
['𝓹', 'p'],
['𝓺', 'q'],
['𝓻', 'r'],
['𝓼', 's'],
['𝓽', 't'],
['𝓾', 'u'],
['𝓿', 'v'],
['𝔀', 'w'],
['𝔁', 'x'],
['𝔂', 'y'],
['𝔃', 'z'],
['𝔄', 'A'],
['𝔅', 'B'],
['𝔇', 'D'],
['𝔈', 'E'],
['𝔉', 'F'],
['𝔊', 'G'],
['𝔍', 'J'],
['𝔎', 'K'],
['𝔏', 'L'],
['𝔐', 'M'],
['𝔑', 'N'],
['𝔒', 'O'],
['𝔓', 'P'],
['𝔔', 'Q'],
['𝔖', 'S'],
['𝔗', 'T'],
['𝔘', 'U'],
['𝔙', 'V'],
['𝔚', 'W'],
['𝔛', 'X'],
['𝔜', 'Y'],
['𝔞', 'a'],
['𝔟', 'b'],
['𝔠', 'c'],
['𝔡', 'd'],
['𝔢', 'e'],
['𝔣', 'f'],
['𝔤', 'g'],
['𝔥', 'h'],
['𝔦', 'i'],
['𝔧', 'j'],
['𝔨', 'k'],
['𝔩', 'l'],
['𝔪', 'm'],
['𝔫', 'n'],
['𝔬', 'o'],
['𝔭', 'p'],
['𝔮', 'q'],
['𝔯', 'r'],
['𝔰', 's'],
['𝔱', 't'],
['𝔲', 'u'],
['𝔳', 'v'],
['𝔴', 'w'],
['𝔵', 'x'],
['𝔶', 'y'],
['𝔷', 'z'],
['𝔸', 'A'],
['𝔹', 'B'],
['𝔻', 'D'],
['𝔼', 'E'],
['𝔽', 'F'],
['𝔾', 'G'],
['𝕀', 'I'],
['𝕁', 'J'],
['𝕂', 'K'],
['𝕃', 'L'],
['𝕄', 'M'],
['𝕆', 'O'],
['𝕊', 'S'],
['𝕋', 'T'],
['𝕌', 'U'],
['𝕍', 'V'],
['𝕎', 'W'],
['𝕏', 'X'],
['𝕐', 'Y'],
['𝕒', 'a'],
['𝕓', 'b'],
['𝕔', 'c'],
['𝕕', 'd'],
['𝕖', 'e'],
['𝕗', 'f'],
['𝕘', 'g'],
['𝕙', 'h'],
['𝕚', 'i'],
['𝕛', 'j'],
['𝕜', 'k'],
['𝕝', 'l'],
['𝕞', 'm'],
['𝕟', 'n'],
['𝕠', 'o'],
['𝕡', 'p'],
['𝕢', 'q'],
['𝕣', 'r'],
['𝕤', 's'],
['𝕥', 't'],
['𝕦', 'u'],
['𝕧', 'v'],
['𝕨', 'w'],
['𝕩', 'x'],
['𝕪', 'y'],
['𝕫', 'z'],
['𝕬', 'A'],
['𝕭', 'B'],
['𝕮', 'C'],
['𝕯', 'D'],
['𝕰', 'E'],
['𝕱', 'F'],
['𝕲', 'G'],
['𝕳', 'H'],
['𝕴', 'I'],
['𝕵', 'J'],
['𝕶', 'K'],
['𝕷', 'L'],
['𝕸', 'M'],
['𝕹', 'N'],
['𝕺', 'O'],
['𝕻', 'P'],
['𝕼', 'Q'],
['𝕽', 'R'],
['𝕾', 'S'],
['𝕿', 'T'],
['𝖀', 'U'],
['𝖁', 'V'],
['𝖂', 'W'],
['𝖃', 'X'],
['𝖄', 'Y'],
['𝖅', 'Z'],
['𝖆', 'a'],
['𝖇', 'b'],
['𝖈', 'c'],
['𝖉', 'd'],
['𝖊', 'e'],
['𝖋', 'f'],
['𝖌', 'g'],
['𝖍', 'h'],
['𝖎', 'i'],
['𝖏', 'j'],
['𝖐', 'k'],
['𝖑', 'l'],
['𝖒', 'm'],
['𝖓', 'n'],
['𝖔', 'o'],
['𝖕', 'p'],
['𝖖', 'q'],
['𝖗', 'r'],
['𝖘', 's'],
['𝖙', 't'],
['𝖚', 'u'],
['𝖛', 'v'],
['𝖜', 'w'],
['𝖝', 'x'],
['𝖞', 'y'],
['𝖟', 'z'],
['𝖠', 'A'],
['𝖡', 'B'],
['𝖢', 'C'],
['𝖣', 'D'],
['𝖤', 'E'],
['𝖥', 'F'],
['𝖦', 'G'],
['𝖧', 'H'],
['𝖨', 'I'],
['𝖩', 'J'],
['𝖪', 'K'],
['𝖫', 'L'],
['𝖬', 'M'],
['𝖭', 'N'],
['𝖮', 'O'],
['𝖯', 'P'],
['𝖰', 'Q'],
['𝖱', 'R'],
['𝖲', 'S'],
['𝖳', 'T'],
['𝖴', 'U'],
['𝖵', 'V'],
['𝖶', 'W'],
['𝖷', 'X'],
['𝖸', 'Y'],
['𝖹', 'Z'],
['𝖺', 'a'],
['𝖻', 'b'],
['𝖼', 'c'],
['𝖽', 'd'],
['𝖾', 'e'],
['𝖿', 'f'],
['𝗀', 'g'],
['𝗁', 'h'],
['𝗂', 'i'],
['𝗃', 'j'],
['𝗄', 'k'],
['𝗅', 'l'],
['𝗆', 'm'],
['𝗇', 'n'],
['𝗈', 'o'],
['𝗉', 'p'],
['𝗊', 'q'],
['𝗋', 'r'],
['𝗌', 's'],
['𝗍', 't'],
['𝗎', 'u'],
['𝗏', 'v'],
['𝗐', 'w'],
['𝗑', 'x'],
['𝗒', 'y'],
['𝗓', 'z'],
['𝗔', 'A'],
['𝗕', 'B'],
['𝗖', 'C'],
['𝗗', 'D'],
['𝗘', 'E'],
['𝗙', 'F'],
['𝗚', 'G'],
['𝗛', 'H'],
['𝗜', 'I'],
['𝗝', 'J'],
['𝗞', 'K'],
['𝗟', 'L'],
['𝗠', 'M'],
['𝗡', 'N'],
['𝗢', 'O'],
['𝗣', 'P'],
['𝗤', 'Q'],
['𝗥', 'R'],
['𝗦', 'S'],
['𝗧', 'T'],
['𝗨', 'U'],
['𝗩', 'V'],
['𝗪', 'W'],
['𝗫', 'X'],
['𝗬', 'Y'],
['𝗭', 'Z'],
['𝗮', 'a'],
['𝗯', 'b'],
['𝗰', 'c'],
['𝗱', 'd'],
['𝗲', 'e'],
['𝗳', 'f'],
['𝗴', 'g'],
['𝗵', 'h'],
['𝗶', 'i'],
['𝗷', 'j'],
['𝗸', 'k'],
['𝗹', 'l'],
['𝗺', 'm'],
['𝗻', 'n'],
['𝗼', 'o'],
['𝗽', 'p'],
['𝗾', 'q'],
['𝗿', 'r'],
['𝘀', 's'],
['𝘁', 't'],
['𝘂', 'u'],
['𝘃', 'v'],
['𝘄', 'w'],
['𝘅', 'x'],
['𝘆', 'y'],
['𝘇', 'z'],
['𝘈', 'A'],
['𝘉', 'B'],
['𝘊', 'C'],
['𝘋', 'D'],
['𝘌', 'E'],
['𝘍', 'F'],
['𝘎', 'G'],
['𝘏', 'H'],
['𝘐', 'I'],
['𝘑', 'J'],
['𝘒', 'K'],
['𝘓', 'L'],
['𝘔', 'M'],
['𝘕', 'N'],
['𝘖', 'O'],
['𝘗', 'P'],
['𝘘', 'Q'],
['𝘙', 'R'],
['𝘚', 'S'],
['𝘛', 'T'],
['𝘜', 'U'],
['𝘝', 'V'],
['𝘞', 'W'],
['𝘟', 'X'],
['𝘠', 'Y'],
['𝘡', 'Z'],
['𝘢', 'a'],
['𝘣', 'b'],
['𝘤', 'c'],
['𝘥', 'd'],
['𝘦', 'e'],
['𝘧', 'f'],
['𝘨', 'g'],
['𝘩', 'h'],
['𝘪', 'i'],
['𝘫', 'j'],
['𝘬', 'k'],
['𝘭', 'l'],
['𝘮', 'm'],
['𝘯', 'n'],
['𝘰', 'o'],
['𝘱', 'p'],
['𝘲', 'q'],
['𝘳', 'r'],
['𝘴', 's'],
['𝘵', 't'],
['𝘶', 'u'],
['𝘷', 'v'],
['𝘸', 'w'],
['𝘹', 'x'],
['𝘺', 'y'],
['𝘻', 'z'],
['𝘼', 'A'],
['𝘽', 'B'],
['𝘾', 'C'],
['𝘿', 'D'],
['𝙀', 'E'],
['𝙁', 'F'],
['𝙂', 'G'],
['𝙃', 'H'],
['𝙄', 'I'],
['𝙅', 'J'],
['𝙆', 'K'],
['𝙇', 'L'],
['𝙈', 'M'],
['𝙉', 'N'],
['𝙊', 'O'],
['𝙋', 'P'],
['𝙌', 'Q'],
['𝙍', 'R'],
['𝙎', 'S'],
['𝙏', 'T'],
['𝙐', 'U'],
['𝙑', 'V'],
['𝙒', 'W'],
['𝙓', 'X'],
['𝙔', 'Y'],
['𝙕', 'Z'],
['𝙖', 'a'],
['𝙗', 'b'],
['𝙘', 'c'],
['𝙙', 'd'],
['𝙚', 'e'],
['𝙛', 'f'],
['𝙜', 'g'],
['𝙝', 'h'],
['𝙞', 'i'],
['𝙟', 'j'],
['𝙠', 'k'],
['𝙡', 'l'],
['𝙢', 'm'],
['𝙣', 'n'],
['𝙤', 'o'],
['𝙥', 'p'],
['𝙦', 'q'],
['𝙧', 'r'],
['𝙨', 's'],
['𝙩', 't'],
['𝙪', 'u'],
['𝙫', 'v'],
['𝙬', 'w'],
['𝙭', 'x'],
['𝙮', 'y'],
['𝙯', 'z'],
['𝙰', 'A'],
['𝙱', 'B'],
['𝙲', 'C'],
['𝙳', 'D'],
['𝙴', 'E'],
['𝙵', 'F'],
['𝙶', 'G'],
['𝙷', 'H'],
['𝙸', 'I'],
['𝙹', 'J'],
['𝙺', 'K'],
['𝙻', 'L'],
['𝙼', 'M'],
['𝙽', 'N'],
['𝙾', 'O'],
['𝙿', 'P'],
['𝚀', 'Q'],
['𝚁', 'R'],
['𝚂', 'S'],
['𝚃', 'T'],
['𝚄', 'U'],
['𝚅', 'V'],
['𝚆', 'W'],
['𝚇', 'X'],
['𝚈', 'Y'],
['𝚉', 'Z'],
['𝚊', 'a'],
['𝚋', 'b'],
['𝚌', 'c'],
['𝚍', 'd'],
['𝚎', 'e'],
['𝚏', 'f'],
['𝚐', 'g'],
['𝚑', 'h'],
['𝚒', 'i'],
['𝚓', 'j'],
['𝚔', 'k'],
['𝚕', 'l'],
['𝚖', 'm'],
['𝚗', 'n'],
['𝚘', 'o'],
['𝚙', 'p'],
['𝚚', 'q'],
['𝚛', 'r'],
['𝚜', 's'],
['𝚝', 't'],
['𝚞', 'u'],
['𝚟', 'v'],
['𝚠', 'w'],
['𝚡', 'x'],
['𝚢', 'y'],
['𝚣', 'z'],
# Dotless letters
['𝚤', 'l'],
['𝚥', 'j'],
# Greek
['𝛢', 'A'],
['𝛣', 'B'],
['𝛤', 'G'],
['𝛥', 'D'],
['𝛦', 'E'],
['𝛧', 'Z'],
['𝛨', 'I'],
['𝛩', 'TH'],
['𝛪', 'I'],
['𝛫', 'K'],
['𝛬', 'L'],
['𝛭', 'M'],
['𝛮', 'N'],
['𝛯', 'KS'],
['𝛰', 'O'],
['𝛱', 'P'],
['𝛲', 'R'],
['𝛳', 'TH'],
['𝛴', 'S'],
['𝛵', 'T'],
['𝛶', 'Y'],
['𝛷', 'F'],
['𝛸', 'X'],
['𝛹', 'PS'],
['𝛺', 'O'],
['𝛻', 'D'],
['𝛼', 'a'],
['𝛽', 'b'],
['𝛾', 'g'],
['𝛿', 'd'],
['𝜀', 'e'],
['𝜁', 'z'],
['𝜂', 'i'],
['𝜃', 'th'],
['𝜄', 'i'],
['𝜅', 'k'],
['𝜆', 'l'],
['𝜇', 'm'],
['𝜈', 'n'],
['𝜉', 'ks'],
['𝜊', 'o'],
['𝜋', 'p'],
['𝜌', 'r'],
['𝜍', 's'],
['𝜎', 's'],
['𝜏', 't'],
['𝜐', 'y'],
['𝜑', 'f'],
['𝜒', 'x'],
['𝜓', 'ps'],
['𝜔', 'o'],
['𝜕', 'd'],
['𝜖', 'E'],
['𝜗', 'TH'],
['𝜘', 'K'],
['𝜙', 'f'],
['𝜚', 'r'],
['𝜛', 'p'],
['𝜜', 'A'],
['𝜝', 'V'],
['𝜞', 'G'],
['𝜟', 'D'],
['𝜠', 'E'],
['𝜡', 'Z'],
['𝜢', 'I'],
['𝜣', 'TH'],
['𝜤', 'I'],
['𝜥', 'K'],
['𝜦', 'L'],
['𝜧', 'M'],
['𝜨', 'N'],
['𝜩', 'KS'],
['𝜪', 'O'],
['𝜫', 'P'],
['𝜬', 'R'],
['𝜭', 'TH'],
['𝜮', 'S'],
['𝜯', 'T'],
['𝜰', 'Y'],
['𝜱', 'F'],
['𝜲', 'X'],
['𝜳', 'PS'],
['𝜴', 'O'],
['𝜵', 'D'],
['𝜶', 'a'],
['𝜷', 'v'],
['𝜸', 'g'],
['𝜹', 'd'],
['𝜺', 'e'],
['𝜻', 'z'],
['𝜼', 'i'],
['𝜽', 'th'],
['𝜾', 'i'],
['𝜿', 'k'],
['𝝀', 'l'],
['𝝁', 'm'],
['𝝂', 'n'],
['𝝃', 'ks'],
['𝝄', 'o'],
['𝝅', 'p'],
['𝝆', 'r'],
['𝝇', 's'],
['𝝈', 's'],
['𝝉', 't'],
['𝝊', 'y'],
['𝝋', 'f'],
['𝝌', 'x'],
['𝝍', 'ps'],
['𝝎', 'o'],
['𝝏', 'a'],
['𝝐', 'e'],
['𝝑', 'i'],
['𝝒', 'k'],
['𝝓', 'f'],
['𝝔', 'r'],
['𝝕', 'p'],
['𝝖', 'A'],
['𝝗', 'B'],
['𝝘', 'G'],
['𝝙', 'D'],
['𝝚', 'E'],
['𝝛', 'Z'],
['𝝜', 'I'],
['𝝝', 'TH'],
['𝝞', 'I'],
['𝝟', 'K'],
['𝝠', 'L'],
['𝝡', 'M'],
['𝝢', 'N'],
['𝝣', 'KS'],
['𝝤', 'O'],
['𝝥', 'P'],
['𝝦', 'R'],
['𝝧', 'TH'],
['𝝨', 'S'],
['𝝩', 'T'],
['𝝪', 'Y'],
['𝝫', 'F'],
['𝝬', 'X'],
['𝝭', 'PS'],
['𝝮', 'O'],
['𝝯', 'D'],
['𝝰', 'a'],
['𝝱', 'v'],
['𝝲', 'g'],
['𝝳', 'd'],
['𝝴', 'e'],
['𝝵', 'z'],
['𝝶', 'i'],
['𝝷', 'th'],
['𝝸', 'i'],
['𝝹', 'k'],
['𝝺', 'l'],
['𝝻', 'm'],
['𝝼', 'n'],
['𝝽', 'ks'],
['𝝾', 'o'],
['𝝿', 'p'],
['𝞀', 'r'],
['𝞁', 's'],
['𝞂', 's'],
['𝞃', 't'],
['𝞄', 'y'],
['𝞅', 'f'],
['𝞆', 'x'],
['𝞇', 'ps'],
['𝞈', 'o'],
['𝞉', 'a'],
['𝞊', 'e'],
['𝞋', 'i'],
['𝞌', 'k'],
['𝞍', 'f'],
['𝞎', 'r'],
['𝞏', 'p'],
['𝞐', 'A'],
['𝞑', 'V'],
['𝞒', 'G'],
['𝞓', 'D'],
['𝞔', 'E'],
['𝞕', 'Z'],
['𝞖', 'I'],
['𝞗', 'TH'],
['𝞘', 'I'],
['𝞙', 'K'],
['𝞚', 'L'],
['𝞛', 'M'],
['𝞜', 'N'],
['𝞝', 'KS'],
['𝞞', 'O'],
['𝞟', 'P'],
['𝞠', 'R'],
['𝞡', 'TH'],
['𝞢', 'S'],
['𝞣', 'T'],
['𝞤', 'Y'],
['𝞥', 'F'],
['𝞦', 'X'],
['𝞧', 'PS'],
['𝞨', 'O'],
['𝞩', 'D'],
['𝞪', 'a'],
['𝞫', 'v'],
['𝞬', 'g'],
['𝞭', 'd'],
['𝞮', 'e'],
['𝞯', 'z'],
['𝞰', 'i'],
['𝞱', 'th'],
['𝞲', 'i'],
['𝞳', 'k'],
['𝞴', 'l'],
['𝞵', 'm'],
['𝞶', 'n'],
['𝞷', 'ks'],
['𝞸', 'o'],
['𝞹', 'p'],
['𝞺', 'r'],
['𝞻', 's'],
['𝞼', 's'],
['𝞽', 't'],
['𝞾', 'y'],
['𝞿', 'f'],
['𝟀', 'x'],
['𝟁', 'ps'],
['𝟂', 'o'],
['𝟃', 'a'],
['𝟄', 'e'],
['𝟅', 'i'],
['𝟆', 'k'],
['𝟇', 'f'],
['𝟈', 'r'],
['𝟉', 'p'],
['𝟊', 'F'],
['𝟋', 'f'],
['⒜', '(a)'],
['⒝', '(b)'],
['⒞', '(c)'],
['⒟', '(d)'],
['⒠', '(e)'],
['⒡', '(f)'],
['⒢', '(g)'],
['⒣', '(h)'],
['⒤', '(i)'],
['⒥', '(j)'],
['⒦', '(k)'],
['⒧', '(l)'],
['⒨', '(m)'],
['⒩', '(n)'],
['⒪', '(o)'],
['⒫', '(p)'],
['⒬', '(q)'],
['⒭', '(r)'],
['⒮', '(s)'],
['⒯', '(t)'],
['⒰', '(u)'],
['⒱', '(v)'],
['⒲', '(w)'],
['⒳', '(x)'],
['⒴', '(y)'],
['⒵', '(z)'],
['Ⓐ', '(A)'],
['Ⓑ', '(B)'],
['Ⓒ', '(C)'],
['Ⓓ', '(D)'],
['Ⓔ', '(E)'],
['Ⓕ', '(F)'],
['Ⓖ', '(G)'],
['Ⓗ', '(H)'],
['Ⓘ', '(I)'],
['Ⓙ', '(J)'],
['Ⓚ', '(K)'],
['Ⓛ', '(L)'],
['Ⓝ', '(N)'],
['Ⓞ', '(O)'],
['Ⓟ', '(P)'],
['Ⓠ', '(Q)'],
['Ⓡ', '(R)'],
['Ⓢ', '(S)'],
['Ⓣ', '(T)'],
['Ⓤ', '(U)'],
['Ⓥ', '(V)'],
['Ⓦ', '(W)'],
['Ⓧ', '(X)'],
['Ⓨ', '(Y)'],
['Ⓩ', '(Z)'],
['ⓐ', '(a)'],
['ⓑ', '(b)'],
['ⓒ', '(c)'],
['ⓓ', '(d)'],
['ⓔ', '(e)'],
['ⓕ', '(f)'],
['ⓖ', '(g)'],
['ⓗ', '(h)'],
['ⓘ', '(i)'],
['ⓙ', '(j)'],
['ⓚ', '(k)'],
['ⓛ', '(l)'],
['ⓜ', '(m)'],
['ⓝ', '(n)'],
['ⓞ', '(o)'],
['ⓟ', '(p)'],
['ⓠ', '(q)'],
['ⓡ', '(r)'],
['ⓢ', '(s)'],
['ⓣ', '(t)'],
['ⓤ', '(u)'],
['ⓥ', '(v)'],
['ⓦ', '(w)'],
['ⓧ', '(x)'],
['ⓨ', '(y)'],
['ⓩ', '(z)'],
# Numbers
['𝟎', '0'],
['𝟏', '1'],
['𝟐', '2'],
['𝟑', '3'],
['𝟒', '4'],
['𝟓', '5'],
['𝟔', '6'],
['𝟕', '7'],
['𝟖', '8'],
['𝟗', '9'],
['𝟘', '0'],
['𝟙', '1'],
['𝟚', '2'],
['𝟛', '3'],
['𝟜', '4'],
['𝟝', '5'],
['𝟞', '6'],
['𝟟', '7'],
['𝟠', '8'],
['𝟡', '9'],
['𝟢', '0'],
['𝟣', '1'],
['𝟤', '2'],
['𝟥', '3'],
['𝟦', '4'],
['𝟧', '5'],
['𝟨', '6'],
['𝟩', '7'],
['𝟪', '8'],
['𝟫', '9'],
['𝟬', '0'],
['𝟭', '1'],
['𝟮', '2'],
['𝟯', '3'],
['𝟰', '4'],
['𝟱', '5'],
['𝟲', '6'],
['𝟳', '7'],
['𝟴', '8'],
['𝟵', '9'],
['𝟶', '0'],
['𝟷', '1'],
['𝟸', '2'],
['𝟹', '3'],
['𝟺', '4'],
['𝟻', '5'],
['𝟼', '6'],
['𝟽', '7'],
['𝟾', '8'],
['𝟿', '9'],
['①', '1'],
['②', '2'],
['③', '3'],
['④', '4'],
['⑤', '5'],
['⑥', '6'],
['⑦', '7'],
['⑧', '8'],
['⑨', '9'],
['⑩', '10'],
['⑪', '11'],
['⑫', '12'],
['⑬', '13'],
['⑭', '14'],
['⑮', '15'],
['⑯', '16'],
['⑰', '17'],
['⑱', '18'],
['⑲', '19'],
['⑳', '20'],
['⑴', '1'],
['⑵', '2'],
['⑶', '3'],
['⑷', '4'],
['⑸', '5'],
['⑹', '6'],
['⑺', '7'],
['⑻', '8'],
['⑼', '9'],
['⑽', '10'],
['⑾', '11'],
['⑿', '12'],
['⒀', '13'],
['⒁', '14'],
['⒂', '15'],
['⒃', '16'],
['⒄', '17'],
['⒅', '18'],
['⒆', '19'],
['⒇', '20'],
['⒈', '1.'],
['⒉', '2.'],
['⒊', '3.'],
['⒋', '4.'],
['⒌', '5.'],
['⒍', '6.'],
['⒎', '7.'],
['⒏', '8.'],
['⒐', '9.'],
['⒑', '10.'],
['⒒', '11.'],
['⒓', '12.'],
['⒔', '13.'],
['⒕', '14.'],
['⒖', '15.'],
['⒗', '16.'],
['⒘', '17.'],
['⒙', '18.'],
['⒚', '19.'],
['⒛', '20.'],
['⓪', '0'],
['⓫', '11'],
['⓬', '12'],
['⓭', '13'],
['⓮', '14'],
['⓯', '15'],
['⓰', '16'],
['⓱', '17'],
['⓲', '18'],
['⓳', '19'],
['⓴', '20'],
['⓵', '1'],
['⓶', '2'],
['⓷', '3'],
['⓸', '4'],
['⓹', '5'],
['⓺', '6'],
['⓻', '7'],
['⓼', '8'],
['⓽', '9'],
['⓾', '10'],
['⓿', '0'],
# Punctuation
['🙰', '&'],
['🙱', '&'],
['🙲', '&'],
['🙳', '&'],
['🙴', '&'],
['🙵', '&'],
['🙶', '"'],
['🙷', '"'],
['🙸', '"'],
['‽', '?!'],
['🙹', '?!'],
['🙺', '?!'],
['🙻', '?!'],
['🙼', '/'],
['🙽', '\\'],
# Alchemy
['🜇', 'AR'],
['🜈', 'V'],
['🜉', 'V'],
['🜆', 'VR'],
['🜅', 'VF'],
['🜩', '2'],
['🜪', '5'],
['🝡', 'f'],
['🝢', 'W'],
['🝣', 'U'],
['🝧', 'V'],
['🝨', 'T'],
['🝪', 'V'],
['🝫', 'MB'],
['🝬', 'VB'],
['🝲', '3B'],
['🝳', '3B'],
# Emojis
['💯', '100'],
['🔙', 'BACK'],
['🔚', 'END'],
['🔛', 'ON!'],
['🔜', 'SOON'],
['🔝', 'TOP'],
['🔞', '18'],
['🔤', 'abc'],
['🔠', 'ABCD'],
['🔡', 'abcd'],
['🔢', '1234'],
['🔣', 'T&@%'],
['#️⃣', '#'],
['*️⃣', '*'],
['0️⃣', '0'],
['1️⃣', '1'],
['2️⃣', '2'],
['3️⃣', '3'],
['4️⃣', '4'],
['5️⃣', '5'],
['6️⃣', '6'],
['7️⃣', '7'],
['8️⃣', '8'],
['9️⃣', '9'],
['🔟', '10'],
['🅰️', 'A'],
['🅱️', 'B'],
['🆎', 'AB'],
['🆑', 'CL'],
['🅾️', 'O'],
['🅿', 'P'],
['🆘', 'SOS'],
['🅲', 'C'],
['🅳', 'D'],
['🅴', 'E'],
['🅵', 'F'],
['🅶', 'G'],
['🅷', 'H'],
['🅸', 'I'],
['🅹', 'J'],
['🅺', 'K'],
['🅻', 'L'],
['🅼', 'M'],
['🅽', 'N'],
['🆀', 'Q'],
['🆁', 'R'],
['🆂', 'S'],
['🆃', 'T'],
['🆄', 'U'],
['🆅', 'V'],
['🆆', 'W'],
['🆇', 'X'],
['🆈', 'Y'],
['🆉', 'Z'],
]
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/unidecoder/replacements.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import warnings
from .homoglyphs import homoglyphs
from .replacements import replacements
_replacements = {uni: asc for uni, asc in replacements}
_homoglyphs = {g: asc for asc, glyphs in homoglyphs.items() for g in glyphs}
def unidecoder(s, homoglyphs=False):
"""Transliterate unicode
Args:
s (str): unicode string
homoglyphs (bool): prioritize translating to homoglyphs
"""
warned = False # Once per utterance
ret = ''
for u in s:
if ord(u) < 127:
a = u
elif homoglyphs:
a = _homoglyphs.get(u, _replacements.get(u, None))
else:
a = _replacements.get(u, _homoglyphs.get(u, None))
if a is None:
if not warned:
warnings.warn(f'Unexpected character {u}: '
'please revise your text cleaning rules.',
stacklevel=10**6)
warned = True
else:
ret += a
return ret
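# Minimal usage sketch (illustrative). With the default priority, the Cyrillic
# capital 'Н' is transliterated via `replacements` to 'N'; with homoglyphs=True
# it is mapped to the visually identical Latin 'H' instead:
#
# >>> unidecoder('naïve café Ω')
# 'naive cafe O'
# >>> unidecoder('Н'), unidecoder('Н', homoglyphs=True)
# ('N', 'H')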
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/unidecoder/__init__.py |
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The MIT License (MIT)
#
# Copyright (c) 2015 Rob Dawson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Based on:
# https://github.com/codebox/homoglyph/blob/master/raw_data/chars.txt
#
homoglyphs = {
' ': ['\xa0', '\u1680', '\u2000', '\u2001', '\u2002', '\u2003', '\u2004', '\u2005', '\u2006', '\u2007', '\u2008', '\u2009', '\u200a', '\u2028', '\u2029', '\u202f', '\u205f'],
'!': ['ǃ', 'ⵑ', '!'],
'$': ['$'],
'%': ['%'],
'&': ['ꝸ', '&'],
"'": ['´', 'ʹ', 'ʻ', 'ʼ', 'ʽ', 'ʾ', 'ˈ', 'ˊ', 'ˋ', '˴', 'ʹ', '΄', '՚', '՝', 'י', '׳', 'ߴ', 'ߵ', 'ᑊ', 'ᛌ', '᾽', '᾿', '`', '´', '῾', '‘', '’', '‛', '′', '‵', 'ꞌ', ''', '`', '𖽑', '𖽒'],
'"': ['¨', 'ʺ', '˝', 'ˮ', '״', '“', '”', '‟', '❝', '❞', '⠐', '⹂'],
'(': ['❨', '❲', '〔', '﴾', '(', '['],
')': ['❩', '❳', '〕', '﴿', ')', ']'],
'*': ['٭', '⁎', '∗', '*', '𐌟'],
'+': ['᛭', '➕', '+', '𐊛'],
',': ['¸', '؍', '٫', '‚', 'ꓹ', ','],
'-': ['˗', '۔', '‐', '‑', '‒', '–', '⁃', '−', '➖', 'Ⲻ', '﹘'],
'.': ['٠', '۰', '܁', '܂', '․', 'ꓸ', '꘎', '.', '𐩐', '𝅭'],
'/': ['᜵', '⁁', '⁄', '∕', '╱', '⟋', '⧸', 'Ⳇ', '⼃', '〳', 'ノ', '㇓', '丿', '/', '𝈺'],
'2': ['Ƨ', 'Ϩ', 'ᒿ', 'Ꙅ', 'ꛯ', 'Ꝛ', '2', '𝟐', '𝟚', '𝟤', '𝟮', '𝟸', '\U0001fbf2'],
'3': ['Ʒ', 'Ȝ', 'З', 'Ӡ', 'Ⳍ', 'Ꝫ', 'Ɜ', '3', '𑣊', '𖼻', '𝈆', '𝟑', '𝟛', '𝟥', '𝟯', '𝟹', '\U0001fbf3'],
'4': ['Ꮞ', '4', '𑢯', '𝟒', '𝟜', '𝟦', '𝟰', '𝟺', '\U0001fbf4'],
'5': ['Ƽ', '5', '𑢻', '𝟓', '𝟝', '𝟧', '𝟱', '𝟻', '\U0001fbf5'],
'6': ['б', 'Ꮾ', 'Ⳓ', '6', '𑣕', '𝟔', '𝟞', '𝟨', '𝟲', '𝟼', '\U0001fbf6'],
'7': ['7', '𐓒', '𑣆', '𝈒', '𝟕', '𝟟', '𝟩', '𝟳', '𝟽', '\U0001fbf7'],
'8': ['Ȣ', 'ȣ', '৪', '੪', 'ଃ', '8', '𐌚', '𝟖', '𝟠', '𝟪', '𝟴', '𝟾', '𞣋', '\U0001fbf8'],
'9': ['৭', '੧', '୨', '൭', 'Ⳋ', 'Ꝯ', '9', '𑢬', '𑣌', '𑣖', '𝟗', '𝟡', '𝟫', '𝟵', '𝟿', '\U0001fbf9'],
':': ['ː', '˸', '։', '׃', '܃', '܄', 'ः', 'ઃ', '᛬', '᠃', '᠉', '⁚', '∶', 'ꓽ', '꞉', '︰', ':'],
';': [';', ';'],
'<': ['˂', 'ᐸ', 'ᚲ', '‹', '❮', '<', '𝈶'],
'=': ['᐀', '⹀', '゠', '꓿', '='],
'>': ['˃', 'ᐳ', '›', '❯', '>', '𖼿', '𝈷'],
'?': ['Ɂ', 'ʔ', 'ॽ', 'Ꭾ', 'ꛫ', '?'],
'@': ['@'],
'A': ['Α', 'А', 'Ꭺ', 'ᗅ', 'ᴀ', 'ꓮ', 'ꭺ', 'A', '𐊠', '𖽀', '𝐀', '𝐴', '𝑨', '𝒜', '𝓐', '𝔄', '𝔸', '𝕬', '𝖠', '𝗔', '𝘈', '𝘼', '𝙰', '𝚨', '𝛢', '𝜜', '𝝖', '𝞐'],
'B': ['ʙ', 'Β', 'В', 'в', 'Ᏼ', 'ᏼ', 'ᗷ', 'ᛒ', 'ℬ', 'ꓐ', 'Ꞵ', 'B', '𐊂', '𐊡', '𐌁', '𝐁', '𝐵', '𝑩', '𝓑', '𝔅', '𝔹', '𝕭', '𝖡', '𝗕', '𝘉', '𝘽', '𝙱', '𝚩', '𝛣', '𝜝', '𝝗', '𝞑'],
'C': ['Ϲ', 'С', 'Ꮯ', 'ᑕ', 'ℂ', 'ℭ', 'Ⅽ', '⊂', 'Ⲥ', '⸦', 'ꓚ', 'C', '𐊢', '𐌂', '𐐕', '𐔜', '𑣩', '𑣲', '𝐂', '𝐶', '𝑪', '𝒞', '𝓒', '𝕮', '𝖢', '𝗖', '𝘊', '𝘾', '𝙲', '🝌'],
'D': ['Ꭰ', 'ᗞ', 'ᗪ', 'ᴅ', 'ⅅ', 'Ⅾ', 'ꓓ', 'ꭰ', 'D', '𝐃', '𝐷', '𝑫', '𝒟', '𝓓', '𝔇', '𝔻', '𝕯', '𝖣', '𝗗', '𝘋', '𝘿', '𝙳'],
'E': ['Ε', 'Е', 'Ꭼ', 'ᴇ', 'ℰ', '⋿', 'ⴹ', 'ꓰ', 'ꭼ', 'E', '𐊆', '𑢦', '𑢮', '𝐄', '𝐸', '𝑬', '𝓔', '𝔈', '𝔼', '𝕰', '𝖤', '𝗘', '𝘌', '𝙀', '𝙴', '𝚬', '𝛦', '𝜠', '𝝚', '𝞔'],
'F': ['Ϝ', 'ᖴ', 'ℱ', 'ꓝ', 'Ꞙ', 'F', '𐊇', '𐊥', '𐔥', '𑢢', '𑣂', '𝈓', '𝐅', '𝐹', '𝑭', '𝓕', '𝔉', '𝔽', '𝕱', '𝖥', '𝗙', '𝘍', '𝙁', '𝙵', '𝟊'],
'G': ['ɢ', 'Ԍ', 'ԍ', 'Ꮐ', 'Ᏻ', 'ᏻ', 'ꓖ', 'ꮐ', 'G', '𝐆', '𝐺', '𝑮', '𝒢', '𝓖', '𝔊', '𝔾', '𝕲', '𝖦', '𝗚', '𝘎', '𝙂', '𝙶'],
'H': ['ʜ', 'Η', 'Н', 'н', 'Ꮋ', 'ᕼ', 'ℋ', 'ℌ', 'ℍ', 'Ⲏ', 'ꓧ', 'ꮋ', 'H', '𐋏', '𝐇', '𝐻', '𝑯', '𝓗', '𝕳', '𝖧', '𝗛', '𝘏', '𝙃', '𝙷', '𝚮', '𝛨', '𝜢', '𝝜', '𝞖'],
'J': ['Ϳ', 'Ј', 'Ꭻ', 'ᒍ', 'ᴊ', 'ꓙ', 'Ʝ', 'ꭻ', 'J', '𝐉', '𝐽', '𝑱', '𝒥', '𝓙', '𝔍', '𝕁', '𝕵', '𝖩', '𝗝', '𝘑', '𝙅', '𝙹'],
'K': ['Κ', 'К', 'Ꮶ', 'ᛕ', 'K', 'Ⲕ', 'ꓗ', 'K', '𐔘', '𝐊', '𝐾', '𝑲', '𝒦', '𝓚', '𝔎', '𝕂', '𝕶', '𝖪', '𝗞', '𝘒', '𝙆', '𝙺', '𝚱', '𝛫', '𝜥', '𝝟', '𝞙'],
'L': ['ʟ', 'Ꮮ', 'ᒪ', 'ℒ', 'Ⅼ', 'Ⳑ', 'ⳑ', 'ꓡ', 'ꮮ', 'L', '𐐛', '𐑃', '𐔦', '𑢣', '𑢲', '𖼖', '𝈪', '𝐋', '𝐿', '𝑳', '𝓛', '𝔏', '𝕃', '𝕷', '𝖫', '𝗟', '𝘓', '𝙇', '𝙻'],
'M': ['Μ', 'Ϻ', 'М', 'Ꮇ', 'ᗰ', 'ᛖ', 'ℳ', 'Ⅿ', 'Ⲙ', 'ꓟ', 'M', '𐊰', '𐌑', '𝐌', '𝑀', '𝑴', '𝓜', '𝔐', '𝕄', '𝕸', '𝖬', '𝗠', '𝘔', '𝙈', '𝙼', '𝚳', '𝛭', '𝜧', '𝝡', '𝞛'],
'N': ['ɴ', 'Ν', 'ℕ', 'Ⲛ', 'ꓠ', 'N', '𐔓', '𝐍', '𝑁', '𝑵', '𝒩', '𝓝', '𝔑', '𝕹', '𝖭', '𝗡', '𝘕', '𝙉', '𝙽', '𝚴', '𝛮', '𝜨', '𝝢', '𝞜'],
'P': ['Ρ', 'Р', 'Ꮲ', 'ᑭ', 'ᴘ', 'ᴩ', 'ℙ', 'Ⲣ', 'ꓑ', 'ꮲ', 'P', '𐊕', '𝐏', '𝑃', '𝑷', '𝒫', '𝓟', '𝔓', '𝕻', '𝖯', '𝗣', '𝘗', '𝙋', '𝙿', '𝚸', '𝛲', '𝜬', '𝝦', '𝞠'],
'Q': ['ℚ', 'ⵕ', 'Q', '𝐐', '𝑄', '𝑸', '𝒬', '𝓠', '𝔔', '𝕼', '𝖰', '𝗤', '𝘘', '𝙌', '𝚀'],
'R': ['Ʀ', 'ʀ', 'Ꭱ', 'Ꮢ', 'ᖇ', 'ᚱ', 'ℛ', 'ℜ', 'ℝ', 'ꓣ', 'ꭱ', 'ꮢ', 'R', '𐒴', '𖼵', '𝈖', '𝐑', '𝑅', '𝑹', '𝓡', '𝕽', '𝖱', '𝗥', '𝘙', '𝙍', '𝚁'],
'S': ['Ѕ', 'Տ', 'Ꮥ', 'Ꮪ', 'ꓢ', 'S', '𐊖', '𐐠', '𖼺', '𝐒', '𝑆', '𝑺', '𝒮', '𝓢', '𝔖', '𝕊', '𝕾', '𝖲', '𝗦', '𝘚', '𝙎', '𝚂'],
'T': ['Τ', 'τ', 'Т', 'т', 'Ꭲ', 'ᴛ', '⊤', '⟙', 'Ⲧ', 'ꓔ', 'ꭲ', 'T', '𐊗', '𐊱', '𐌕', '𑢼', '𖼊', '𝐓', '𝑇', '𝑻', '𝒯', '𝓣', '𝔗', '𝕋', '𝕿', '𝖳', '𝗧', '𝘛', '𝙏', '𝚃', '𝚻', '𝛕', '𝛵', '𝜏', '𝜯', '𝝉', '𝝩', '𝞃', '𝞣', '𝞽', '🝨'],
'U': ['Ս', 'ሀ', 'ᑌ', '∪', '⋃', 'ꓴ', 'U', '𐓎', '𑢸', '𖽂', '𝐔', '𝑈', '𝑼', '𝒰', '𝓤', '𝔘', '𝕌', '𝖀', '𝖴', '𝗨', '𝘜', '𝙐', '𝚄'],
'V': ['Ѵ', '٧', '۷', 'Ꮩ', 'ᐯ', 'Ⅴ', 'ⴸ', 'ꓦ', 'ꛟ', 'V', '𐔝', '𑢠', '𖼈', '𝈍', '𝐕', '𝑉', '𝑽', '𝒱', '𝓥', '𝔙', '𝕍', '𝖁', '𝖵', '𝗩', '𝘝', '𝙑', '𝚅'],
'W': ['Ԝ', 'Ꮃ', 'Ꮤ', 'ꓪ', 'W', '𑣦', '𑣯', '𝐖', '𝑊', '𝑾', '𝒲', '𝓦', '𝔚', '𝕎', '𝖂', '𝖶', '𝗪', '𝘞', '𝙒', '𝚆'],
'X': ['Χ', 'Х', '᙭', 'ᚷ', 'Ⅹ', '╳', 'Ⲭ', 'ⵝ', 'ꓫ', 'Ꭓ', 'X', '𐊐', '𐊴', '𐌗', '𐌢', '𐔧', '𑣬', '𝐗', '𝑋', '𝑿', '𝒳', '𝓧', '𝔛', '𝕏', '𝖃', '𝖷', '𝗫', '𝘟', '𝙓', '𝚇', '𝚾', '𝛸', '𝜲', '𝝬', '𝞦'],
'Y': ['Υ', 'ϒ', 'У', 'Ү', 'Ꭹ', 'Ꮍ', 'Ⲩ', 'ꓬ', 'Y', '𐊲', '𑢤', '𖽃', '𝐘', '𝑌', '𝒀', '𝒴', '𝓨', '𝔜', '𝕐', '𝖄', '𝖸', '𝗬', '𝘠', '𝙔', '𝚈', '𝚼', '𝛶', '𝜰', '𝝪', '𝞤'],
'Z': ['Ζ', 'Ꮓ', 'ℤ', 'ℨ', 'ꓜ', 'Z', '𐋵', '𑢩', '𑣥', '𝐙', '𝑍', '𝒁', '𝒵', '𝓩', '𝖅', '𝖹', '𝗭', '𝘡', '𝙕', '𝚉', '𝚭', '𝛧', '𝜡', '𝝛', '𝞕'],
'\\': ['∖', '⟍', '⧵', '⧹', '⼂', '㇔', '丶', '﹨', '\', '𝈏', '𝈻'],
'^': ['˄', 'ˆ'],
'_': ['ߺ', '﹍', '﹎', '﹏', '_'],
'a': ['ɑ', 'α', 'а', '⍺', 'a', '𝐚', '𝑎', '𝒂', '𝒶', '𝓪', '𝔞', '𝕒', '𝖆', '𝖺', '𝗮', '𝘢', '𝙖', '𝚊', '𝛂', '𝛼', '𝜶', '𝝰', '𝞪'],
'b': ['Ƅ', 'Ь', 'Ꮟ', 'ᑲ', 'ᖯ', 'b', '𝐛', '𝑏', '𝒃', '𝒷', '𝓫', '𝔟', '𝕓', '𝖇', '𝖻', '𝗯', '𝘣', '𝙗', '𝚋'],
'c': ['ϲ', 'с', 'ᴄ', 'ⅽ', 'ⲥ', 'ꮯ', 'c', '𐐽', '𝐜', '𝑐', '𝒄', '𝒸', '𝓬', '𝔠', '𝕔', '𝖈', '𝖼', '𝗰', '𝘤', '𝙘', '𝚌'],
'd': ['ԁ', 'Ꮷ', 'ᑯ', 'ⅆ', 'ⅾ', 'ꓒ', 'd', '𝐝', '𝑑', '𝒅', '𝒹', '𝓭', '𝔡', '𝕕', '𝖉', '𝖽', '𝗱', '𝘥', '𝙙', '𝚍'],
'e': ['е', 'ҽ', '℮', 'ℯ', 'ⅇ', 'ꬲ', 'e', '𝐞', '𝑒', '𝒆', '𝓮', '𝔢', '𝕖', '𝖊', '𝖾', '𝗲', '𝘦', '𝙚', '𝚎'],
'f': ['ſ', 'ϝ', 'ք', 'ẝ', 'ꞙ', 'ꬵ', 'f', '𝐟', '𝑓', '𝒇', '𝒻', '𝓯', '𝔣', '𝕗', '𝖋', '𝖿', '𝗳', '𝘧', '𝙛', '𝚏', '𝟋'],
'g': ['ƍ', 'ɡ', 'ց', 'ᶃ', 'ℊ', 'g', '𝐠', '𝑔', '𝒈', '𝓰', '𝔤', '𝕘', '𝖌', '𝗀', '𝗴', '𝘨', '𝙜', '𝚐'],
'h': ['һ', 'հ', 'Ꮒ', 'ℎ', 'h', '𝐡', '𝒉', '𝒽', '𝓱', '𝔥', '𝕙', '𝖍', '𝗁', '𝗵', '𝘩', '𝙝', '𝚑'],
'i': ['ı', 'ɩ', 'ɪ', '˛', 'ͺ', 'ι', 'і', 'ӏ', 'Ꭵ', 'ι', 'ℹ', 'ⅈ', 'ⅰ', '⍳', 'ꙇ', 'ꭵ', 'i', '𑣃', '𝐢', '𝑖', '𝒊', '𝒾', '𝓲', '𝔦', '𝕚', '𝖎', '𝗂', '𝗶', '𝘪', '𝙞', '𝚒', '𝚤', '𝛊', '𝜄', '𝜾', '𝝸', '𝞲'],
'j': ['ϳ', 'ј', 'ⅉ', 'j', '𝐣', '𝑗', '𝒋', '𝒿', '𝓳', '𝔧', '𝕛', '𝖏', '𝗃', '𝗷', '𝘫', '𝙟', '𝚓'],
'k': ['k', '𝐤', '𝑘', '𝒌', '𝓀', '𝓴', '𝔨', '𝕜', '𝖐', '𝗄', '𝗸', '𝘬', '𝙠', '𝚔'],
'l': ['Ɩ', 'ǀ', 'Ι', 'І', 'Ӏ', '׀', 'ו', 'ן', 'ا', '١', '۱', 'ߊ', 'ᛁ', 'ℐ', 'ℑ', 'ℓ', 'Ⅰ', 'ⅼ', '∣', '⏽', 'Ⲓ', 'ⵏ', 'ꓲ', 'ﺍ', 'ﺎ', '1', 'I', 'l', '│', '𐊊', '𐌉', '𐌠', '𖼨', '𝐈', '𝐥', '𝐼', '𝑙', '𝑰', '𝒍', '𝓁', '𝓘', '𝓵', '𝔩', '𝕀', '𝕝', '𝕴', '𝖑', '𝖨', '𝗅', '𝗜', '𝗹', '𝘐', '𝘭', '𝙄', '𝙡', '𝙸', '𝚕', '𝚰', '𝛪', '𝜤', '𝝞', '𝞘', '𝟏', '𝟙', '𝟣', '𝟭', '𝟷', '𞣇', '𞸀', '𞺀', '\U0001fbf1'],
'm': ['m'],
'n': ['ո', 'ռ', 'n', '𝐧', '𝑛', '𝒏', '𝓃', '𝓷', '𝔫', '𝕟', '𝖓', '𝗇', '𝗻', '𝘯', '𝙣', '𝚗'],
'o': ['Ο', 'ο', 'σ', 'О', 'о', 'Օ', 'օ', 'ס', 'ه', '٥', 'ھ', 'ہ', 'ە', '۵', '߀', '०', '০', '੦', '૦', 'ଠ', '୦', '௦', 'ం', '౦', 'ಂ', '೦', 'ം', 'ഠ', '൦', 'ං', '๐', '໐', 'ဝ', '၀', 'ჿ', 'ዐ', 'ᴏ', 'ᴑ', 'ℴ', 'Ⲟ', 'ⲟ', 'ⵔ', '〇', 'ꓳ', 'ꬽ', 'ﮦ', 'ﮧ', 'ﮨ', 'ﮩ', 'ﮪ', 'ﮫ', 'ﮬ', 'ﮭ', 'ﻩ', 'ﻪ', 'ﻫ', 'ﻬ', '0', 'O', 'o', '𐊒', '𐊫', '𐐄', '𐐬', '𐓂', '𐓪', '𐔖', '𑓐', '𑢵', '𑣈', '𑣗', '𑣠', '𝐎', '𝐨', '𝑂', '𝑜', '𝑶', '𝒐', '𝒪', '𝓞', '𝓸', '𝔒', '𝔬', '𝕆', '𝕠', '𝕺', '𝖔', '𝖮', '𝗈', '𝗢', '𝗼', '𝘖', '𝘰', '𝙊', '𝙤', '𝙾', '𝚘', '𝚶', '𝛐', '𝛔', '𝛰', '𝜊', '𝜎', '𝜪', '𝝄', '𝝈', '𝝤', '𝝾', '𝞂', '𝞞', '𝞸', '𝞼', '𝟎', '𝟘', '𝟢', '𝟬', '𝟶', '𞸤', '𞹤', '𞺄', '\U0001fbf0'],
'p': ['ρ', 'ϱ', 'р', '⍴', 'ⲣ', 'p', '𝐩', '𝑝', '𝒑', '𝓅', '𝓹', '𝔭', '𝕡', '𝖕', '𝗉', '𝗽', '𝘱', '𝙥', '𝚙', '𝛒', '𝛠', '𝜌', '𝜚', '𝝆', '𝝔', '𝞀', '𝞎', '𝞺', '𝟈'],
'q': ['ԛ', 'գ', 'զ', 'q', '𝐪', '𝑞', '𝒒', '𝓆', '𝓺', '𝔮', '𝕢', '𝖖', '𝗊', '𝗾', '𝘲', '𝙦', '𝚚'],
'r': ['г', 'ᴦ', 'ⲅ', 'ꭇ', 'ꭈ', 'ꮁ', 'r', '𝐫', '𝑟', '𝒓', '𝓇', '𝓻', '𝔯', '𝕣', '𝖗', '𝗋', '𝗿', '𝘳', '𝙧', '𝚛'],
's': ['ƽ', 'ѕ', 'ꜱ', 'ꮪ', 's', '𐑈', '𑣁', '𝐬', '𝑠', '𝒔', '𝓈', '𝓼', '𝔰', '𝕤', '𝖘', '𝗌', '𝘀', '𝘴', '𝙨', '𝚜'],
't': ['t', '𝐭', '𝑡', '𝒕', '𝓉', '𝓽', '𝔱', '𝕥', '𝖙', '𝗍', '𝘁', '𝘵', '𝙩', '𝚝'],
'u': ['ʋ', 'υ', 'ս', 'ᴜ', 'ꞟ', 'ꭎ', 'ꭒ', 'u', '𐓶', '𑣘', '𝐮', '𝑢', '𝒖', '𝓊', '𝓾', '𝔲', '𝕦', '𝖚', '𝗎', '𝘂', '𝘶', '𝙪', '𝚞', '𝛖', '𝜐', '𝝊', '𝞄', '𝞾'],
'v': ['ν', 'ѵ', 'ט', 'ᴠ', 'ⅴ', '∨', '⋁', 'ꮩ', 'v', '𑜆', '𑣀', '𝐯', '𝑣', '𝒗', '𝓋', '𝓿', '𝔳', '𝕧', '𝖛', '𝗏', '𝘃', '𝘷', '𝙫', '𝚟', '𝛎', '𝜈', '𝝂', '𝝼', '𝞶'],
'w': ['ɯ', 'ѡ', 'ԝ', 'ա', 'ᴡ', 'ꮃ', 'w', '𑜊', '𑜎', '𑜏', '𝐰', '𝑤', '𝒘', '𝓌', '𝔀', '𝔴', '𝕨', '𝖜', '𝗐', '𝘄', '𝘸', '𝙬', '𝚠'],
'x': ['×', 'х', 'ᕁ', 'ᕽ', '᙮', 'ⅹ', '⤫', '⤬', '⨯', 'x', '𝐱', '𝑥', '𝒙', '𝓍', '𝔁', '𝔵', '𝕩', '𝖝', '𝗑', '𝘅', '𝘹', '𝙭', '𝚡'],
'y': ['ɣ', 'ʏ', 'γ', 'у', 'ү', 'ყ', 'ᶌ', 'ỿ', 'ℽ', 'ꭚ', 'y', '𑣜', '𝐲', '𝑦', '𝒚', '𝓎', '𝔂', '𝔶', '𝕪', '𝖞', '𝗒', '𝘆', '𝘺', '𝙮', '𝚢', '𝛄', '𝛾', '𝜸', '𝝲', '𝞬'],
'z': ['ᴢ', 'ꮓ', 'z', '𑣄', '𝐳', '𝑧', '𝒛', '𝓏', '𝔃', '𝔷', '𝕫', '𝖟', '𝗓', '𝘇', '𝘻', '𝙯', '𝚣'],
'{': ['❴', '{', '𝄔'],
'}': ['❵', '}'],
'~': ['˜', '῀', '⁓', '∼'],
}
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/HiFiGAN/common/text/unidecoder/homoglyphs.py |
# *****************************************************************************
# Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import torch
from inference import load_and_setup_model
def parse_args(parser):
parser.add_argument('--generator-name', type=str, required=True,
choices=('Tacotron2', 'FastPitch'), help='model name')
parser.add_argument('--generator-checkpoint', type=str, required=True,
help='full path to the generator checkpoint file')
parser.add_argument('-o', '--output', type=str, default="trtis_repo/tacotron/1/model.pt",
help='filename for the exported TorchScript model')
parser.add_argument('--amp', action='store_true',
help='inference with AMP')
return parser
def main():
parser = argparse.ArgumentParser(description='Export models to TorchScript')
parser = parse_args(parser)
args = parser.parse_args()
model = load_and_setup_model(
args.generator_name, parser, args.generator_checkpoint,
args.amp, device='cpu', forward_is_infer=True, polyak=False,
jitable=True)
torch.jit.save(torch.jit.script(model), args.output)
if __name__ == '__main__':
main()
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/export_torchscript.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import time
from pathlib import Path
import torch
import tqdm
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from torch.utils.data import DataLoader
from fastpitch.data_function import TTSCollate, TTSDataset
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-d', '--dataset-path', type=str,
default='./', help='Path to dataset')
parser.add_argument('--wav-text-filelists', required=True, nargs='+',
type=str, help='Files with audio paths and text')
parser.add_argument('--extract-mels', action='store_true',
help='Calculate spectrograms from .wav files')
parser.add_argument('--extract-pitch', action='store_true',
help='Extract pitch')
parser.add_argument('--save-alignment-priors', action='store_true',
help='Pre-calculate diagonal matrices of alignment of text to audio')
parser.add_argument('--log-file', type=str, default='preproc_log.json',
help='Filename for logging')
parser.add_argument('--n-speakers', type=int, default=1)
# Mel extraction
parser.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
parser.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
parser.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
parser.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
parser.add_argument('--win-length', default=1024, type=int,
help='Window length')
parser.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
parser.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
parser.add_argument('--n-mel-channels', type=int, default=80)
# Pitch extraction
parser.add_argument('--f0-method', default='pyin', type=str,
choices=['pyin'], help='F0 estimation method')
# Performance
parser.add_argument('-b', '--batch-size', default=1, type=int)
parser.add_argument('--n-workers', type=int, default=16)
# Language
parser.add_argument('--symbol_set', default='english_basic',
choices=['english_basic', 'english_mandarin_basic'],
help='Symbols in the dataset')
return parser
def main():
parser = argparse.ArgumentParser(description='FastPitch Data Pre-processing')
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
DLLogger.init(backends=[JSONStreamBackend(Verbosity.DEFAULT, Path(args.dataset_path, args.log_file)),
StdOutBackend(Verbosity.VERBOSE)])
for k, v in vars(args).items():
DLLogger.log(step="PARAMETER", data={k: v})
DLLogger.flush()
if args.extract_mels:
Path(args.dataset_path, 'mels').mkdir(parents=False, exist_ok=True)
if args.extract_pitch:
Path(args.dataset_path, 'pitch').mkdir(parents=False, exist_ok=True)
if args.save_alignment_priors:
Path(args.dataset_path, 'alignment_priors').mkdir(parents=False, exist_ok=True)
for filelist in args.wav_text_filelists:
print(f'Processing {filelist}...')
dataset = TTSDataset(
args.dataset_path,
filelist,
text_cleaners=['english_cleaners_v2'],
n_mel_channels=args.n_mel_channels,
symbol_set=args.symbol_set,
p_arpabet=0.0,
n_speakers=args.n_speakers,
load_mel_from_disk=False,
load_pitch_from_disk=False,
pitch_mean=None,
pitch_std=None,
max_wav_value=args.max_wav_value,
sampling_rate=args.sampling_rate,
filter_length=args.filter_length,
hop_length=args.hop_length,
win_length=args.win_length,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
betabinomial_online_dir=None,
pitch_online_dir=None,
pitch_online_method=args.f0_method)
data_loader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=False,
sampler=None,
num_workers=args.n_workers,
collate_fn=TTSCollate(),
pin_memory=False,
drop_last=False)
all_filenames = set()
for i, batch in enumerate(tqdm.tqdm(data_loader)):
tik = time.time()
_, input_lens, mels, mel_lens, _, pitch, _, _, attn_prior, fpaths = batch
# Ensure filenames are unique
for p in fpaths:
fname = Path(p).name
if fname in all_filenames:
raise ValueError(f'Filename is not unique: {fname}')
all_filenames.add(fname)
if args.extract_mels:
for j, mel in enumerate(mels):
fname = Path(fpaths[j]).with_suffix('.pt').name
fpath = Path(args.dataset_path, 'mels', fname)
torch.save(mel[:, :mel_lens[j]], fpath)
if args.extract_pitch:
for j, p in enumerate(pitch):
fname = Path(fpaths[j]).with_suffix('.pt').name
fpath = Path(args.dataset_path, 'pitch', fname)
torch.save(p[:mel_lens[j]], fpath)
if args.save_alignment_priors:
for j, prior in enumerate(attn_prior):
fname = Path(fpaths[j]).with_suffix('.pt').name
fpath = Path(args.dataset_path, 'alignment_priors', fname)
torch.save(prior[:mel_lens[j], :input_lens[j]], fpath)
if __name__ == '__main__':
main()
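# Example invocation (a sketch; the dataset and filelist paths below are
# hypothetical, the flags are the ones defined in parse_args above):
#
#   python prepare_dataset.py \
#       --dataset-path ./LJSpeech-1.1 \
#       --wav-text-filelists filelists/ljs_audio_text.txt \
#       --extract-mels --extract-pitch --save-alignment-priors \
#       --batch-size 1 --n-workers 16
#
# Outputs land in <dataset-path>/mels, <dataset-path>/pitch and
# <dataset-path>/alignment_priors as .pt tensors, one file per utterance.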
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/prepare_dataset.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import re
import sys
import torch
from common.text.symbols import get_symbols, get_pad_idx
from common.utils import DefaultAttrDict, AttrDict
from fastpitch.model import FastPitch
from fastpitch.model_jit import FastPitchJIT
from hifigan.models import Generator
try:
from waveglow.model import WaveGlow
from waveglow import model as glow
from waveglow.denoiser import Denoiser
sys.modules['glow'] = glow
except ImportError:
print("WARNING: Couldn't import WaveGlow")
def parse_model_args(model_name, parser, add_help=False):
if model_name == 'FastPitch':
from fastpitch import arg_parser
return arg_parser.parse_fastpitch_args(parser, add_help)
elif model_name == 'HiFi-GAN':
from hifigan import arg_parser
return arg_parser.parse_hifigan_args(parser, add_help)
elif model_name == 'WaveGlow':
from waveglow.arg_parser import parse_waveglow_args
return parse_waveglow_args(parser, add_help)
else:
raise NotImplementedError(model_name)
def get_model(model_name, model_config, device, bn_uniform_init=False,
forward_is_infer=False, jitable=False):
"""Chooses a model based on name"""
del bn_uniform_init # unused (old name: uniform_initialize_bn_weight)
if model_name == 'FastPitch':
if jitable:
model = FastPitchJIT(**model_config)
else:
model = FastPitch(**model_config)
elif model_name == 'HiFi-GAN':
model = Generator(model_config)
elif model_name == 'WaveGlow':
model = WaveGlow(**model_config)
else:
raise NotImplementedError(model_name)
if forward_is_infer and hasattr(model, 'infer'):
model.forward = model.infer
return model.to(device)
def get_model_config(model_name, args, ckpt_config=None):
""" Get config needed to instantiate the model """
# Mark keys missing in `args` with an object (None is ambiguous)
_missing = object()
args = DefaultAttrDict(lambda: _missing, vars(args))
# `ckpt_config` is loaded from the checkpoint and has the priority
# `model_config` is based on args and fills empty slots in `ckpt_config`
if model_name == 'FastPitch':
model_config = dict(
# io
n_mel_channels=args.n_mel_channels,
# symbols
n_symbols=(len(get_symbols(args.symbol_set))
if args.symbol_set is not _missing else _missing),
padding_idx=(get_pad_idx(args.symbol_set)
if args.symbol_set is not _missing else _missing),
symbols_embedding_dim=args.symbols_embedding_dim,
# input FFT
in_fft_n_layers=args.in_fft_n_layers,
in_fft_n_heads=args.in_fft_n_heads,
in_fft_d_head=args.in_fft_d_head,
in_fft_conv1d_kernel_size=args.in_fft_conv1d_kernel_size,
in_fft_conv1d_filter_size=args.in_fft_conv1d_filter_size,
in_fft_output_size=args.in_fft_output_size,
p_in_fft_dropout=args.p_in_fft_dropout,
p_in_fft_dropatt=args.p_in_fft_dropatt,
p_in_fft_dropemb=args.p_in_fft_dropemb,
# output FFT
out_fft_n_layers=args.out_fft_n_layers,
out_fft_n_heads=args.out_fft_n_heads,
out_fft_d_head=args.out_fft_d_head,
out_fft_conv1d_kernel_size=args.out_fft_conv1d_kernel_size,
out_fft_conv1d_filter_size=args.out_fft_conv1d_filter_size,
out_fft_output_size=args.out_fft_output_size,
p_out_fft_dropout=args.p_out_fft_dropout,
p_out_fft_dropatt=args.p_out_fft_dropatt,
p_out_fft_dropemb=args.p_out_fft_dropemb,
# duration predictor
dur_predictor_kernel_size=args.dur_predictor_kernel_size,
dur_predictor_filter_size=args.dur_predictor_filter_size,
p_dur_predictor_dropout=args.p_dur_predictor_dropout,
dur_predictor_n_layers=args.dur_predictor_n_layers,
# pitch predictor
pitch_predictor_kernel_size=args.pitch_predictor_kernel_size,
pitch_predictor_filter_size=args.pitch_predictor_filter_size,
p_pitch_predictor_dropout=args.p_pitch_predictor_dropout,
pitch_predictor_n_layers=args.pitch_predictor_n_layers,
# pitch conditioning
pitch_embedding_kernel_size=args.pitch_embedding_kernel_size,
# speakers parameters
n_speakers=args.n_speakers,
speaker_emb_weight=args.speaker_emb_weight,
# energy predictor
energy_predictor_kernel_size=args.energy_predictor_kernel_size,
energy_predictor_filter_size=args.energy_predictor_filter_size,
p_energy_predictor_dropout=args.p_energy_predictor_dropout,
energy_predictor_n_layers=args.energy_predictor_n_layers,
# energy conditioning
energy_conditioning=args.energy_conditioning,
energy_embedding_kernel_size=args.energy_embedding_kernel_size,
)
elif model_name == 'HiFi-GAN':
if args.hifigan_config is not None:
assert ckpt_config is None, (
"Supplied --hifigan-config, but the checkpoint has a config. "
"Drop the flag or remove the config from the checkpoint file.")
print(f'HiFi-GAN: Reading model config from {args.hifigan_config}')
with open(args.hifigan_config) as f:
args = AttrDict(json.load(f))
model_config = dict(
# generator architecture
upsample_rates=args.upsample_rates,
upsample_kernel_sizes=args.upsample_kernel_sizes,
upsample_initial_channel=args.upsample_initial_channel,
resblock=args.resblock,
resblock_kernel_sizes=args.resblock_kernel_sizes,
resblock_dilation_sizes=args.resblock_dilation_sizes,
)
elif model_name == 'WaveGlow':
model_config = dict(
n_mel_channels=args.n_mel_channels,
n_flows=args.flows,
n_group=args.groups,
n_early_every=args.early_every,
n_early_size=args.early_size,
WN_config=dict(
n_layers=args.wn_layers,
kernel_size=args.wn_kernel_size,
n_channels=args.wn_channels
)
)
else:
raise NotImplementedError(model_name)
# Start with ckpt_config, and fill missing keys from model_config
final_config = {} if ckpt_config is None else ckpt_config.copy()
missing_keys = set(model_config.keys()) - set(final_config.keys())
final_config.update({k: model_config[k] for k in missing_keys})
# If there was a ckpt_config, it should have had all args
if ckpt_config is not None and len(missing_keys) > 0:
print(f'WARNING: Keys {missing_keys} missing from the loaded config; '
'using args instead.')
assert all(v is not _missing for v in final_config.values())
return final_config
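# A minimal sketch of the merge semantics above (hypothetical keys): values
# already present in ckpt_config win, and only keys absent from it are
# filled in from the command-line args, e.g.
#
#   ckpt_config  = {'n_mel_channels': 80}
#   model_config = {'n_mel_channels': 80, 'energy_conditioning': True}
#   final_config = {'n_mel_channels': 80, 'energy_conditioning': True}
#
# with a warning printed for any key that had to be taken from args.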
def get_model_train_setup(model_name, args):
""" Dump train setup for documentation purposes """
if model_name == 'FastPitch':
return dict()
elif model_name == 'HiFi-GAN':
return dict(
# audio
segment_size=args.segment_size,
filter_length=args.filter_length,
num_mels=args.num_mels,
hop_length=args.hop_length,
win_length=args.win_length,
sampling_rate=args.sampling_rate,
mel_fmin=args.mel_fmin,
mel_fmax=args.mel_fmax,
mel_fmax_loss=args.mel_fmax_loss,
max_wav_value=args.max_wav_value,
# other
seed=args.seed,
# optimization
base_lr=args.learning_rate,
lr_decay=args.lr_decay,
epochs_all=args.epochs,
)
elif model_name == 'WaveGlow':
return dict()
else:
raise NotImplementedError(model_name)
def load_model_from_ckpt(checkpoint_data, model, key='state_dict'):
if key is None:
return checkpoint_data['model'], None
sd = checkpoint_data[key]
    sd = {re.sub(r'^module\.', '', k): v for k, v in sd.items()}
status = model.load_state_dict(sd, strict=False)
return model, status
def load_and_setup_model(model_name, parser, checkpoint, amp, device,
unk_args=[], forward_is_infer=False, jitable=False):
if checkpoint is not None:
ckpt_data = torch.load(checkpoint)
print(f'{model_name}: Loading {checkpoint}...')
ckpt_config = ckpt_data.get('config')
if ckpt_config is None:
print(f'{model_name}: No model config in the checkpoint; using args.')
else:
print(f'{model_name}: Found model config saved in the checkpoint.')
else:
ckpt_config = None
ckpt_data = {}
model_parser = parse_model_args(model_name, parser, add_help=False)
model_args, model_unk_args = model_parser.parse_known_args()
unk_args[:] = list(set(unk_args) & set(model_unk_args))
model_config = get_model_config(model_name, model_args, ckpt_config)
model = get_model(model_name, model_config, device,
forward_is_infer=forward_is_infer,
jitable=jitable)
if checkpoint is not None:
key = 'generator' if model_name == 'HiFi-GAN' else 'state_dict'
model, status = load_model_from_ckpt(ckpt_data, model, key)
missing = [] if status is None else status.missing_keys
unexpected = [] if status is None else status.unexpected_keys
        # Attention is only used during training; we won't miss it at inference
if model_name == 'FastPitch':
missing = [k for k in missing if not k.startswith('attention.')]
unexpected = [k for k in unexpected if not k.startswith('attention.')]
assert len(missing) == 0 and len(unexpected) == 0, (
f'Mismatched keys when loading parameters. Missing: {missing}, '
f'unexpected: {unexpected}.')
if model_name == "WaveGlow":
for k, m in model.named_modules():
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility
model = model.remove_weightnorm(model)
elif model_name == 'HiFi-GAN':
assert model_args.hifigan_config is not None or ckpt_config is not None, (
'Use a HiFi-GAN checkpoint from NVIDIA DeepLearningExamples with '
'saved config or supply --hifigan-config <json_file>.')
model.remove_weight_norm()
if amp:
model.half()
model.eval()
return model.to(device), model_config, ckpt_data.get('train_setup', {})
def load_and_setup_ts_model(model_name, checkpoint, amp, device=None):
print(f'{model_name}: Loading TorchScript checkpoint {checkpoint}...')
model = torch.jit.load(checkpoint).eval()
if device is not None:
model = model.to(device)
if amp:
model.half()
elif next(model.parameters()).dtype == torch.float16:
        raise ValueError('Trying to load an FP32 model, but the TorchScript '
                         'checkpoint is in FP16 precision.')
return model
def convert_ts_to_trt(model_name, ts_model, parser, amp, unk_args=[]):
trt_parser = _parse_trt_compilation_args(model_name, parser, add_help=False)
trt_args, trt_unk_args = trt_parser.parse_known_args()
unk_args[:] = list(set(unk_args) & set(trt_unk_args))
if model_name == 'HiFi-GAN':
return _convert_ts_to_trt_hifigan(
ts_model, amp, trt_args.trt_min_opt_max_batch,
trt_args.trt_min_opt_max_hifigan_length)
else:
raise NotImplementedError
def _parse_trt_compilation_args(model_name, parent, add_help=False):
"""
Parse model and inference specific commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help,
allow_abbrev=False)
trt = parser.add_argument_group(f'{model_name} Torch-TensorRT compilation parameters')
trt.add_argument('--trt-min-opt-max-batch', nargs=3, type=int,
default=(1, 8, 16),
help='Torch-TensorRT min, optimal and max batch size')
if model_name == 'HiFi-GAN':
trt.add_argument('--trt-min-opt-max-hifigan-length', nargs=3, type=int,
default=(100, 800, 1200),
help='Torch-TensorRT min, optimal and max audio length (in frames)')
return parser
def _convert_ts_to_trt_hifigan(ts_model, amp, trt_min_opt_max_batch,
trt_min_opt_max_hifigan_length, num_mels=80):
import torch_tensorrt
trt_dtype = torch.half if amp else torch.float
print(f'Torch TensorRT: compiling HiFi-GAN for dtype {trt_dtype}.')
min_shp, opt_shp, max_shp = zip(trt_min_opt_max_batch,
(num_mels,) * 3,
trt_min_opt_max_hifigan_length)
compile_settings = {
"inputs": [torch_tensorrt.Input(
min_shape=min_shp,
opt_shape=opt_shp,
max_shape=max_shp,
dtype=trt_dtype,
)],
"enabled_precisions": {trt_dtype},
"require_full_compilation": True,
}
trt_model = torch_tensorrt.compile(ts_model, **compile_settings)
print('Torch TensorRT: compilation successful.')
return trt_model
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/models.py |
# *****************************************************************************
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import copy
import os
import time
from collections import defaultdict, OrderedDict
from itertools import cycle
import numpy as np
import torch
import torch.distributed as dist
import amp_C
from apex.optimizers import FusedAdam, FusedLAMB
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import common.tb_dllogger as logger
import models
from common.tb_dllogger import log
from common.repeated_dataloader import (RepeatedDataLoader,
RepeatedDistributedSampler)
from common.text import cmudict
from common.utils import (BenchmarkStats, Checkpointer,
load_pretrained_weights, prepare_tmp)
from fastpitch.attn_loss_function import AttentionBinarizationLoss
from fastpitch.data_function import batch_to_gpu, ensure_disjoint, TTSCollate, TTSDataset
from fastpitch.loss_function import FastPitchLoss
def parse_args(parser):
parser.add_argument('-o', '--output', type=str, required=True,
help='Directory to save checkpoints')
parser.add_argument('-d', '--dataset-path', type=str, default='./',
help='Path to dataset')
parser.add_argument('--log-file', type=str, default=None,
help='Path to a DLLogger log file')
train = parser.add_argument_group('training setup')
train.add_argument('--epochs', type=int, required=True,
help='Number of total epochs to run')
train.add_argument('--epochs-per-checkpoint', type=int, default=50,
help='Number of epochs per checkpoint')
train.add_argument('--checkpoint-path', type=str, default=None,
help='Checkpoint path to resume training')
train.add_argument('--keep-milestones', default=list(range(100, 1000, 100)),
type=int, nargs='+',
help='Milestone checkpoints to keep from removing')
train.add_argument('--resume', action='store_true',
help='Resume training from the last checkpoint')
train.add_argument('--seed', type=int, default=1234,
help='Seed for PyTorch random number generators')
train.add_argument('--amp', action='store_true',
help='Enable AMP')
train.add_argument('--cuda', action='store_true',
help='Run on GPU using CUDA')
train.add_argument('--cudnn-benchmark', action='store_true',
help='Enable cudnn benchmark mode')
train.add_argument('--ema-decay', type=float, default=0,
help='Discounting factor for training weights EMA')
train.add_argument('--grad-accumulation', type=int, default=1,
help='Training steps to accumulate gradients for')
train.add_argument('--kl-loss-start-epoch', type=int, default=250,
help='Start adding the hard attention loss term')
train.add_argument('--kl-loss-warmup-epochs', type=int, default=100,
help='Gradually increase the hard attention loss term')
train.add_argument('--kl-loss-weight', type=float, default=1.0,
                       help='Weight of the hard attention (KL) loss term')
train.add_argument('--benchmark-epochs-num', type=int, default=20,
help='Number of epochs for calculating final stats')
train.add_argument('--validation-freq', type=int, default=1,
help='Validate every N epochs to use less compute')
train.add_argument('--init-from-checkpoint', type=str, default=None,
help='Initialize model weights with a pre-trained ckpt')
opt = parser.add_argument_group('optimization setup')
opt.add_argument('--optimizer', type=str, default='lamb',
help='Optimization algorithm')
opt.add_argument('-lr', '--learning-rate', type=float, required=True,
                     help='Learning rate')
opt.add_argument('--weight-decay', default=1e-6, type=float,
help='Weight decay')
opt.add_argument('--grad-clip-thresh', default=1000.0, type=float,
help='Clip threshold for gradients')
opt.add_argument('-bs', '--batch-size', type=int, required=True,
help='Batch size per GPU')
opt.add_argument('--warmup-steps', type=int, default=1000,
help='Number of steps for lr warmup')
opt.add_argument('--dur-predictor-loss-scale', type=float,
default=1.0, help='Rescale duration predictor loss')
opt.add_argument('--pitch-predictor-loss-scale', type=float,
default=1.0, help='Rescale pitch predictor loss')
opt.add_argument('--attn-loss-scale', type=float,
default=1.0, help='Rescale alignment loss')
data = parser.add_argument_group('dataset parameters')
data.add_argument('--training-files', type=str, nargs='*', required=True,
help='Paths to training filelists.')
data.add_argument('--validation-files', type=str, nargs='*',
required=True, help='Paths to validation filelists')
data.add_argument('--text-cleaners', nargs='*',
default=['english_cleaners'], type=str,
help='Type of text cleaners for input text')
data.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
data.add_argument('--p-arpabet', type=float, default=0.0,
help='Probability of using arpabets instead of graphemes '
'for each word; set 0 for pure grapheme training')
data.add_argument('--heteronyms-path', type=str, default='cmudict/heteronyms',
help='Path to the list of heteronyms')
data.add_argument('--cmudict-path', type=str, default='cmudict/cmudict-0.7b',
help='Path to the pronouncing dictionary')
data.add_argument('--prepend-space-to-text', action='store_true',
help='Capture leading silence with a space token')
data.add_argument('--append-space-to-text', action='store_true',
help='Capture trailing silence with a space token')
data.add_argument('--num-workers', type=int, default=6,
help='Subprocesses for train and val DataLoaders')
data.add_argument('--trainloader-repeats', type=int, default=100,
help='Repeats the dataset to prolong epochs')
cond = parser.add_argument_group('data for conditioning')
cond.add_argument('--n-speakers', type=int, default=1,
help='Number of speakers in the dataset. '
'n_speakers > 1 enables speaker embeddings')
cond.add_argument('--load-pitch-from-disk', action='store_true',
help='Use pitch cached on disk with prepare_dataset.py')
cond.add_argument('--pitch-online-method', default='pyin',
choices=['pyin'],
                      help='Calculate pitch on the fly during training')
cond.add_argument('--pitch-online-dir', type=str, default=None,
help='A directory for storing pitch calculated on-line')
cond.add_argument('--pitch-mean', type=float, default=214.72203,
help='Normalization value for pitch')
cond.add_argument('--pitch-std', type=float, default=65.72038,
help='Normalization value for pitch')
cond.add_argument('--load-mel-from-disk', action='store_true',
help='Use mel-spectrograms cache on the disk') # XXX
audio = parser.add_argument_group('audio parameters')
audio.add_argument('--max-wav-value', default=32768.0, type=float,
help='Maximum audiowave value')
audio.add_argument('--sampling-rate', default=22050, type=int,
help='Sampling rate')
audio.add_argument('--filter-length', default=1024, type=int,
help='Filter length')
audio.add_argument('--hop-length', default=256, type=int,
help='Hop (stride) length')
audio.add_argument('--win-length', default=1024, type=int,
help='Window length')
audio.add_argument('--mel-fmin', default=0.0, type=float,
help='Minimum mel frequency')
audio.add_argument('--mel-fmax', default=8000.0, type=float,
help='Maximum mel frequency')
dist = parser.add_argument_group('distributed setup')
dist.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0),
help='Rank of the process for multiproc; do not set manually')
dist.add_argument('--world_size', type=int, default=os.getenv('WORLD_SIZE', 1),
help='Number of processes for multiproc; do not set manually')
return parser
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
return rt.true_divide(num_gpus)
def init_distributed(args, world_size, rank):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing distributed training")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(backend=('nccl' if args.cuda else 'gloo'),
init_method='env://')
print("Done initializing distributed training")
def validate(model, epoch, total_iter, criterion, val_loader, distributed_run,
batch_to_gpu, ema=False):
was_training = model.training
model.eval()
tik = time.perf_counter()
with torch.no_grad():
val_meta = defaultdict(float)
val_num_frames = 0
for i, batch in enumerate(val_loader):
x, y, num_frames = batch_to_gpu(batch)
y_pred = model(x)
loss, meta = criterion(y_pred, y, is_training=False, meta_agg='sum')
if distributed_run:
for k, v in meta.items():
val_meta[k] += reduce_tensor(v, 1)
val_num_frames += reduce_tensor(num_frames.data, 1).item()
else:
for k, v in meta.items():
val_meta[k] += v
val_num_frames += num_frames.item()
val_meta = {k: v / len(val_loader.dataset) for k, v in val_meta.items()}
val_meta['took'] = time.perf_counter() - tik
log((epoch,) if epoch is not None else (), tb_total_steps=total_iter,
subset='val_ema' if ema else 'val',
data=OrderedDict([
('loss', val_meta['loss'].item()),
('mel_loss', val_meta['mel_loss'].item()),
('frames/s', val_num_frames / val_meta['took']),
('took', val_meta['took'])]),
)
if was_training:
model.train()
return val_meta
def adjust_learning_rate(total_iter, opt, learning_rate, warmup_iters=None):
if warmup_iters == 0:
scale = 1.0
elif total_iter > warmup_iters:
scale = 1. / (total_iter ** 0.5)
else:
scale = total_iter / (warmup_iters ** 1.5)
for param_group in opt.param_groups:
param_group['lr'] = learning_rate * scale
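# The schedule above is effectively the Noam-style (Transformer) warmup:
#
#   lr(step) = base_lr * min(step / warmup_steps**1.5, 1 / sqrt(step))
#
# i.e. the LR grows linearly for warmup_steps iterations and then decays as
# the inverse square root of the step count (warmup_steps == 0 disables the
# scaling altogether).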
def apply_ema_decay(model, ema_model, decay):
if not decay:
return
st = model.state_dict()
add_module = hasattr(model, 'module') and not hasattr(ema_model, 'module')
for k, v in ema_model.state_dict().items():
if add_module and not k.startswith('module.'):
k = 'module.' + k
v.copy_(decay * v + (1 - decay) * st[k])
def init_multi_tensor_ema(model, ema_model):
model_weights = list(model.state_dict().values())
ema_model_weights = list(ema_model.state_dict().values())
ema_overflow_buf = torch.cuda.IntTensor([0])
return model_weights, ema_model_weights, ema_overflow_buf
def apply_multi_tensor_ema(decay, model_weights, ema_weights, overflow_buf):
amp_C.multi_tensor_axpby(
65536, overflow_buf, [ema_weights, model_weights, ema_weights],
decay, 1-decay, -1)
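# Both EMA helpers above implement the same exponential moving average,
#
#   ema_param <- decay * ema_param + (1 - decay) * model_param
#
# apply_ema_decay updates tensors one by one in Python, while
# apply_multi_tensor_ema runs the same axpby update fused across all
# parameters with the apex multi_tensor_axpby kernel.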
def main():
parser = argparse.ArgumentParser(description='PyTorch FastPitch Training',
allow_abbrev=False)
parser = parse_args(parser)
args, _ = parser.parse_known_args()
if args.p_arpabet > 0.0:
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
distributed_run = args.world_size > 1
torch.manual_seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
if args.local_rank == 0:
if not os.path.exists(args.output):
os.makedirs(args.output)
log_fpath = args.log_file or os.path.join(args.output, 'nvlog.json')
tb_subsets = ['train', 'val']
if args.ema_decay > 0.0:
tb_subsets.append('val_ema')
logger.init(log_fpath, args.output, enabled=(args.local_rank == 0),
tb_subsets=tb_subsets)
logger.parameters(vars(args), tb_subset='train')
parser = models.parse_model_args('FastPitch', parser)
args, unk_args = parser.parse_known_args()
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if distributed_run:
init_distributed(args, args.world_size, args.local_rank)
else:
if args.trainloader_repeats > 1:
print('WARNING: Disabled --trainloader-repeats, supported only for'
' multi-GPU data loading.')
args.trainloader_repeats = 1
device = torch.device('cuda' if args.cuda else 'cpu')
model_config = models.get_model_config('FastPitch', args)
model = models.get_model('FastPitch', model_config, device)
if args.init_from_checkpoint is not None:
load_pretrained_weights(model, args.init_from_checkpoint)
attention_kl_loss = AttentionBinarizationLoss()
# Store pitch mean/std as params to translate from Hz during inference
model.pitch_mean[0] = args.pitch_mean
model.pitch_std[0] = args.pitch_std
kw = dict(lr=args.learning_rate, betas=(0.9, 0.98), eps=1e-9,
weight_decay=args.weight_decay)
if args.optimizer == 'adam':
optimizer = FusedAdam(model.parameters(), **kw)
elif args.optimizer == 'lamb':
optimizer = FusedLAMB(model.parameters(), **kw)
else:
        raise ValueError(f'Unknown optimizer: {args.optimizer}')
scaler = torch.cuda.amp.GradScaler(enabled=args.amp)
if args.ema_decay > 0:
ema_model = copy.deepcopy(model)
else:
ema_model = None
if distributed_run:
model = DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank,
find_unused_parameters=True)
train_state = {'epoch': 1, 'total_iter': 1}
checkpointer = Checkpointer(args.output, args.keep_milestones)
checkpointer.maybe_load(model, optimizer, scaler, train_state, args,
ema_model)
start_epoch = train_state['epoch']
total_iter = train_state['total_iter']
criterion = FastPitchLoss(
dur_predictor_loss_scale=args.dur_predictor_loss_scale,
pitch_predictor_loss_scale=args.pitch_predictor_loss_scale,
attn_loss_scale=args.attn_loss_scale)
collate_fn = TTSCollate()
if args.local_rank == 0:
prepare_tmp(args.pitch_online_dir)
trainset = TTSDataset(audiopaths_and_text=args.training_files, **vars(args))
valset = TTSDataset(audiopaths_and_text=args.validation_files, **vars(args))
ensure_disjoint(trainset, valset)
if distributed_run:
train_sampler = RepeatedDistributedSampler(args.trainloader_repeats,
trainset, drop_last=True)
val_sampler = DistributedSampler(valset)
shuffle = False
else:
train_sampler, val_sampler, shuffle = None, None, True
# 4 workers are optimal on DGX-1 (from epoch 2 onwards)
kw = {'num_workers': args.num_workers, 'batch_size': args.batch_size,
'collate_fn': collate_fn}
train_loader = RepeatedDataLoader(args.trainloader_repeats, trainset,
shuffle=shuffle, drop_last=True,
sampler=train_sampler, pin_memory=True,
persistent_workers=True, **kw)
val_loader = DataLoader(valset, shuffle=False, sampler=val_sampler,
pin_memory=False, **kw)
if args.ema_decay:
mt_ema_params = init_multi_tensor_ema(model, ema_model)
model.train()
bmark_stats = BenchmarkStats()
torch.cuda.synchronize()
for epoch in range(start_epoch, args.epochs + 1):
epoch_start_time = time.perf_counter()
epoch_loss = 0.0
epoch_mel_loss = 0.0
epoch_num_frames = 0
epoch_frames_per_sec = 0.0
if distributed_run:
train_loader.sampler.set_epoch(epoch)
iter_loss = 0
iter_num_frames = 0
iter_meta = {}
iter_start_time = time.perf_counter()
epoch_iter = 1
for batch, accum_step in zip(train_loader,
cycle(range(1, args.grad_accumulation + 1))):
if accum_step == 1:
adjust_learning_rate(total_iter, optimizer, args.learning_rate,
args.warmup_steps)
model.zero_grad(set_to_none=True)
x, y, num_frames = batch_to_gpu(batch)
with torch.cuda.amp.autocast(enabled=args.amp):
y_pred = model(x)
loss, meta = criterion(y_pred, y)
if (args.kl_loss_start_epoch is not None
and epoch >= args.kl_loss_start_epoch):
if args.kl_loss_start_epoch == epoch and epoch_iter == 1:
print('Begin hard_attn loss')
_, _, _, _, _, _, _, _, attn_soft, attn_hard, _, _ = y_pred
binarization_loss = attention_kl_loss(attn_hard, attn_soft)
kl_weight = min((epoch - args.kl_loss_start_epoch) / args.kl_loss_warmup_epochs, 1.0) * args.kl_loss_weight
meta['kl_loss'] = binarization_loss.clone().detach() * kl_weight
loss += kl_weight * binarization_loss
else:
meta['kl_loss'] = torch.zeros_like(loss)
kl_weight = 0
binarization_loss = 0
loss /= args.grad_accumulation
meta = {k: v / args.grad_accumulation
for k, v in meta.items()}
if args.amp:
scaler.scale(loss).backward()
else:
loss.backward()
if distributed_run:
reduced_loss = reduce_tensor(loss.data, args.world_size).item()
reduced_num_frames = reduce_tensor(num_frames.data, 1).item()
meta = {k: reduce_tensor(v, args.world_size) for k, v in meta.items()}
else:
reduced_loss = loss.item()
reduced_num_frames = num_frames.item()
if np.isnan(reduced_loss):
raise Exception("loss is NaN")
iter_loss += reduced_loss
iter_num_frames += reduced_num_frames
iter_meta = {k: iter_meta.get(k, 0) + meta.get(k, 0) for k in meta}
if accum_step % args.grad_accumulation == 0:
logger.log_grads_tb(total_iter, model)
if args.amp:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
scaler.step(optimizer)
scaler.update()
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.grad_clip_thresh)
optimizer.step()
if args.ema_decay > 0.0:
apply_multi_tensor_ema(args.ema_decay, *mt_ema_params)
iter_mel_loss = iter_meta['mel_loss'].item()
iter_kl_loss = iter_meta['kl_loss'].item()
iter_time = time.perf_counter() - iter_start_time
epoch_frames_per_sec += iter_num_frames / iter_time
epoch_loss += iter_loss
epoch_num_frames += iter_num_frames
epoch_mel_loss += iter_mel_loss
num_iters = len(train_loader) // args.grad_accumulation
log((epoch, epoch_iter, num_iters), tb_total_steps=total_iter,
subset='train', data=OrderedDict([
('loss', iter_loss),
('mel_loss', iter_mel_loss),
('kl_loss', iter_kl_loss),
('kl_weight', kl_weight),
('frames/s', iter_num_frames / iter_time),
('took', iter_time),
('lrate', optimizer.param_groups[0]['lr'])]),
)
iter_loss = 0
iter_num_frames = 0
iter_meta = {}
iter_start_time = time.perf_counter()
if epoch_iter == num_iters:
break
epoch_iter += 1
total_iter += 1
# Finished epoch
epoch_loss /= epoch_iter
epoch_mel_loss /= epoch_iter
epoch_time = time.perf_counter() - epoch_start_time
log((epoch,), tb_total_steps=None, subset='train_avg',
data=OrderedDict([
('loss', epoch_loss),
('mel_loss', epoch_mel_loss),
('frames/s', epoch_num_frames / epoch_time),
('took', epoch_time)]),
)
bmark_stats.update(epoch_num_frames, epoch_loss, epoch_mel_loss,
epoch_time)
if epoch % args.validation_freq == 0:
validate(model, epoch, total_iter, criterion, val_loader,
distributed_run, batch_to_gpu)
if args.ema_decay > 0:
validate(ema_model, epoch, total_iter, criterion, val_loader,
distributed_run, batch_to_gpu, ema=True)
        # Save before the next LR adjustment so the correct LR is restored on resume
checkpointer.maybe_save(args, model, ema_model, optimizer, scaler,
epoch, total_iter, model_config)
logger.flush()
# Finished training
if len(bmark_stats) > 0:
log((), tb_total_steps=None, subset='train_avg',
data=bmark_stats.get(args.benchmark_epochs_num))
validate(model, None, total_iter, criterion, val_loader, distributed_run,
batch_to_gpu)
if __name__ == '__main__':
main()
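# Example single-GPU invocation (a sketch; the dataset and filelist paths
# are hypothetical, all flags are defined in parse_args above):
#
#   python train.py --cuda --amp -o ./output -d ./LJSpeech-1.1 \
#       --training-files filelists/ljs_audio_pitch_text_train.txt \
#       --validation-files filelists/ljs_audio_pitch_text_val.txt \
#       --load-pitch-from-disk --epochs 1000 -lr 0.1 -bs 16
#
# Multi-GPU training launches one such process per GPU (e.g. with torchrun),
# which provides the WORLD_SIZE and LOCAL_RANK values read by the parser.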
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/train.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import sys
import time
import warnings
from pathlib import Path
from tqdm import tqdm
import torch
import numpy as np
from scipy.stats import norm
from scipy.io.wavfile import write
from torch.nn.functional import l1_loss
from torch.nn.utils.rnn import pad_sequence
import dllogger as DLLogger
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
import models
from common import gpu_affinity
from common.tb_dllogger import (init_inference_metadata, stdout_metric_format,
unique_log_fpath)
from common.text import cmudict
from common.text.text_processing import get_text_processing
from common.utils import l2_promote
from fastpitch.pitch_transform import pitch_transform_custom
from hifigan.data_function import MAX_WAV_VALUE, mel_spectrogram
from hifigan.models import Denoiser
from waveglow import model as glow
CHECKPOINT_SPECIFIC_ARGS = [
'sampling_rate', 'hop_length', 'win_length', 'p_arpabet', 'text_cleaners',
'symbol_set', 'max_wav_value', 'prepend_space_to_text',
'append_space_to_text']
def parse_args(parser):
"""
Parse commandline arguments.
"""
parser.add_argument('-i', '--input', type=str, required=True,
                        help='Full path to the input text (phrases separated by newlines)')
parser.add_argument('-o', '--output', default=None,
help='Output folder to save audio (file per phrase)')
parser.add_argument('--log-file', type=str, default=None,
help='Path to a DLLogger log file')
parser.add_argument('--save-mels', action='store_true',
help='Save generator outputs to disk')
parser.add_argument('--cuda', action='store_true',
help='Run inference on a GPU using CUDA')
parser.add_argument('--cudnn-benchmark', action='store_true',
help='Enable cudnn benchmark mode')
parser.add_argument('--l2-promote', action='store_true',
help='Increase max fetch granularity of GPU L2 cache')
parser.add_argument('--fastpitch', type=str, default=None, required=False,
help='Full path to the spectrogram generator .pt file '
'(skip to synthesize from ground truth mels)')
parser.add_argument('--waveglow', type=str, default=None, required=False,
help='Full path to a WaveGlow model .pt file')
parser.add_argument('-s', '--waveglow-sigma-infer', default=0.9, type=float,
help='WaveGlow sigma')
parser.add_argument('--hifigan', type=str, default=None, required=False,
help='Full path to a HiFi-GAN model .pt file')
parser.add_argument('-d', '--denoising-strength', default=0.0, type=float,
help='Capture and subtract model bias to enhance audio')
parser.add_argument('--hop-length', type=int, default=256,
help='STFT hop length for estimating audio length from mel size')
parser.add_argument('--win-length', type=int, default=1024,
help='STFT win length for denoiser and mel loss')
parser.add_argument('-sr', '--sampling-rate', default=22050, type=int,
choices=[22050, 44100], help='Sampling rate')
parser.add_argument('--max_wav_value', default=32768.0, type=float,
help='Maximum audiowave value')
parser.add_argument('--amp', action='store_true',
help='Inference with AMP')
parser.add_argument('-bs', '--batch-size', type=int, default=64)
parser.add_argument('--warmup-steps', type=int, default=0,
help='Warmup iterations before measuring performance')
parser.add_argument('--repeats', type=int, default=1,
help='Repeat inference for benchmarking')
parser.add_argument('--torchscript', action='store_true',
help='Run inference with TorchScript model (convert to TS if needed)')
parser.add_argument('--checkpoint-format', type=str,
choices=['pyt', 'ts'], default='pyt',
help='Input checkpoint format (PyT or TorchScript)')
parser.add_argument('--torch-tensorrt', action='store_true',
help='Run inference with Torch-TensorRT model (compile beforehand)')
parser.add_argument('--report-mel-loss', action='store_true',
help='Report mel loss in metrics')
parser.add_argument('--ema', action='store_true',
help='Use EMA averaged model (if saved in checkpoints)')
parser.add_argument('--dataset-path', type=str,
help='Path to dataset (for loading extra data fields)')
parser.add_argument('--speaker', type=int, default=0,
help='Speaker ID for a multi-speaker model')
parser.add_argument('--affinity', type=str, default='single',
choices=['socket', 'single', 'single_unique',
'socket_unique_interleaved',
'socket_unique_continuous',
'disabled'],
help='type of CPU affinity')
transf = parser.add_argument_group('transform')
transf.add_argument('--fade-out', type=int, default=10,
help='Number of fadeout frames at the end')
transf.add_argument('--pace', type=float, default=1.0,
help='Adjust the pace of speech')
transf.add_argument('--pitch-transform-flatten', action='store_true',
help='Flatten the pitch')
transf.add_argument('--pitch-transform-invert', action='store_true',
help='Invert the pitch wrt mean value')
transf.add_argument('--pitch-transform-amplify', type=float, default=1.0,
help='Multiplicative amplification of pitch variability. '
'Typical values are in the range (1.0, 3.0).')
transf.add_argument('--pitch-transform-shift', type=float, default=0.0,
help='Raise/lower the pitch by <hz>')
transf.add_argument('--pitch-transform-custom', action='store_true',
help='Apply the transform from pitch_transform.py')
txt = parser.add_argument_group('Text processing parameters')
txt.add_argument('--text-cleaners', type=str, nargs='*',
default=['english_cleaners_v2'],
help='Type of text cleaners for input text')
txt.add_argument('--symbol-set', type=str, default='english_basic',
help='Define symbol set for input text')
txt.add_argument('--p-arpabet', type=float, default=0.0, help='')
txt.add_argument('--heteronyms-path', type=str,
default='cmudict/heteronyms', help='')
txt.add_argument('--cmudict-path', type=str,
default='cmudict/cmudict-0.7b', help='')
return parser
def load_fields(fpath):
lines = [l.strip() for l in open(fpath, encoding='utf-8')]
if fpath.endswith('.tsv'):
columns = lines[0].split('\t')
fields = list(zip(*[t.split('\t') for t in lines[1:]]))
else:
columns = ['text']
fields = [lines]
return {c: f for c, f in zip(columns, fields)}
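# The input file is either plain text with one phrase per line (a single
# 'text' column) or a .tsv whose header row names the columns consumed
# below ('text', plus optionally 'mel', 'pitch' and 'output'). A minimal
# hypothetical two-column example, with tab-separated values:
#
#   text<TAB>output
#   Hello world.<TAB>audio_0.wav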
def prepare_input_sequence(fields, device, symbol_set, text_cleaners,
batch_size=128, dataset=None, load_mels=False,
load_pitch=False, p_arpabet=0.0):
tp = get_text_processing(symbol_set, text_cleaners, p_arpabet)
fields['text'] = [torch.LongTensor(tp.encode_text(text))
for text in fields['text']]
order = np.argsort([-t.size(0) for t in fields['text']])
fields['text'] = [fields['text'][i] for i in order]
fields['text_lens'] = torch.LongTensor([t.size(0) for t in fields['text']])
for t in fields['text']:
print(tp.sequence_to_text(t.numpy()))
if load_mels:
assert 'mel' in fields
assert dataset is not None
fields['mel'] = [
torch.load(Path(dataset, fields['mel'][i])).t() for i in order]
fields['mel_lens'] = torch.LongTensor([t.size(0) for t in fields['mel']])
if load_pitch:
assert 'pitch' in fields
fields['pitch'] = [
torch.load(Path(dataset, fields['pitch'][i])) for i in order]
fields['pitch_lens'] = torch.LongTensor([t.size(0) for t in fields['pitch']])
if 'output' in fields:
fields['output'] = [fields['output'][i] for i in order]
# cut into batches & pad
batches = []
for b in range(0, len(order), batch_size):
batch = {f: values[b:b+batch_size] for f, values in fields.items()}
for f in batch:
if f == 'text':
batch[f] = pad_sequence(batch[f], batch_first=True)
elif f == 'mel' and load_mels:
batch[f] = pad_sequence(batch[f], batch_first=True).permute(0, 2, 1)
elif f == 'pitch' and load_pitch:
batch[f] = pad_sequence(batch[f], batch_first=True)
if type(batch[f]) is torch.Tensor:
batch[f] = batch[f].to(device)
batches.append(batch)
return batches
def build_pitch_transformation(args):
if args.pitch_transform_custom:
def custom_(pitch, pitch_lens, mean, std):
return (pitch_transform_custom(pitch * std + mean, pitch_lens)
- mean) / std
return custom_
fun = 'pitch'
if args.pitch_transform_flatten:
fun = f'({fun}) * 0.0'
if args.pitch_transform_invert:
fun = f'({fun}) * -1.0'
if args.pitch_transform_amplify != 1.0:
ampl = args.pitch_transform_amplify
fun = f'({fun}) * {ampl}'
if args.pitch_transform_shift != 0.0:
hz = args.pitch_transform_shift
fun = f'({fun}) + {hz} / std'
if fun == 'pitch':
return None
return eval(f'lambda pitch, pitch_lens, mean, std: {fun}')
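# For example, with --pitch-transform-amplify 2.0 --pitch-transform-shift 50
# the string built above is '((pitch) * 2.0) + 50.0 / std', which eval turns
# into
#
#   lambda pitch, pitch_lens, mean, std: ((pitch) * 2.0) + 50.0 / std
#
# The transforms act on normalized pitch, so a shift given in Hz is divided
# by the pitch std.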
def setup_mel_loss_reporting(args, voc_train_setup):
if args.denoising_strength > 0.0:
print('WARNING: denoising will be included in vocoder mel loss')
num_mels = voc_train_setup.get('num_mels', 80)
fmin = voc_train_setup.get('mel_fmin', 0)
fmax = voc_train_setup.get('mel_fmax', 8000) # not mel_fmax_loss
def compute_audio_mel_loss(gen_audios, gt_mels, mel_lens):
gen_audios /= MAX_WAV_VALUE
total_loss = 0
for gen_audio, gt_mel, mel_len in zip(gen_audios, gt_mels, mel_lens):
mel_len = mel_len.item()
gen_audio = gen_audio[None, :mel_len * args.hop_length]
gen_mel = mel_spectrogram(gen_audio, args.win_length, num_mels,
args.sampling_rate, args.hop_length,
args.win_length, fmin, fmax)[0]
total_loss += l1_loss(gen_mel, gt_mel[:, :mel_len])
return total_loss.item()
return compute_audio_mel_loss
def compute_mel_loss(mels, lens, gt_mels, gt_lens):
total_loss = 0
for mel, len_, gt_mel, gt_len in zip(mels, lens, gt_mels, gt_lens):
min_len = min(len_, gt_len)
total_loss += l1_loss(gt_mel[:, :min_len], mel[:, :min_len])
return total_loss.item()
class MeasureTime(list):
def __init__(self, *args, cuda=True, **kwargs):
super(MeasureTime, self).__init__(*args, **kwargs)
self.cuda = cuda
def __enter__(self):
if self.cuda:
torch.cuda.synchronize()
self.t0 = time.time()
def __exit__(self, exc_type, exc_value, exc_traceback):
if self.cuda:
torch.cuda.synchronize()
self.append(time.time() - self.t0)
def __add__(self, other):
assert len(self) == len(other)
return MeasureTime((sum(ab) for ab in zip(self, other)), cuda=self.cuda)
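# Typical usage (a sketch): every `with` block appends one wall-clock
# measurement, so the list can be aggregated afterwards, e.g.
#
#   measures = MeasureTime(cuda=True)
#   with measures:
#       audios = vocoder(mel)
#   latency = measures[-1]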
def main():
"""
Launches text-to-speech inference on a single GPU.
"""
parser = argparse.ArgumentParser(description='PyTorch FastPitch Inference',
allow_abbrev=False)
parser = parse_args(parser)
args, unk_args = parser.parse_known_args()
if args.affinity != 'disabled':
nproc_per_node = torch.cuda.device_count()
# print(nproc_per_node)
affinity = gpu_affinity.set_affinity(
0,
nproc_per_node,
args.affinity
)
print(f'Thread affinity: {affinity}')
if args.l2_promote:
l2_promote()
torch.backends.cudnn.benchmark = args.cudnn_benchmark
if args.output is not None:
Path(args.output).mkdir(parents=False, exist_ok=True)
log_fpath = args.log_file or str(Path(args.output, 'nvlog_infer.json'))
DLLogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, log_fpath, append=True),
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, metric_format=stdout_metric_format)
])
init_inference_metadata(args.batch_size)
[DLLogger.log("PARAMETER", {k: v}) for k, v in vars(args).items()]
device = torch.device('cuda' if args.cuda else 'cpu')
gen_train_setup = {}
voc_train_setup = {}
generator = None
vocoder = None
denoiser = None
is_ts_based_infer = args.torch_tensorrt or args.torchscript
assert args.checkpoint_format == 'pyt' or is_ts_based_infer, \
'TorchScript checkpoint can be used only for TS or Torch-TRT' \
' inference. Please set --torchscript or --torch-tensorrt flag.'
assert args.waveglow is None or args.hifigan is None, \
"Specify a single vocoder model"
def _load_pyt_or_ts_model(model_name, ckpt_path):
if args.checkpoint_format == 'ts':
model = models.load_and_setup_ts_model(model_name, ckpt_path,
args.amp, device)
model_train_setup = {}
return model, model_train_setup
model, _, model_train_setup = models.load_and_setup_model(
model_name, parser, ckpt_path, args.amp, device,
unk_args=unk_args, forward_is_infer=True, jitable=is_ts_based_infer)
if is_ts_based_infer:
model = torch.jit.script(model)
return model, model_train_setup
if args.fastpitch is not None:
gen_name = 'fastpitch'
generator, gen_train_setup = _load_pyt_or_ts_model('FastPitch',
args.fastpitch)
if args.waveglow is not None:
voc_name = 'waveglow'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
vocoder, _, voc_train_setup = models.load_and_setup_model(
'WaveGlow', parser, args.waveglow, args.amp, device,
unk_args=unk_args, forward_is_infer=True, jitable=False)
if args.denoising_strength > 0.0:
denoiser = Denoiser(vocoder, sigma=0.0,
win_length=args.win_length).to(device)
# if args.torchscript:
# vocoder = torch.jit.script(vocoder)
def generate_audio(mel):
audios = vocoder(mel, sigma=args.waveglow_sigma_infer)
if denoiser is not None:
audios = denoiser(audios.float(), args.denoising_strength).squeeze(1)
return audios
elif args.hifigan is not None:
voc_name = 'hifigan'
vocoder, voc_train_setup = _load_pyt_or_ts_model('HiFi-GAN',
args.hifigan)
if args.denoising_strength > 0.0:
denoiser = Denoiser(vocoder, win_length=args.win_length).to(device)
if args.torch_tensorrt:
vocoder = models.convert_ts_to_trt('HiFi-GAN', vocoder, parser,
args.amp, unk_args)
def generate_audio(mel):
audios = vocoder(mel).float()
if denoiser is not None:
audios = denoiser(audios.squeeze(1), args.denoising_strength)
return audios.squeeze(1) * args.max_wav_value
if len(unk_args) > 0:
raise ValueError(f'Invalid options {unk_args}')
for k in CHECKPOINT_SPECIFIC_ARGS:
v1 = gen_train_setup.get(k, None)
v2 = voc_train_setup.get(k, None)
assert v1 is None or v2 is None or v1 == v2, \
f'{k} mismatch in spectrogram generator and vocoder'
val = v1 or v2
if val and getattr(args, k) != val:
src = 'generator' if v2 is None else 'vocoder'
print(f'Overwriting args.{k}={getattr(args, k)} with {val} '
f'from {src} checkpoint.')
setattr(args, k, val)
gen_kw = {'pace': args.pace,
'speaker': args.speaker,
'pitch_tgt': None,
'pitch_transform': build_pitch_transformation(args)}
if is_ts_based_infer and generator is not None:
gen_kw.pop('pitch_transform')
print('Note: --pitch-transform-* args are disabled with TorchScript. '
'To condition on pitch, pass pitch_tgt as input.')
if args.p_arpabet > 0.0:
cmudict.initialize(args.cmudict_path, args.heteronyms_path)
if args.report_mel_loss:
mel_loss_fn = setup_mel_loss_reporting(args, voc_train_setup)
fields = load_fields(args.input)
batches = prepare_input_sequence(
fields, device, args.symbol_set, args.text_cleaners, args.batch_size,
args.dataset_path, load_mels=(generator is None or args.report_mel_loss),
p_arpabet=args.p_arpabet)
cycle = itertools.cycle(batches)
    # Use real data rather than synthetic input; FastPitch predicts the output length
for _ in tqdm(range(args.warmup_steps), 'Warmup'):
with torch.no_grad():
b = next(cycle)
if generator is not None:
mel, *_ = generator(b['text'])
else:
mel, mel_lens = b['mel'], b['mel_lens']
if args.amp:
mel = mel.half()
if vocoder is not None:
audios = generate_audio(mel)
gen_measures = MeasureTime(cuda=args.cuda)
vocoder_measures = MeasureTime(cuda=args.cuda)
all_utterances = 0
all_samples = 0
all_batches = 0
all_letters = 0
all_frames = 0
gen_mel_loss_sum = 0
voc_mel_loss_sum = 0
reps = args.repeats
log_enabled = reps == 1
log = lambda s, d: DLLogger.log(step=s, data=d) if log_enabled else None
for rep in (tqdm(range(reps), 'Inference') if reps > 1 else range(reps)):
for b in batches:
if generator is None:
mel, mel_lens = b['mel'], b['mel_lens']
if args.amp:
mel = mel.half()
else:
with torch.no_grad(), gen_measures:
mel, mel_lens, *_ = generator(b['text'], **gen_kw)
if args.report_mel_loss:
gen_mel_loss_sum += compute_mel_loss(
mel, mel_lens, b['mel'], b['mel_lens'])
gen_infer_perf = mel.size(0) * mel.size(2) / gen_measures[-1]
all_letters += b['text_lens'].sum().item()
all_frames += mel.size(0) * mel.size(2)
log(rep, {f"{gen_name}_frames/s": gen_infer_perf})
log(rep, {f"{gen_name}_latency": gen_measures[-1]})
if args.save_mels:
for i, mel_ in enumerate(mel):
m = mel_[:, :mel_lens[i].item()].permute(1, 0)
fname = b['output'][i] if 'output' in b else f'mel_{i}.npy'
mel_path = Path(args.output, Path(fname).stem + '.npy')
np.save(mel_path, m.cpu().numpy())
if vocoder is not None:
with torch.no_grad(), vocoder_measures:
audios = generate_audio(mel)
vocoder_infer_perf = (
audios.size(0) * audios.size(1) / vocoder_measures[-1])
log(rep, {f"{voc_name}_samples/s": vocoder_infer_perf})
log(rep, {f"{voc_name}_latency": vocoder_measures[-1]})
if args.report_mel_loss:
voc_mel_loss_sum += mel_loss_fn(audios, mel, mel_lens)
if args.output is not None and reps == 1:
for i, audio in enumerate(audios):
audio = audio[:mel_lens[i].item() * args.hop_length]
if args.fade_out:
fade_len = args.fade_out * args.hop_length
fade_w = torch.linspace(1.0, 0.0, fade_len)
audio[-fade_len:] *= fade_w.to(audio.device)
audio = audio / torch.max(torch.abs(audio))
fname = b['output'][i] if 'output' in b else f'audio_{all_utterances + i}.wav'
audio_path = Path(args.output, fname)
write(audio_path, args.sampling_rate, audio.cpu().numpy())
if generator is not None:
log(rep, {"latency": (gen_measures[-1] + vocoder_measures[-1])})
all_utterances += mel.size(0)
all_samples += mel_lens.sum().item() * args.hop_length
all_batches += 1
log_enabled = True
if generator is not None:
gm = np.sort(np.asarray(gen_measures))
rtf = all_samples / (all_utterances * gm.mean() * args.sampling_rate)
rtf_at = all_samples / (all_batches * gm.mean() * args.sampling_rate)
log((), {f"avg_{gen_name}_tokens/s": all_letters / gm.sum()})
log((), {f"avg_{gen_name}_frames/s": all_frames / gm.sum()})
log((), {f"avg_{gen_name}_latency": gm.mean()})
log((), {f"avg_{gen_name}_RTF": rtf})
log((), {f"avg_{gen_name}_RTF@{args.batch_size}": rtf_at})
log((), {f"90%_{gen_name}_latency": gm.mean() + norm.ppf((1.0 + 0.90) / 2) * gm.std()})
log((), {f"95%_{gen_name}_latency": gm.mean() + norm.ppf((1.0 + 0.95) / 2) * gm.std()})
log((), {f"99%_{gen_name}_latency": gm.mean() + norm.ppf((1.0 + 0.99) / 2) * gm.std()})
if args.report_mel_loss:
log((), {f"avg_{gen_name}_mel-loss": gen_mel_loss_sum / all_utterances})
if vocoder is not None:
vm = np.sort(np.asarray(vocoder_measures))
rtf = all_samples / (all_utterances * vm.mean() * args.sampling_rate)
rtf_at = all_samples / (all_batches * vm.mean() * args.sampling_rate)
log((), {f"avg_{voc_name}_samples/s": all_samples / vm.sum()})
log((), {f"avg_{voc_name}_latency": vm.mean()})
log((), {f"avg_{voc_name}_RTF": rtf})
log((), {f"avg_{voc_name}_RTF@{args.batch_size}": rtf_at})
log((), {f"90%_{voc_name}_latency": vm.mean() + norm.ppf((1.0 + 0.90) / 2) * vm.std()})
log((), {f"95%_{voc_name}_latency": vm.mean() + norm.ppf((1.0 + 0.95) / 2) * vm.std()})
log((), {f"99%_{voc_name}_latency": vm.mean() + norm.ppf((1.0 + 0.99) / 2) * vm.std()})
if args.report_mel_loss:
log((), {f"avg_{voc_name}_mel-loss": voc_mel_loss_sum / all_utterances})
if generator is not None and vocoder is not None:
m = gm + vm
rtf = all_samples / (all_utterances * m.mean() * args.sampling_rate)
rtf_at = all_samples / (all_batches * m.mean() * args.sampling_rate)
log((), {"avg_samples/s": all_samples / m.sum()})
log((), {"avg_letters/s": all_letters / m.sum()})
log((), {"avg_latency": m.mean()})
log((), {"avg_RTF": rtf})
log((), {f"avg_RTF@{args.batch_size}": rtf_at})
log((), {"90%_latency": m.mean() + norm.ppf((1.0 + 0.90) / 2) * m.std()})
log((), {"95%_latency": m.mean() + norm.ppf((1.0 + 0.95) / 2) * m.std()})
log((), {"99%_latency": m.mean() + norm.ppf((1.0 + 0.99) / 2) * m.std()})
DLLogger.flush()
if __name__ == '__main__':
main()
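# Example invocation (a sketch; the checkpoint and phrase-file paths are
# hypothetical, the flags are defined in parse_args above):
#
#   python inference.py --cuda -i phrases/devset10.tsv -o ./audio_out \
#       --fastpitch output/FastPitch_checkpoint_1000.pt \
#       --hifigan pretrained/hifigan_gen_checkpoint.pt \
#       --batch-size 16
#
# At most one vocoder (--hifigan or --waveglow) may be given; omitting
# --fastpitch synthesizes from the ground-truth mels listed in the input.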
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/inference.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
import random
import common.layers as layers
from common.utils import load_wav_to_torch, load_filepaths_and_text, to_gpu
class MelAudioLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) computes mel-spectrograms from audio files.
"""
def __init__(self,
dataset_path,
audiopaths_and_text,
segment_length,
n_mel_channels,
max_wav_value,
sampling_rate,
filter_length,
hop_length,
win_length,
mel_fmin,
mel_fmax,
args):
self.audiopaths_and_text = load_filepaths_and_text(dataset_path, audiopaths_and_text)
self.max_wav_value = max_wav_value
self.sampling_rate = sampling_rate
self.stft = layers.TacotronSTFT(
filter_length, hop_length, win_length,
n_mel_channels, sampling_rate, mel_fmin,
mel_fmax)
self.segment_length = segment_length
random.seed(1234)
random.shuffle(self.audiopaths_and_text)
def get_mel_audio_pair(self, filename):
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(
audio, (0, self.segment_length - audio.size(0)), 'constant').data
audio = audio / self.max_wav_value
audio_norm = audio.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = melspec.squeeze(0)
return (melspec, audio, len(audio))
def __getitem__(self, index):
return self.get_mel_audio_pair(self.audiopaths_and_text[index][0])
def __len__(self):
return len(self.audiopaths_and_text)
def batch_to_gpu(batch):
x, y, len_y = batch
x = to_gpu(x).float()
y = to_gpu(y).float()
len_y = to_gpu(torch.sum(len_y))
return ((x, y), y, len_y)
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/waveglow/data_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class Invertible1x1Conv(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant
    of its weight matrix. The infer() method applies the inverse
    convolution instead.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:, 0] = -1 * W[:, 0]
W = W.view(c, c, 1)
W = W.contiguous()
self.conv.weight.data = W
def forward(self, z):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W.unsqueeze(0).float()).squeeze()
z = self.conv(z)
return z, log_det_W
def infer(self, z):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor' or z.type() == 'torch.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary
difference from WaveNet is the convolutions need not be causal. There is
also no dilation size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.cond_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2 * n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(n_channels, 2 * n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
cond_layer = torch.nn.Conv1d(n_mel_channels, 2 * n_channels, 1)
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
self.cond_layers.append(cond_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(
res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
for i in range(self.n_layers):
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
self.cond_layers[i](spect),
torch.IntTensor([self.n_channels]))
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:, :self.n_channels, :] + audio
skip_acts = res_skip_acts[:, self.n_channels:, :]
else:
skip_acts = res_skip_acts
if i == 0:
output = skip_acts
else:
output = skip_acts + output
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group / 2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size / 2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels * n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:, :self.n_early_size, :])
audio = audio[:, self.n_early_size:, :]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s) * audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1], 1)
output_audio.append(audio)
return torch.cat(output_audio, 1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1)
spect = spect.permute(0, 2, 1)
audio = torch.randn(spect.size(0),
self.n_remaining_channels,
spect.size(2), device=spect.device).to(spect.dtype)
audio = torch.autograd.Variable(sigma * audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) / 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:, :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k].infer(audio)
if k % self.n_early_every == 0 and k > 0:
z = torch.randn(spect.size(0), self.n_early_size, spect.size(
2), device=spect.device).to(spect.dtype)
audio = torch.cat((sigma * z, audio), 1)
audio = audio.permute(
0, 2, 1).contiguous().view(
audio.size(0), -1).data
return audio
def infer_onnx(self, spect, z, sigma=0.9):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
length_spect_group = spect.size(2)//8
mel_dim = 80
batch_size = spect.size(0)
spect = spect.view((batch_size, mel_dim, length_spect_group, self.n_group))
spect = spect.permute(0, 2, 1, 3)
spect = spect.contiguous()
spect = spect.view((batch_size, length_spect_group, self.n_group*mel_dim))
spect = spect.permute(0, 2, 1)
spect = spect.contiguous()
audio = z[:, :self.n_remaining_channels, :]
z = z[:, self.n_remaining_channels:self.n_group, :]
audio = sigma*audio
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1) // 2)
audio_0 = audio[:, :n_half, :]
audio_1 = audio[:, n_half:(n_half+n_half), :]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:(n_half+n_half), :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b) / torch.exp(s)
audio = torch.cat([audio_0, audio_1], 1)
audio = self.convinv[k].infer(audio)
if k % self.n_early_every == 0 and k > 0:
audio = torch.cat((z[:, :self.n_early_size, :], audio), 1)
z = z[:, self.n_early_size:self.n_group, :]
audio = audio.permute(0,2,1).contiguous().view(batch_size, (length_spect_group * self.n_group))
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layers = remove(WN.cond_layers)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
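# Hedged usage sketch (not part of the original file): builds a small WaveGlow
# on random tensors and runs the training-time forward pass and the infer()
# synthesis pass. The hyperparameters below are illustrative, not the shipped
# training configuration.
if __name__ == '__main__':
    wn_config = dict(n_layers=4, n_channels=64, kernel_size=3)
    model = WaveGlow(n_mel_channels=80, n_flows=4, n_group=8,
                     n_early_every=2, n_early_size=2, WN_config=wn_config)
    spect = torch.randn(1, 80, 20)       # batch x n_mel_channels x frames
    audio = torch.randn(1, 20 * 256)     # roughly frames * upsample stride samples
    z, log_s_list, log_det_W_list = model((spect, audio))
    with torch.no_grad():
        generated = model.infer(spect, sigma=0.8)
    print(z.shape, generated.shape)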
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/waveglow/model.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
from common.layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
device = waveglow.upsample.weight.device
dtype = waveglow.upsample.weight.dtype
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).to(device)
if mode == 'zeros':
mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device)
elif mode == 'normal':
mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device)
else:
            raise Exception("Mode {} is not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio)
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
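# Hedged usage sketch (comments only, not part of the original file): given an
# already-built WaveGlow model `waveglow` and a generated batch `audio`, the
# denoiser is typically used as
#     denoiser = Denoiser(waveglow, filter_length=1024, n_overlap=4)
#     audio_denoised = denoiser(audio, strength=0.01)
# Here `waveglow`, `audio` and the strength value are assumptions chosen only
# for illustration.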
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/waveglow/denoiser.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import torch
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output, clean_audio):
# clean_audio is unused;
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(
z * z) / (2 * self.sigma * self.sigma) - log_s_total - log_det_W_total # noqa: E501
meta = {}
return loss / (z.size(0) * z.size(1) * z.size(2)), meta
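# Hedged usage sketch (not part of the original file): the loss consumes the
# (z, log_s_list, log_det_W_list) tuple produced by WaveGlow's forward pass.
# The tensor shapes below are synthetic and chosen only for illustration.
if __name__ == '__main__':
    z = torch.randn(2, 8, 640)
    log_s_list = [torch.randn(2, 4, 640), torch.randn(2, 3, 640)]
    log_det_W_list = [torch.randn(()), torch.randn(())]
    criterion = WaveGlowLoss(sigma=1.0)
    loss, meta = criterion((z, log_s_list, log_det_W_list), clean_audio=None)
    print(loss.item())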
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/waveglow/loss_function.py |
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
def parse_waveglow_args(parent, add_help=False):
"""
Parse commandline arguments.
"""
parser = argparse.ArgumentParser(parents=[parent], add_help=add_help, allow_abbrev=False)
# misc parameters
parser.add_argument('--n-mel-channels', default=80, type=int,
help='Number of bins in mel-spectrograms')
# glow parameters
parser.add_argument('--flows', default=12, type=int,
help='Number of steps of flow')
parser.add_argument('--groups', default=8, type=int,
help='Number of samples in a group processed by the steps of flow')
parser.add_argument('--early-every', default=4, type=int,
help='Determines how often (i.e., after how many coupling layers) \
a number of channels (defined by --early-size parameter) are output\
to the loss function')
parser.add_argument('--early-size', default=2, type=int,
help='Number of channels output to the loss function')
parser.add_argument('--sigma', default=1.0, type=float,
help='Standard deviation used for sampling from Gaussian')
parser.add_argument('--segment-length', default=4000, type=int,
help='Segment length (audio samples) processed per iteration')
# wavenet parameters
wavenet = parser.add_argument_group('WaveNet parameters')
wavenet.add_argument('--wn-kernel-size', default=3, type=int,
                         help='Kernel size for dilated convolution in the affine coupling layer (WN)')
wavenet.add_argument('--wn-channels', default=512, type=int,
help='Number of channels in WN')
wavenet.add_argument('--wn-layers', default=8, type=int,
help='Number of layers in WN')
return parser
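# Hedged usage sketch (not part of the original file): the parser extends a
# parent parser, so these model flags can be combined with training flags.
if __name__ == '__main__':
    parent = argparse.ArgumentParser(add_help=False)
    parser = parse_waveglow_args(parent, add_help=True)
    args = parser.parse_args(['--flows', '6', '--wn-channels', '256'])
    print(args.flows, args.wn_channels, args.segment_length)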
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/waveglow/arg_parser.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from collections import OrderedDict
from copy import copy
from pathlib import Path
import dllogger
import numpy as np
import torch.distributed as dist
import torch
from dllogger import StdOutBackend, JSONStreamBackend, Verbosity
from common import tb_dllogger
from common.tb_dllogger import (stdout_metric_format, stdout_step_format,
unique_log_fpath, TBLogger)
def init_logger(output_dir, log_file, ema_decay=0.0):
local_rank = 0 if not dist.is_initialized() else dist.get_rank()
print('logger init', local_rank)
if local_rank == 0:
Path(output_dir).mkdir(parents=False, exist_ok=True)
log_fpath = log_file or Path(output_dir, 'nvlog.json')
dllogger.init(backends=[
JSONStreamBackend(Verbosity.DEFAULT, unique_log_fpath(log_fpath)),
StdOutBackend(Verbosity.VERBOSE, step_format=stdout_step_format,
metric_format=stdout_metric_format)])
init_train_metadata()
else:
dllogger.init(backends=[])
tb_train = ['train']
tb_val = ['val']
tb_ema = [k + '_ema' for k in tb_val] if ema_decay > 0.0 else []
tb_dllogger.tb_loggers = {
s: TBLogger(enabled=(local_rank == 0), log_dir=output_dir, name=s)
for s in tb_train + tb_val + tb_ema}
def init_train_metadata():
dllogger.metadata("train_lrate_gen",
{"name": "g lr", "unit": None, "format": ":>3.2e"})
dllogger.metadata("train_lrate_discrim",
{"name": "d lr", "unit": None, "format": ":>3.2e"})
dllogger.metadata("train_avg_lrate_gen",
{"name": "avg g lr", "unit": None, "format": ":>3.2e"})
dllogger.metadata("train_avg_lrate_discrim",
{"name": "avg d lr", "unit": None, "format": ":>3.2e"})
for id_, pref in [('train', ''), ('train_avg', 'avg train '),
('val', ' avg val '), ('val_ema', ' EMA val ')]:
dllogger.metadata(f"{id_}_loss_gen",
{"name": f"{pref}g loss", "unit": None, "format": ":>6.3f"})
dllogger.metadata(f"{id_}_loss_discrim",
{"name": f"{pref}d loss", "unit": None, "format": ":>6.3f"})
dllogger.metadata(f"{id_}_loss_mel",
{"name": f"{pref}mel loss", "unit": None, "format": ":>6.3f"})
dllogger.metadata(f"{id_}_frames/s",
{"name": None, "unit": "frames/s", "format": ":>8.2f"})
dllogger.metadata(f"{id_}_took",
{"name": "took", "unit": "s", "format": ":>3.2f"})
def init_infer_metadata():
raise NotImplementedError
# modalities = [('latency', 's', ':>10.5f'), ('RTF', 'x', ':>10.2f'),
# ('frames/s', None, ':>10.2f'), ('samples/s', None, ':>10.2f'),
# ('letters/s', None, ':>10.2f')]
# for perc in ['', 'avg', '90%', '95%', '99%']:
# for model in ['fastpitch', 'waveglow', '']:
# for mod, unit, format in modalities:
# name = f'{perc} {model} {mod}'.strip().replace(' ', ' ')
# dllogger.metadata(
# name.replace(' ', '_'),
# {'name': f'{name: <26}', 'unit': unit, 'format': format})
class defaultdict(OrderedDict):
"""A simple, ordered defaultdict."""
def __init__(self, type_, *args, **kwargs):
self.type_ = type_
super().__init__(*args, **kwargs)
def __getitem__(self, key):
if key not in self:
self.__setitem__(key, self.type_())
return super().__getitem__(key)
def __copy__(self):
return defaultdict(self.type_, self)
class Metrics(dict):
def __init__(self, scopes=['train', 'train_avg'],
dll_keys=['loss_gen', 'loss_discrim', 'loss_mel',
'frames/s', 'took', 'lrate_gen', 'lrate_discrim'],
benchmark_epochs=0):
super().__init__()
self.dll_keys = dll_keys
self.metrics = {scope: defaultdict(float) for scope in scopes}
self.metric_counts = {scope: defaultdict(int) for scope in scopes}
self.start_time = {scope: None for scope in scopes}
self.benchmark_epochs = benchmark_epochs
if benchmark_epochs > 0:
self.metrics['train_benchmark'] = defaultdict(list)
def __setitem__(self, key, val):
extract = lambda t: t.item() if type(t) is torch.Tensor else t
if type(val) is dict:
for k, v in val.items():
super().__setitem__(k, extract(v))
else:
super().__setitem__(key, extract(val))
def __getitem__(self, key):
if key not in self:
self.__setitem__(key, 0.0)
return super().__getitem__(key)
def start_accumulating(self, step, start_timer=True, scope='train'):
del step # unused
self.clear()
self.metrics[scope].clear()
self.metric_counts[scope].clear()
if start_timer:
self.start_time[scope] = time.time()
def accumulate(self, scopes=['train', 'train_avg']):
for scope in scopes:
for k, v in self.items():
self.metrics[scope][k] += v
self.metric_counts[scope][k] += 1
self.clear()
def finish_accumulating(self, stop_timer=True, scope='train'):
metr = self.metrics[scope]
counts = self.metric_counts[scope]
for k, v in metr.items():
metr[k] = v / counts[k]
if stop_timer:
took = time.time() - self.start_time[scope]
if 'frames' in metr:
metr['frames/s'] = metr.pop('frames') * counts['frames'] / took
metr['took'] = took
def start_iter(self, iter, start_timer=True):
self.start_accumulating(iter, start_timer, 'train')
def start_epoch(self, epoch, start_timer=True):
self.start_accumulating(epoch, start_timer, 'train_avg')
def start_val(self, start_timer=True):
self.start_accumulating(None, start_timer, 'val')
def finish_iter(self, stop_timer=True):
self.finish_accumulating(stop_timer, 'train')
def finish_epoch(self, stop_timer=True):
self.finish_accumulating(stop_timer, 'train_avg')
metr = self.metrics['train_benchmark']
for k in ('took', 'frames/s', 'loss_gen', 'loss_discrim', 'loss_mel'):
metr[k].append(self.metrics['train_avg'][k])
if len(metr[k]) > self.benchmark_epochs:
metr[k].pop(0)
def finish_val(self, stop_timer=True):
self.finish_accumulating(stop_timer, 'val')
def get_metrics(self, scope='train', target='dll'):
if scope == 'train_benchmark':
metr = self.metrics[scope]
ret = {'train_' + k: np.mean(v) for k, v in metr.items()}
ret['benchmark_epochs_num'] = len(list(metr.values())[0])
return ret
ret = copy(self.metrics[scope])
if scope == 'train':
ret.update(self)
if target == 'dll':
ret = {f'{scope}_{k}': v
for k, v in ret.items() if k in self.dll_keys}
elif target == 'tb':
# Rename keys so they would group nicely inside TensorBoard
def split_key(k):
pos = k.rfind('_')
return k[:pos] + '/' + k[pos+1:] if pos >= 0 else k
ret = {split_key(k): v for k, v in ret.items()}
return ret
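# Hedged usage sketch (not part of the original file): the accumulation
# pattern expected by the training loop, with made-up loss values standing in
# for real ones.
if __name__ == '__main__':
    metrics = Metrics(benchmark_epochs=2)
    metrics.start_epoch(0)
    for step in range(3):
        metrics.start_iter(step)
        metrics['loss_gen'] = 1.0 / (step + 1)   # placeholder values
        metrics['loss_discrim'] = 0.5
        metrics.accumulate()
        metrics.finish_iter()
    metrics.finish_epoch()
    print(metrics.get_metrics('train_avg', target='dll'))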
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/logging.py |
import time
import dllogger
from collections import defaultdict
class Metrics(defaultdict):
# TODO Where to measure - gpu:0 or all gpus?
def __init__(self, tb_keys=[], benchmark_epochs=10):
super().__init__(float)
# dll_tb_keys=['loss_gen', 'loss_discrim', 'loss_mel', 'took']:
self.tb_keys = tb_keys #_ = {'dll': dll_keys, 'tb': tb_keys, 'dll+tb': dll_tb_keys}
self.iter_start_time = None
self.iter_metrics = defaultdict(float)
self.epoch_start_time = None
self.epoch_metrics = defaultdict(float)
self.benchmark_epochs = benchmark_epochs
def start_epoch(self, epoch, start_timer=True):
self.epoch = epoch
if start_timer:
self.epoch_start_time = time.time()
def start_iter(self, iter, start_timer=True):
self.iter = iter
self.accum_steps = 0
        self.iter_metrics.clear()
if start_timer:
self.iter_start_time = time.time()
    def accumulate(self, scope='iter'):
        tgt = {'iter': self.iter_metrics, 'epoch': self.epoch_metrics}[scope]
        for k, v in self.items():
            tgt[k] += v
        self.clear()
    def update_iter(self, metrics={}, stop_timer=True):
        if self.iter_start_time is None:
            return
        self.update(metrics)
        self.accumulate(scope='iter')
        for k, v in self.iter_metrics.items():
            self.epoch_metrics[k] += v
        if stop_timer:
            self.iter_metrics['took'] = time.time() - self.iter_start_time
def update_epoch(self, stop_timer=True):
# tb_total_steps=None,
# subset='train_avg',
# data=OrderedDict([
# ('loss', epoch_loss[-1]),
# ('mel_loss', epoch_mel_loss[-1]),
# ('frames/s', epoch_num_frames[-1] / epoch_time[-1]),
# ('took', epoch_time[-1])]),
# )
if stop_timer:
            self.epoch_metrics['epoch_time'] = time.time() - self.epoch_start_time
        # Reference notes from the training loop (names such as steps, args,
        # h, meta, loss_gen_all, mel_error, keys_mpd, keys_msd, re, self.sws
        # and self.start_b are not defined in this module):
        # if steps % args.stdout_interval == 0:
        #     # with torch.no_grad():
        #     #     mel_error = F.l1_loss(y_mel, y_g_hat_mel).item()
        #     took = time.time() - self.start_b
        #     self.sws['train'].add_scalar("gen_loss_total", loss_gen_all.item(), steps)
        #     self.sws['train'].add_scalar("mel_spec_error", mel_error.item(), steps)
        #     for key, val in meta.items():
        #         sw_name = 'train'
        #         for name_ in keys_mpd + keys_msd:
        #             if name_ in key:
        #                 sw_name = 'train_' + name_
        #         key = key.replace('loss_', 'loss/')
        #         key = re.sub('mpd\d+', 'mpd-msd', key)
        #         key = re.sub('msd\d+', 'mpd-msd', key)
        #         self.sws[sw_name].add_scalar(key, val / h.batch_size, steps)
    def get_iter_metrics(self):
        return {k: self.iter_metrics[k] for k in self.tb_keys}
    # Sample of the intended stdout format:
    # Steps : 40, Gen Loss Total : 57.993, Mel-Spec. Error : 47.374, s/b : 1.013
# Reference note, the intended dllogger call from the training loop (epoch,
# epoch_iter, num_iters, total_iter, the iter_* values and optimizer are not
# defined in this module):
# logger.log((epoch, epoch_iter, num_iters),
#            tb_total_steps=total_iter,
#            subset='train',
#            data=OrderedDict([
#                ('loss', iter_loss),
#                ('mel_loss', iter_mel_loss),
#                ('frames/s', iter_num_frames / iter_time),
#                ('took', iter_time),
#                ('lrate', optimizer.param_groups[0]['lr'])]),
#            )
class Meter:
def __init__(self, sink_type, scope, downstream=None, end_points=None, verbosity=dllogger.Verbosity.DEFAULT):
self.verbosity = verbosity
self.sink_type = sink_type
self.scope = scope
self.downstream = downstream
self.end_points = end_points or []
def start(self):
ds = None if self.downstream is None else self.downstream.sink
end_pt_fn = lambda x: list(map(lambda f: f(x), self.end_points)) # call all endpoint functions
self.sink = self.sink_type(end_pt_fn, ds)
def end(self):
self.sink.close()
def send(self, data):
self.sink.send(data)
def meters(self):
if self.downstream is not None:
downstream_meters = self.downstream.meters()
else:
downstream_meters = []
return [self] + downstream_meters
def add_end_point(self, new_endpoint):
self.end_points.append(new_endpoint)
def __or__(self, other):
"""for easy chaining of meters"""
if self.downstream is None:
self.downstream = other
else:
self.downstream | other
return self
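# Hedged usage sketch (not part of the original file): Meter only assumes that
# sink_type is a callable taking (endpoint_fn, downstream_sink) and exposing
# send()/close(); PrintSink below is a stand-in invented for illustration.
if __name__ == '__main__':
    class PrintSink:
        def __init__(self, end_pt_fn, downstream):
            self.end_pt_fn = end_pt_fn
            self.downstream = downstream
        def send(self, data):
            self.end_pt_fn(data)
            if self.downstream is not None:
                self.downstream.send(data)
        def close(self):
            pass
    chain = Meter(PrintSink, 'train') | Meter(PrintSink, 'val')
    for m in reversed(chain.meters()):   # start downstream sinks first
        m.add_end_point(lambda d, scope=m.scope: print(scope, d))
        m.start()
    chain.send({'loss_gen': 0.5})
    for m in chain.meters():
        m.end()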
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/metrics.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# ResBlock1, ResBlock2, Generator, DiscriminatorP, DiscriminatorS, MultiScaleDiscriminator,
# MultiPeriodDiscriminator, feature_loss, discriminator_loss, generator_loss,
# init_weights, get_padding
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d
from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm
from common.stft import STFT
from common.utils import AttrDict, init_weights, get_padding
LRELU_SLOPE = 0.1
class NoAMPConv1d(Conv1d):
def __init__(self, *args, no_amp=False, **kwargs):
super().__init__(*args, **kwargs)
self.no_amp = no_amp
def _cast(self, x, dtype):
if isinstance(x, (list, tuple)):
return [self._cast(t, dtype) for t in x]
else:
return x.to(dtype)
def forward(self, *args):
if not self.no_amp:
return super().forward(*args)
with torch.cuda.amp.autocast(enabled=False):
return self._cast(
super().forward(*self._cast(args, torch.float)), args[0].dtype)
class ResBlock1(nn.Module):
__constants__ = ['lrelu_slope']
def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3, 5)):
super().__init__()
self.conf = conf
self.lrelu_slope = LRELU_SLOPE
ch, ks = channels, kernel_size
self.convs1 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[1]), dilation[1])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, dilation[2]), dilation[2])),
])
self.convs2 = nn.Sequential(*[
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(ks, 1))),
])
self.convs1.apply(init_weights)
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c1(xt)
xt = F.leaky_relu(xt, self.lrelu_slope)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(nn.Module):
__constants__ = ['lrelu_slope']
    def __init__(self, conf, channels, kernel_size=3, dilation=(1, 3)):
        super().__init__()
        self.conf = conf
        self.lrelu_slope = LRELU_SLOPE
        ch, ks = channels, kernel_size
self.convs = nn.ModuleList([
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[0]), dilation[0])),
weight_norm(Conv1d(ch, ch, ks, 1, get_padding(kernel_size, dilation[1]), dilation[1])),
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, self.lrelu_slope)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(nn.Module):
__constants__ = ['lrelu_slope', 'num_kernels', 'num_upsamples']
def __init__(self, conf):
super().__init__()
conf = AttrDict(conf)
self.conf = conf
self.num_kernels = len(conf.resblock_kernel_sizes)
self.num_upsamples = len(conf.upsample_rates)
self.conv_pre = weight_norm(
Conv1d(80, conf.upsample_initial_channel, 7, 1, padding=3))
self.lrelu_slope = LRELU_SLOPE
resblock = ResBlock1 if conf.resblock == '1' else ResBlock2
self.ups = []
for i, (u, k) in enumerate(zip(conf.upsample_rates,
conf.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(conf.upsample_initial_channel // (2 ** i),
conf.upsample_initial_channel // (2 ** (i + 1)),
k, u, padding=(k-u)//2)))
self.ups = nn.Sequential(*self.ups)
self.resblocks = []
for i in range(len(self.ups)):
resblock_list = []
ch = conf.upsample_initial_channel // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(conf.resblock_kernel_sizes,
conf.resblock_dilation_sizes)):
resblock_list.append(resblock(conf, ch, k, d))
resblock_list = nn.Sequential(*resblock_list)
self.resblocks.append(resblock_list)
self.resblocks = nn.Sequential(*self.resblocks)
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def load_state_dict(self, state_dict, strict=True):
# Fallback for old checkpoints (pre-ONNX fix)
new_sd = {}
for k, v in state_dict.items():
new_k = k
if 'resblocks' in k:
parts = k.split(".")
                # only do this if the checkpoint type is older
if len(parts) == 5:
layer = int(parts[1])
new_layer = f"{layer//3}.{layer%3}"
new_k = f"resblocks.{new_layer}.{'.'.join(parts[2:])}"
new_sd[new_k] = v
# Fix for conv1d/conv2d/NHWC
curr_sd = self.state_dict()
for key in new_sd:
len_diff = len(new_sd[key].size()) - len(curr_sd[key].size())
if len_diff == -1:
new_sd[key] = new_sd[key].unsqueeze(-1)
elif len_diff == 1:
new_sd[key] = new_sd[key].squeeze(-1)
super().load_state_dict(new_sd, strict=strict)
def forward(self, x):
x = self.conv_pre(x)
for upsample_layer, resblock_group in zip(self.ups, self.resblocks):
x = F.leaky_relu(x, self.lrelu_slope)
x = upsample_layer(x)
xs = 0
for resblock in resblock_group:
xs += resblock(x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('HiFi-GAN: Removing weight norm.')
for l in self.ups:
remove_weight_norm(l)
for group in self.resblocks:
for block in group:
block.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
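# Hedged usage sketch (comments only, not part of the original file): the
# Generator is configured from a plain dict; the values below follow the
# public HiFi-GAN V1 recipe and are assumptions here, not the shipped config.
#     conf = {'upsample_rates': [8, 8, 2, 2],
#             'upsample_kernel_sizes': [16, 16, 4, 4],
#             'upsample_initial_channel': 512,
#             'resblock': '1',
#             'resblock_kernel_sizes': [3, 7, 11],
#             'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]]}
#     audio = Generator(conf)(mel)    # mel: (batch, 80, frames)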
class Denoiser(nn.Module):
""" Removes model bias from audio produced with hifigan """
def __init__(self, hifigan, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros', **infer_kw):
super().__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
for name, p in hifigan.named_parameters():
if name.endswith('.weight'):
dtype = p.dtype
device = p.device
break
mel_init = {'zeros': torch.zeros, 'normal': torch.randn}[mode]
mel_input = mel_init((1, 80, 88), dtype=dtype, device=device)
with torch.no_grad():
bias_audio = hifigan(mel_input, **infer_kw).float()
if len(bias_audio.size()) > 2:
bias_audio = bias_audio.squeeze(0)
elif len(bias_audio.size()) < 2:
bias_audio = bias_audio.unsqueeze(0)
assert len(bias_audio.size()) == 2
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
class DiscriminatorP(nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super().__init__()
self.period = period
norm_f = spectral_norm if use_spectral_norm else weight_norm
ks = kernel_size
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (ks, 1), (stride, 1), (get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (ks, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class MultiPeriodDiscriminator(nn.Module):
def __init__(self, periods, concat_fwd=False):
super().__init__()
layers = [DiscriminatorP(p) for p in periods]
self.discriminators = nn.ModuleList(layers)
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
y_ds, fmaps = d(concat_discr_input(y, y_hat))
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(nn.Module):
def __init__(self, use_spectral_norm=False, no_amp_grouped_conv=False):
super().__init__()
norm_f = spectral_norm if use_spectral_norm else weight_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(NoAMPConv1d(128, 256, 41, 2, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(256, 512, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(512, 1024, 41, 4, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(NoAMPConv1d(1024, 1024, 41, 1, groups=16, padding=20, no_amp=no_amp_grouped_conv)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
# x = l(x.unsqueeze(-1)).squeeze(-1)
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(nn.Module):
def __init__(self, no_amp_grouped_conv=False, concat_fwd=False):
super().__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
DiscriminatorS(no_amp_grouped_conv=no_amp_grouped_conv),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=1),
AvgPool1d(4, 2, padding=1)
])
self.concat_fwd = concat_fwd
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if self.concat_fwd:
ys = concat_discr_input(y, y_hat)
if i != 0:
ys = self.meanpools[i-1](ys)
y_ds, fmaps = d(ys)
y_d_r, y_d_g, fmap_r, fmap_g = split_discr_output(y_ds, fmaps)
else:
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def concat_discr_input(y, y_hat):
return torch.cat((y, y_hat), dim=0)
def split_discr_output(y_ds, fmaps):
y_d_r, y_d_g = torch.chunk(y_ds, 2, dim=0)
fmap_r, fmap_g = zip(*(torch.chunk(f, 2, dim=0) for f in fmaps))
return y_d_r, y_d_g, fmap_r, fmap_g
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
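# Hedged usage sketch (not part of the original file): wires the two
# discriminators and the loss helpers together on random audio the way a
# HiFi-GAN training step would. Shapes and periods are illustrative only.
if __name__ == '__main__':
    y = torch.randn(2, 1, 4096)        # "real" audio: (batch, 1, samples)
    y_hat = torch.randn(2, 1, 4096)    # "generated" audio
    mpd = MultiPeriodDiscriminator(periods=[2, 3, 5, 7, 11])
    msd = MultiScaleDiscriminator()
    y_df_r, y_df_g, fmap_f_r, fmap_f_g = mpd(y, y_hat)
    y_ds_r, y_ds_g, fmap_s_r, fmap_s_g = msd(y, y_hat)
    loss_disc, _, _ = discriminator_loss(y_df_r + y_ds_r, y_df_g + y_ds_g)
    loss_fm = feature_loss(fmap_f_r, fmap_f_g) + feature_loss(fmap_s_r, fmap_s_g)
    loss_gen, _ = generator_loss(y_df_g + y_ds_g)
    print(loss_disc.item(), loss_fm.item(), loss_gen.item())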
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/models.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d, ConvTranspose2d, AvgPool2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from common.utils import init_weights, get_padding, print_once
LRELU_SLOPE = 0.1
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=(dilation[0], 1),
padding=(get_padding(kernel_size, dilation[0]), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=(dilation[1], 1),
padding=(get_padding(kernel_size, dilation[1]), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=(dilation[2], 1),
padding=(get_padding(kernel_size, dilation[2]), 0)))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=1,
padding=(get_padding(kernel_size, 1), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=1,
padding=(get_padding(kernel_size, 1), 0))),
weight_norm(Conv2d(channels, channels, (kernel_size, 1), 1, dilation=1,
padding=(get_padding(kernel_size, 1), 0)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
def __init__(self, h):
super(Generator, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv2d(80, h.upsample_initial_channel, (7,1), (1,1), padding=(3,0)))
assert h.resblock == '1', 'Only ResBlock1 currently supported for NHWC'
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
# ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
# k, u, padding=(k-u)//2)))
ConvTranspose2d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
(k, 1), (u, 1), padding=((k-u)//2, 0))))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv2d(ch, 1, (7,1), (1,1), padding=(3,0)))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = x.unsqueeze(-1).to(memory_format=torch.channels_last)
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
# x = self.ups[i](x.unsqueeze(-1)).squeeze(-1)
x = self.ups[i](x)
xs = 0
for j in range(self.num_kernels):
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
x = x.squeeze(-1)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t, unit = x.shape
assert unit == 1
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, 0, 0, n_pad), "reflect")
t = t + n_pad
# print_once('x pre channels last:', x.is_contiguous(memory_format=torch.channels_last))
x = x.view(b, c, t // self.period, self.period)
# print_once('x post channels last:', x.is_contiguous(memory_format=torch.channels_last))
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
# x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class DiscriminatorPConv1d(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorPConv1d, self).__init__()
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0), dilation=(period, 1))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0), dilation=(period, 1))),
])
# self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1, dilation=period))
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0), dilation=(period, 1)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t, unit = x.shape
assert unit == 1
# if t % self.period != 0: # pad first
# n_pad = self.period - (t % self.period)
# x = F.pad(x, (0, n_pad), "reflect")
# t = t + n_pad
# x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
def share_params_of(self, dp):
assert len(self.convs) == len(dp.convs)
for c1, c2 in zip(self.convs, dp.convs):
c1.weight = c2.weight
c1.bias = c2.bias
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, periods, use_conv1d=False, shared=False):
super(MultiPeriodDiscriminator, self).__init__()
print('MPD PERIODS:', periods)
if use_conv1d:
print('Constructing dilated MPD')
layers = [DiscriminatorPConv1d(p) for p in periods]
else:
layers = [DiscriminatorP(p) for p in periods]
if shared:
print('MPD HAS SHARED PARAMS')
for l in layers[1:]:
l.share_params_of(layers[0])
self.discriminators = nn.ModuleList(layers)
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False, amp_groups=False):
super(DiscriminatorS, self).__init__()
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
# self.convs = nn.ModuleList([
# norm_f(Conv1d(1, 128, 15, 1, padding=7)),
# norm_f(Conv1d(128, 128, 41, 2, groups=1 if amp_groups else 4, padding=20)), # was: groups=4
# norm_f(Conv1d(128, 256, 41, 2, groups=1 if amp_groups else 16, padding=20)), # was: groups=16
# norm_f(Conv1d(256, 512, 41, 4, groups=1 if amp_groups else 16, padding=20)), # was: groups=16
# norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
# norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
# norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
# ])
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 128, (15,1), (1,1), padding=(7 , 0))),
norm_f(Conv2d(128, 128, (41,1), (2,1), groups=1 if amp_groups else 4, padding=(20, 0))), # was: groups=4
norm_f(Conv2d(128, 256, (41,1), (2,1), groups=1 if amp_groups else 16, padding=(20, 0))), # was: groups=16
norm_f(Conv2d(256, 512, (41,1), (4,1), groups=1 if amp_groups else 16, padding=(20, 0))), # was: groups=16
norm_f(Conv2d(512, 1024, (41,1), (4,1), groups=16 , padding=(20, 0))),
norm_f(Conv2d(1024, 1024, (41,1), (1,1), groups=16 , padding=(20, 0))),
norm_f(Conv2d(1024, 1024, ( 5,1), (1,1), padding=(2 , 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3,1), (1,1), padding=(1,0)))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
# x = x.squeeze(-1)
# x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self, amp_groups=False):
super(MultiScaleDiscriminator, self).__init__()
if amp_groups:
print('MSD: AMP groups')
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, amp_groups=amp_groups),
DiscriminatorS(amp_groups=amp_groups),
DiscriminatorS(amp_groups=amp_groups),
])
self.meanpools = nn.ModuleList([
AvgPool2d((4, 1), (2, 1), padding=(1, 0)),
AvgPool2d((4, 1), (2, 1), padding=(1, 0))
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g, keys=[]):
loss = 0
meta = {}
assert len(keys) == len(fmap_r)
for key, dr, dg in zip(keys, fmap_r, fmap_g):
k = 'loss_gen_feat_' + key
meta[k] = 0
for rl, gl in zip(dr, dg):
# loss += torch.mean(torch.abs(rl - gl))
diff = torch.mean(torch.abs(rl - gl))
loss += diff
meta[k] += diff.item()
return loss*2, meta
def discriminator_loss(disc_real_outputs, disc_generated_outputs, keys=[]):
loss = 0
r_losses = []
g_losses = []
meta = {}
assert len(keys) == len(disc_real_outputs)
for key, dr, dg in zip(keys, disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
meta['loss_disc_real_' + key] = r_loss.item()
meta['loss_disc_gen_' + key] = g_loss.item()
return loss, r_losses, g_losses, meta
def generator_loss(disc_outputs, keys=[]):
loss = 0
gen_losses = []
meta = {}
assert len(keys) == len(disc_outputs)
for key, dg in zip(keys, disc_outputs):
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
meta['loss_gen_' + key] = l.item()
return loss, gen_losses, meta
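# Hedged usage sketch (not part of the original file): this variant of the
# loss helpers also returns per-discriminator metadata keyed by the names
# passed in `keys`. The tensors and key names below are illustrative only.
if __name__ == '__main__':
    real = [torch.randn(2, 1, 100), torch.randn(2, 1, 50)]
    fake = [torch.randn(2, 1, 100), torch.randn(2, 1, 50)]
    keys = ['msd0', 'msd1']
    d_loss, r_losses, g_losses, d_meta = discriminator_loss(real, fake, keys)
    g_loss, gen_losses, g_meta = generator_loss(fake, keys)
    print(d_loss.item(), g_loss.item(), sorted(d_meta) + sorted(g_meta))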
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/models_ch_last_.py |
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2020 Jungil Kong
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# The following functions/classes were based on code from https://github.com/jik876/hifi-gan:
# mel_spectrogram, MelDataset
import math
import os
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
from librosa.util import normalize
from numpy import random
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from common.audio_processing import dynamic_range_compression
from common.utils import load_filepaths_and_text, load_wav
MAX_WAV_VALUE = 32768.0
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size,
fmin, fmax, center=False):
if torch.min(y) < -1.:
print('min value is ', torch.min(y))
if torch.max(y) > 1.:
print('max value is ', torch.max(y))
global mel_basis, hann_window
fmax_key = f'{fmax}_{y.device}'
if fmax_key not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[fmax_key] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
pad = int((n_fft-hop_size)/2)
y = F.pad(y.unsqueeze(1), (pad, pad), mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size,
window=hann_window[str(y.device)], center=center,
pad_mode='reflect', normalized=False, onesided=True,
return_complex=True)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9))
    spec = torch.matmul(mel_basis[fmax_key], spec)
spec = dynamic_range_compression(spec) # spectral normalize
return spec
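# --- Editor's usage sketch (not part of the original file) ---
# mel_spectrogram() maps a batch of waveforms in [-1, 1] with shape (B, T) to
# log-compressed mel spectrograms of shape (B, num_mels, ~T / hop_size). The
# hyperparameters below are the common 22.05 kHz HiFi-GAN settings and are an
# assumption, not values read from this repository's configs.
def _example_mel_spectrogram():
    """Illustrative only: one second of audio -> about 86 mel frames."""
    y = torch.randn(1, 22050).clamp_(-1., 1.)
    mel = mel_spectrogram(y, n_fft=1024, num_mels=80, sampling_rate=22050,
                          hop_size=256, win_size=1024, fmin=0, fmax=8000)
    return mel  # expected shape: (1, 80, 86)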
class MelDataset(torch.utils.data.Dataset):
def __init__(self, training_files, segment_size, n_fft, num_mels,
hop_size, win_size, sampling_rate, fmin, fmax, split=True,
device=None, fmax_loss=None, fine_tuning=False,
base_mels_path=None, repeat=1, deterministic=False,
max_wav_value=MAX_WAV_VALUE):
self.audio_files = training_files
self.segment_size = segment_size
self.sampling_rate = sampling_rate
self.split = split
self.n_fft = n_fft
self.num_mels = num_mels
self.hop_size = hop_size
self.win_size = win_size
self.fmin = fmin
self.fmax = fmax
self.fmax_loss = fmax_loss
self.max_wav_value = max_wav_value
self.fine_tuning = fine_tuning
self.base_mels_path = base_mels_path
self.repeat = repeat
self.deterministic = deterministic
self.rng = random.default_rng()
def __getitem__(self, index):
if index >= len(self):
raise IndexError('Dataset index out of range')
rng = random.default_rng(index) if self.deterministic else self.rng
index = index % len(self.audio_files) # collapse **after** setting seed
filename = self.audio_files[index]
audio, sampling_rate = load_wav(filename)
audio = audio / self.max_wav_value
if not self.fine_tuning:
audio = normalize(audio) * 0.95
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
audio = torch.FloatTensor(audio)
audio = audio.unsqueeze(0)
if not self.fine_tuning:
if self.split:
if audio.size(1) >= self.segment_size:
max_audio_start = audio.size(1) - self.segment_size
audio_start = rng.integers(0, max_audio_start)
audio = audio[:, audio_start:audio_start+self.segment_size]
else:
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax,
center=False)
else:
mel = np.load(
os.path.join(self.base_mels_path,
os.path.splitext(os.path.split(filename)[-1])[0] + '.npy'))
mel = torch.from_numpy(mel).float()
if len(mel.shape) < 3:
mel = mel.unsqueeze(0)
if self.split:
frames_per_seg = math.ceil(self.segment_size / self.hop_size)
if audio.size(1) >= self.segment_size:
mel_start = rng.integers(0, mel.size(2) - frames_per_seg - 1)
mel = mel[:, :, mel_start:mel_start + frames_per_seg]
a = mel_start * self.hop_size
b = (mel_start + frames_per_seg) * self.hop_size
audio = audio[:, a:b]
else:
mel = F.pad(mel, (0, frames_per_seg - mel.size(2)))
audio = F.pad(audio, (0, self.segment_size - audio.size(1)))
mel_loss = mel_spectrogram(audio, self.n_fft, self.num_mels,
self.sampling_rate, self.hop_size,
self.win_size, self.fmin, self.fmax_loss,
center=False)
return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze())
def __len__(self):
return len(self.audio_files) * self.repeat
def get_data_loader(args, distributed_run, train=True, batch_size=None,
val_kwargs=None):
filelists = args.training_files if train else args.validation_files
files = load_filepaths_and_text(args.dataset_path, filelists)
files = list(zip(*files))[0]
dataset_kw = {
'segment_size': args.segment_size,
'n_fft': args.filter_length,
'num_mels': args.num_mels,
'hop_size': args.hop_length,
'win_size': args.win_length,
'sampling_rate': args.sampling_rate,
'fmin': args.mel_fmin,
'fmax': args.mel_fmax,
'fmax_loss': args.mel_fmax_loss,
'max_wav_value': args.max_wav_value,
'fine_tuning': args.fine_tuning,
'base_mels_path': args.input_mels_dir,
'deterministic': not train
}
if train:
dataset = MelDataset(files, **dataset_kw)
sampler = DistributedSampler(dataset) if distributed_run else None
else:
dataset_kw.update(val_kwargs or {})
dataset = MelDataset(files, **dataset_kw)
sampler = (DistributedSampler(dataset, shuffle=False)
if distributed_run else None)
loader = DataLoader(dataset,
                        # NOTE: on DGX-1 and DGX A100, num_workers=1 is optimal
num_workers=args.num_workers if train else 1,
shuffle=(train and not distributed_run),
sampler=sampler,
batch_size=batch_size or args.batch_size,
pin_memory=True,
persistent_workers=True,
drop_last=train)
return loader
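# --- Editor's usage sketch (not part of the original file) ---
# get_data_loader() pulls every hyperparameter from an argparse-style namespace.
# The field values and filelist paths below mirror common FastPitch/HiFi-GAN
# defaults and are assumptions, not values taken from this file.
def _example_build_train_loader():
    """Illustrative only: single-GPU training loader with assumed hparams."""
    from argparse import Namespace
    args = Namespace(dataset_path='data/LJSpeech-1.1',
                     training_files=['filelists/ljs_audio_train.txt'],
                     validation_files=['filelists/ljs_audio_val.txt'],
                     segment_size=8192, filter_length=1024, num_mels=80,
                     hop_length=256, win_length=1024, sampling_rate=22050,
                     mel_fmin=0.0, mel_fmax=8000.0, mel_fmax_loss=None,
                     max_wav_value=32768.0, fine_tuning=False,
                     input_mels_dir=None, num_workers=4, batch_size=16)
    return get_data_loader(args, distributed_run=False, train=True)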
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/data_function.py |
import torch
from .stft import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with hifigan """
def __init__(self, hifigan, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=hifigan.ups[0].weight.dtype,
device=hifigan.ups[0].weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
                dtype=hifigan.ups[0].weight.dtype,
                device=hifigan.ups[0].weight.device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = hifigan(mel_input).float()[0]
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
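# --- Editor's usage sketch (not part of the original file) ---
# The denoiser estimates the vocoder's bias spectrum from an all-zero mel input
# and subtracts a scaled copy of it from generated audio. `hifigan` and `mel`
# are assumed to be a trained HiFi-GAN generator and a mel batch already on the
# GPU; the strength value is a typical inference-time choice, not a repo default.
def _example_denoise(hifigan, mel, strength=0.005):
    """Illustrative only: vocode a mel batch, then remove the model bias."""
    denoiser = Denoiser(hifigan).cuda()
    with torch.no_grad():
        audio = hifigan(mel).float()  # (B, 1, T) raw vocoder output
        return denoiser(audio.squeeze(1), strength=strength)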
| DeepLearningExamples-master | PyTorch/SpeechSynthesis/FastPitch/hifigan/denoiser.py |