from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel
class Model(HuggingFaceModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE = 2
DEFAULT_EVAL_BSIZE = 1
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(name="hf_Longformer", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
|
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
patch_transformers()
model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
cache_model(model_name)
|
"""
HuggingFace Stable Diffusion model.
It requires users to set the "HUGGINGFACE_AUTH_TOKEN" environment variable
to authorize login and to agree to the HuggingFace terms and conditions.
"""
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceAuthMixin
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
class Model(BenchmarkModel, HuggingFaceAuthMixin):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
# Skip deepcopy because it will oom on A100 40GB
DEEPCOPY = False
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, test, device, batch_size=None, extra_args=[]):
HuggingFaceAuthMixin.__init__(self)
super().__init__(test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
self.pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler)
self.example_inputs = "a photo of an astronaut riding a horse on mars"
self.pipe.to(self.device)
def enable_fp16_half(self):
pass
def get_module(self):
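# Build stand-in UNet inputs for benchmarking: a latent image tensor, a
# diffusion timestep, and text-encoder hidden states, shaped to match the
# stable-diffusion-2 UNet (4 latent channels, 1024-dim text embeddings).
# These are synthetic tensors, not real pipeline intermediates.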
random_input = torch.randn(1, 4, 128, 128).to(self.device)
timestep = torch.tensor([1.0]).to(self.device)
encoder_hidden_states = torch.randn(1, 1, 1024).to(self.device)
return self.pipe.unet, [random_input, timestep, encoder_hidden_states]
def train(self):
raise NotImplementedError("Train test is not implemented for the stable diffusion model.")
def eval(self):
image = self.pipe(self.example_inputs)
return (image, )
|
from torchbenchmark.util.framework.diffusers import install_diffusers
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceAuthMixin
import torch
import os
import warnings
MODEL_NAME = "stabilityai/stable-diffusion-2"
def load_model_checkpoint():
from diffusers import StableDiffusionPipeline
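# Download the pretrained weights into the local HuggingFace cache; the
# constructed pipeline is discarded, only the cached files are needed later.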
StableDiffusionPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safety_checker=None)
if __name__ == "__main__":
if 'HUGGING_FACE_HUB_TOKEN' not in os.environ:
warnings.warn("Make sure to set `HUGGING_FACE_HUB_TOKEN` so you can download weights")
else:
install_diffusers()
load_model_checkpoint()
|
import os
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.detectron2.model_factory import Detectron2Model
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
class Model(Detectron2Model):
task = COMPUTER_VISION.DETECTION
model_file = os.path.join(MODEL_DIR, ".data", f"{MODEL_NAME}.pkl")
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(variant="COCO-Detection/faster_rcnn_R_101_C4_3x.yaml", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
|
import os
from torchbenchmark.util.framework.detectron2 import install_detectron2
MODEL_NAME = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
MODEL_DIR = os.path.abspath(os.path.dirname(__file__))
if __name__ == '__main__':
install_detectron2(MODEL_NAME, MODEL_DIR)
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
# convert the rendered canvas to an HxWx3 uint8 numpy array
data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment_to_numpy(alignment, info=None):
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_spectrogram_to_numpy(spectrogram):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
|
import os
import time
import argparse
import math
from numpy import finfo
import torch
from .distributed import apply_gradient_allreduce
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import DataLoader
from .model import Tacotron2
from .data_utils import TextMelLoader, TextMelCollate
from .loss_function import Tacotron2Loss
# from hparams import create_hparams
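# Average a tensor across all distributed workers: sum via all_reduce, then
# divide by the number of GPUs.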
def reduce_tensor(tensor, n_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= n_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
if hparams.distributed_run:
train_sampler = DistributedSampler(trainset)
shuffle = False
else:
train_sampler = None
shuffle = True
train_loader = DataLoader(trainset, num_workers=0, shuffle=shuffle,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
# logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
logger = None
else:
logger = None
return logger
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model.decoder.attention_layer.score_mask_value = finfo('float16').min
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
return model
def warm_start_model(checkpoint_path, model, ignore_layers):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model_dict = checkpoint_dict['state_dict']
if len(ignore_layers) > 0:
model_dict = {k: v for k, v in model_dict.items()
if k not in ignore_layers}
dummy_dict = model.state_dict()
dummy_dict.update(model_dict)
model_dict = dummy_dict
model.load_state_dict(model_dict)
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
for i, batch in enumerate(val_loader):
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if distributed_run:
reduced_val_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_val_loss = loss.item()
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
if rank == 0:
print("Validation loss {}: {:9f} ".format(iteration, val_loss))
logger.log_validation(val_loss, model, y, y_pred, iteration)
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string): directory to save tensorboard logs
checkpoint_path (string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): hyperparameters, parsed from comma separated "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
from apex import amp
model, optimizer = amp.initialize(
model, optimizer, opt_level='O2')
if hparams.distributed_run:
model = apply_gradient_allreduce(model)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(
checkpoint_path, model, hparams.ignore_layers)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
is_overflow = False
# ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
start = time.perf_counter()
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
x, y = model.parse_batch(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
if hparams.distributed_run:
reduced_loss = reduce_tensor(loss.data, n_gpus).item()
else:
reduced_loss = loss.item()
if hparams.fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if hparams.fp16_run:
grad_norm = torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), hparams.grad_clip_thresh)
is_overflow = math.isnan(grad_norm)
else:
grad_norm = torch.nn.utils.clip_grad_norm_(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
if not is_overflow and rank == 0:
duration = time.perf_counter() - start
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not is_overflow and (iteration % hparams.iters_per_checkpoint == 0):
validate(model, criterion, valset, iteration,
hparams.batch_size, n_gpus, collate_fn, logger,
hparams.distributed_run, rank)
if rank == 0:
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load model weights only, ignore specified layers')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
|
import tensorflow as tf
from text import symbols
def create_hparams(hparams_string=None, verbose=False):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
################################
# Experiment Parameters #
################################
epochs=500,
iters_per_checkpoint=1000,
seed=1234,
dynamic_loss_scaling=True,
fp16_run=False,
distributed_run=False,
dist_backend="nccl",
dist_url="tcp://localhost:54321",
cudnn_enabled=True,
cudnn_benchmark=False,
ignore_layers=['embedding.weight'],
################################
# Data Parameters #
################################
load_mel_from_disk=False,
training_files='filelists/ljs_audio_text_train_filelist.txt',
validation_files='filelists/ljs_audio_text_val_filelist.txt',
text_cleaners=['english_cleaners'],
################################
# Audio Parameters #
################################
max_wav_value=32768.0,
sampling_rate=22050,
filter_length=1024,
hop_length=256,
win_length=1024,
n_mel_channels=80,
mel_fmin=0.0,
mel_fmax=8000.0,
################################
# Model Parameters #
################################
n_symbols=len(symbols),
symbols_embedding_dim=512,
# Encoder parameters
encoder_kernel_size=5,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
# Decoder parameters
n_frames_per_step=1, # currently only 1 is supported
decoder_rnn_dim=1024,
prenet_dim=256,
max_decoder_steps=1000,
gate_threshold=0.5,
p_attention_dropout=0.1,
p_decoder_dropout=0.1,
# Attention parameters
attention_rnn_dim=1024,
attention_dim=128,
# Location Layer parameters
attention_location_n_filters=32,
attention_location_kernel_size=31,
# Mel-post processing network parameters
postnet_embedding_dim=512,
postnet_kernel_size=5,
postnet_n_convolutions=5,
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate=False,
learning_rate=1e-3,
weight_decay=1e-6,
grad_clip_thresh=1.0,
batch_size=64,
mask_padding=True # set model's padded outputs to padded values
)
if hparams_string:
tf.logging.info('Parsing command line hparams: %s', hparams_string)
hparams.parse(hparams_string)
if verbose:
tf.logging.info('Final parsed hparams: %s', hparams.values())
return hparams
|
from .train_tacotron2 import load_model, prepare_dataloaders
import torch
from .loss_function import Tacotron2Loss
from argparse import Namespace
from .text import symbols
from pathlib import Path
from ...util.model import BenchmarkModel
from typing import Tuple
from contextlib import nullcontext
from torchbenchmark.tasks import SPEECH
class Model(BenchmarkModel):
task = SPEECH.SYNTHESIS
# Training batch size comes from the source code:
# Source: https://github.com/NVIDIA/tacotron2/blob/bb6761349354ee914909a42208e4820929612069/hparams.py#L84
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
# Tacotron2 CUDA inference test uses amp precision
DEFAULT_EVAL_CUDA_PRECISION = "amp"
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
if device == 'cpu':
# TODO - currently load_model assumes cuda
raise NotImplementedError("Tacotron2 doesn't support CPU because load_model assumes CUDA.")
self.hparams = self.create_hparams(batch_size=self.batch_size)
self.model = load_model(self.hparams).to(device=device)
self.optimizer = torch.optim.Adam(self.model.parameters(),
lr=self.hparams.learning_rate,
weight_decay=self.hparams.weight_decay)
self.criterion = Tacotron2Loss().to(device=device)
loader, valset, collate_fn = prepare_dataloaders(self.hparams)
self.example_inputs, self.target = self.model.parse_batch(next(iter(loader)), device=self.device)
self.amp_context = nullcontext
# Parameters were obtained from the source code.
# Source: https://github.com/NVIDIA/tacotron2/blob/bb6761349354ee914909a42208e4820929612069/hparams.py#L5
def create_hparams(self, hparams_string=None, verbose=False, batch_size=64):
"""Create model hyperparameters. Parse nondefault from given string."""
root = str(Path(__file__).parent.parent.parent)
hparams = Namespace(**{
################################
# Experiment Parameters #
################################
'epochs': 2, # Reduced in TorchBench to shorten number of train iterations.
'iters_per_checkpoint': 1000,
'dynamic_loss_scaling': True,
'fp16_run': False,
'distributed_run': False,
'dist_backend': "nccl",
'dist_url': "tcp://localhost:54321",
'cudnn_enabled': True,
'cudnn_benchmark': False,
'ignore_layers': ['embedding.weight'],
################################
# Data Parameters #
################################
'load_mel_from_disk': False,
'training_files': f'{root}/data/.data/tacotron2-minimal/filelists/ljs_audio_text_train_filelist.txt',
'validation_files': f'{root}/data/.data/tacotron2-minimal/filelists/ljs_audio_text_val_filelist.txt',
'text_cleaners': ['english_cleaners'],
################################
# Audio Parameters #
################################
'max_wav_value': 32768.0,
'sampling_rate': 22050,
'filter_length': 1024,
'hop_length': 256,
'win_length': 1024,
'n_mel_channels': 80,
'mel_fmin': 0.0,
'mel_fmax': 8000.0,
################################
# Model Parameters #
################################
'n_symbols': len(symbols),
'symbols_embedding_dim': 512,
# Encoder parameters
'encoder_kernel_size': 5,
'encoder_n_convolutions': 3,
'encoder_embedding_dim': 512,
# Decoder parameters
'n_frames_per_step': 1, # currently only 1 is supported
'decoder_rnn_dim': 1024,
'prenet_dim': 256,
'max_decoder_steps': 1000,
'gate_threshold': 0.5,
'p_attention_dropout': 0.1,
'p_decoder_dropout': 0.1,
# Attention parameters
'attention_rnn_dim': 1024,
'attention_dim': 128,
# Location Layer parameters
'attention_location_n_filters': 32,
'attention_location_kernel_size': 31,
# Mel-post processing network parameters
'postnet_embedding_dim': 512,
'postnet_kernel_size': 5,
'postnet_n_convolutions': 5,
################################
# Optimization Hyperparameters #
################################
'use_saved_learning_rate': False,
'learning_rate': 1e-3,
'weight_decay': 1e-6,
'grad_clip_thresh': 1.0,
'batch_size': batch_size,
'mask_padding': True # set model's padded outputs to padded values
})
return hparams
def get_module(self):
return self.model, (self.example_inputs,)
def train(self):
self.model.train()
self.model.zero_grad()
y_pred = self.model(self.example_inputs)
loss = self.criterion(y_pred, self.target)
loss.backward()
self.optimizer.step()
def eval(self) -> Tuple[torch.Tensor]:
self.model.eval()
with self.amp_context():
out = self.model(self.example_inputs)
return out
|
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time Fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
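# Griffin-Lim: repeatedly re-estimate the phase from the current signal
# while keeping the target magnitudes fixed.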
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
|
import random
import torch
from torch.utils.tensorboard import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir):
super(Tacotron2Logger, self).__init__(logdir)
def log_training(self, reduced_loss, grad_norm, learning_rate, duration,
iteration):
self.add_scalar("training.loss", reduced_loss, iteration)
self.add_scalar("grad.norm", grad_norm, iteration)
self.add_scalar("learning.rate", learning_rate, iteration)
self.add_scalar("duration", duration, iteration)
def log_validation(self, reduced_loss, model, y, y_pred, iteration):
self.add_scalar("validation.loss", reduced_loss, iteration)
_, mel_outputs, gate_outputs, alignments = y_pred
mel_targets, gate_targets = y
# plot distribution of parameters
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
# plot alignment, mel target and predicted, gate target and predicted
idx = random.randint(0, alignments.size(0) - 1)
self.add_image(
"alignment",
plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
iteration, dataformats='HWC')
self.add_image(
"mel_target",
plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"mel_predicted",
plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
iteration, dataformats='HWC')
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_targets[idx].data.cpu().numpy(),
torch.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
iteration, dataformats='HWC')
|
from math import sqrt
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from .layers import ConvNorm, LinearNorm
from .tacotron2_utils import to_gpu, get_mask_from_lengths
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(torch.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
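# Note: dropout is applied with training=True, so it stays active at
# inference time as well (intentional in the Tacotron 2 prenet, where it
# introduces variation in the decoder inputs).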
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self, hparams):
super(Postnet, self).__init__()
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
for i in range(1, hparams.postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim,
hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(hparams.n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = F.dropout(torch.tanh(self.convolutions[i](x)), 0.5, self.training)
x = F.dropout(self.convolutions[-1](x), 0.5, self.training)
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, hparams):
super(Encoder, self).__init__()
convolutions = []
for _ in range(hparams.encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(hparams.encoder_embedding_dim,
hparams.encoder_embedding_dim,
kernel_size=hparams.encoder_kernel_size, stride=1,
padding=int((hparams.encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(hparams.encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
int(hparams.encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
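# (B, C, T) -> (B, T, C) so the sequence dimension comes second for the
# batch-first LSTM below.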
x = x.transpose(1, 2)
# PyTorch tensors are not reversible, hence the conversion to numpy
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def inference(self, x):
for conv in self.convolutions:
x = F.dropout(F.relu(conv(x)), 0.5, self.training)
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Decoder(nn.Module):
def __init__(self, hparams):
super(Decoder, self).__init__()
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.encoder_embedding_dim = hparams.encoder_embedding_dim
self.attention_rnn_dim = hparams.attention_rnn_dim
self.decoder_rnn_dim = hparams.decoder_rnn_dim
self.prenet_dim = hparams.prenet_dim
self.max_decoder_steps = hparams.max_decoder_steps
self.gate_threshold = hparams.gate_threshold
self.p_attention_dropout = hparams.p_attention_dropout
self.p_decoder_dropout = hparams.p_decoder_dropout
self.prenet = Prenet(
hparams.n_mel_channels * hparams.n_frames_per_step,
[hparams.prenet_dim, hparams.prenet_dim])
self.attention_rnn = nn.LSTMCell(
hparams.prenet_dim + hparams.encoder_embedding_dim,
hparams.attention_rnn_dim)
self.attention_layer = Attention(
hparams.attention_rnn_dim, hparams.encoder_embedding_dim,
hparams.attention_dim, hparams.attention_location_n_filters,
hparams.attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
hparams.attention_rnn_dim + hparams.encoder_embedding_dim,
hparams.decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim,
hparams.n_mel_channels * hparams.n_frames_per_step)
self.gate_layer = LinearNorm(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: decoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((decoder_input, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
self.attention_hidden = F.dropout(
self.attention_hidden, self.p_attention_dropout, self.training)
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask)
self.attention_weights_cum += self.attention_weights
decoder_input = torch.cat(
(self.attention_hidden, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
self.decoder_hidden = F.dropout(
self.decoder_hidden, self.p_decoder_dropout, self.training)
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory).unsqueeze(0)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
decoder_inputs = torch.cat((decoder_input, decoder_inputs), dim=0)
decoder_inputs = self.prenet(decoder_inputs)
self.initialize_decoder_states(
memory, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0) - 1:
decoder_input = decoder_inputs[len(mel_outputs)]
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output.squeeze(1)]
alignments += [attention_weights]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference(self, memory):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
mel_outputs, gate_outputs, alignments = [], [], []
while True:
decoder_input = self.prenet(decoder_input)
mel_output, gate_output, alignment = self.decode(decoder_input)
mel_outputs += [mel_output.squeeze(1)]
gate_outputs += [gate_output]
alignments += [alignment]
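# Stop decoding once the gate (stop-token) output exceeds the threshold,
# or bail out after max_decoder_steps to avoid an infinite loop.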
if torch.sigmoid(gate_output.data) > self.gate_threshold:
break
elif len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
def __init__(self, hparams):
super(Tacotron2, self).__init__()
self.mask_padding = hparams.mask_padding
self.fp16_run = hparams.fp16_run
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.embedding = nn.Embedding(
hparams.n_symbols, hparams.symbols_embedding_dim)
std = sqrt(2.0 / (hparams.n_symbols + hparams.symbols_embedding_dim))
val = sqrt(3.0) * std # uniform bounds for std
self.embedding.weight.data.uniform_(-val, val)
self.encoder = Encoder(hparams)
self.decoder = Decoder(hparams)
self.postnet = Postnet(hparams)
def parse_batch(self, batch, device='cuda'):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths = batch
text_padded = text_padded.to(device).long()
input_lengths = input_lengths.to(device).long()
max_len = torch.max(input_lengths.data).item()
mel_padded = mel_padded.to(device).float()
gate_padded = gate_padded.to(device).float()
output_lengths = output_lengths.to(device).long()
return (
(text_padded, input_lengths, mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths)
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
return outputs
def forward(self, inputs):
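# Teacher-forced training pass: encode the text, decode against the
# ground-truth mels, then add the postnet residual to refine the output.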
text_inputs, text_lengths, mels, max_len, output_lengths = inputs
text_lengths, output_lengths = text_lengths.data, output_lengths.data
embedded_inputs = self.embedding(text_inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, text_lengths)
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, mels, memory_lengths=text_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def inference(self, inputs):
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.inference(embedded_inputs)
mel_outputs, gate_outputs, alignments = self.decoder.inference(
encoder_outputs)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
outputs = self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
return outputs
|
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from .audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
|
import torch
import torch.distributed as dist
from torch.nn.modules import Module
from torch.autograd import Variable
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
'''
This version of DistributedDataParallel is designed to be used in conjunction with the multiproc.py
launcher included with this example. It assumes that your run uses one process per GPU, that the
model is already on the correct device, and that torch.cuda.set_device has been used to set the
device.
Parameters are broadcast to the other processes on initialization of DistributedDataParallel,
and will be allreduced at the end of the backward pass.
'''
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
#fallback for PyTorch 0.3
if not hasattr(dist, '_backend'):
self.warn_on_half = True
else:
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
for p in self.module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
'''
Modifies an existing model to do gradient allreduce, but doesn't change its class,
so you don't need to unwrap a "module" attribute.
'''
def apply_gradient_allreduce(module):
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
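# Group gradients by dtype so each bucket can be flattened and all-reduced
# in a single collective call.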
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = param.data.dtype
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
|
import random
import numpy as np
import torch
import torch.utils.data
from .layers import TacotronSTFT
from .tacotron2_utils import load_wav_to_torch, load_filepaths_and_text
from .text import text_to_sequence
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio, text pairs
2) normalizes text and converts it to sequences of one-hot vectors
3) computes mel-spectrograms from audio files.
"""
def __init__(self, audiopaths_and_text, hparams):
self.audiopaths_and_text = load_filepaths_and_text(audiopaths_and_text)
self.text_cleaners = hparams.text_cleaners
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.load_mel_from_disk = hparams.load_mel_from_disk
self.stft = TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
hparams.mel_fmax)
random.shuffle(self.audiopaths_and_text)
def get_mel_text_pair(self, audiopath_and_text):
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
text = self.get_text(text)
mel = self.get_mel(audiopath)
return (text, mel)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.stft.sampling_rate:
raise ValueError("{} {} SR doesn't match target {} SR".format(
sampling_rate, self.stft.sampling_rate))
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.from_numpy(np.load(filename))
assert melspec.size(0) == self.stft.n_mel_channels, (
'Mel dimension mismatch: given {}, expected {}'.format(
melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))
return text_norm
def __getitem__(self, index):
return self.get_mel_text_pair(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch])
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
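# Stop-token target: 1 from the last real frame onward (including padding).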
gate_padded[i, mel.size(1)-1:] = 1
output_lengths[i] = mel.size(1)
return text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths
|
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
|
import os
from pathlib import Path
import subprocess
import sys
from utils import s3_utils
def check_data_dir():
current_dir = Path(os.path.dirname(os.path.realpath(__file__)))
tacotron2_data_dir = os.path.join(current_dir.parent.parent, "data", ".data", "tacotron2-minimal")
assert os.path.exists(tacotron2_data_dir), "Couldn't find tacotron2 minimal data dir, please run install.py again."
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
s3_utils.checkout_s3_data("INPUT_TARBALLS", "tacotron2-minimal.tar.gz", decompress=True)
|
import torch
from librosa.filters import mel as librosa_mel_fn
from .audio_processing import dynamic_range_compression
from .audio_processing import dynamic_range_decompression
from .stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=8000.0):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
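# --- Added usage sketch (illustration only, not part of the upstream file) ---
# Not invoked anywhere; shows how TacotronSTFT is typically used. With the
# defaults (22050 Hz, hop 256, 80 mel channels), one second of audio in
# [-1, 1] yields a mel-spectrogram of roughly (1, 80, 87).
def _example_mel_spectrogram():
    taco_stft = TacotronSTFT()
    audio = torch.rand(1, 22050) * 2 - 1  # (B, T), values in [-1, 1]
    mel = taco_stft.mel_spectrogram(audio)
    return mel.shape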
|
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("logs/{}_GPU_{}.log".format(job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
|
import numpy as np
from scipy.io.wavfile import read
import torch
from pathlib import Path
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths).item()
ids = torch.arange(0, max_len, device=lengths.device)
mask = (ids < lengths.unsqueeze(1)).bool()
return mask
def load_wav_to_torch(full_path):
sampling_rate, data = read(full_path)
return torch.FloatTensor(data.astype(np.float32)), sampling_rate
def load_filepaths_and_text(filename, split="|"):
root = str(Path(__file__).parent)
with open(filename, encoding='utf-8') as f:
filepaths_and_text = []
for line in f:
filename, *text = line.strip().split(split)
filename = f'{root}/{filename}'
filepaths_and_text.append((filename, *text))
return filepaths_and_text
def to_gpu(x):
x = x.contiguous()
if torch.cuda.is_available():
x = x.cuda(non_blocking=True)
return torch.autograd.Variable(x)
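# --- Added usage sketch (illustration only, not part of the upstream file) ---
# get_mask_from_lengths builds the boolean padding mask used to mask padded
# decoder outputs; for lengths [3, 1] it keeps the first 3 and the first 1
# positions respectively.
if __name__ == '__main__':
    print(get_mask_from_lengths(torch.LongTensor([3, 1])))
    # tensor([[ True,  True,  True],
    #         [ True, False, False]])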
|
import torch
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
# return False
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
# `x` is a torch.Tensor
def _has_inf_or_nan(x):
cpu_sum = float(x.float().sum())
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
if overflow:
#self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
# self.cur_scale = 1
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
        print('Iter {} scaled loss: {}'.format(t, loss.item()))
        print('Iter {} unscaled loss: {}'.format(t, loss.item() / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
# Check for overflow
        has_overflow = loss_scaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import copy
import torch
from torch.autograd import Variable
import torch.nn.functional as F
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class WaveGlowLoss(torch.nn.Module):
def __init__(self, sigma=1.0):
super(WaveGlowLoss, self).__init__()
self.sigma = sigma
def forward(self, model_output):
z, log_s_list, log_det_W_list = model_output
for i, log_s in enumerate(log_s_list):
if i == 0:
log_s_total = torch.sum(log_s)
log_det_W_total = log_det_W_list[i]
else:
log_s_total = log_s_total + torch.sum(log_s)
log_det_W_total += log_det_W_list[i]
loss = torch.sum(z*z)/(2*self.sigma*self.sigma) - log_s_total - log_det_W_total
return loss/(z.size(0)*z.size(1)*z.size(2))
class Invertible1x1Conv(torch.nn.Module):
"""
    The layer outputs both the convolution and the log determinant
    of its weight matrix. If reverse=True it does convolution with
    the inverse of its weight matrix.
"""
def __init__(self, c):
super(Invertible1x1Conv, self).__init__()
self.conv = torch.nn.Conv1d(c, c, kernel_size=1, stride=1, padding=0,
bias=False)
# Sample a random orthonormal matrix to initialize weights
W = torch.qr(torch.FloatTensor(c, c).normal_())[0]
# Ensure determinant is 1.0 not -1.0
if torch.det(W) < 0:
W[:,0] = -1*W[:,0]
W = W.view(c, c, 1)
self.conv.weight.data = W
def forward(self, z, reverse=False):
# shape
batch_size, group_size, n_of_groups = z.size()
W = self.conv.weight.squeeze()
if reverse:
if not hasattr(self, 'W_inverse'):
# Reverse computation
W_inverse = W.float().inverse()
W_inverse = Variable(W_inverse[..., None])
if z.type() == 'torch.cuda.HalfTensor':
W_inverse = W_inverse.half()
self.W_inverse = W_inverse
z = F.conv1d(z, self.W_inverse, bias=None, stride=1, padding=0)
return z
else:
# Forward computation
log_det_W = batch_size * n_of_groups * torch.logdet(W)
z = self.conv(z)
return z, log_det_W
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary difference
from WaveNet is the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size*dilation - dilation)/2)
in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2*n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
output = torch.zeros_like(audio)
n_channels_tensor = torch.IntTensor([self.n_channels])
spect = self.cond_layer(spect)
for i in range(self.n_layers):
spect_offset = i*2*self.n_channels
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
spect[:,spect_offset:spect_offset+2*self.n_channels,:],
n_channels_tensor)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = audio + res_skip_acts[:,:self.n_channels,:]
output = output + res_skip_acts[:,self.n_channels:,:]
else:
output = output + res_skip_acts
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group/2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size/2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
"""
forward_input[0] = mel_spectrogram: batch x n_mel_channels x frames
forward_input[1] = audio: batch x time
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
log_s_list = []
log_det_W_list = []
for k in range(self.n_flows):
if k % self.n_early_every == 0 and k > 0:
output_audio.append(audio[:,:self.n_early_size,:])
audio = audio[:,self.n_early_size:,:]
audio, log_det_W = self.convinv[k](audio)
log_det_W_list.append(log_det_W)
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
log_s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(log_s)*audio_1 + b
log_s_list.append(log_s)
audio = torch.cat([audio_0, audio_1],1)
output_audio.append(audio)
return torch.cat(output_audio,1), log_s_list, log_det_W_list
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == 'torch.cuda.HalfTensor':
audio = torch.cuda.HalfTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
else:
audio = torch.cuda.FloatTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma*audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1)/2)
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b)/torch.exp(s)
audio = torch.cat([audio_0, audio_1],1)
audio = self.convinv[k](audio, reverse=True)
if k % self.n_early_every == 0 and k > 0:
if spect.type() == 'torch.cuda.HalfTensor':
z = torch.cuda.HalfTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
else:
z = torch.cuda.FloatTensor(spect.size(0), self.n_early_size, spect.size(2)).normal_()
audio = torch.cat((sigma*z, audio),1)
audio = audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data
return audio
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layer = torch.nn.utils.remove_weight_norm(WN.cond_layer)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
def remove(conv_list):
new_conv_list = torch.nn.ModuleList()
for old_conv in conv_list:
old_conv = torch.nn.utils.remove_weight_norm(old_conv)
new_conv_list.append(old_conv)
return new_conv_list
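# --- Added usage sketch (illustration only, not part of the upstream file) ---
# Builds a deliberately tiny WaveGlow and runs one forward pass on random
# data; the hyperparameters below are scaled-down assumptions, not the
# published configuration. Runs on CPU (infer(), by contrast, expects CUDA).
if __name__ == '__main__':
    wn_config = {'n_layers': 2, 'n_channels': 64, 'kernel_size': 3}
    model = WaveGlow(n_mel_channels=80, n_flows=4, n_group=8,
                     n_early_every=4, n_early_size=2, WN_config=wn_config)
    mel = torch.randn(1, 80, 63)   # batch x n_mel_channels x frames
    audio = torch.randn(1, 16000)  # batch x time
    z, log_s_list, log_det_W_list = model((mel, audio))
    loss = WaveGlowLoss(sigma=1.0)((z, log_s_list, log_det_W_list))
    print(z.shape, loss.item())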
|
import copy
import torch
from glow import Invertible1x1Conv, remove
@torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a+input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class WN(torch.nn.Module):
"""
This is the WaveNet like layer for the affine coupling. The primary difference
from WaveNet is the convolutions need not be causal. There is also no dilation
size reset. The dilation only doubles on each layer
"""
def __init__(self, n_in_channels, n_mel_channels, n_layers, n_channels,
kernel_size):
super(WN, self).__init__()
assert(kernel_size % 2 == 1)
assert(n_channels % 2 == 0)
self.n_layers = n_layers
self.n_channels = n_channels
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.cond_layers = torch.nn.ModuleList()
start = torch.nn.Conv1d(n_in_channels, n_channels, 1)
start = torch.nn.utils.weight_norm(start, name='weight')
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(n_channels, 2*n_in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
for i in range(n_layers):
dilation = 2 ** i
padding = int((kernel_size*dilation - dilation)/2)
in_layer = torch.nn.Conv1d(n_channels, 2*n_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels, 1)
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
self.cond_layers.append(cond_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2*n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, forward_input):
audio, spect = forward_input
audio = self.start(audio)
for i in range(self.n_layers):
acts = fused_add_tanh_sigmoid_multiply(
self.in_layers[i](audio),
self.cond_layers[i](spect),
torch.IntTensor([self.n_channels]))
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
audio = res_skip_acts[:,:self.n_channels,:] + audio
skip_acts = res_skip_acts[:,self.n_channels:,:]
else:
skip_acts = res_skip_acts
if i == 0:
output = skip_acts
else:
output = skip_acts + output
return self.end(output)
class WaveGlow(torch.nn.Module):
def __init__(self, n_mel_channels, n_flows, n_group, n_early_every,
n_early_size, WN_config):
super(WaveGlow, self).__init__()
self.upsample = torch.nn.ConvTranspose1d(n_mel_channels,
n_mel_channels,
1024, stride=256)
assert(n_group % 2 == 0)
self.n_flows = n_flows
self.n_group = n_group
self.n_early_every = n_early_every
self.n_early_size = n_early_size
self.WN = torch.nn.ModuleList()
self.convinv = torch.nn.ModuleList()
n_half = int(n_group/2)
# Set up layers with the right sizes based on how many dimensions
# have been output already
n_remaining_channels = n_group
for k in range(n_flows):
if k % self.n_early_every == 0 and k > 0:
n_half = n_half - int(self.n_early_size/2)
n_remaining_channels = n_remaining_channels - self.n_early_size
self.convinv.append(Invertible1x1Conv(n_remaining_channels))
self.WN.append(WN(n_half, n_mel_channels*n_group, **WN_config))
self.n_remaining_channels = n_remaining_channels # Useful during inference
def forward(self, forward_input):
return None
"""
forward_input[0] = audio: batch x time
forward_input[1] = upsamp_spectrogram: batch x n_cond_channels x time
"""
"""
spect, audio = forward_input
# Upsample spectrogram to size of audio
spect = self.upsample(spect)
assert(spect.size(2) >= audio.size(1))
if spect.size(2) > audio.size(1):
spect = spect[:, :, :audio.size(1)]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
audio = audio.unfold(1, self.n_group, self.n_group).permute(0, 2, 1)
output_audio = []
s_list = []
s_conv_list = []
for k in range(self.n_flows):
if k%4 == 0 and k > 0:
output_audio.append(audio[:,:self.n_multi,:])
audio = audio[:,self.n_multi:,:]
# project to new basis
audio, s = self.convinv[k](audio)
s_conv_list.append(s)
n_half = int(audio.size(1)/2)
if k%2 == 0:
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
else:
audio_1 = audio[:,:n_half,:]
audio_0 = audio[:,n_half:,:]
output = self.nn[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = torch.exp(s)*audio_1 + b
s_list.append(s)
if k%2 == 0:
audio = torch.cat([audio[:,:n_half,:], audio_1],1)
else:
audio = torch.cat([audio_1, audio[:,n_half:,:]], 1)
output_audio.append(audio)
return torch.cat(output_audio,1), s_list, s_conv_list
"""
def infer(self, spect, sigma=1.0):
spect = self.upsample(spect)
# trim conv artifacts. maybe pad spec to kernel multiple
time_cutoff = self.upsample.kernel_size[0] - self.upsample.stride[0]
spect = spect[:, :, :-time_cutoff]
spect = spect.unfold(2, self.n_group, self.n_group).permute(0, 2, 1, 3)
spect = spect.contiguous().view(spect.size(0), spect.size(1), -1).permute(0, 2, 1)
if spect.type() == 'torch.cuda.HalfTensor':
audio = torch.cuda.HalfTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
else:
audio = torch.cuda.FloatTensor(spect.size(0),
self.n_remaining_channels,
spect.size(2)).normal_()
audio = torch.autograd.Variable(sigma*audio)
for k in reversed(range(self.n_flows)):
n_half = int(audio.size(1)/2)
if k%2 == 0:
audio_0 = audio[:,:n_half,:]
audio_1 = audio[:,n_half:,:]
else:
audio_1 = audio[:,:n_half,:]
audio_0 = audio[:,n_half:,:]
output = self.WN[k]((audio_0, spect))
s = output[:, n_half:, :]
b = output[:, :n_half, :]
audio_1 = (audio_1 - b)/torch.exp(s)
if k%2 == 0:
audio = torch.cat([audio[:,:n_half,:], audio_1],1)
else:
audio = torch.cat([audio_1, audio[:,n_half:,:]], 1)
audio = self.convinv[k](audio, reverse=True)
if k%4 == 0 and k > 0:
if spect.type() == 'torch.cuda.HalfTensor':
z = torch.cuda.HalfTensor(spect.size(0),
self.n_early_size,
spect.size(2)).normal_()
else:
z = torch.cuda.FloatTensor(spect.size(0),
self.n_early_size,
spect.size(2)).normal_()
audio = torch.cat((sigma*z, audio),1)
return audio.permute(0,2,1).contiguous().view(audio.size(0), -1).data
@staticmethod
def remove_weightnorm(model):
waveglow = model
for WN in waveglow.WN:
WN.start = torch.nn.utils.remove_weight_norm(WN.start)
WN.in_layers = remove(WN.in_layers)
WN.cond_layers = remove(WN.cond_layers)
WN.res_skip_layers = remove(WN.res_skip_layers)
return waveglow
|
import sys
sys.path.append('tacotron2')
import torch
from layers import STFT
class Denoiser(torch.nn.Module):
""" Removes model bias from audio produced with waveglow """
def __init__(self, waveglow, filter_length=1024, n_overlap=4,
win_length=1024, mode='zeros'):
super(Denoiser, self).__init__()
self.stft = STFT(filter_length=filter_length,
hop_length=int(filter_length/n_overlap),
win_length=win_length).cuda()
if mode == 'zeros':
mel_input = torch.zeros(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
elif mode == 'normal':
mel_input = torch.randn(
(1, 80, 88),
dtype=waveglow.upsample.weight.dtype,
device=waveglow.upsample.weight.device)
else:
raise Exception("Mode {} if not supported".format(mode))
with torch.no_grad():
bias_audio = waveglow.infer(mel_input, sigma=0.0).float()
bias_spec, _ = self.stft.transform(bias_audio)
self.register_buffer('bias_spec', bias_spec[:, :, 0][:, :, None])
def forward(self, audio, strength=0.1):
audio_spec, audio_angles = self.stft.transform(audio.cuda().float())
audio_spec_denoised = audio_spec - self.bias_spec * strength
audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0)
audio_denoised = self.stft.inverse(audio_spec_denoised, audio_angles)
return audio_denoised
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import sys
import time
import subprocess
import argparse
import torch
import torch.distributed as dist
from torch.autograd import Variable
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
dist.all_reduce(rt, op=dist.reduce_op.SUM)
rt /= num_gpus
return rt
def init_distributed(rank, num_gpus, group_name, dist_backend, dist_url):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing Distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
dist.init_process_group(dist_backend, init_method=dist_url,
world_size=num_gpus, rank=rank,
group_name=group_name)
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
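# --- Added usage sketch (illustration only, not part of the upstream file) ---
# Not invoked anywhere; demonstrates that flattening a list of gradients into
# one contiguous buffer (so a single all_reduce can cover them) and
# unflattening it back is a lossless round trip.
def _flatten_roundtrip_example():
    a, b = torch.randn(2, 3), torch.randn(4)
    flat = _flatten_dense_tensors([a, b])
    a2, b2 = _unflatten_dense_tensors(flat, [a, b])
    assert torch.equal(a, a2) and torch.equal(b, b2)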
def apply_gradient_allreduce(module):
"""
Modifies existing model to do gradient allreduce, but doesn't change class
so you don't need "module"
"""
if not hasattr(dist, '_backend'):
module.warn_on_half = True
else:
module.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
for p in module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(module.needs_reduction):
module.needs_reduction = False
buckets = {}
for param in module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if module.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
module.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(module.parameters()):
def allreduce_hook(*unused):
Variable._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
dir(param)
def set_needs_reduction(self, input, output):
self.needs_reduction = True
module.register_forward_hook(set_needs_reduction)
return module
def main(config, stdout_dir, args_str):
args_list = ['train.py']
args_list += args_str.split(' ') if len(args_str) > 0 else []
args_list.append('--config={}'.format(config))
num_gpus = torch.cuda.device_count()
args_list.append('--num_gpus={}'.format(num_gpus))
args_list.append("--group_name=group_{}".format(time.strftime("%Y_%m_%d-%H%M%S")))
if not os.path.isdir(stdout_dir):
os.makedirs(stdout_dir)
os.chmod(stdout_dir, 0o775)
workers = []
for i in range(num_gpus):
args_list[-2] = '--rank={}'.format(i)
stdout = None if i == 0 else open(
os.path.join(stdout_dir, "GPU_{}.log".format(i)), "w")
print(args_list)
p = subprocess.Popen([str(sys.executable)]+args_list, stdout=stdout)
workers.append(p)
for p in workers:
p.wait()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str, required=True,
help='JSON file for configuration')
parser.add_argument('-s', '--stdout_dir', type=str, default=".",
                        help='directory to save stdout logs')
parser.add_argument(
'-a', '--args_str', type=str, default='',
help='double quoted string with space separated key value pairs')
args = parser.parse_args()
main(args.config, args.stdout_dir, args.args_str)
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import argparse
import json
import os
import torch
#=====START: ADDED FOR DISTRIBUTED======
from distributed import init_distributed, apply_gradient_allreduce, reduce_tensor
from torch.utils.data.distributed import DistributedSampler
#=====END: ADDED FOR DISTRIBUTED======
from torch.utils.data import DataLoader
from glow import WaveGlow, WaveGlowLoss
from mel2samp import Mel2Samp
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
iteration = checkpoint_dict['iteration']
optimizer.load_state_dict(checkpoint_dict['optimizer'])
model_for_loading = checkpoint_dict['model']
model.load_state_dict(model_for_loading.state_dict())
print("Loaded checkpoint '{}' (iteration {})" .format(
checkpoint_path, iteration))
return model, optimizer, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
model_for_saving = WaveGlow(**waveglow_config).cuda()
model_for_saving.load_state_dict(model.state_dict())
torch.save({'model': model_for_saving,
'iteration': iteration,
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def train(num_gpus, rank, group_name, output_directory, epochs, learning_rate,
sigma, iters_per_checkpoint, batch_size, seed, fp16_run,
checkpoint_path, with_tensorboard):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
init_distributed(rank, num_gpus, group_name, **dist_config)
#=====END: ADDED FOR DISTRIBUTED======
criterion = WaveGlowLoss(sigma)
model = WaveGlow(**waveglow_config).cuda()
#=====START: ADDED FOR DISTRIBUTED======
if num_gpus > 1:
model = apply_gradient_allreduce(model)
#=====END: ADDED FOR DISTRIBUTED======
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
if fp16_run:
from apex import amp
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
# Load checkpoint if one exists
iteration = 0
if checkpoint_path != "":
model, optimizer, iteration = load_checkpoint(checkpoint_path, model,
optimizer)
iteration += 1 # next iteration is iteration + 1
trainset = Mel2Samp(**data_config)
# =====START: ADDED FOR DISTRIBUTED======
train_sampler = DistributedSampler(trainset) if num_gpus > 1 else None
# =====END: ADDED FOR DISTRIBUTED======
train_loader = DataLoader(trainset, num_workers=1, shuffle=False,
sampler=train_sampler,
batch_size=batch_size,
pin_memory=False,
drop_last=True)
# Get shared output_directory ready
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
print("output directory", output_directory)
if with_tensorboard and rank == 0:
from tensorboardX import SummaryWriter
logger = SummaryWriter(os.path.join(output_directory, 'logs'))
model.train()
epoch_offset = max(0, int(iteration / len(train_loader)))
    # ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
model.zero_grad()
mel, audio = batch
mel = torch.autograd.Variable(mel.cuda())
audio = torch.autograd.Variable(audio.cuda())
outputs = model((mel, audio))
loss = criterion(outputs)
if num_gpus > 1:
reduced_loss = reduce_tensor(loss.data, num_gpus).item()
else:
reduced_loss = loss.item()
if fp16_run:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
optimizer.step()
print("{}:\t{:.9f}".format(iteration, reduced_loss))
if with_tensorboard and rank == 0:
logger.add_scalar('training_loss', reduced_loss, i + len(train_loader) * epoch)
if (iteration % iters_per_checkpoint == 0):
if rank == 0:
checkpoint_path = "{}/waveglow_{}".format(
output_directory, iteration)
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-r', '--rank', type=int, default=0,
help='rank of process for distributed')
parser.add_argument('-g', '--group_name', type=str, default='',
help='name of group for distributed')
args = parser.parse_args()
# Parse configs. Globals nicer in this case
with open(args.config) as f:
data = f.read()
config = json.loads(data)
train_config = config["train_config"]
global data_config
data_config = config["data_config"]
global dist_config
dist_config = config["dist_config"]
global waveglow_config
waveglow_config = config["waveglow_config"]
num_gpus = torch.cuda.device_count()
if num_gpus > 1:
if args.group_name == '':
print("WARNING: Multiple GPUs detected but no distributed group set")
print("Only running 1 GPU. Use distributed.py for multiple GPUs")
num_gpus = 1
if num_gpus == 1 and args.rank != 0:
raise Exception("Doing single GPU training on rank > 0")
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
train(num_gpus, args.rank, args.group_name, **train_config)
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
from scipy.io.wavfile import write
import torch
from mel2samp import files_to_list, MAX_WAV_VALUE
from denoiser import Denoiser
def main(mel_files, waveglow_path, sigma, output_dir, sampling_rate, is_fp16,
denoiser_strength):
mel_files = files_to_list(mel_files)
waveglow = torch.load(waveglow_path)['model']
waveglow = waveglow.remove_weightnorm(waveglow)
waveglow.cuda().eval()
if is_fp16:
from apex import amp
waveglow, _ = amp.initialize(waveglow, [], opt_level="O3")
if denoiser_strength > 0:
denoiser = Denoiser(waveglow).cuda()
for i, file_path in enumerate(mel_files):
file_name = os.path.splitext(os.path.basename(file_path))[0]
mel = torch.load(file_path)
mel = torch.autograd.Variable(mel.cuda())
mel = torch.unsqueeze(mel, 0)
mel = mel.half() if is_fp16 else mel
with torch.no_grad():
audio = waveglow.infer(mel, sigma=sigma)
if denoiser_strength > 0:
audio = denoiser(audio, denoiser_strength)
audio = audio * MAX_WAV_VALUE
audio = audio.squeeze()
audio = audio.cpu().numpy()
audio = audio.astype('int16')
audio_path = os.path.join(
output_dir, "{}_synthesis.wav".format(file_name))
write(audio_path, sampling_rate, audio)
print(audio_path)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--filelist_path", required=True)
parser.add_argument('-w', '--waveglow_path',
help='Path to waveglow decoder checkpoint with model')
parser.add_argument('-o', "--output_dir", required=True)
parser.add_argument("-s", "--sigma", default=1.0, type=float)
parser.add_argument("--sampling_rate", default=22050, type=int)
parser.add_argument("--is_fp16", action="store_true")
parser.add_argument("-d", "--denoiser_strength", default=0.0, type=float,
help='Removes model bias. Start with 0.1 and adjust')
args = parser.parse_args()
main(args.filelist_path, args.waveglow_path, args.sigma, args.output_dir,
args.sampling_rate, args.is_fp16, args.denoiser_strength)
|
# *****************************************************************************
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the NVIDIA CORPORATION nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# *****************************************************************************
import os
import random
import argparse
import json
import torch
import torch.utils.data
import sys
from scipy.io.wavfile import read
# We're using the audio processing from TacoTron2 to make sure it matches
sys.path.insert(0, 'tacotron2')
from tacotron2.layers import TacotronSTFT
MAX_WAV_VALUE = 32768.0
def files_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
def load_wav_to_torch(full_path):
"""
    Loads wav data into a torch array
"""
sampling_rate, data = read(full_path)
return torch.from_numpy(data).float(), sampling_rate
class Mel2Samp(torch.utils.data.Dataset):
"""
This is the main class that calculates the spectrogram and returns the
spectrogram, audio pair.
"""
def __init__(self, training_files, segment_length, filter_length,
hop_length, win_length, sampling_rate, mel_fmin, mel_fmax):
self.audio_files = files_to_list(training_files)
random.seed(1234)
random.shuffle(self.audio_files)
self.stft = TacotronSTFT(filter_length=filter_length,
hop_length=hop_length,
win_length=win_length,
sampling_rate=sampling_rate,
mel_fmin=mel_fmin, mel_fmax=mel_fmax)
self.segment_length = segment_length
self.sampling_rate = sampling_rate
def get_mel(self, audio):
audio_norm = audio / MAX_WAV_VALUE
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
return melspec
def __getitem__(self, index):
# Read audio
filename = self.audio_files[index]
audio, sampling_rate = load_wav_to_torch(filename)
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
# Take segment
if audio.size(0) >= self.segment_length:
max_audio_start = audio.size(0) - self.segment_length
audio_start = random.randint(0, max_audio_start)
audio = audio[audio_start:audio_start+self.segment_length]
else:
audio = torch.nn.functional.pad(audio, (0, self.segment_length - audio.size(0)), 'constant').data
mel = self.get_mel(audio)
audio = audio / MAX_WAV_VALUE
return (mel, audio)
def __len__(self):
return len(self.audio_files)
# ===================================================================
# Takes directory of clean audio and makes directory of spectrograms
# Useful for making test sets
# ===================================================================
if __name__ == "__main__":
# Get defaults so it can work with no Sacred
parser = argparse.ArgumentParser()
parser.add_argument('-f', "--filelist_path", required=True)
parser.add_argument('-c', '--config', type=str,
help='JSON file for configuration')
parser.add_argument('-o', '--output_dir', type=str,
help='Output directory')
args = parser.parse_args()
with open(args.config) as f:
data = f.read()
data_config = json.loads(data)["data_config"]
mel2samp = Mel2Samp(**data_config)
filepaths = files_to_list(args.filelist_path)
# Make directory if it doesn't exist
if not os.path.isdir(args.output_dir):
os.makedirs(args.output_dir)
os.chmod(args.output_dir, 0o775)
for filepath in filepaths:
audio, sr = load_wav_to_torch(filepath)
melspectrogram = mel2samp.get_mel(audio)
filename = os.path.basename(filepath)
new_filepath = args.output_dir + '/' + filename + '.pt'
print(new_filepath)
torch.save(melspectrogram, new_filepath)
|
import sys
import copy
import torch
def _check_model_old_version(model):
if hasattr(model.WN[0], 'res_layers') or hasattr(model.WN[0], 'cond_layers'):
return True
else:
return False
def _update_model_res_skip(old_model, new_model):
for idx in range(0, len(new_model.WN)):
wavenet = new_model.WN[idx]
n_channels = wavenet.n_channels
n_layers = wavenet.n_layers
wavenet.res_skip_layers = torch.nn.ModuleList()
for i in range(0, n_layers):
if i < n_layers - 1:
res_skip_channels = 2*n_channels
else:
res_skip_channels = n_channels
res_skip_layer = torch.nn.Conv1d(n_channels, res_skip_channels, 1)
skip_layer = torch.nn.utils.remove_weight_norm(wavenet.skip_layers[i])
if i < n_layers - 1:
res_layer = torch.nn.utils.remove_weight_norm(wavenet.res_layers[i])
res_skip_layer.weight = torch.nn.Parameter(torch.cat([res_layer.weight, skip_layer.weight]))
res_skip_layer.bias = torch.nn.Parameter(torch.cat([res_layer.bias, skip_layer.bias]))
else:
res_skip_layer.weight = torch.nn.Parameter(skip_layer.weight)
res_skip_layer.bias = torch.nn.Parameter(skip_layer.bias)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
wavenet.res_skip_layers.append(res_skip_layer)
del wavenet.res_layers
del wavenet.skip_layers
def _update_model_cond(old_model, new_model):
for idx in range(0, len(new_model.WN)):
wavenet = new_model.WN[idx]
n_channels = wavenet.n_channels
n_layers = wavenet.n_layers
n_mel_channels = wavenet.cond_layers[0].weight.shape[1]
cond_layer = torch.nn.Conv1d(n_mel_channels, 2*n_channels*n_layers, 1)
cond_layer_weight = []
cond_layer_bias = []
for i in range(0, n_layers):
_cond_layer = torch.nn.utils.remove_weight_norm(wavenet.cond_layers[i])
cond_layer_weight.append(_cond_layer.weight)
cond_layer_bias.append(_cond_layer.bias)
cond_layer.weight = torch.nn.Parameter(torch.cat(cond_layer_weight))
cond_layer.bias = torch.nn.Parameter(torch.cat(cond_layer_bias))
cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
wavenet.cond_layer = cond_layer
del wavenet.cond_layers
def update_model(old_model):
if not _check_model_old_version(old_model):
return old_model
new_model = copy.deepcopy(old_model)
if hasattr(old_model.WN[0], 'res_layers'):
_update_model_res_skip(old_model, new_model)
if hasattr(old_model.WN[0], 'cond_layers'):
_update_model_cond(old_model, new_model)
return new_model
if __name__ == '__main__':
old_model_path = sys.argv[1]
new_model_path = sys.argv[2]
model = torch.load(old_model_path, map_location='cpu')
model['model'] = update_model(model['model'])
torch.save(model, new_model_path)
|
import matplotlib
matplotlib.use("Agg")
import matplotlib.pylab as plt
import numpy as np
def save_figure_to_numpy(fig):
# save it to a numpy array.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))
return data
def plot_alignment_to_numpy(alignment, info=None):
fig, ax = plt.subplots(figsize=(6, 4))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
fig.colorbar(im, ax=ax)
xlabel = 'Decoder timestep'
if info is not None:
xlabel += '\n\n' + info
plt.xlabel(xlabel)
plt.ylabel('Encoder timestep')
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_spectrogram_to_numpy(spectrogram):
fig, ax = plt.subplots(figsize=(12, 3))
im = ax.imshow(spectrogram, aspect="auto", origin="lower",
interpolation='none')
plt.colorbar(im, ax=ax)
plt.xlabel("Frames")
plt.ylabel("Channels")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
def plot_gate_outputs_to_numpy(gate_targets, gate_outputs):
fig, ax = plt.subplots(figsize=(12, 3))
ax.scatter(range(len(gate_targets)), gate_targets, alpha=0.5,
color='green', marker='+', s=1, label='target')
ax.scatter(range(len(gate_outputs)), gate_outputs, alpha=0.5,
color='red', marker='.', s=1, label='predicted')
plt.xlabel("Frames (Green target, Red predicted)")
plt.ylabel("Gate State")
plt.tight_layout()
fig.canvas.draw()
data = save_figure_to_numpy(fig)
plt.close()
return data
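# --- Added usage sketch (illustration only, not part of the upstream file) ---
# Renders a random spectrogram to an HxWx3 uint8 array, the format the
# TensorBoard logger consumes. Assumes the matplotlib/numpy versions pinned by
# this repo, where canvas.tostring_rgb() is still available.
if __name__ == '__main__':
    img = plot_spectrogram_to_numpy(np.random.rand(80, 100))
    print(img.shape, img.dtype)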
|
import tensorflow as tf
from text import symbols
def create_hparams(hparams_string=None, verbose=False):
"""Create model hyperparameters. Parse nondefault from given string."""
hparams = tf.contrib.training.HParams(
################################
# Experiment Parameters #
################################
epochs=500,
iters_per_checkpoint=500,
seed=1234,
dynamic_loss_scaling=True,
fp16_run=False,
distributed_run=False,
dist_backend="nccl",
dist_url="file://distributed.dpt",
cudnn_enabled=True,
cudnn_benchmark=False,
################################
# Data Parameters #
################################
load_mel_from_disk=False,
training_files='filelists/ljs_audio_text_train_filelist.txt',
validation_files='filelists/ljs_audio_text_val_filelist.txt',
text_cleaners=['english_cleaners'],
sort_by_length=False,
################################
# Audio Parameters #
################################
max_wav_value=32768.0,
sampling_rate=22050,
filter_length=1024,
hop_length=256,
win_length=1024,
n_mel_channels=80,
mel_fmin=0.0,
mel_fmax=None, # if None, half the sampling rate
################################
# Model Parameters #
################################
n_symbols=len(symbols),
symbols_embedding_dim=512,
# Encoder parameters
encoder_kernel_size=5,
encoder_n_convolutions=3,
encoder_embedding_dim=512,
# Decoder parameters
n_frames_per_step=1, # currently only 1 is supported
decoder_rnn_dim=1024,
prenet_dim=256,
max_decoder_steps=1000,
gate_threshold=0.6,
# Attention parameters
attention_rnn_dim=1024,
attention_dim=128,
# Location Layer parameters
attention_location_n_filters=32,
attention_location_kernel_size=31,
# Mel-post processing network parameters
postnet_embedding_dim=512,
postnet_kernel_size=5,
postnet_n_convolutions=5,
################################
# Optimization Hyperparameters #
################################
use_saved_learning_rate=False,
learning_rate=1e-3,
weight_decay=1e-6,
grad_clip_thresh=1,
batch_size=48,
mask_padding=False # set model's padded outputs to padded values
)
if hparams_string:
tf.logging.info('Parsing command line hparams: %s', hparams_string)
hparams.parse(hparams_string)
if verbose:
tf.logging.info('Final parsed hparams: %s', hparams.values())
return hparams
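# --- Added usage sketch (illustration only, not part of the upstream file) ---
# Non-default values are passed as a comma-separated override string; this
# requires TensorFlow 1.x for tf.contrib.training.HParams.
if __name__ == '__main__':
    hparams = create_hparams('batch_size=32,fp16_run=True', verbose=True)
    print(hparams.batch_size, hparams.fp16_run)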
|
import torch
import numpy as np
from scipy.signal import get_window
import librosa.util as librosa_util
def window_sumsquare(window, n_frames, hop_length=200, win_length=800,
n_fft=800, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
def griffin_lim(magnitudes, stft_fn, n_iters=30):
"""
PARAMS
------
magnitudes: spectrogram magnitudes
stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
"""
angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
angles = angles.astype(np.float32)
angles = torch.autograd.Variable(torch.from_numpy(angles))
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
for i in range(n_iters):
_, angles = stft_fn.transform(signal)
signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
return signal
def dynamic_range_compression(x, C=1, clip_val=1e-5):
"""
PARAMS
------
C: compression factor
"""
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression(x, C=1):
"""
PARAMS
------
C: compression factor used to compress
"""
return torch.exp(x) / C
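# --- Added usage sketch (illustration only, not part of the upstream file) ---
# Compression is log(clamp(x) * C) and decompression is exp(x) / C, so for
# inputs above clip_val the round trip recovers the original values.
if __name__ == '__main__':
    x = torch.rand(3, 4) + 0.1
    assert torch.allclose(dynamic_range_decompression(dynamic_range_compression(x)), x)
    print('dynamic range compression round trip ok')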
|
import random
import torch.nn.functional as F
from tensorboardX import SummaryWriter
from plotting_utils import plot_alignment_to_numpy, plot_spectrogram_to_numpy
from plotting_utils import plot_gate_outputs_to_numpy
class Tacotron2Logger(SummaryWriter):
def __init__(self, logdir):
super(Tacotron2Logger, self).__init__(logdir)
def log_training(self, reduced_loss, grad_norm, learning_rate, duration,
iteration):
self.add_scalar("training.loss", reduced_loss, iteration)
self.add_scalar("grad.norm", grad_norm, iteration)
self.add_scalar("learning.rate", learning_rate, iteration)
self.add_scalar("duration", duration, iteration)
def log_validation(self, reduced_loss, model, y, y_pred, iteration):
self.add_scalar("validation.loss", reduced_loss, iteration)
_, mel_outputs, gate_outputs, alignments = y_pred
mel_targets, gate_targets = y
# plot distribution of parameters
for tag, value in model.named_parameters():
tag = tag.replace('.', '/')
self.add_histogram(tag, value.data.cpu().numpy(), iteration)
# plot alignment, mel target and predicted, gate target and predicted
idx = random.randint(0, alignments.size(0) - 1)
self.add_image(
"alignment",
plot_alignment_to_numpy(alignments[idx].data.cpu().numpy().T),
iteration)
self.add_image(
"mel_target",
plot_spectrogram_to_numpy(mel_targets[idx].data.cpu().numpy()),
iteration)
self.add_image(
"mel_predicted",
plot_spectrogram_to_numpy(mel_outputs[idx].data.cpu().numpy()),
iteration)
self.add_image(
"gate",
plot_gate_outputs_to_numpy(
gate_targets[idx].data.cpu().numpy(),
F.sigmoid(gate_outputs[idx]).data.cpu().numpy()),
iteration)
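# Hedged usage sketch (added for illustration; not part of the original file).
# It logs a few scalar training statistics to a local TensorBoard directory;
# the directory name and the scalar values are arbitrary assumptions, and the
# demo inherits this file's tensorboardX / plotting_utils dependencies.
if __name__ == "__main__":
    logger = Tacotron2Logger("demo_logdir")
    logger.log_training(reduced_loss=1.23, grad_norm=0.5,
                        learning_rate=1e-3, duration=0.8, iteration=1)
    logger.close()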
|
import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from loss_scaler import DynamicLossScaler, LossScaler
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
"""Apply conversion to val. Recursively apply conversion if `val` is a nested tuple/list structure."""
if not isinstance(val, (tuple, list)):
return conversion(val)
rtn = [conversion_helper(v, conversion) for v in val]
if isinstance(val, tuple):
rtn = tuple(rtn)
return rtn
def fp32_to_fp16(val):
"""Convert fp32 `val` to fp16"""
def half_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, FLOAT_TYPES):
val = val.half()
return val
return conversion_helper(val, half_conversion)
def fp16_to_fp32(val):
"""Convert fp16 `val` to fp32"""
def float_conversion(val):
val_typecheck = val
if isinstance(val_typecheck, (Parameter, Variable)):
val_typecheck = val.data
if isinstance(val_typecheck, HALF_TYPES):
val = val.float()
return val
return conversion_helper(val, float_conversion)
class FP16_Module(nn.Module):
def __init__(self, module):
super(FP16_Module, self).__init__()
self.add_module('module', module.half())
def forward(self, *inputs, **kwargs):
return fp16_to_fp32(self.module(*(fp32_to_fp16(inputs)), **kwargs))
class FP16_Optimizer:
"""
FP16_Optimizer is designed to wrap an existing PyTorch optimizer,
and enable an fp16 model to be trained using a master copy of fp32 weights.
Args:
optimizer (torch.optim.optimizer): Existing optimizer containing initialized fp16 parameters. Internally, FP16_Optimizer replaces the passed optimizer's fp16 parameters with new fp32 parameters copied from the original ones. FP16_Optimizer also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy after each step.
static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale fp16 gradients computed by the model. Scaled gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so static_loss_scale should not affect learning rate.
dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any static_loss_scale option.
"""
def __init__(self, optimizer, static_loss_scale=1.0, dynamic_loss_scale=False):
if not torch.cuda.is_available():
raise SystemError('Cannot use fp16 without CUDA')
self.fp16_param_groups = []
self.fp32_param_groups = []
self.fp32_flattened_groups = []
for i, param_group in enumerate(optimizer.param_groups):
print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
for param in param_group['params']:
if param.requires_grad:
if param.type() == 'torch.cuda.HalfTensor':
print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
.format(param.size()))
fp16_params_this_group.append(param)
elif param.type() == 'torch.cuda.FloatTensor':
print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
.format(param.size()))
fp32_params_this_group.append(param)
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
fp32_flattened_this_group = None
if len(fp16_params_this_group) > 0:
fp32_flattened_this_group = _flatten_dense_tensors(
[param.detach().data.clone().float() for param in fp16_params_this_group])
fp32_flattened_this_group = Variable(fp32_flattened_this_group, requires_grad = True)
fp32_flattened_this_group.grad = fp32_flattened_this_group.new(
*fp32_flattened_this_group.size())
# python's lovely list concatenation via +
if fp32_flattened_this_group is not None:
param_group['params'] = [fp32_flattened_this_group] + fp32_params_this_group
else:
param_group['params'] = fp32_params_this_group
self.fp16_param_groups.append(fp16_params_this_group)
self.fp32_param_groups.append(fp32_params_this_group)
self.fp32_flattened_groups.append(fp32_flattened_this_group)
# print("self.fp32_flattened_groups = ", self.fp32_flattened_groups)
# print("self.fp16_param_groups = ", self.fp16_param_groups)
self.optimizer = optimizer.__class__(optimizer.param_groups)
# self.optimizer.load_state_dict(optimizer.state_dict())
self.param_groups = self.optimizer.param_groups
if dynamic_loss_scale:
self.dynamic_loss_scale = True
self.loss_scaler = DynamicLossScaler()
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
def zero_grad(self):
"""
Zero fp32 and fp16 parameter grads.
"""
self.optimizer.zero_grad()
for fp16_group in self.fp16_param_groups:
for param in fp16_group:
if param.grad is not None:
param.grad.detach_() # This does appear in torch.optim.optimizer.zero_grad(),
# but I'm not sure why it's needed.
param.grad.zero_()
def _check_overflow(self):
params = []
for group in self.fp16_param_groups:
for param in group:
params.append(param)
for group in self.fp32_param_groups:
for param in group:
params.append(param)
self.overflow = self.loss_scaler.has_overflow(params)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
def _copy_grads_fp16_to_fp32(self):
for fp32_group, fp16_group in zip(self.fp32_flattened_groups, self.fp16_param_groups):
if len(fp16_group) > 0:
# This might incur one more deep copy than is necessary.
fp32_group.grad.data.copy_(
_flatten_dense_tensors([fp16_param.grad.data for fp16_param in fp16_group]))
def _downscale_fp32(self):
if self.loss_scale != 1.0:
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
param.grad.data.mul_(1./self.loss_scale)
def clip_fp32_grads(self, clip=-1):
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
if clip > 0:
return torch.nn.utils.clip_grad_norm(fp32_params, clip)
def _copy_params_fp32_to_fp16(self):
for fp16_group, fp32_group in zip(self.fp16_param_groups, self.fp32_flattened_groups):
if len(fp16_group) > 0:
for fp16_param, fp32_data in zip(fp16_group,
_unflatten_dense_tensors(fp32_group.data, fp16_group)):
fp16_param.data.copy_(fp32_data)
def state_dict(self):
"""
Returns a dict containing the current state of this FP16_Optimizer instance.
This dict contains attributes of FP16_Optimizer, as well as the state_dict
of the contained Pytorch optimizer.
Untested.
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict.
Untested.
"""
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, step should be called after fp16_optimizer_obj.backward(loss).
step updates the fp32 master copy of parameters using the optimizer supplied to
FP16_Optimizer's constructor, then copies the updated fp32 params into the fp16 params
originally referenced by FP16_Optimizer's constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, step may be called without a prior call to self.backward(loss).
However, the user should take care that any loss.backward() call within the closure
has been replaced by fp16_optimizer_obj.backward(loss).
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to FP16_Optimizer's constructor. closure should call zero_grad on the FP16_Optimizer object, compute the loss, call .backward(loss), and return the loss.
Closure example::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. note::
The only changes that need to be made compared to
`ordinary optimizer closures`_ are that "optimizer" itself should be an instance of
FP16_Optimizer, and that the call to loss.backward should be replaced by
optimizer.backward(loss).
.. warning::
Currently, calling step with a closure is not compatible with dynamic loss scaling.
.. _`ordinary optimizer closures`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
if closure is not None and isinstance(self.loss_scaler, DynamicLossScaler):
raise TypeError("Using step with a closure is currently not "
"compatible with dynamic loss scaling.")
scale = self.loss_scaler.loss_scale
self._update_scale(self.overflow)
if self.overflow:
print("OVERFLOW! Skipping step. Attempted loss scale: {}".format(scale))
return
if closure is not None:
self._step_with_closure(closure)
else:
self.optimizer.step()
self._copy_params_fp32_to_fp16()
return
def _step_with_closure(self, closure):
def wrapped_closure():
if self.first_closure_call_this_step:
"""
We expect that the fp16 params are initially fresh on entering self.step(),
so _copy_params_fp32_to_fp16() is unnecessary the first time wrapped_closure()
is called within self.optimizer.step().
"""
self.first_closure_call_this_step = False
else:
"""
If self.optimizer.step() internally calls wrapped_closure more than once,
it may update the fp32 params after each call. However, self.optimizer
doesn't know about the fp16 params at all. If the fp32 params get updated,
we can't rely on self.optimizer to refresh the fp16 params. We need
to handle that manually:
"""
self._copy_params_fp32_to_fp16()
"""
Our API expects the user to give us ownership of the backward() call by
replacing all calls to loss.backward() with optimizer.backward(loss).
This requirement holds whether or not the call to backward() is made within
a closure.
If the user is properly calling optimizer.backward(loss) within "closure,"
calling closure() here will give the fp32 master params fresh gradients
for the optimizer to play with,
so all wrapped_closure needs to do is call closure() and return the loss.
"""
temp_loss = closure()
return temp_loss
self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
def backward(self, loss, update_fp32_grads=True):
"""
fp16_optimizer_obj.backward performs the following conceptual operations:
fp32_loss = loss.float() (see first Note below)
scaled_loss = fp32_loss*loss_scale
scaled_loss.backward(), which accumulates scaled gradients into the .grad attributes of the
fp16 model's leaves.
fp16 grads are then copied to the stored fp32 params' .grad attributes (see second Note).
Finally, fp32 grads are divided by loss_scale.
In this way, after fp16_optimizer_obj.backward, the fp32 parameters have fresh gradients,
and fp16_optimizer_obj.step may be called.
.. note::
Converting the loss to fp32 before applying the loss scale provides some
additional safety against overflow if the user has supplied an fp16 value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
fp16_optimizer_obj.backward.
.. note::
The gradients found in an fp16 model's leaves after a call to
fp16_optimizer_obj.backward should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may silently change over time).
If the user wants to inspect gradients after a call to fp16_optimizer_obj.backward,
he/she should query the .grad attribute of FP16_Optimizer's stored fp32 parameters.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_fp32_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay this copy, which is useful to eliminate redundant fp16->fp32 grad copies if fp16_optimizer_obj.backward is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling fp16_optimizer_obj.update_fp32_grads before calling fp16_optimizer_obj.step.
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_fp32_grads=False)
optimizer.backward(loss2, update_fp32_grads=False)
optimizer.update_fp32_grads()
"""
self.loss_scaler.backward(loss.float())
if update_fp32_grads:
self.update_fp32_grads()
def update_fp32_grads(self):
"""
Copy the .grad attribute from stored references to fp16 parameters to
the .grad attribute of the master fp32 parameters that are directly
updated by the optimizer. :attr:`update_fp32_grads` only needs to be called if
fp16_optimizer_obj.backward was called with update_fp32_grads=False.
"""
if self.dynamic_loss_scale:
self._check_overflow()
if self.overflow: return
self._copy_grads_fp16_to_fp32()
self._downscale_fp32()
@property
def loss_scale(self):
return self.loss_scaler.loss_scale
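# Hedged usage sketch (added for illustration; not part of the original module).
# It mirrors the pattern described in the class docstrings and used in train.py:
# wrap an existing optimizer around a half-precision model, replace loss.backward()
# with optimizer.backward(loss), optionally clip the fp32 grads, then step.
# The toy model, data, and loss scale below are assumptions for the demo only;
# FP16_Optimizer itself requires CUDA, so the demo is guarded on availability.
if __name__ == "__main__" and torch.cuda.is_available():
    model = FP16_Module(nn.Linear(16, 4).cuda())
    optimizer = FP16_Optimizer(
        torch.optim.Adam(model.parameters(), lr=1e-3), static_loss_scale=128.0)
    inputs, targets = torch.randn(8, 16).cuda(), torch.randn(8, 4).cuda()
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    optimizer.backward(loss)        # scale, backprop, copy fp16 grads to fp32 masters
    optimizer.clip_fp32_grads(1.0)  # clip the fp32 master gradients
    optimizer.step()                # fp32 update, then copy back into the fp16 params
    print("loss:", loss.item(), "loss scale:", optimizer.loss_scale)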
|
import torch
from torch.autograd import Variable
from torch import nn
from torch.nn import functional as F
from layers import ConvNorm, LinearNorm
from utils import to_gpu, get_mask_from_lengths
from fp16_optimizer import fp32_to_fp16, fp16_to_fp32
class LocationLayer(nn.Module):
def __init__(self, attention_n_filters, attention_kernel_size,
attention_dim):
super(LocationLayer, self).__init__()
padding = int((attention_kernel_size - 1) / 2)
self.location_conv = ConvNorm(2, attention_n_filters,
kernel_size=attention_kernel_size,
padding=padding, bias=False, stride=1,
dilation=1)
self.location_dense = LinearNorm(attention_n_filters, attention_dim,
bias=False, w_init_gain='tanh')
def forward(self, attention_weights_cat):
processed_attention = self.location_conv(attention_weights_cat)
processed_attention = processed_attention.transpose(1, 2)
processed_attention = self.location_dense(processed_attention)
return processed_attention
class Attention(nn.Module):
def __init__(self, attention_rnn_dim, embedding_dim, attention_dim,
attention_location_n_filters, attention_location_kernel_size):
super(Attention, self).__init__()
self.query_layer = LinearNorm(attention_rnn_dim, attention_dim,
bias=False, w_init_gain='tanh')
self.memory_layer = LinearNorm(embedding_dim, attention_dim, bias=False,
w_init_gain='tanh')
self.v = LinearNorm(attention_dim, 1, bias=False)
self.location_layer = LocationLayer(attention_location_n_filters,
attention_location_kernel_size,
attention_dim)
self.score_mask_value = -float("inf")
def get_alignment_energies(self, query, processed_memory,
attention_weights_cat):
"""
PARAMS
------
query: decoder output (batch, n_mel_channels * n_frames_per_step)
processed_memory: processed encoder outputs (B, T_in, attention_dim)
attention_weights_cat: cumulative and prev. att weights (B, 2, max_time)
RETURNS
-------
alignment (batch, max_time)
"""
processed_query = self.query_layer(query.unsqueeze(1))
processed_attention_weights = self.location_layer(attention_weights_cat)
energies = self.v(F.tanh(
processed_query + processed_attention_weights + processed_memory))
energies = energies.squeeze(-1)
return energies
def forward(self, attention_hidden_state, memory, processed_memory,
attention_weights_cat, mask):
"""
PARAMS
------
attention_hidden_state: attention rnn last output
memory: encoder outputs
processed_memory: processed encoder outputs
attention_weights_cat: previous and cumulative attention weights
mask: binary mask for padded data
"""
alignment = self.get_alignment_energies(
attention_hidden_state, processed_memory, attention_weights_cat)
if mask is not None:
alignment.data.masked_fill_(mask, self.score_mask_value)
attention_weights = F.softmax(alignment, dim=1)
attention_context = torch.bmm(attention_weights.unsqueeze(1), memory)
attention_context = attention_context.squeeze(1)
return attention_context, attention_weights
class Prenet(nn.Module):
def __init__(self, in_dim, sizes):
super(Prenet, self).__init__()
in_sizes = [in_dim] + sizes[:-1]
self.layers = nn.ModuleList(
[LinearNorm(in_size, out_size, bias=False)
for (in_size, out_size) in zip(in_sizes, sizes)])
def forward(self, x):
for linear in self.layers:
x = F.dropout(F.relu(linear(x)), p=0.5, training=True)
return x
class Postnet(nn.Module):
"""Postnet
- Five 1-d convolution with 512 channels and kernel size 5
"""
def __init__(self, hparams):
super(Postnet, self).__init__()
self.dropout = nn.Dropout(0.5)
self.convolutions = nn.ModuleList()
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.n_mel_channels, hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
for i in range(1, hparams.postnet_n_convolutions - 1):
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim,
hparams.postnet_embedding_dim,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='tanh'),
nn.BatchNorm1d(hparams.postnet_embedding_dim))
)
self.convolutions.append(
nn.Sequential(
ConvNorm(hparams.postnet_embedding_dim, hparams.n_mel_channels,
kernel_size=hparams.postnet_kernel_size, stride=1,
padding=int((hparams.postnet_kernel_size - 1) / 2),
dilation=1, w_init_gain='linear'),
nn.BatchNorm1d(hparams.n_mel_channels))
)
def forward(self, x):
for i in range(len(self.convolutions) - 1):
x = self.dropout(F.tanh(self.convolutions[i](x)))
x = self.dropout(self.convolutions[-1](x))
return x
class Encoder(nn.Module):
"""Encoder module:
- Three 1-d convolution banks
- Bidirectional LSTM
"""
def __init__(self, hparams):
super(Encoder, self).__init__()
self.dropout = nn.Dropout(0.5)
convolutions = []
for _ in range(hparams.encoder_n_convolutions):
conv_layer = nn.Sequential(
ConvNorm(hparams.encoder_embedding_dim,
hparams.encoder_embedding_dim,
kernel_size=hparams.encoder_kernel_size, stride=1,
padding=int((hparams.encoder_kernel_size - 1) / 2),
dilation=1, w_init_gain='relu'),
nn.BatchNorm1d(hparams.encoder_embedding_dim))
convolutions.append(conv_layer)
self.convolutions = nn.ModuleList(convolutions)
self.lstm = nn.LSTM(hparams.encoder_embedding_dim,
int(hparams.encoder_embedding_dim / 2), 1,
batch_first=True, bidirectional=True)
def forward(self, x, input_lengths):
for conv in self.convolutions:
x = self.dropout(F.relu(conv(x)))
x = x.transpose(1, 2)
# PyTorch tensors are not reversible, hence the conversion
input_lengths = input_lengths.cpu().numpy()
x = nn.utils.rnn.pack_padded_sequence(
x, input_lengths, batch_first=True)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
outputs, _ = nn.utils.rnn.pad_packed_sequence(
outputs, batch_first=True)
return outputs
def inference(self, x):
for conv in self.convolutions:
x = self.dropout(F.relu(conv(x)))
x = x.transpose(1, 2)
self.lstm.flatten_parameters()
outputs, _ = self.lstm(x)
return outputs
class Decoder(nn.Module):
def __init__(self, hparams):
super(Decoder, self).__init__()
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.encoder_embedding_dim = hparams.encoder_embedding_dim
self.attention_rnn_dim = hparams.attention_rnn_dim
self.decoder_rnn_dim = hparams.decoder_rnn_dim
self.prenet_dim = hparams.prenet_dim
self.max_decoder_steps = hparams.max_decoder_steps
self.gate_threshold = hparams.gate_threshold
self.prenet = Prenet(
hparams.n_mel_channels * hparams.n_frames_per_step,
[hparams.prenet_dim, hparams.prenet_dim])
self.attention_rnn = nn.LSTMCell(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim,
hparams.attention_rnn_dim)
self.attention_layer = Attention(
hparams.attention_rnn_dim, hparams.encoder_embedding_dim,
hparams.attention_dim, hparams.attention_location_n_filters,
hparams.attention_location_kernel_size)
self.decoder_rnn = nn.LSTMCell(
hparams.prenet_dim + hparams.encoder_embedding_dim,
hparams.decoder_rnn_dim, 1)
self.linear_projection = LinearNorm(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim,
hparams.n_mel_channels*hparams.n_frames_per_step)
self.gate_layer = LinearNorm(
hparams.decoder_rnn_dim + hparams.encoder_embedding_dim, 1,
bias=True, w_init_gain='sigmoid')
def get_go_frame(self, memory):
""" Gets all zeros frames to use as first decoder input
PARAMS
------
memory: encoder outputs
RETURNS
-------
decoder_input: all zeros frames
"""
B = memory.size(0)
decoder_input = Variable(memory.data.new(
B, self.n_mel_channels * self.n_frames_per_step).zero_())
return decoder_input
def initialize_decoder_states(self, memory, mask):
""" Initializes attention rnn states, decoder rnn states, attention
weights, attention cumulative weights, attention context, stores memory
and stores processed memory
PARAMS
------
memory: Encoder outputs
mask: Mask for padded data if training, expects None for inference
"""
B = memory.size(0)
MAX_TIME = memory.size(1)
self.attention_hidden = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.attention_cell = Variable(memory.data.new(
B, self.attention_rnn_dim).zero_())
self.decoder_hidden = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.decoder_cell = Variable(memory.data.new(
B, self.decoder_rnn_dim).zero_())
self.attention_weights = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_weights_cum = Variable(memory.data.new(
B, MAX_TIME).zero_())
self.attention_context = Variable(memory.data.new(
B, self.encoder_embedding_dim).zero_())
self.memory = memory
self.processed_memory = self.attention_layer.memory_layer(memory)
self.mask = mask
def parse_decoder_inputs(self, decoder_inputs):
""" Prepares decoder inputs, i.e. mel outputs
PARAMS
------
decoder_inputs: inputs used for teacher-forced training, i.e. mel-specs
RETURNS
-------
inputs: processed decoder inputs
"""
# (B, n_mel_channels, T_out) -> (B, T_out, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(1, 2)
decoder_inputs = decoder_inputs.view(
decoder_inputs.size(0),
int(decoder_inputs.size(1)/self.n_frames_per_step), -1)
# (B, T_out, n_mel_channels) -> (T_out, B, n_mel_channels)
decoder_inputs = decoder_inputs.transpose(0, 1)
return decoder_inputs
def parse_decoder_outputs(self, mel_outputs, gate_outputs, alignments):
""" Prepares decoder outputs for output
PARAMS
------
mel_outputs:
gate_outputs: gate output energies
alignments:
RETURNS
-------
mel_outputs:
gate_outputs: gate output energies
alignments:
"""
# (T_out, B) -> (B, T_out)
alignments = torch.stack(alignments).transpose(0, 1)
# (T_out, B) -> (B, T_out)
gate_outputs = torch.stack(gate_outputs).transpose(0, 1)
gate_outputs = gate_outputs.contiguous()
# (T_out, B, n_mel_channels) -> (B, T_out, n_mel_channels)
mel_outputs = torch.stack(mel_outputs).transpose(0, 1).contiguous()
# decouple frames per step
mel_outputs = mel_outputs.view(
mel_outputs.size(0), -1, self.n_mel_channels)
# (B, T_out, n_mel_channels) -> (B, n_mel_channels, T_out)
mel_outputs = mel_outputs.transpose(1, 2)
return mel_outputs, gate_outputs, alignments
def decode(self, decoder_input):
""" Decoder step using stored states, attention and memory
PARAMS
------
decoder_input: previous mel output
RETURNS
-------
mel_output:
gate_output: gate output energies
attention_weights:
"""
cell_input = torch.cat((self.decoder_hidden, self.attention_context), -1)
self.attention_hidden, self.attention_cell = self.attention_rnn(
cell_input, (self.attention_hidden, self.attention_cell))
attention_weights_cat = torch.cat(
(self.attention_weights.unsqueeze(1),
self.attention_weights_cum.unsqueeze(1)), dim=1)
self.attention_context, self.attention_weights = self.attention_layer(
self.attention_hidden, self.memory, self.processed_memory,
attention_weights_cat, self.mask)
self.attention_weights_cum += self.attention_weights
prenet_output = self.prenet(decoder_input)
decoder_input = torch.cat((prenet_output, self.attention_context), -1)
self.decoder_hidden, self.decoder_cell = self.decoder_rnn(
decoder_input, (self.decoder_hidden, self.decoder_cell))
decoder_hidden_attention_context = torch.cat(
(self.decoder_hidden, self.attention_context), dim=1)
decoder_output = self.linear_projection(
decoder_hidden_attention_context)
gate_prediction = self.gate_layer(decoder_hidden_attention_context)
return decoder_output, gate_prediction, self.attention_weights
def forward(self, memory, decoder_inputs, memory_lengths):
""" Decoder forward pass for training
PARAMS
------
memory: Encoder outputs
decoder_inputs: Decoder inputs for teacher forcing. i.e. mel-specs
memory_lengths: Encoder output lengths for attention masking.
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
decoder_inputs = self.parse_decoder_inputs(decoder_inputs)
self.initialize_decoder_states(
memory, mask=~get_mask_from_lengths(memory_lengths))
mel_outputs, gate_outputs, alignments = [], [], []
while len(mel_outputs) < decoder_inputs.size(0):
mel_output, gate_output, attention_weights = self.decode(
decoder_input)
mel_outputs += [mel_output]
gate_outputs += [gate_output.squeeze(1)]
alignments += [attention_weights]
decoder_input = decoder_inputs[len(mel_outputs) - 1]
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
def inference(self, memory):
""" Decoder inference
PARAMS
------
memory: Encoder outputs
RETURNS
-------
mel_outputs: mel outputs from the decoder
gate_outputs: gate outputs from the decoder
alignments: sequence of attention weights from the decoder
"""
decoder_input = self.get_go_frame(memory)
self.initialize_decoder_states(memory, mask=None)
mel_outputs, gate_outputs, alignments = [], [], []
while True:
mel_output, gate_output, alignment = self.decode(decoder_input)
mel_outputs += [mel_output]
gate_outputs += [gate_output.squeeze(1)]
alignments += [alignment]
if F.sigmoid(gate_output.data) > self.gate_threshold:
break
elif len(mel_outputs) == self.max_decoder_steps:
print("Warning! Reached max decoder steps")
break
decoder_input = mel_output
mel_outputs, gate_outputs, alignments = self.parse_decoder_outputs(
mel_outputs, gate_outputs, alignments)
return mel_outputs, gate_outputs, alignments
class Tacotron2(nn.Module):
def __init__(self, hparams):
super(Tacotron2, self).__init__()
self.mask_padding = hparams.mask_padding
self.fp16_run = hparams.fp16_run
self.n_mel_channels = hparams.n_mel_channels
self.n_frames_per_step = hparams.n_frames_per_step
self.embedding = nn.Embedding(
hparams.n_symbols, hparams.symbols_embedding_dim)
torch.nn.init.xavier_uniform_(self.embedding.weight.data)
self.encoder = Encoder(hparams)
self.decoder = Decoder(hparams)
self.postnet = Postnet(hparams)
def parse_batch(self, batch):
text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths = batch
text_padded = to_gpu(text_padded).long()
max_len = int(torch.max(input_lengths.data).numpy())
input_lengths = to_gpu(input_lengths).long()
mel_padded = to_gpu(mel_padded).float()
gate_padded = to_gpu(gate_padded).float()
output_lengths = to_gpu(output_lengths).long()
return (
(text_padded, input_lengths, mel_padded, max_len, output_lengths),
(mel_padded, gate_padded))
def parse_input(self, inputs):
inputs = fp32_to_fp16(inputs) if self.fp16_run else inputs
return inputs
def parse_output(self, outputs, output_lengths=None):
if self.mask_padding and output_lengths is not None:
mask = ~get_mask_from_lengths(output_lengths+1) # +1 <stop> token
mask = mask.expand(self.n_mel_channels, mask.size(0), mask.size(1))
mask = mask.permute(1, 0, 2)
outputs[0].data.masked_fill_(mask, 0.0)
outputs[1].data.masked_fill_(mask, 0.0)
outputs[2].data.masked_fill_(mask[:, 0, :], 1e3) # gate energies
outputs = fp16_to_fp32(outputs) if self.fp16_run else outputs
return outputs
def forward(self, inputs):
inputs, input_lengths, targets, max_len, \
output_lengths = self.parse_input(inputs)
input_lengths, output_lengths = input_lengths.data, output_lengths.data
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder(embedded_inputs, input_lengths)
mel_outputs, gate_outputs, alignments = self.decoder(
encoder_outputs, targets, memory_lengths=input_lengths)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
# DataParallel expects equal sized inputs/outputs, hence padding
if input_lengths is not None:
alignments = alignments.unsqueeze(0)
alignments = nn.functional.pad(
alignments,
(0, max_len - alignments.size(3), 0, 0),
"constant", 0)
alignments = alignments.squeeze()
return self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments],
output_lengths)
def inference(self, inputs):
inputs = self.parse_input(inputs)
embedded_inputs = self.embedding(inputs).transpose(1, 2)
encoder_outputs = self.encoder.inference(embedded_inputs)
mel_outputs, gate_outputs, alignments = self.decoder.inference(
encoder_outputs)
mel_outputs_postnet = self.postnet(mel_outputs)
mel_outputs_postnet = mel_outputs + mel_outputs_postnet
outputs = self.parse_output(
[mel_outputs, mel_outputs_postnet, gate_outputs, alignments])
return outputs
|
"""
BSD 3-Clause License
Copyright (c) 2017, Prem Seetharaman
All rights reserved.
* Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from audio_processing import window_sumsquare
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=800, hop_length=200, win_length=800,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
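# Hedged usage sketch (added for illustration; not part of the original file).
# It runs a random waveform through transform() and inverse() to show the
# magnitude/phase round trip implemented above. The signal length and batch size
# are arbitrary assumptions, and the demo inherits this file's dependency on a
# librosa version whose pad_center accepts a positional target length.
if __name__ == "__main__":
    stft = STFT(filter_length=800, hop_length=200, win_length=800)
    audio = torch.randn(1, 4000)              # (batch, samples), synthetic
    magnitude, phase = stft.transform(audio)  # (1, filter_length/2 + 1, frames) each
    reconstruction = stft.inverse(magnitude, phase)
    print("input:", tuple(audio.shape), "reconstruction:", tuple(reconstruction.shape))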
|
import torch
import torch.distributed as dist
from torch.nn.modules import Module
def _flatten_dense_tensors(tensors):
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
same dense type.
Since inputs are dense, the resulting tensor will be a concatenated 1D
buffer. Element-wise operation on this buffer will be equivalent to
operating individually.
Arguments:
tensors (Iterable[Tensor]): dense tensors to flatten.
Returns:
A contiguous 1D buffer containing input tensors.
"""
if len(tensors) == 1:
return tensors[0].contiguous().view(-1)
flat = torch.cat([t.contiguous().view(-1) for t in tensors], dim=0)
return flat
def _unflatten_dense_tensors(flat, tensors):
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
same dense type, and that flat is given by _flatten_dense_tensors.
Arguments:
flat (Tensor): flattened dense tensors to unflatten.
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
unflatten flat.
Returns:
Unflattened dense tensors with sizes same as tensors and values from
flat.
"""
outputs = []
offset = 0
for tensor in tensors:
numel = tensor.numel()
outputs.append(flat.narrow(0, offset, numel).view_as(tensor))
offset += numel
return tuple(outputs)
'''
This version of DistributedDataParallel is designed to be used in conjunction with the multiproc.py
launcher included with this example. It assumes that your run is using multiprocess with 1
GPU/process, that the model is on the correct device, and that torch.set_device has been
used to set the device.
Parameters are broadcasted to the other processes on initialization of DistributedDataParallel,
and will be allreduced at the finish of the backward pass.
'''
class DistributedDataParallel(Module):
def __init__(self, module):
super(DistributedDataParallel, self).__init__()
#fallback for PyTorch 0.3
if not hasattr(dist, '_backend'):
self.warn_on_half = True
else:
self.warn_on_half = True if dist._backend == dist.dist_backend.GLOO else False
self.module = module
for p in self.module.state_dict().values():
if not torch.is_tensor(p):
continue
dist.broadcast(p, 0)
def allreduce_params():
if(self.needs_reduction):
self.needs_reduction = False
buckets = {}
for param in self.module.parameters():
if param.requires_grad and param.grad is not None:
tp = type(param.data)
if tp not in buckets:
buckets[tp] = []
buckets[tp].append(param)
if self.warn_on_half:
if torch.cuda.HalfTensor in buckets:
print("WARNING: gloo dist backend for half parameters may be extremely slow." +
" It is recommended to use the NCCL backend in this case. This currently requires" +
"PyTorch built from top of tree master.")
self.warn_on_half = False
for tp in buckets:
bucket = buckets[tp]
grads = [param.grad.data for param in bucket]
coalesced = _flatten_dense_tensors(grads)
dist.all_reduce(coalesced)
coalesced /= dist.get_world_size()
for buf, synced in zip(grads, _unflatten_dense_tensors(coalesced, grads)):
buf.copy_(synced)
for param in list(self.module.parameters()):
def allreduce_hook(*unused):
param._execution_engine.queue_callback(allreduce_params)
if param.requires_grad:
param.register_hook(allreduce_hook)
def forward(self, *inputs, **kwargs):
self.needs_reduction = True
return self.module(*inputs, **kwargs)
'''
def _sync_buffers(self):
buffers = list(self.module._all_buffers())
if len(buffers) > 0:
# cross-node buffer sync
flat_buffers = _flatten_dense_tensors(buffers)
dist.broadcast(flat_buffers, 0)
for buf, synced in zip(buffers, _unflatten_dense_tensors(flat_buffers, buffers)):
buf.copy_(synced)
def train(self, mode=True):
# Clear NCCL communicator and CUDA event cache of the default group ID,
# These cache will be recreated at the later call. This is currently a
# work-around for a potential NCCL deadlock.
if dist._backend == dist.dist_backend.NCCL:
dist._clear_group_cache()
super(DistributedDataParallel, self).train(mode)
self.module.train(mode)
'''
|
import random
import numpy as np
import torch
import torch.utils.data
import layers
from utils import load_wav_to_torch, load_filepaths_and_text
from text import text_to_sequence
class TextMelLoader(torch.utils.data.Dataset):
"""
1) loads audio,text pairs
2) normalizes text and converts it to sequences of one-hot vectors
3) computes mel-spectrograms from audio files.
"""
def __init__(self, audiopaths_and_text, hparams, shuffle=True):
self.audiopaths_and_text = load_filepaths_and_text(
audiopaths_and_text, hparams.sort_by_length)
self.text_cleaners = hparams.text_cleaners
self.max_wav_value = hparams.max_wav_value
self.sampling_rate = hparams.sampling_rate
self.load_mel_from_disk = hparams.load_mel_from_disk
self.stft = layers.TacotronSTFT(
hparams.filter_length, hparams.hop_length, hparams.win_length,
hparams.n_mel_channels, hparams.sampling_rate, hparams.mel_fmin,
hparams.mel_fmax)
random.seed(1234)
if shuffle:
random.shuffle(self.audiopaths_and_text)
def get_mel_text_pair(self, audiopath_and_text):
# separate filename and text
audiopath, text = audiopath_and_text[0], audiopath_and_text[1]
text = self.get_text(text)
mel = self.get_mel(audiopath)
return (text, mel)
def get_mel(self, filename):
if not self.load_mel_from_disk:
audio = load_wav_to_torch(filename, self.sampling_rate)
audio_norm = audio / self.max_wav_value
audio_norm = audio_norm.unsqueeze(0)
audio_norm = torch.autograd.Variable(audio_norm, requires_grad=False)
melspec = self.stft.mel_spectrogram(audio_norm)
melspec = torch.squeeze(melspec, 0)
else:
melspec = torch.from_numpy(np.load(filename))
assert melspec.size(0) == self.stft.n_mel_channels, (
'Mel dimension mismatch: given {}, expected {}'.format(
melspec.size(0), self.stft.n_mel_channels))
return melspec
def get_text(self, text):
text_norm = torch.IntTensor(text_to_sequence(text, self.text_cleaners))
return text_norm
def __getitem__(self, index):
return self.get_mel_text_pair(self.audiopaths_and_text[index])
def __len__(self):
return len(self.audiopaths_and_text)
class TextMelCollate():
""" Zero-pads model inputs and targets based on number of frames per setep
"""
def __init__(self, n_frames_per_step):
self.n_frames_per_step = n_frames_per_step
def __call__(self, batch):
"""Collate's training batch from normalized text and mel-spectrogram
PARAMS
------
batch: [text_normalized, mel_normalized]
"""
# Right zero-pad all one-hot text sequences to max input length
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([len(x[0]) for x in batch]),
dim=0, descending=True)
max_input_len = input_lengths[0]
text_padded = torch.LongTensor(len(batch), max_input_len)
text_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
text = batch[ids_sorted_decreasing[i]][0]
text_padded[i, :text.size(0)] = text
# Right zero-pad mel-spec with extra single zero vector to mark the end
num_mels = batch[0][1].size(0)
max_target_len = max([x[1].size(1) for x in batch]) + 1
if max_target_len % self.n_frames_per_step != 0:
max_target_len += self.n_frames_per_step - max_target_len % self.n_frames_per_step
assert max_target_len % self.n_frames_per_step == 0
# include mel padded and gate padded
mel_padded = torch.FloatTensor(len(batch), num_mels, max_target_len)
mel_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
mel = batch[ids_sorted_decreasing[i]][1]
mel_padded[i, :, :mel.size(1)] = mel
gate_padded[i, mel.size(1):] = 1
output_lengths[i] = mel.size(1)
return text_padded, input_lengths, mel_padded, gate_padded, \
output_lengths
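# Hedged usage sketch (added for illustration; not part of the original file).
# It builds a tiny synthetic batch of (text, mel) pairs and runs it through
# TextMelCollate to show the length sorting and zero-padding described in the
# docstring. The symbol IDs, mel channel count, and lengths are assumptions,
# and the demo inherits this file's imports (layers, utils, text).
if __name__ == "__main__":
    collate_fn = TextMelCollate(n_frames_per_step=1)
    batch = [
        (torch.LongTensor([5, 3, 9]), torch.randn(80, 12)),
        (torch.LongTensor([7, 2, 4, 8, 1]), torch.randn(80, 20)),
    ]
    text_padded, input_lengths, mel_padded, gate_padded, output_lengths = collate_fn(batch)
    print("text:", tuple(text_padded.shape), "mel:", tuple(mel_padded.shape),
          "gate:", tuple(gate_padded.shape), "output_lengths:", output_lengths.tolist())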
|
from torch import nn
class Tacotron2Loss(nn.Module):
def __init__(self):
super(Tacotron2Loss, self).__init__()
def forward(self, model_output, targets):
mel_target, gate_target = targets[0], targets[1]
mel_target.requires_grad = False
gate_target.requires_grad = False
gate_target = gate_target.view(-1, 1)
mel_out, mel_out_postnet, gate_out, _ = model_output
gate_out = gate_out.view(-1, 1)
mel_loss = nn.MSELoss()(mel_out, mel_target) + \
nn.MSELoss()(mel_out_postnet, mel_target)
gate_loss = nn.BCEWithLogitsLoss()(gate_out, gate_target)
return mel_loss + gate_loss
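# Hedged usage sketch (added for illustration; not part of the original file).
# It evaluates Tacotron2Loss on random tensors shaped like the decoder outputs
# (mel before/after postnet, gate logits, alignments) and targets. Batch size,
# mel channel count, and frame count are arbitrary assumptions for the demo.
if __name__ == "__main__":
    import torch
    B, n_mel, T = 2, 80, 50
    mel_out = torch.randn(B, n_mel, T)
    mel_out_postnet = torch.randn(B, n_mel, T)
    gate_out = torch.randn(B, T)
    alignments = torch.randn(B, T, 30)  # unused by the loss
    targets = (torch.randn(B, n_mel, T), torch.bernoulli(torch.full((B, T), 0.1)))
    criterion = Tacotron2Loss()
    loss = criterion((mel_out, mel_out_postnet, gate_out, alignments), targets)
    print("total loss:", loss.item())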
|
import numpy as np
from scipy.io.wavfile import read
import torch
def get_mask_from_lengths(lengths):
max_len = torch.max(lengths)
ids = torch.arange(0, max_len).long().cuda()
mask = (ids < lengths.unsqueeze(1)).byte()
return mask
def load_wav_to_torch(full_path, sr):
sampling_rate, data = read(full_path)
assert sr == sampling_rate, "{} SR doesn't match {} on path {}".format(
sr, sampling_rate, full_path)
return torch.FloatTensor(data.astype(np.float32))
def load_filepaths_and_text(filename, sort_by_length, split="|"):
with open(filename, encoding='utf-8') as f:
filepaths_and_text = [line.strip().split(split) for line in f]
if sort_by_length:
filepaths_and_text.sort(key=lambda x: len(x[1]))
return filepaths_and_text
def to_gpu(x):
x = x.contiguous().cuda(non_blocking=True)  # `async` became a reserved word in Python 3.7
return torch.autograd.Variable(x)
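# Hedged usage sketch (added for illustration; not part of the original file).
# get_mask_from_lengths builds the mask on the GPU, so the demo is guarded on
# CUDA availability; the example lengths are arbitrary assumptions.
if __name__ == "__main__" and torch.cuda.is_available():
    lengths = torch.LongTensor([3, 5, 2]).cuda()
    mask = get_mask_from_lengths(lengths)
    # each row has ones for valid positions and zeros for padding
    print(mask.cpu().numpy())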
|
import os
import time
import argparse
import math
from numpy import finfo
import torch
from distributed import DistributedDataParallel
from torch.utils.data.distributed import DistributedSampler
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from fp16_optimizer import FP16_Optimizer
from model import Tacotron2
from data_utils import TextMelLoader, TextMelCollate
from loss_function import Tacotron2Loss
from logger import Tacotron2Logger
from hparams import create_hparams
def batchnorm_to_float(module):
"""Converts batch norm modules to FP32"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
module.float()
for child in module.children():
batchnorm_to_float(child)
return module
def reduce_tensor(tensor, num_gpus):
rt = tensor.clone()
torch.distributed.all_reduce(rt, op=torch.distributed.reduce_op.SUM)
rt /= num_gpus
return rt
def init_distributed(hparams, n_gpus, rank, group_name):
assert torch.cuda.is_available(), "Distributed mode requires CUDA."
print("Initializing distributed")
# Set cuda device so everything is done on the right GPU.
torch.cuda.set_device(rank % torch.cuda.device_count())
# Initialize distributed communication
torch.distributed.init_process_group(
backend=hparams.dist_backend, init_method=hparams.dist_url,
world_size=n_gpus, rank=rank, group_name=group_name)
print("Done initializing distributed")
def prepare_dataloaders(hparams):
# Get data, data loaders and collate function ready
trainset = TextMelLoader(hparams.training_files, hparams)
valset = TextMelLoader(hparams.validation_files, hparams)
collate_fn = TextMelCollate(hparams.n_frames_per_step)
train_sampler = DistributedSampler(trainset) \
if hparams.distributed_run else None
train_loader = DataLoader(trainset, num_workers=1, shuffle=False,
sampler=train_sampler,
batch_size=hparams.batch_size, pin_memory=False,
drop_last=True, collate_fn=collate_fn)
return train_loader, valset, collate_fn
def prepare_directories_and_logger(output_directory, log_directory, rank):
if rank == 0:
if not os.path.isdir(output_directory):
os.makedirs(output_directory)
os.chmod(output_directory, 0o775)
logger = Tacotron2Logger(os.path.join(output_directory, log_directory))
else:
logger = None
return logger
def load_model(hparams):
model = Tacotron2(hparams).cuda()
if hparams.fp16_run:
model = batchnorm_to_float(model.half())
model.decoder.attention_layer.score_mask_value = float(finfo('float16').min)
if hparams.distributed_run:
model = DistributedDataParallel(model)
elif torch.cuda.device_count() > 1:
model = DataParallel(model)
return model
def warm_start_model(checkpoint_path, model):
assert os.path.isfile(checkpoint_path)
print("Warm starting model from checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
return model
def load_checkpoint(checkpoint_path, model, optimizer):
assert os.path.isfile(checkpoint_path)
print("Loading checkpoint '{}'".format(checkpoint_path))
checkpoint_dict = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint_dict['state_dict'])
optimizer.load_state_dict(checkpoint_dict['optimizer'])
learning_rate = checkpoint_dict['learning_rate']
iteration = checkpoint_dict['iteration']
print("Loaded checkpoint '{}' from iteration {}" .format(
checkpoint_path, iteration))
return model, optimizer, learning_rate, iteration
def save_checkpoint(model, optimizer, learning_rate, iteration, filepath):
print("Saving model and optimizer state at iteration {} to {}".format(
iteration, filepath))
torch.save({'iteration': iteration,
'state_dict': model.state_dict(),
'optimizer': optimizer.state_dict(),
'learning_rate': learning_rate}, filepath)
def validate(model, criterion, valset, iteration, batch_size, n_gpus,
collate_fn, logger, distributed_run, rank):
"""Handles all the validation scoring and printing"""
model.eval()
with torch.no_grad():
val_sampler = DistributedSampler(valset) if distributed_run else None
val_loader = DataLoader(valset, sampler=val_sampler, num_workers=1,
shuffle=False, batch_size=batch_size,
pin_memory=False, collate_fn=collate_fn)
val_loss = 0.0
if distributed_run or torch.cuda.device_count() > 1:
batch_parser = model.module.parse_batch
else:
batch_parser = model.parse_batch
for i, batch in enumerate(val_loader):
x, y = batch_parser(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
reduced_val_loss = reduce_tensor(loss.data, n_gpus)[0] \
if distributed_run else loss.data[0]
val_loss += reduced_val_loss
val_loss = val_loss / (i + 1)
model.train()
return val_loss
def train(output_directory, log_directory, checkpoint_path, warm_start, n_gpus,
rank, group_name, hparams):
"""Training and validation logging results to tensorboard and stdout
Params
------
output_directory (string): directory to save checkpoints
log_directory (string): directory to save tensorboard logs
checkpoint_path(string): checkpoint path
n_gpus (int): number of gpus
rank (int): rank of current gpu
hparams (object): comma separated list of "name=value" pairs.
"""
if hparams.distributed_run:
init_distributed(hparams, n_gpus, rank, group_name)
torch.manual_seed(hparams.seed)
torch.cuda.manual_seed(hparams.seed)
model = load_model(hparams)
learning_rate = hparams.learning_rate
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate,
weight_decay=hparams.weight_decay)
if hparams.fp16_run:
optimizer = FP16_Optimizer(
optimizer, dynamic_loss_scale=hparams.dynamic_loss_scaling)
criterion = Tacotron2Loss()
logger = prepare_directories_and_logger(
output_directory, log_directory, rank)
train_loader, valset, collate_fn = prepare_dataloaders(hparams)
# Load checkpoint if one exists
iteration = 0
epoch_offset = 0
if checkpoint_path is not None:
if warm_start:
model = warm_start_model(checkpoint_path, model)
else:
model, optimizer, _learning_rate, iteration = load_checkpoint(
checkpoint_path, model, optimizer)
if hparams.use_saved_learning_rate:
learning_rate = _learning_rate
iteration += 1 # next iteration is iteration + 1
epoch_offset = max(0, int(iteration / len(train_loader)))
model.train()
if hparams.distributed_run or torch.cuda.device_count() > 1:
batch_parser = model.module.parse_batch
else:
batch_parser = model.parse_batch
# ================ MAIN TRAINING LOOP! ===================
for epoch in range(epoch_offset, hparams.epochs):
print("Epoch: {}".format(epoch))
for i, batch in enumerate(train_loader):
start = time.perf_counter()
for param_group in optimizer.param_groups:
param_group['lr'] = learning_rate
model.zero_grad()
x, y = batch_parser(batch)
y_pred = model(x)
loss = criterion(y_pred, y)
reduced_loss = reduce_tensor(loss.data, n_gpus)[0] \
if hparams.distributed_run else loss.data[0]
if hparams.fp16_run:
optimizer.backward(loss)
grad_norm = optimizer.clip_fp32_grads(hparams.grad_clip_thresh)
else:
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm(
model.parameters(), hparams.grad_clip_thresh)
optimizer.step()
overflow = optimizer.overflow if hparams.fp16_run else False
if not overflow and not math.isnan(reduced_loss) and rank == 0:
duration = time.perf_counter() - start
print("Train loss {} {:.6f} Grad Norm {:.6f} {:.2f}s/it".format(
iteration, reduced_loss, grad_norm, duration))
logger.log_training(
reduced_loss, grad_norm, learning_rate, duration, iteration)
if not overflow and (iteration % hparams.iters_per_checkpoint == 0):
reduced_val_loss = validate(
model, criterion, valset, iteration, hparams.batch_size,
n_gpus, collate_fn, logger, hparams.distributed_run, rank)
if rank == 0:
print("Validation loss {}: {:9f} ".format(
iteration, reduced_val_loss))
logger.log_validation(
reduced_val_loss, model, y, y_pred, iteration)
checkpoint_path = os.path.join(
output_directory, "checkpoint_{}".format(iteration))
save_checkpoint(model, optimizer, learning_rate, iteration,
checkpoint_path)
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-o', '--output_directory', type=str,
help='directory to save checkpoints')
parser.add_argument('-l', '--log_directory', type=str,
help='directory to save tensorboard logs')
parser.add_argument('-c', '--checkpoint_path', type=str, default=None,
required=False, help='checkpoint path')
parser.add_argument('--warm_start', action='store_true',
help='load the model only (warm start)')
parser.add_argument('--n_gpus', type=int, default=1,
required=False, help='number of gpus')
parser.add_argument('--rank', type=int, default=0,
required=False, help='rank of current gpu')
parser.add_argument('--group_name', type=str, default='group_name',
required=False, help='Distributed group name')
parser.add_argument('--hparams', type=str,
required=False, help='comma separated name=value pairs')
args = parser.parse_args()
hparams = create_hparams(args.hparams)
torch.backends.cudnn.enabled = hparams.cudnn_enabled
torch.backends.cudnn.benchmark = hparams.cudnn_benchmark
print("FP16 Run:", hparams.fp16_run)
print("Dynamic Loss Scaling:", hparams.dynamic_loss_scaling)
print("Distributed Run:", hparams.distributed_run)
print("cuDNN Enabled:", hparams.cudnn_enabled)
print("cuDNN Benchmark:", hparams.cudnn_benchmark)
train(args.output_directory, args.log_directory, args.checkpoint_path,
args.warm_start, args.n_gpus, args.rank, args.group_name, hparams)
|
import torch
from librosa.filters import mel as librosa_mel_fn
from audio_processing import dynamic_range_compression
from audio_processing import dynamic_range_decompression
from stft import STFT
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert(kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class TacotronSTFT(torch.nn.Module):
def __init__(self, filter_length=1024, hop_length=256, win_length=1024,
n_mel_channels=80, sampling_rate=22050, mel_fmin=0.0,
mel_fmax=None):
super(TacotronSTFT, self).__init__()
self.n_mel_channels = n_mel_channels
self.sampling_rate = sampling_rate
self.stft_fn = STFT(filter_length, hop_length, win_length)
mel_basis = librosa_mel_fn(
sampling_rate, filter_length, n_mel_channels, mel_fmin, mel_fmax)
mel_basis = torch.from_numpy(mel_basis).float()
self.register_buffer('mel_basis', mel_basis)
def spectral_normalize(self, magnitudes):
output = dynamic_range_compression(magnitudes)
return output
def spectral_de_normalize(self, magnitudes):
output = dynamic_range_decompression(magnitudes)
return output
def mel_spectrogram(self, y):
"""Computes mel-spectrograms from a batch of waves
PARAMS
------
y: Variable(torch.FloatTensor) with shape (B, T) in range [-1, 1]
RETURNS
-------
mel_output: torch.FloatTensor of shape (B, n_mel_channels, T)
"""
assert(torch.min(y.data) >= -1)
assert(torch.max(y.data) <= 1)
magnitudes, phases = self.stft_fn.transform(y)
magnitudes = magnitudes.data
mel_output = torch.matmul(self.mel_basis, magnitudes)
mel_output = self.spectral_normalize(mel_output)
return mel_output
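# Hedged usage sketch (added for illustration; not part of the original file).
# It computes a mel-spectrogram for a random waveform in [-1, 1] with the default
# TacotronSTFT settings. The signal length is an arbitrary assumption, and the
# demo inherits this file's dependency on a librosa version with the positional
# mel-filter signature used above.
if __name__ == "__main__":
    taco_stft = TacotronSTFT()
    wav = torch.rand(1, 22050) * 2 - 1  # one second of synthetic audio in [-1, 1)
    mel = taco_stft.mel_spectrogram(wav)
    print("mel spectrogram shape:", tuple(mel.shape))  # (1, n_mel_channels, frames)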
|
import time
import torch
import sys
import subprocess
argslist = list(sys.argv)[1:]
num_gpus = torch.cuda.device_count()
argslist.append('--n_gpus={}'.format(num_gpus))
workers = []
job_id = time.strftime("%Y_%m_%d-%H%M%S")
argslist.append("--group_name=group_{}".format(job_id))
for i in range(num_gpus):
argslist.append('--rank={}'.format(i))
stdout = None if i == 0 else open("logs/{}_GPU_{}.log".format(job_id, i),
"w")
print(argslist)
p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout)
workers.append(p)
argslist = argslist[:-1]
for p in workers:
p.wait()
|
import torch
class LossScaler:
def __init__(self, scale=1):
self.cur_scale = scale
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
return False
    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
        return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
pass
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
class DynamicLossScaler:
def __init__(self,
init_scale=2**32,
scale_factor=2.,
scale_window=1000):
self.cur_scale = init_scale
self.cur_iter = 0
self.last_overflow_iter = -1
self.scale_factor = scale_factor
self.scale_window = scale_window
# `params` is a list / generator of torch.Variable
def has_overflow(self, params):
# return False
for p in params:
if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data):
return True
return False
    # `x` is a torch.Tensor
    @staticmethod
    def _has_inf_or_nan(x):
cpu_sum = float(x.float().sum())
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
# `overflow` is boolean indicating whether we overflowed in gradient
def update_scale(self, overflow):
if overflow:
#self.cur_scale /= self.scale_factor
self.cur_scale = max(self.cur_scale/self.scale_factor, 1)
self.last_overflow_iter = self.cur_iter
else:
if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0:
self.cur_scale *= self.scale_factor
# self.cur_scale = 1
self.cur_iter += 1
@property
def loss_scale(self):
return self.cur_scale
def scale_gradient(self, module, grad_in, grad_out):
return tuple(self.loss_scale * g for g in grad_in)
def backward(self, loss):
scaled_loss = loss*self.loss_scale
scaled_loss.backward()
##############################################################
# Example usage below here -- assuming it's in a separate file
##############################################################
if __name__ == "__main__":
import torch
from torch.autograd import Variable
from dynamic_loss_scaler import DynamicLossScaler
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
# Create random Tensors to hold inputs and outputs, and wrap them in Variables.
x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)
parameters = [w1, w2]
learning_rate = 1e-6
optimizer = torch.optim.SGD(parameters, lr=learning_rate)
loss_scaler = DynamicLossScaler()
for t in range(500):
y_pred = x.mm(w1).clamp(min=0).mm(w2)
loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale
print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale))
        print('Iter {} scaled loss: {}'.format(t, loss.item()))
        print('Iter {} unscaled loss: {}'.format(t, loss.item() / loss_scaler.loss_scale))
# Run backprop
optimizer.zero_grad()
loss.backward()
        # Check for overflow
        has_overflow = loss_scaler.has_overflow(parameters)
# If no overflow, unscale grad and update as usual
if not has_overflow:
for param in parameters:
param.grad.data.mul_(1. / loss_scaler.loss_scale)
optimizer.step()
# Otherwise, don't do anything -- ie, skip iteration
else:
print('OVERFLOW!')
# Update loss scale for next iteration
loss_scaler.update_scale(has_overflow)
|
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
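# --- Illustrative usage sketch (not part of the original file) ---
# Assuming a local copy of the CMU pronouncing dictionary (hypothetical path below),
# lookup() returns a list of ARPAbet pronunciations, or None for unknown words.
if __name__ == '__main__':
    cmu = CMUDict('cmudict-0.7b')    # hypothetical path to a downloaded dictionary file
    print(len(cmu))                  # number of entries kept
    print(cmu.lookup('tomato'))      # e.g. ['T AH0 M EY1 T OW2', 'T AH0 M AA1 T OW2']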
|
""" from https://github.com/keithito/tacotron """
import re
from text import cleaners
from text.symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
# Append EOS token
sequence.append(_symbol_to_id['~'])
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
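# --- Illustrative example (not part of the original file) ---
# Round-trip of a sentence with an embedded ARPAbet span, run from the repo root
# so that the `text` package is importable (outputs shown roughly):
#   >>> from text import text_to_sequence, sequence_to_text
#   >>> ids = text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.', ['english_cleaners'])
#   >>> sequence_to_text(ids[:-1])   # drop the trailing '~' EOS id
#   'turn left on {HH AW1 S S T AH0 N} street.'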
|
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
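# --- Illustrative usage sketch (not part of the original file) ---
# The substitutions above run in a fixed order: commas, pounds, dollars,
# decimals, ordinals, then plain numbers. Expected outputs shown roughly.
if __name__ == '__main__':
    print(normalize_numbers('I owe you $3.50'))  # 'I owe you three dollars, fifty cents'
    print(normalize_numbers('born in 1984'))     # 'born in nineteen eighty-four'
    print(normalize_numbers('the 3rd of May'))   # 'the third of May'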
|
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been
run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
'''
from text import cmudict
_pad = '_'
_eos = '~'
_characters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!\'(),-.:;? '
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad, _eos] + list(_characters) + _arpabet
|
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
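# --- Illustrative example (not part of the original file) ---
# english_cleaners composes the steps above: transliterate to ASCII, lowercase,
# expand numbers, expand abbreviations, collapse whitespace (output shown roughly):
#   >>> english_cleaners('Dr. Müller   paid $17 on Dec. 3rd')
#   'doctor muller paid seventeen dollars on dec. third'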
|
""" from https://github.com/keithito/tacotron """
import re
valid_symbols = [
'AA', 'AA0', 'AA1', 'AA2', 'AE', 'AE0', 'AE1', 'AE2', 'AH', 'AH0', 'AH1', 'AH2',
'AO', 'AO0', 'AO1', 'AO2', 'AW', 'AW0', 'AW1', 'AW2', 'AY', 'AY0', 'AY1', 'AY2',
'B', 'CH', 'D', 'DH', 'EH', 'EH0', 'EH1', 'EH2', 'ER', 'ER0', 'ER1', 'ER2', 'EY',
'EY0', 'EY1', 'EY2', 'F', 'G', 'HH', 'IH', 'IH0', 'IH1', 'IH2', 'IY', 'IY0', 'IY1',
'IY2', 'JH', 'K', 'L', 'M', 'N', 'NG', 'OW', 'OW0', 'OW1', 'OW2', 'OY', 'OY0',
'OY1', 'OY2', 'P', 'R', 'S', 'SH', 'T', 'TH', 'UH', 'UH0', 'UH1', 'UH2', 'UW',
'UW0', 'UW1', 'UW2', 'V', 'W', 'Y', 'Z', 'ZH'
]
_valid_symbol_set = set(valid_symbols)
class CMUDict:
'''Thin wrapper around CMUDict data. http://www.speech.cs.cmu.edu/cgi-bin/cmudict'''
def __init__(self, file_or_path, keep_ambiguous=True):
if isinstance(file_or_path, str):
with open(file_or_path, encoding='latin-1') as f:
entries = _parse_cmudict(f)
else:
entries = _parse_cmudict(file_or_path)
if not keep_ambiguous:
entries = {word: pron for word, pron in entries.items() if len(pron) == 1}
self._entries = entries
def __len__(self):
return len(self._entries)
def lookup(self, word):
'''Returns list of ARPAbet pronunciations of the given word.'''
return self._entries.get(word.upper())
_alt_re = re.compile(r'\([0-9]+\)')
def _parse_cmudict(file):
cmudict = {}
for line in file:
if len(line) and (line[0] >= 'A' and line[0] <= 'Z' or line[0] == "'"):
parts = line.split(' ')
word = re.sub(_alt_re, '', parts[0])
pronunciation = _get_pronunciation(parts[1])
if pronunciation:
if word in cmudict:
cmudict[word].append(pronunciation)
else:
cmudict[word] = [pronunciation]
return cmudict
def _get_pronunciation(s):
parts = s.strip().split(' ')
for part in parts:
if part not in _valid_symbol_set:
return None
return ' '.join(parts)
|
""" from https://github.com/keithito/tacotron """
import re
from . import cleaners
from .symbols import symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
def text_to_sequence(text, cleaner_names):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
cleaner_names: names of the cleaner functions to run the text through
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(_clean_text(text, cleaner_names))
break
sequence += _symbols_to_sequence(_clean_text(m.group(1), cleaner_names))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
|
""" from https://github.com/keithito/tacotron """
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
|
""" from https://github.com/keithito/tacotron """
'''
Defines the set of symbols used in text input to the model.
The default is a set of ASCII characters that works well for English or text that has been
run through Unidecode. For other data, you can modify _characters. See TRAINING_DATA.md for details.
'''
from . import cmudict
_pad = '_'
_punctuation = '!\'(),.:;? '
_special = '-'
_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# Prepend "@" to ARPAbet symbols to ensure uniqueness (some are the same as uppercase letters):
_arpabet = ['@' + s for s in cmudict.valid_symbols]
# Export all symbols:
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters) + _arpabet
|
""" from https://github.com/keithito/tacotron """
'''
Cleaners are transformations that run over the input text at both training and eval time.
Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
hyperparameter. Some cleaners are English-specific. You'll typically want to use:
1. "english_cleaners" for English text
2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
the Unidecode library (https://pypi.python.org/pypi/Unidecode)
3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
the symbols in symbols.py to match your data).
'''
import re
from unidecode import unidecode
from .numbers import normalize_numbers
# Regular expression matching whitespace:
_whitespace_re = re.compile(r'\s+')
# List of (regular expression, replacement) pairs for abbreviations:
_abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
('mrs', 'misess'),
('mr', 'mister'),
('dr', 'doctor'),
('st', 'saint'),
('co', 'company'),
('jr', 'junior'),
('maj', 'major'),
('gen', 'general'),
('drs', 'doctors'),
('rev', 'reverend'),
('lt', 'lieutenant'),
('hon', 'honorable'),
('sgt', 'sergeant'),
('capt', 'captain'),
('esq', 'esquire'),
('ltd', 'limited'),
('col', 'colonel'),
('ft', 'fort'),
]]
def expand_abbreviations(text):
for regex, replacement in _abbreviations:
text = re.sub(regex, replacement, text)
return text
def expand_numbers(text):
return normalize_numbers(text)
def lowercase(text):
return text.lower()
def collapse_whitespace(text):
return re.sub(_whitespace_re, ' ', text)
def convert_to_ascii(text):
return unidecode(text)
def basic_cleaners(text):
'''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
text = lowercase(text)
text = collapse_whitespace(text)
return text
def transliteration_cleaners(text):
'''Pipeline for non-English text that transliterates to ASCII.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = collapse_whitespace(text)
return text
def english_cleaners(text):
'''Pipeline for English text, including number and abbreviation expansion.'''
text = convert_to_ascii(text)
text = lowercase(text)
text = expand_numbers(text)
text = expand_abbreviations(text)
text = collapse_whitespace(text)
return text
|
import os
import json
import torch
import kaldi_io
import dataclasses
from .speech_transformer.transformer.decoder import Decoder
from .speech_transformer.transformer.encoder import Encoder
from .speech_transformer.transformer import Transformer
from .speech_transformer.transformer.optimizer import TransformerOptimizer
from .speech_transformer.transformer.loss import cal_performance
from .speech_transformer.utils import add_results_to_json, process_dict, IGNORE_ID
from .speech_transformer.data import build_LFR_features
from .speech_transformer.data import AudioDataLoader, AudioDataset
from torchbenchmark import DATA_PATH
@dataclasses.dataclass
class SpeechTransformerTrainConfig:
# Low Frame Rate
LFR_m = 4
LFR_n = 3
# Network Architecture - Encoder
d_input = 80
n_layers_enc = 6
n_head = 8
d_k = 64
d_v = 64
d_model = 512
d_inner = 2048
dropout = 0.1
pe_maxlen = 5000
d_word_vec = 512
n_layers_dec = 6
tgt_emb_prj_weight_sharing = 1
label_smoothing = 0.1
# minibatch
shuffle = 1
batch_frames = 15000
maxlen_in = 800
maxlen_out = 150
# don't use subprocess in dataloader
# because TorchBench is only running 1 batch
num_workers = 0
# original value
# num_workers = 4
# optimizer
k = 0.2
warmup_steps = 1
# solver configs
epochs = 5
save_folder = "output_data"
checkpoint = False
continue_from = False
model_path = 'final.pth.tar'
print_freq = 10
visdom = 0
visdom_lr = 0
visdom_epoch = 0
visdom_id = 0
cross_valid = False
# The input files. Their paths are relative to the directory of __file__
train_json = "input_data/train/data.json"
valid_json = "input_data/dev/data.json"
dict_txt = "input_data/lang_1char/train_chars.txt"
def __init__(self, prefetch=True, train_bs=32, num_train_batch=1, device='cuda'):
dir_path = os.path.join(DATA_PATH, "speech_transformer_inputs")
self.device = device
self.train_json = os.path.join(dir_path, self.train_json)
self.valid_json = os.path.join(dir_path, self.valid_json)
self.dict_txt = os.path.join(dir_path, self.dict_txt)
self.char_list, self.sos_id, self.eos_id = process_dict(self.dict_txt)
self.vocab_size = len(self.char_list)
self.tr_dataset = AudioDataset(self.train_json, train_bs,
self.maxlen_in, self.maxlen_out,
batch_frames=self.batch_frames)
self.cv_dataset = AudioDataset(self.valid_json, train_bs,
self.maxlen_in, self.maxlen_out,
batch_frames=self.batch_frames)
self.tr_loader = AudioDataLoader(self.tr_dataset, batch_size=train_bs,
num_workers=self.num_workers,
shuffle=self.shuffle,
LFR_m=self.LFR_m,
LFR_n=self.LFR_n)
self.cv_loader = AudioDataLoader(self.cv_dataset, batch_size=train_bs,
num_workers=self.num_workers,
LFR_m=self.LFR_m,
LFR_n=self.LFR_n)
self.data = {'tr_loader': self.tr_loader, 'cv_loader': self.cv_loader}
self.encoder = Encoder(self.d_input * self.LFR_m,
self.n_layers_enc,
self.n_head,
self.d_k, self.d_v,
self.d_model, self.d_inner,
dropout=self.dropout, pe_maxlen=self.pe_maxlen)
self.decoder = Decoder(self.sos_id, self.eos_id, self.vocab_size,
self.d_word_vec, self.n_layers_dec, self.n_head,
self.d_k, self.d_v, self.d_model, self.d_inner,
dropout=self.dropout,
tgt_emb_prj_weight_sharing=self.tgt_emb_prj_weight_sharing,
pe_maxlen=self.pe_maxlen)
self.tr_loss = torch.Tensor(self.epochs)
self.cv_loss = torch.Tensor(self.epochs)
self.model = Transformer(self.encoder, self.decoder)
self.optimizer = TransformerOptimizer(torch.optim.Adam(self.model.parameters(), betas=(0.9, 0.98), eps=1e-09),
self.k, self.d_model, self.warmup_steps)
self._reset()
self.data_loader = self.tr_loader if not SpeechTransformerTrainConfig.cross_valid else self.cv_loader
if prefetch:
result = []
for _batch_num, data in zip(range(num_train_batch), self.data_loader):
padded_input, input_lengths, padded_target = data
padded_input = padded_input.to(self.device)
input_lengths = input_lengths.to(self.device)
padded_target = padded_target.to(self.device)
result.append((padded_input, input_lengths, padded_target))
self.data_loader = result
def _reset(self):
self.prev_val_loss = float("inf")
self.best_val_loss = float("inf")
self.halving = False
def _run_one_epoch(self, cross_valid=False):
total_loss = 0
data_loader = self.data_loader
for i, (data) in enumerate(data_loader):
padded_input, input_lengths, padded_target = data
padded_input = padded_input.to(self.device)
input_lengths = input_lengths.to(self.device)
padded_target = padded_target.to(self.device)
pred, gold = self.model(padded_input, input_lengths, padded_target)
loss, n_correct = cal_performance(pred, gold,
smoothing=self.label_smoothing)
if not cross_valid:
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
total_loss += loss.item()
non_pad_mask = gold.ne(IGNORE_ID)
n_word = non_pad_mask.sum().item()
return total_loss / (i + 1)
def train(self, epoch = 1):
self.model.train()
tr_avg_loss = self._run_one_epoch()
# Cross validation
self.model.eval()
val_loss = self._run_one_epoch(cross_valid=SpeechTransformerTrainConfig.cross_valid)
self.tr_loss[epoch] = tr_avg_loss
self.cv_loss[epoch] = val_loss
if val_loss < self.best_val_loss:
self.best_val_loss = val_loss
# speech transformer has a TransformerOptimizer wrapping an inner Adam optimizer. This returns the TransformerOptimizer.
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
# Takes in an inner optimizer and wraps it in a TransformerOptimizer
def set_inner_optimizer(self, optimizer) -> None:
self.optimizer = TransformerOptimizer(optimizer, self.k, self.d_model, self.warmup_steps)
@dataclasses.dataclass
class SpeechTransformerEvalConfig:
beam_size = 5
nbest = 1
decode_max_len = 100
recog_word = 1
# The input files. Their paths are relative to the directory of __file__
recog_json = "input_data/test/data.json"
dict_txt = "input_data/lang_1char/train_chars.txt"
def __init__(self, traincfg, num_eval_batch=1, device='cuda'):
dir_path = os.path.join(DATA_PATH, "speech_transformer_inputs")
self.device = device
self.base_path = dir_path
self.recog_json = os.path.join(dir_path, self.recog_json)
self.dict_txt = os.path.join(dir_path, self.dict_txt)
# Construct the model
self.model, self.LFR_m, self.LFR_n = Transformer(traincfg.encoder, traincfg.decoder), traincfg.LFR_m, traincfg.LFR_n
self.char_list, self.sos_id, self.eos_id = process_dict(self.dict_txt)
assert self.model.decoder.sos_id == self.sos_id and self.model.decoder.eos_id == self.eos_id
# Read json data
with open(self.recog_json, "rb") as f:
self.js = json.load(f)['utts']
self.example_inputs = []
for idx, name in enumerate(list(self.js.keys())[:self.recog_word], 1):
feat_path = os.path.join(self.base_path, self.js[name]['input'][0]['feat'])
input = kaldi_io.read_mat(feat_path)
input = build_LFR_features(input, self.LFR_m, self.LFR_n)
input = torch.from_numpy(input).float()
input_length = torch.tensor([input.size(0)], dtype=torch.int)
input = input.to(self.device)
input_length = input_length.to(self.device)
self.example_inputs.append((input, input_length))
if len(self.example_inputs) == num_eval_batch:
break
def eval(self):
with torch.no_grad():
for input, input_length in self.example_inputs:
nbest_hyps = self.model.recognize(input, input_length, self.char_list, self)
return nbest_hyps
|
#!/usr/bin/env python
#
# The SpeechTransformer model copied from https://github.com/kaituoxu/Speech-Transformer, commit e684777.
# The model only supports CUDA and eager mode.
# The input data files in the input_data/ directory are generated with a minimized aishell data
# containing the following files in the original dataset:
# S0002.tar.gz, S0757.tar.gz, S0915.tar.gz
#
import os
import itertools
import torch
# set KALDI_ROOT to avoid spam message
os.environ["KALDI_ROOT"] = "/tmp"
from .config import SpeechTransformerTrainConfig, SpeechTransformerEvalConfig
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import SPEECH
from typing import Tuple
NUM_TRAIN_BATCH = 1
NUM_EVAL_BATCH = 1
class Model(BenchmarkModel):
task = SPEECH.RECOGNITION
# Original batch size: 32
# Source: https://github.com/kaituoxu/Speech-Transformer/blob/e6847772d6a786336e117a03c48c62ecbf3016f6/src/bin/train.py#L68
# This model does not support batch size customization
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.traincfg = SpeechTransformerTrainConfig(prefetch=True, train_bs=self.batch_size, num_train_batch=NUM_TRAIN_BATCH, device=self.device)
if test == "train":
self.traincfg.model.to(self.device)
self.traincfg.model.train()
elif test == "eval":
self.evalcfg = SpeechTransformerEvalConfig(self.traincfg, num_eval_batch=NUM_EVAL_BATCH, device=self.device)
self.evalcfg.model.to(self.device)
self.evalcfg.model.eval()
def get_module(self):
for data in self.traincfg.tr_loader:
padded_input, input_lengths, padded_target = data
if self.test == "train":
return self.traincfg.model, (padded_input.to(self.device), input_lengths.to(self.device), padded_target.to(self.device))
elif self.test == "eval":
return self.evalcfg.model, (padded_input.to(self.device), input_lengths.to(self.device), padded_target.to(self.device))
def set_module(self, new_model):
if self.test == "train":
self.traincfg.model = new_model
elif self.test == "eval":
self.evalcfg.model = new_model
def train(self):
self.traincfg.train(epoch=1)
def eval(self) -> Tuple[torch.Tensor]:
out = self.evalcfg.eval()
# only the first element of model output is a tensor
out = tuple(itertools.chain(*list(map(lambda x: x.values(), out))))
return (out[0], )
def get_optimizer(self):
return self.traincfg.get_optimizer()
def set_optimizer(self, optimizer) -> None:
return self.traincfg.set_optimizer(optimizer)
    def set_raw_optimizer(self, optimizer) -> None:
        return self.traincfg.set_inner_optimizer(optimizer)
|
import sys
import subprocess
from utils import s3_utils
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
s3_utils.checkout_s3_data("INPUT_TARBALLS", "speech_transformer_inputs.tar.gz", decompress=True)
pip_install_requirements()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .attention import MultiHeadAttention
from .module import PositionalEncoding, PositionwiseFeedForward
from ..utils import (IGNORE_ID, get_attn_key_pad_mask, get_attn_pad_mask,
get_non_pad_mask, get_subsequent_mask, pad_list)
class Decoder(nn.Module):
''' A decoder model with self attention mechanism. '''
def __init__(
self, sos_id, eos_id,
n_tgt_vocab, d_word_vec,
n_layers, n_head, d_k, d_v,
d_model, d_inner, dropout=0.1,
tgt_emb_prj_weight_sharing=True,
pe_maxlen=5000):
super(Decoder, self).__init__()
# parameters
self.sos_id = sos_id # Start of Sentence
self.eos_id = eos_id # End of Sentence
self.n_tgt_vocab = n_tgt_vocab
self.d_word_vec = d_word_vec
self.n_layers = n_layers
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.d_model = d_model
self.d_inner = d_inner
self.dropout = dropout
self.tgt_emb_prj_weight_sharing = tgt_emb_prj_weight_sharing
self.pe_maxlen = pe_maxlen
self.tgt_word_emb = nn.Embedding(n_tgt_vocab, d_word_vec)
self.positional_encoding = PositionalEncoding(d_model, max_len=pe_maxlen)
self.dropout = nn.Dropout(dropout)
self.layer_stack = nn.ModuleList([
DecoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
self.tgt_word_prj = nn.Linear(d_model, n_tgt_vocab, bias=False)
nn.init.xavier_normal_(self.tgt_word_prj.weight)
if tgt_emb_prj_weight_sharing:
# Share the weight matrix between target word embedding & the final logit dense layer
self.tgt_word_prj.weight = self.tgt_word_emb.weight
self.x_logit_scale = (d_model ** 0.5)
else:
self.x_logit_scale = 1.
def preprocess(self, padded_input):
"""Generate decoder input and output label from padded_input
Add <sos> to decoder input, and add <eos> to decoder output label
"""
ys = [y[y != IGNORE_ID] for y in padded_input] # parse padded ys
# prepare input and output word sequences with sos/eos IDs
eos = ys[0].new([self.eos_id])
sos = ys[0].new([self.sos_id])
ys_in = [torch.cat([sos, y], dim=0) for y in ys]
ys_out = [torch.cat([y, eos], dim=0) for y in ys]
# padding for ys with -1
# pys: utt x olen
ys_in_pad = pad_list(ys_in, self.eos_id)
ys_out_pad = pad_list(ys_out, IGNORE_ID)
assert ys_in_pad.size() == ys_out_pad.size()
return ys_in_pad, ys_out_pad
def forward(self, padded_input, encoder_padded_outputs,
encoder_input_lengths, return_attns=False):
"""
Args:
padded_input: N x To
encoder_padded_outputs: N x Ti x H
Returns:
"""
dec_slf_attn_list, dec_enc_attn_list = [], []
        # Get Decoder Input and Output
ys_in_pad, ys_out_pad = self.preprocess(padded_input)
# Prepare masks
non_pad_mask = get_non_pad_mask(ys_in_pad, pad_idx=self.eos_id)
slf_attn_mask_subseq = get_subsequent_mask(ys_in_pad)
slf_attn_mask_keypad = get_attn_key_pad_mask(seq_k=ys_in_pad,
seq_q=ys_in_pad,
pad_idx=self.eos_id)
slf_attn_mask = (slf_attn_mask_keypad + slf_attn_mask_subseq).gt(0)
output_length = ys_in_pad.size(1)
dec_enc_attn_mask = get_attn_pad_mask(encoder_padded_outputs,
encoder_input_lengths,
output_length)
# Forward
dec_output = self.dropout(self.tgt_word_emb(ys_in_pad) * self.x_logit_scale +
self.positional_encoding(ys_in_pad))
for dec_layer in self.layer_stack:
dec_output, dec_slf_attn, dec_enc_attn = dec_layer(
dec_output, encoder_padded_outputs,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask,
dec_enc_attn_mask=dec_enc_attn_mask)
if return_attns:
dec_slf_attn_list += [dec_slf_attn]
dec_enc_attn_list += [dec_enc_attn]
# before softmax
seq_logit = self.tgt_word_prj(dec_output)
# Return
pred, gold = seq_logit, ys_out_pad
if return_attns:
return pred, gold, dec_slf_attn_list, dec_enc_attn_list
return pred, gold
def recognize_beam(self, encoder_outputs, char_list, args):
"""Beam search, decode one utterence now.
Args:
encoder_outputs: T x H
char_list: list of character
args: args.beam
Returns:
nbest_hyps:
"""
# search params
beam = args.beam_size
nbest = args.nbest
if args.decode_max_len == 0:
maxlen = encoder_outputs.size(0)
else:
maxlen = args.decode_max_len
encoder_outputs = encoder_outputs.unsqueeze(0)
# prepare sos
ys = torch.ones(1, 1).fill_(self.sos_id).type_as(encoder_outputs).long()
# yseq: 1xT
hyp = {'score': 0.0, 'yseq': ys}
hyps = [hyp]
ended_hyps = []
for i in range(maxlen):
hyps_best_kept = []
for hyp in hyps:
ys = hyp['yseq'] # 1 x i
# -- Prepare masks
non_pad_mask = torch.ones_like(ys).float().unsqueeze(-1) # 1xix1
slf_attn_mask = get_subsequent_mask(ys)
# -- Forward
dec_output = self.dropout(
self.tgt_word_emb(ys) * self.x_logit_scale +
self.positional_encoding(ys))
for dec_layer in self.layer_stack:
dec_output, _, _ = dec_layer(
dec_output, encoder_outputs,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask,
dec_enc_attn_mask=None)
seq_logit = self.tgt_word_prj(dec_output[:, -1])
local_scores = F.log_softmax(seq_logit, dim=1)
# topk scores
local_best_scores, local_best_ids = torch.topk(
local_scores, beam, dim=1)
for j in range(beam):
new_hyp = {}
new_hyp['score'] = hyp['score'] + local_best_scores[0, j]
new_hyp['yseq'] = torch.ones(1, (1+ys.size(1))).type_as(encoder_outputs).long()
new_hyp['yseq'][:, :ys.size(1)] = hyp['yseq']
new_hyp['yseq'][:, ys.size(1)] = int(local_best_ids[0, j])
# will be (2 x beam) hyps at most
hyps_best_kept.append(new_hyp)
hyps_best_kept = sorted(hyps_best_kept,
key=lambda x: x['score'],
reverse=True)[:beam]
# end for hyp in hyps
hyps = hyps_best_kept
            # add eos in the final loop so that at least one hypothesis ends
if i == maxlen - 1:
for hyp in hyps:
hyp['yseq'] = torch.cat([hyp['yseq'],
torch.ones(1, 1).fill_(self.eos_id).type_as(encoder_outputs).long()], dim=1)
            # add ended hypotheses to the final list, and remove them from the current hypotheses
            # (this can be a problem: the number of hyps may drop below beam)
remained_hyps = []
for hyp in hyps:
if hyp['yseq'][0, -1] == self.eos_id:
ended_hyps.append(hyp)
else:
remained_hyps.append(hyp)
hyps = remained_hyps
if len(hyps) > 0:
                # print('remaining hypotheses: ' + str(len(hyps)))
pass
else:
print('no hypothesis. Finish decoding.')
break
# for hyp in hyps:
# print('hypo: ' + ''.join([char_list[int(x)]
# for x in hyp['yseq'][0, 1:]]))
# end for i in range(maxlen)
nbest_hyps = sorted(ended_hyps, key=lambda x: x['score'], reverse=True)[
:min(len(ended_hyps), nbest)]
        # compatible with the LAS implementation
for hyp in nbest_hyps:
hyp['yseq'] = hyp['yseq'][0].cpu().numpy().tolist()
return nbest_hyps
class DecoderLayer(nn.Module):
''' Compose with three layers '''
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(DecoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.enc_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)
def forward(self, dec_input, enc_output, non_pad_mask=None, slf_attn_mask=None, dec_enc_attn_mask=None):
dec_output, dec_slf_attn = self.slf_attn(
dec_input, dec_input, dec_input, mask=slf_attn_mask)
dec_output *= non_pad_mask
dec_output, dec_enc_attn = self.enc_attn(
dec_output, enc_output, enc_output, mask=dec_enc_attn_mask)
dec_output *= non_pad_mask
dec_output = self.pos_ffn(dec_output)
dec_output *= non_pad_mask
return dec_output, dec_slf_attn, dec_enc_attn
|
import numpy as np
import torch
import torch.nn as nn
class MultiHeadAttention(nn.Module):
''' Multi-Head Attention module '''
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super().__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = nn.Linear(d_model, n_head * d_k)
self.w_ks = nn.Linear(d_model, n_head * d_k)
self.w_vs = nn.Linear(d_model, n_head * d_v)
nn.init.normal_(self.w_qs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_ks.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_k)))
nn.init.normal_(self.w_vs.weight, mean=0, std=np.sqrt(2.0 / (d_model + d_v)))
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5),
attn_dropout=dropout)
self.layer_norm = nn.LayerNorm(d_model)
self.fc = nn.Linear(n_head * d_v, d_model)
nn.init.xavier_normal_(self.fc.weight)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
if mask is not None:
mask = mask.repeat(n_head, 1, 1) # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
class ScaledDotProductAttention(nn.Module):
''' Scaled Dot-Product Attention '''
def __init__(self, temperature, attn_dropout=0.1):
super().__init__()
self.temperature = temperature
self.dropout = nn.Dropout(attn_dropout)
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask.bool(), -np.inf)
attn = self.softmax(attn)
attn = self.dropout(attn)
output = torch.bmm(attn, v)
return output, attn
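# --- Illustrative shape check (not part of the original file) ---
# Self-attention keeps the model dimension: (B, L, d_model) -> (B, L, d_model);
# the returned attention weights have shape (n_head * B, L_q, L_k).
if __name__ == '__main__':
    mha = MultiHeadAttention(n_head=8, d_model=512, d_k=64, d_v=64)
    x = torch.randn(2, 10, 512)
    out, attn = mha(x, x, x)
    print(out.shape, attn.shape)  # torch.Size([2, 10, 512]) torch.Size([16, 10, 10])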
|
from .transformer import *
|
import torch.nn as nn
from .attention import MultiHeadAttention
from .module import PositionalEncoding, PositionwiseFeedForward
from ..utils import get_non_pad_mask, get_attn_pad_mask
class Encoder(nn.Module):
"""Encoder of Transformer including self-attention and feed forward.
"""
def __init__(self, d_input, n_layers, n_head, d_k, d_v,
d_model, d_inner, dropout=0.1, pe_maxlen=5000):
super(Encoder, self).__init__()
# parameters
self.d_input = d_input
self.n_layers = n_layers
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.d_model = d_model
self.d_inner = d_inner
self.dropout_rate = dropout
self.pe_maxlen = pe_maxlen
# use linear transformation with layer norm to replace input embedding
self.linear_in = nn.Linear(d_input, d_model)
self.layer_norm_in = nn.LayerNorm(d_model)
self.positional_encoding = PositionalEncoding(d_model, max_len=pe_maxlen)
self.dropout = nn.Dropout(dropout)
self.layer_stack = nn.ModuleList([
EncoderLayer(d_model, d_inner, n_head, d_k, d_v, dropout=dropout)
for _ in range(n_layers)])
def forward(self, padded_input, input_lengths, return_attns=False):
"""
Args:
padded_input: N x T x D
input_lengths: N
Returns:
enc_output: N x T x H
"""
enc_slf_attn_list = []
# Prepare masks
non_pad_mask = get_non_pad_mask(padded_input, input_lengths=input_lengths)
length = padded_input.size(1)
slf_attn_mask = get_attn_pad_mask(padded_input, input_lengths, length)
# Forward
enc_output = self.dropout(
self.layer_norm_in(self.linear_in(padded_input)) +
self.positional_encoding(padded_input))
for enc_layer in self.layer_stack:
enc_output, enc_slf_attn = enc_layer(
enc_output,
non_pad_mask=non_pad_mask,
slf_attn_mask=slf_attn_mask)
if return_attns:
enc_slf_attn_list += [enc_slf_attn]
if return_attns:
return enc_output, enc_slf_attn_list
return enc_output,
class EncoderLayer(nn.Module):
"""Compose with two sub-layers.
1. A multi-head self-attention mechanism
2. A simple, position-wise fully connected feed-forward network.
"""
def __init__(self, d_model, d_inner, n_head, d_k, d_v, dropout=0.1):
super(EncoderLayer, self).__init__()
self.slf_attn = MultiHeadAttention(
n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, dropout=dropout)
def forward(self, enc_input, non_pad_mask=None, slf_attn_mask=None):
enc_output, enc_slf_attn = self.slf_attn(
enc_input, enc_input, enc_input, mask=slf_attn_mask)
enc_output *= non_pad_mask
enc_output = self.pos_ffn(enc_output)
enc_output *= non_pad_mask
return enc_output, enc_slf_attn
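# --- Illustrative shape check (not part of the original file) ---
# Used from within the package, the encoder maps (N, T, d_input) padded features
# plus per-utterance lengths to (N, T, d_model) outputs, e.g. (roughly):
#   >>> enc = Encoder(d_input=320, n_layers=2, n_head=4, d_k=64, d_v=64, d_model=256, d_inner=1024)
#   >>> out, = enc(torch.randn(2, 50, 320), torch.tensor([50, 42]))
#   >>> out.shape
#   torch.Size([2, 50, 256])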
|
import torch
import torch.nn.functional as F
from ..utils import IGNORE_ID
def cal_performance(pred, gold, smoothing=0.0):
"""Calculate cross entropy loss, apply label smoothing if needed.
Args:
pred: N x T x C, score before softmax
gold: N x T
"""
pred = pred.view(-1, pred.size(2))
gold = gold.contiguous().view(-1)
loss = cal_loss(pred, gold, smoothing)
pred = pred.max(1)[1]
non_pad_mask = gold.ne(IGNORE_ID)
n_correct = pred.eq(gold)
n_correct = n_correct.masked_select(non_pad_mask).sum().item()
return loss, n_correct
def cal_loss(pred, gold, smoothing=0.0):
"""Calculate cross entropy loss, apply label smoothing if needed.
"""
if smoothing > 0.0:
eps = smoothing
n_class = pred.size(1)
# Generate one-hot matrix: N x C.
# Only label position is 1 and all other positions are 0
# gold include -1 value (IGNORE_ID) and this will lead to assert error
gold_for_scatter = gold.ne(IGNORE_ID).long() * gold
one_hot = torch.zeros_like(pred).scatter(1, gold_for_scatter.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / n_class
log_prb = F.log_softmax(pred, dim=1)
non_pad_mask = gold.ne(IGNORE_ID)
n_word = non_pad_mask.sum().item()
loss = -(one_hot * log_prb).sum(dim=1)
loss = loss.masked_select(non_pad_mask).sum() / n_word
else:
        loss = F.cross_entropy(pred, gold,
                               ignore_index=IGNORE_ID,
                               reduction='mean')
return loss
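# --- Illustrative example (not part of the original file) ---
# Used from within the package (IGNORE_ID == -1), cal_performance flattens the
# (N, T, C) scores and (N, T) targets, skips IGNORE_ID positions, and returns
# the loss plus the number of correctly predicted non-pad tokens, e.g.:
#   >>> pred = torch.randn(2, 5, 30)
#   >>> gold = torch.randint(0, 30, (2, 5)); gold[1, 3:] = IGNORE_ID
#   >>> loss, n_correct = cal_performance(pred, gold, smoothing=0.1)
#   >>> loss.dim(), 0 <= n_correct <= 8   # scalar loss; 8 non-pad tokens in total
#   (0, True)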
|
import torch
import torch.nn as nn
from .decoder import Decoder
from .encoder import Encoder
class Transformer(nn.Module):
"""An encoder-decoder framework only includes attention.
"""
def __init__(self, encoder, decoder):
super(Transformer, self).__init__()
self.encoder = encoder
self.decoder = decoder
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, padded_input, input_lengths, padded_target):
"""
Args:
padded_input: N x Ti x D
input_lengths: N
padded_targets: N x To
"""
encoder_padded_outputs, *_ = self.encoder(padded_input, input_lengths)
# pred is score before softmax
pred, gold, *_ = self.decoder(padded_target, encoder_padded_outputs,
input_lengths)
return pred, gold
def recognize(self, input, input_length, char_list, args):
"""Sequence-to-Sequence beam search, decode one utterence now.
Args:
input: T x D
char_list: list of characters
args: args.beam
Returns:
nbest_hyps:
"""
encoder_outputs, *_ = self.encoder(input.unsqueeze(0), input_length)
nbest_hyps = self.decoder.recognize_beam(encoder_outputs[0],
char_list,
args)
return nbest_hyps
@classmethod
def load_model(cls, path):
# Load to CPU
package = torch.load(path, map_location=lambda storage, loc: storage)
model, LFR_m, LFR_n = cls.load_model_from_package(package)
return model, LFR_m, LFR_n
@classmethod
def load_model_from_package(cls, package):
encoder = Encoder(package['d_input'],
package['n_layers_enc'],
package['n_head'],
package['d_k'],
package['d_v'],
package['d_model'],
package['d_inner'],
dropout=package['dropout'],
pe_maxlen=package['pe_maxlen'])
decoder = Decoder(package['sos_id'],
package['eos_id'],
package['vocab_size'],
package['d_word_vec'],
package['n_layers_dec'],
package['n_head'],
package['d_k'],
package['d_v'],
package['d_model'],
package['d_inner'],
dropout=package['dropout'],
tgt_emb_prj_weight_sharing=package['tgt_emb_prj_weight_sharing'],
pe_maxlen=package['pe_maxlen'],
)
model = cls(encoder, decoder)
model.load_state_dict(package['state_dict'])
LFR_m, LFR_n = package['LFR_m'], package['LFR_n']
return model, LFR_m, LFR_n
@staticmethod
def serialize(model, optimizer, epoch, LFR_m, LFR_n, tr_loss=None, cv_loss=None):
package = {
# Low Frame Rate Feature
'LFR_m': LFR_m,
'LFR_n': LFR_n,
# encoder
'd_input': model.encoder.d_input,
'n_layers_enc': model.encoder.n_layers,
'n_head': model.encoder.n_head,
'd_k': model.encoder.d_k,
'd_v': model.encoder.d_v,
'd_model': model.encoder.d_model,
'd_inner': model.encoder.d_inner,
'dropout': model.encoder.dropout_rate,
'pe_maxlen': model.encoder.pe_maxlen,
# decoder
'sos_id': model.decoder.sos_id,
'eos_id': model.decoder.eos_id,
'vocab_size': model.decoder.n_tgt_vocab,
'd_word_vec': model.decoder.d_word_vec,
'n_layers_dec': model.decoder.n_layers,
'tgt_emb_prj_weight_sharing': model.decoder.tgt_emb_prj_weight_sharing,
# state
'state_dict': model.state_dict(),
'optim_dict': optimizer.state_dict(),
'epoch': epoch
}
if tr_loss is not None:
package['tr_loss'] = tr_loss
package['cv_loss'] = cv_loss
return package
|
"""A wrapper class for optimizer"""
import torch
class TransformerOptimizer:
"""A simple wrapper class for learning rate scheduling"""
def __init__(self, optimizer, k, d_model, warmup_steps=4000):
self.optimizer = optimizer
self.k = k
self.init_lr = d_model ** (-0.5)
self.warmup_steps = warmup_steps
self.step_num = 0
self.visdom_lr = None
def zero_grad(self):
self.optimizer.zero_grad()
def step(self):
self._update_lr()
self._visdom()
self.optimizer.step()
def _update_lr(self):
self.step_num += 1
lr = self.k * self.init_lr * min(self.step_num ** (-0.5),
self.step_num * (self.warmup_steps ** (-1.5)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
def load_state_dict(self, state_dict):
self.optimizer.load_state_dict(state_dict)
def state_dict(self):
return self.optimizer.state_dict()
def set_k(self, k):
self.k = k
def set_visdom(self, visdom_lr, vis):
        self.visdom_lr = visdom_lr  # Turn on/off visdom of learning rate
        self.vis = vis  # visdom environment
        self.vis_opts = dict(title='Learning Rate',
                             ylabel='Learning Rate', xlabel='step')
self.vis_window = None
self.x_axis = torch.LongTensor()
self.y_axis = torch.FloatTensor()
def _visdom(self):
if self.visdom_lr is not None:
self.x_axis = torch.cat(
[self.x_axis, torch.LongTensor([self.step_num])])
self.y_axis = torch.cat(
[self.y_axis, torch.FloatTensor([self.optimizer.param_groups[0]['lr']])])
if self.vis_window is None:
self.vis_window = self.vis.line(X=self.x_axis, Y=self.y_axis,
opts=self.vis_opts)
else:
self.vis.line(X=self.x_axis, Y=self.y_axis, win=self.vis_window,
update='replace')
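# --- Illustrative usage sketch (not part of the original file) ---
# The learning rate follows the "Attention Is All You Need" warmup schedule,
# scaled by k:  lr = k * d_model^(-0.5) * min(step^(-0.5), step * warmup_steps^(-1.5))
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Linear(8, 8)
    opt = TransformerOptimizer(torch.optim.Adam(model.parameters(), betas=(0.9, 0.98), eps=1e-09),
                               k=0.2, d_model=512, warmup_steps=4000)
    for _ in range(5):
        model(torch.randn(4, 8)).sum().backward()
        opt.step()
        opt.zero_grad()
    print(opt.optimizer.param_groups[0]['lr'])  # still tiny this early in warmup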
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class PositionalEncoding(nn.Module):
"""Implement the positional encoding (PE) function.
PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))
PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))
"""
def __init__(self, d_model, max_len=5000):
super(PositionalEncoding, self).__init__()
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model, requires_grad=False)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, input):
"""
Args:
input: N x T x D
"""
length = input.size(1)
return self.pe[:, :length]
class PositionwiseFeedForward(nn.Module):
"""Implements position-wise feedforward sublayer.
FFN(x) = max(0, xW1 + b1)W2 + b2
"""
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, x):
residual = x
output = self.w_2(F.relu(self.w_1(x)))
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
# Another implementation
class PositionwiseFeedForwardUseConv(nn.Module):
"""A two-feed-forward-layer module"""
def __init__(self, d_in, d_hid, dropout=0.1):
super(PositionwiseFeedForwardUseConv, self).__init__()
self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise
self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
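# --- Illustrative shape check (not part of the original file) ---
# PositionalEncoding returns the first T rows of its precomputed table (broadcastable
# over the batch); both feed-forward variants preserve the (B, T, d_model) shape.
if __name__ == '__main__':
    x = torch.randn(2, 40, 256)
    pe = PositionalEncoding(d_model=256)
    print(pe(x).shape)                                     # torch.Size([1, 40, 256])
    ffn = PositionwiseFeedForward(d_model=256, d_ff=1024)
    print(ffn(x).shape)                                    # torch.Size([2, 40, 256])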
|
#!/usr/bin/env python
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import json
import argparse
import logging
from utils import process_dict
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('json', type=str, help='json files')
parser.add_argument('dict', type=str, help='dict')
parser.add_argument('ref', type=str, help='ref')
parser.add_argument('hyp', type=str, help='hyp')
args = parser.parse_args()
# logging info
logging.basicConfig(
level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
logging.info("reading %s", args.json)
with open(args.json, 'r') as f:
j = json.load(f)
logging.info("reading %s", args.dict)
char_list, sos_id, eos_id = process_dict(args.dict)
# with open(args.dict, 'r') as f:
# dictionary = f.readlines()
# char_list = [unicode(entry.split(' ')[0], 'utf_8') for entry in dictionary]
# char_list.insert(0, '<blank>')
# char_list.append('<eos>')
# print([x.encode('utf-8') for x in char_list])
logging.info("writing hyp trn to %s", args.hyp)
logging.info("writing ref trn to %s", args.ref)
h = open(args.hyp, 'w')
r = open(args.ref, 'w')
for x in j['utts']:
seq = [char_list[int(i)] for i in j['utts'][x]
['output'][0]['rec_tokenid'].split()]
h.write(" ".join(seq).replace('<eos>', '')),
h.write(
" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
seq = [char_list[int(i)] for i in j['utts'][x]
['output'][0]['tokenid'].split()]
r.write(" ".join(seq).replace('<eos>', '')),
r.write(
" (" + j['utts'][x]['utt2spk'].replace('-', '_') + "-" + x + ")\n")
|
#!/usr/bin/env python2
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import sys
import json
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--key', '-k', type=str,
help='key')
args = parser.parse_args()
l = {}
line = sys.stdin.readline()
while line:
x = unicode(line, 'utf_8').rstrip().split()
v = {args.key: ' '.join(x[1:]).encode('utf_8')}
l[x[0].encode('utf_8')] = v
line = sys.stdin.readline()
all_l = {'utts': l}
# ensure "ensure_ascii=False", which is a bug
jsonstring = json.dumps(all_l, indent=4, ensure_ascii=False)
print(jsonstring)
|
#!/usr/bin/env python
# Apache 2.0
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--exclude', '-v', dest='exclude',
action='store_true', help='exclude filter words')
parser.add_argument('filt', type=str, help='filter list')
parser.add_argument('infile', type=str, help='input file')
args = parser.parse_args()
vocab = set()
with open(args.filt) as vocabfile:
for line in vocabfile:
vocab.add(line.strip())
with open(args.infile) as textfile:
for line in textfile:
if args.exclude:
print(" ".join(
map(lambda word: word if not word in vocab else '', line.strip().split())))
# else:
# print(" ".join(map(lambda word: word if word in vocab else '<UNK>', unicode(line, 'utf_8').strip().split())).encode('utf_8'))
|
#!/usr/bin/env python2
# encoding: utf-8
# Copyright 2017 Johns Hopkins University (Shinji Watanabe)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import argparse
import json
import logging
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('jsons', type=str, nargs='+',
help='json files')
parser.add_argument('--multi', '-m', type=int,
help='Test the json file for multiple input/output', default=0)
parser.add_argument('--verbose', '-V', default=0, type=int,
help='Verbose option')
args = parser.parse_args()
# logging info
if args.verbose > 0:
logging.basicConfig(
level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
else:
logging.basicConfig(
level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
# make intersection set for utterance keys
js = []
intersec_ks = []
for x in args.jsons:
with open(x, 'r') as f:
j = json.load(f)
ks = j['utts'].keys()
logging.info(x + ': has ' + str(len(ks)) + ' utterances')
if len(intersec_ks) > 0:
intersec_ks = intersec_ks.intersection(set(ks))
else:
intersec_ks = set(ks)
js.append(j)
logging.info('new json has ' + str(len(intersec_ks)) + ' utterances')
old_dic = dict()
for k in intersec_ks:
v = js[0]['utts'][k]
for j in js[1:]:
v.update(j['utts'][k])
old_dic[k] = v
new_dic = dict()
for id in old_dic:
dic = old_dic[id]
in_dic = {}
if dic.has_key(unicode('idim', 'utf-8')):
in_dic[unicode('shape', 'utf-8')] = (int(dic[unicode('ilen', 'utf-8')]), int(dic[unicode('idim', 'utf-8')]))
in_dic[unicode('name', 'utf-8')] = unicode('input1', 'utf-8')
in_dic[unicode('feat', 'utf-8')] = dic[unicode('feat', 'utf-8')]
out_dic = {}
out_dic[unicode('name', 'utf-8')] = unicode('target1', 'utf-8')
out_dic[unicode('shape', 'utf-8')] = (int(dic[unicode('olen', 'utf-8')]), int(dic[unicode('odim', 'utf-8')]))
out_dic[unicode('text', 'utf-8')] = dic[unicode('text', 'utf-8')]
out_dic[unicode('token', 'utf-8')] = dic[unicode('token', 'utf-8')]
out_dic[unicode('tokenid', 'utf-8')] = dic[unicode('tokenid', 'utf-8')]
new_dic[id] = {unicode('input', 'utf-8'):[in_dic], unicode('output', 'utf-8'):[out_dic],
unicode('utt2spk', 'utf-8'):dic[unicode('utt2spk', 'utf-8')]}
# ensure "ensure_ascii=False", which is a bug
jsonstring = json.dumps({'utts': new_dic}, indent=4, ensure_ascii=False, sort_keys=True).encode('utf_8')
print(jsonstring)
|
from .utils import *
|
#!/usr/bin/env python3
IGNORE_ID = -1
def pad_list(xs, pad_value):
# From: espnet/src/nets/e2e_asr_th.py: pad_list()
n_batch = len(xs)
max_len = max(x.size(0) for x in xs)
pad = xs[0].new(n_batch, max_len, * xs[0].size()[1:]).fill_(pad_value)
for i in range(n_batch):
pad[i, :xs[i].size(0)] = xs[i]
return pad
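# A minimal sketch of pad_list (illustrative values, not taken from this repo):
#   >>> import torch
#   >>> xs = [torch.ones(3, 2), torch.ones(1, 2)]
#   >>> pad_list(xs, 0).shape
#   torch.Size([2, 3, 2])   # the second sequence is zero-padded from length 1 to 3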
def process_dict(dict_path):
with open(dict_path, 'rb') as f:
dictionary = f.readlines()
char_list = [entry.decode('utf-8').split(' ')[0]
for entry in dictionary]
sos_id = char_list.index('<sos>')
eos_id = char_list.index('<eos>')
return char_list, sos_id, eos_id
if __name__ == "__main__":
import sys
path = sys.argv[1]
char_list, sos_id, eos_id = process_dict(path)
print(char_list, sos_id, eos_id)
# * ------------------ recognition related ------------------ *
def parse_hypothesis(hyp, char_list):
"""Function to parse hypothesis
:param list hyp: recognition hypothesis
:param list char_list: list of characters
    :return: recognition text string
    :return: recognition token string
    :return: recognition tokenid string
    :return: recognition score
"""
# remove sos and get results
tokenid_as_list = list(map(int, hyp['yseq'][1:]))
token_as_list = [char_list[idx] for idx in tokenid_as_list]
score = float(hyp['score'])
# convert to string
tokenid = " ".join([str(idx) for idx in tokenid_as_list])
token = " ".join(token_as_list)
text = "".join(token_as_list).replace('<space>', ' ')
return text, token, tokenid, score
def add_results_to_json(js, nbest_hyps, char_list):
"""Function to add N-best results to json
:param dict js: groundtruth utterance dict
:param list nbest_hyps: list of hypothesis
:param list char_list: list of characters
:return: N-best results added utterance dict
"""
# copy old json info
new_js = dict()
new_js['utt2spk'] = js['utt2spk']
new_js['output'] = []
for n, hyp in enumerate(nbest_hyps, 1):
# parse hypothesis
rec_text, rec_token, rec_tokenid, score = parse_hypothesis(
hyp, char_list)
# copy ground-truth
out_dic = dict(js['output'][0].items())
# update name
out_dic['name'] += '[%d]' % n
# add recognition results
out_dic['rec_text'] = rec_text
out_dic['rec_token'] = rec_token
out_dic['rec_tokenid'] = rec_tokenid
out_dic['score'] = score
# add to list of N-best result dicts
new_js['output'].append(out_dic)
# show 1-best result
if n == 1:
print('groundtruth: %s' % out_dic['text'])
print('prediction : %s' % out_dic['rec_text'])
return new_js
# -- Transformer Related --
import torch
def get_non_pad_mask(padded_input, input_lengths=None, pad_idx=None):
"""padding position is set to 0, either use input_lengths or pad_idx
"""
assert input_lengths is not None or pad_idx is not None
if input_lengths is not None:
# padded_input: N x T x ..
N = padded_input.size(0)
non_pad_mask = padded_input.new_ones(padded_input.size()[:-1]) # N x T
for i in range(N):
non_pad_mask[i, input_lengths[i]:] = 0
if pad_idx is not None:
# padded_input: N x T
assert padded_input.dim() == 2
non_pad_mask = padded_input.ne(pad_idx).float()
# unsqueeze(-1) for broadcast
return non_pad_mask.unsqueeze(-1)
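# Illustrative sketch (assumed shapes, for clarity only): for padded_input of shape
# 2 x 3 x D and input_lengths = [2, 3], get_non_pad_mask returns
#   [[[1], [1], [0]],
#    [[1], [1], [1]]]        # shape N x T x 1, zeros at padded positions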
def get_subsequent_mask(seq):
''' For masking out the subsequent info. '''
sz_b, len_s = seq.size()
subsequent_mask = torch.triu(
torch.ones((len_s, len_s), device=seq.device, dtype=torch.uint8), diagonal=1)
subsequent_mask = subsequent_mask.unsqueeze(0).expand(sz_b, -1, -1) # b x ls x ls
return subsequent_mask
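# Illustrative sketch: for a batch of one sequence of length 3, get_subsequent_mask
# returns (values follow from the triu construction above):
#   [[[0, 1, 1],
#     [0, 0, 1],
#     [0, 0, 0]]]
# i.e. position i may not attend to positions j > i.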
def get_attn_key_pad_mask(seq_k, seq_q, pad_idx):
''' For masking out the padding part of key sequence. '''
# Expand to fit the shape of key query attention matrix.
len_q = seq_q.size(1)
padding_mask = seq_k.eq(pad_idx)
padding_mask = padding_mask.unsqueeze(1).expand(-1, len_q, -1) # b x lq x lk
return padding_mask
def get_attn_pad_mask(padded_input, input_lengths, expand_length):
"""mask position is set to 1"""
# N x Ti x 1
non_pad_mask = get_non_pad_mask(padded_input, input_lengths=input_lengths)
# N x Ti, lt(1) like not operation
pad_mask = non_pad_mask.squeeze(-1).lt(1)
attn_mask = pad_mask.unsqueeze(1).expand(-1, expand_length, -1)
return attn_mask
|
from .data import *
|
"""
Logic:
1. AudioDataLoader generate a minibatch from AudioDataset, the size of this
minibatch is AudioDataLoader's batchsize. For now, we always set
AudioDataLoader's batchsize as 1. The real minibatch size we care about is
set in AudioDataset's __init__(...). So actually, we generate the
information of one minibatch in AudioDataset.
2. After AudioDataLoader getting one minibatch from AudioDataset,
AudioDataLoader calls its collate_fn(batch) to process this minibatch.
"""
import json
from pathlib import Path
import numpy as np
import torch
import torch.utils.data as data
import kaldi_io
from ..utils import IGNORE_ID, pad_list
class AudioDataset(data.Dataset):
"""
    TODO: this is a bit of a hack: batch_size lives here for now;
    move batch_size to the dataloader later.
"""
def __init__(self, data_json_path, batch_size, max_length_in, max_length_out,
num_batches=0, batch_frames=0):
# From: espnet/src/asr/asr_utils.py: make_batchset()
"""
Args:
data: espnet/espnet json format file.
num_batches: for debug. only use num_batches minibatch but not all.
"""
super(AudioDataset, self).__init__()
with open(data_json_path, 'rb') as f:
data = json.load(f)['utts']
# sort it by input lengths (long to short)
sorted_data = sorted(data.items(), key=lambda data: int(
data[1]['input'][0]['shape'][0]), reverse=True)
# change batchsize depending on the input and output length
minibatch = []
# Method 1: Generate minibatch based on batch_size
# i.e. each batch contains #batch_size utterances
if batch_frames == 0:
start = 0
while True:
ilen = int(sorted_data[start][1]['input'][0]['shape'][0])
olen = int(sorted_data[start][1]['output'][0]['shape'][0])
factor = max(int(ilen / max_length_in), int(olen / max_length_out))
# if ilen = 1000 and max_length_in = 800
# then b = batchsize / 2
# and max(1, .) avoids batchsize = 0
b = max(1, int(batch_size / (1 + factor)))
end = min(len(sorted_data), start + b)
minibatch.append(sorted_data[start:end])
# DEBUG
# total= 0
# for i in range(start, end):
# total += int(sorted_data[i][1]['input'][0]['shape'][0])
# print(total, end-start)
if end == len(sorted_data):
break
start = end
# Method 2: Generate minibatch based on batch_frames
# i.e. each batch contains approximately #batch_frames frames
else: # batch_frames > 0
start = 0
while True:
total_frames = 0
end = start
while total_frames < batch_frames and end < len(sorted_data):
ilen = int(sorted_data[end][1]['input'][0]['shape'][0])
total_frames += ilen
end += 1
# print(total_frames, end-start)
minibatch.append(sorted_data[start:end])
if end == len(sorted_data):
break
start = end
if num_batches > 0:
minibatch = minibatch[:num_batches]
self.minibatch = minibatch
def __getitem__(self, index):
return self.minibatch[index]
def __len__(self):
return len(self.minibatch)
class AudioDataLoader(data.DataLoader):
"""
NOTE: just use batchsize=1 here, so drop_last=True makes no sense here.
"""
def __init__(self, *args, LFR_m=1, LFR_n=1, **kwargs):
super(AudioDataLoader, self).__init__(*args, **kwargs)
self.collate_fn = LFRCollate(LFR_m=LFR_m, LFR_n=LFR_n)
class LFRCollate:
"""Build this wrapper to pass arguments(LFR_m, LFR_n) to _collate_fn"""
def __init__(self, LFR_m=1, LFR_n=1):
self.LFR_m = LFR_m
self.LFR_n = LFR_n
def __call__(self, batch):
return _collate_fn(batch, LFR_m=self.LFR_m, LFR_n=self.LFR_n)
# From: espnet/src/asr/asr_pytorch.py: CustomConverter:__call__
def _collate_fn(batch, LFR_m=1, LFR_n=1):
"""
Args:
batch: list, len(batch) = 1. See AudioDataset.__getitem__()
Returns:
xs_pad: N x Ti x D, torch.Tensor
        ilens : N, torch.Tensor
ys_pad: N x To, torch.Tensor
"""
# batch should be located in list
assert len(batch) == 1
batch = load_inputs_and_targets(batch[0], LFR_m=LFR_m, LFR_n=LFR_n)
xs, ys = batch
    # TODO: perform subsampling
# get batch of lengths of input sequences
ilens = np.array([x.shape[0] for x in xs])
# perform padding and convert to tensor
xs_pad = pad_list([torch.from_numpy(x).float() for x in xs], 0)
ilens = torch.from_numpy(ilens)
ys_pad = pad_list([torch.from_numpy(y).long() for y in ys], IGNORE_ID)
return xs_pad, ilens, ys_pad
# ------------------------------ utils ------------------------------------
def load_inputs_and_targets(batch, LFR_m=1, LFR_n=1):
# From: espnet/src/asr/asr_utils.py: load_inputs_and_targets
# load acoustic features and target sequence of token ids
# for b in batch:
# print(b[1]['input'][0]['feat'])
# TorchBench: Patch the input data with current file directory
# Input data path: TORCHBENCH_DATA_ROOT/speech_transformer_inputs/
from torchbenchmark import DATA_PATH
TORCHBENCH_DATA_ROOT = Path(DATA_PATH).joinpath("speech_transformer_inputs")
xs = [kaldi_io.read_mat(str(TORCHBENCH_DATA_ROOT.joinpath(b[1]['input'][0]['feat']).resolve())) for b in batch]
ys = [b[1]['output'][0]['tokenid'].split() for b in batch]
if LFR_m != 1 or LFR_n != 1:
# xs = build_LFR_features(xs, LFR_m, LFR_n)
xs = [build_LFR_features(x, LFR_m, LFR_n) for x in xs]
# get index of non-zero length samples
nonzero_idx = filter(lambda i: len(ys[i]) > 0, range(len(xs)))
# sort in input lengths
nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[i]))
if len(nonzero_sorted_idx) != len(xs):
print("warning: Target sequences include empty tokenid")
    # remove zero-length samples
xs = [xs[i] for i in nonzero_sorted_idx]
ys = [np.fromiter(map(int, ys[i]), dtype=np.int64)
for i in nonzero_sorted_idx]
return xs, ys
def build_LFR_features(inputs, m, n):
"""
Actually, this implements stacking frames and skipping frames.
if m = 1 and n = 1, just return the origin features.
if m = 1 and n > 1, it works like skipping.
if m > 1 and n = 1, it works like stacking but only support right frames.
if m > 1 and n > 1, it works like LFR.
Args:
inputs_batch: inputs is T x D np.ndarray
m: number of frames to stack
n: number of frames to skip
"""
# LFR_inputs_batch = []
# for inputs in inputs_batch:
LFR_inputs = []
T = inputs.shape[0]
T_lfr = int(np.ceil(T / n))
for i in range(T_lfr):
if m <= T - i * n:
LFR_inputs.append(np.hstack(inputs[i*n:i*n+m]))
else: # process last LFR frame
num_padding = m - (T - i * n)
frame = np.hstack(inputs[i*n:])
for _ in range(num_padding):
frame = np.hstack((frame, inputs[-1]))
LFR_inputs.append(frame)
return np.vstack(LFR_inputs)
# LFR_inputs_batch.append(np.vstack(LFR_inputs))
# return LFR_inputs_batch
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import List
import torch
from .tokenizer import Tokenizer
from .model import Transformer
class LLaMA:
def __init__(self, model: Transformer, tokenizer: Tokenizer):
self.model = model
self.tokenizer = tokenizer
def generate(
self,
prompts: List[str],
max_gen_len: int,
temperature: float = 0.8,
top_p: float = 0.95,
) -> List[str]:
bsz = len(prompts)
params = self.model.params
assert bsz <= params.max_batch_size, (bsz, params.max_batch_size)
prompt_tokens = [self.tokenizer.encode(x, bos=True, eos=False) for x in prompts]
min_prompt_size = min([len(t) for t in prompt_tokens])
max_prompt_size = max([len(t) for t in prompt_tokens])
total_len = min(params.max_seq_len, max_gen_len + max_prompt_size)
tokens = torch.full((bsz, total_len), self.tokenizer.pad_id).cuda().long()
for k, t in enumerate(prompt_tokens):
tokens[k, : len(t)] = torch.tensor(t).long()
input_text_mask = tokens != self.tokenizer.pad_id
start_pos = min_prompt_size
prev_pos = 0
for cur_pos in range(start_pos, total_len):
logits = self.model.forward(tokens[:, prev_pos:cur_pos], prev_pos)
if temperature > 0:
probs = torch.softmax(logits / temperature, dim=-1)
next_token = sample_top_p(probs, top_p)
else:
next_token = torch.argmax(logits, dim=-1)
next_token = next_token.reshape(-1)
# only replace token if prompt has already been generated
next_token = torch.where(
input_text_mask[:, cur_pos], tokens[:, cur_pos], next_token
)
tokens[:, cur_pos] = next_token
prev_pos = cur_pos
decoded = []
for i, t in enumerate(tokens.tolist()):
# cut to max gen len
t = t[: len(prompt_tokens[i]) + max_gen_len]
# cut to eos tok if any
try:
t = t[: t.index(self.tokenizer.eos_id)]
except ValueError:
pass
decoded.append(self.tokenizer.decode(t))
return decoded
def sample_top_p(probs, p):
    # Nucleus (top-p) sampling: keep the smallest set of highest-probability tokens
    # whose cumulative probability exceeds p, renormalize, and sample from that set.
    probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True)
    probs_sum = torch.cumsum(probs_sort, dim=-1)
    # drop tokens once the cumulative mass excluding the current token exceeds p
    mask = probs_sum - probs_sort > p
    probs_sort[mask] = 0.0
    probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True))
    next_token = torch.multinomial(probs_sort, num_samples=1)
    # map the sampled rank back to the original vocabulary index
    next_token = torch.gather(probs_idx, -1, next_token)
return next_token |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
from .model import ModelArgs, Transformer
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
self.model_args = ModelArgs(vocab_size=32000,device=self.device)
torch.set_default_device(self.device)
self.model = Transformer(self.model_args).to(self.device)
self.seq_len = 32
self.example_inputs = (torch.ones([self.batch_size, self.seq_len], dtype=torch.int).to(self.device), 1)
def get_module(self):
return self.model, self.example_inputs
def train(self):
error_msg = """
As of March 6, 2023
The weights for this model are not publicly available and require a valid research reason to use
The publicly available github repo is inference only
https://github.com/facebookresearch/llama
"""
        raise NotImplementedError(error_msg)
def eval(self):
self.model.eval()
with torch.no_grad():
out=self.model(*self.example_inputs)
return (out,)
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from typing import Optional, Tuple
from dataclasses import dataclass
import math
import torch
from torch import nn
import torch.nn.functional as F
@dataclass
class ModelArgs:
dim: int = 512
n_layers: int = 8
n_heads: int = 8
vocab_size: int = 32000 # this is the max vocab size supported by sentencepiece
multiple_of: int = 256 # make SwiGLU hidden layer size multiple of large power of 2
norm_eps: float = 1e-5
max_batch_size: int = 32 # From the paper they use a batch size of 4M for training
max_seq_len: int = 1024
device: Optional[str] = None
class RMSNorm(torch.nn.Module):
def __init__(self, dim: int, eps: float = 1e-6):
super().__init__()
self.eps = eps
self.weight = nn.Parameter(torch.ones(dim))
def _norm(self, x):
return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
def forward(self, x):
output = self._norm(x.float()).type_as(x)
return output * self.weight
def precompute_freqs_cis(dim: int, end: int, theta: float = 10000.0):
freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
t = torch.arange(end, device=freqs.device) # type: ignore
freqs = torch.outer(t, freqs).float() # type: ignore
freqs_cis = torch.polar(torch.ones_like(freqs), freqs) # complex64
return freqs_cis
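# Small illustrative check of precompute_freqs_cis (numbers follow from the formula
# above, not from measured repo output): with dim = 4, end = 3, theta = 10000.0,
# freqs = [1.0, 0.01] and freqs_cis has shape (3, 2) with entries exp(1j * t * freq),
# i.e. the complex rotations applied to each (query, key) feature pair in apply_rotary_emb.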
def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
ndim = x.ndim
assert 0 <= 1 < ndim
assert freqs_cis.shape == (x.shape[1], x.shape[-1])
shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
return freqs_cis.view(*shape)
def apply_rotary_emb(
xq: torch.Tensor,
xk: torch.Tensor,
freqs_cis: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
return xq_out.type_as(xq), xk_out.type_as(xk)
class Attention(nn.Module):
def __init__(self, args: ModelArgs):
super().__init__()
self.n_local_heads = args.n_heads # Basically we just assume world size of 1 // fs_init.get_model_parallel_world_size()
self.head_dim = args.dim // args.n_heads
self.device = args.device
self.wq = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wk = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wv = nn.Linear(
args.dim,
args.n_heads * self.head_dim,
bias=False,
)
self.wo = nn.Linear(
args.n_heads * self.head_dim,
args.dim,
bias=False,
)
self.cache_k = torch.zeros(
(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.head_dim),device=self.device
)
self.cache_v = torch.zeros(
(args.max_batch_size, args.max_seq_len, self.n_local_heads, self.head_dim),device=self.device
)
def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
bsz, seqlen, _ = x.shape
xq, xk, xv = self.wq(x), self.wk(x), self.wv(x)
xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xk = xk.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xv = xv.view(bsz, seqlen, self.n_local_heads, self.head_dim)
xq, xk = apply_rotary_emb(xq, xk, freqs_cis=freqs_cis)
self.cache_k = self.cache_k.to(xq)
self.cache_v = self.cache_v.to(xq)
with torch.no_grad():
            # Modifying the cache without no_grad causes the autograd engine to track
# the updates and leads to "RuntimeError: Trying to backward through
# the graph a second time"
# upstream PR - https://github.com/facebookresearch/llama/pull/304
self.cache_k[:bsz, start_pos : start_pos + seqlen] = xk
self.cache_v[:bsz, start_pos : start_pos + seqlen] = xv
keys = self.cache_k[:bsz, : start_pos + seqlen]
values = self.cache_v[:bsz, : start_pos + seqlen]
xq = xq.transpose(1, 2)
keys = keys.transpose(1, 2)
values = values.transpose(1, 2)
scores = torch.matmul(xq, keys.transpose(2, 3)) / math.sqrt(self.head_dim)
# TODO: RuntimeError: The size of tensor a (3) must match the size of tensor b (2) at non-singleton dimension 3
# if mask is not None:
# scores = scores + mask # (bs, n_local_heads, slen, cache_len + slen)
scores = F.softmax(scores.float(), dim=-1).type_as(xq)
output = torch.matmul(scores, values) # (bs, n_local_heads, slen, head_dim)
output = output.transpose(
1, 2
).contiguous().view(bsz, seqlen, -1)
return self.wo(output)
class FeedForward(nn.Module):
def __init__(
self,
dim: int,
hidden_dim: int,
multiple_of: int,
):
super().__init__()
hidden_dim = int(2 * hidden_dim / 3)
hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
self.w1 = nn.Linear(
dim, hidden_dim, bias=False
)
self.w2 = nn.Linear(
hidden_dim, dim, bias=False
)
self.w3 = nn.Linear(
dim, hidden_dim, bias=False
)
def forward(self, x):
return self.w2(F.silu(self.w1(x)) * self.w3(x))
class TransformerBlock(nn.Module):
def __init__(self, layer_id: int, args: ModelArgs):
super().__init__()
self.n_heads = args.n_heads
self.dim = args.dim
self.head_dim = args.dim // args.n_heads
self.attention = Attention(args)
self.feed_forward = FeedForward(
dim=args.dim, hidden_dim=4 * args.dim, multiple_of=args.multiple_of
)
self.layer_id = layer_id
self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
def forward(self, x: torch.Tensor, start_pos: int, freqs_cis: torch.Tensor, mask: Optional[torch.Tensor]):
h = x + self.attention.forward(self.attention_norm(x), start_pos, freqs_cis, mask)
out = h + self.feed_forward.forward(self.ffn_norm(h))
return out
class Transformer(nn.Module):
def __init__(self, params: ModelArgs):
super().__init__()
self.params = params
self.vocab_size = params.vocab_size
self.n_layers = params.n_layers
self.tok_embeddings = nn.Embedding(
params.vocab_size, params.dim,
)
self.layers = torch.nn.ModuleList()
for layer_id in range(params.n_layers):
self.layers.append(TransformerBlock(layer_id, params))
self.norm = RMSNorm(params.dim, eps=params.norm_eps)
self.output = nn.Linear(
params.dim, params.vocab_size, bias=False
)
self.freqs_cis = precompute_freqs_cis(
self.params.dim // self.params.n_heads, self.params.max_seq_len * 2
)
def forward(self, tokens: torch.Tensor, start_pos: int):
_ , seqlen = tokens.shape
h = self.tok_embeddings(tokens)
# Reference: https://github.com/facebookresearch/llama/pull/349
freqs_cis = self.freqs_cis.to(h.device)
freqs_cis = freqs_cis[start_pos : start_pos + seqlen]
mask = None
if seqlen > 1:
mask = torch.full((1, 1, seqlen, seqlen), float("-inf"), device=tokens.device)
mask = torch.triu(mask, diagonal=start_pos + 1).type_as(h)
for layer in self.layers:
h = layer(h, start_pos, freqs_cis, mask)
h = self.norm(h)
output = self.output(h[:, -1, :]) # only compute last logits
return output.float()
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from sentencepiece import SentencePieceProcessor
from logging import getLogger
from typing import List
import os
logger = getLogger()
class Tokenizer:
def __init__(self, model_path: str):
# reload tokenizer
assert os.path.isfile(model_path), model_path
self.sp_model = SentencePieceProcessor(model_file=model_path)
logger.debug(f"Reloaded SentencePiece model from {model_path}")
# BOS / EOS token IDs
self.n_words: int = self.sp_model.vocab_size()
self.bos_id: int = self.sp_model.bos_id()
self.eos_id: int = self.sp_model.eos_id()
self.pad_id: int = self.sp_model.pad_id()
logger.debug(
f"#words: {self.n_words} - BOS ID: {self.bos_id} - EOS ID: {self.eos_id}"
)
assert self.sp_model.vocab_size() == self.sp_model.get_piece_size()
def encode(self, s: str, bos: bool, eos: bool) -> List[int]:
assert type(s) is str
t = self.sp_model.encode(s)
if bos:
t = [self.bos_id] + t
if eos:
t = t + [self.eos_id]
return t
def decode(self, t: List[int]) -> str:
return self.sp_model.decode(t)
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements() |
from torchbenchmark.util.framework.timm.model_factory import TimmModel
from torchbenchmark.tasks import COMPUTER_VISION
class Model(TimmModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 32
DEFAULT_EVAL_BSIZE = 32
def __init__(self, test, device, batch_size=None, extra_args=[]):
super().__init__(test=test, model_name='resnest14d', device=device,
batch_size=batch_size, extra_args=extra_args)
|
"""
Generate a fully specified benchmark configuration file, given a lightweight
specification and a complete source of benchmark data.
Specification File
------------------
Score hierarchy input intended to be as easy to construct as possible,
relying on automatic inference of unspecified weights, benchmark configs,
and normalization factors given a particular instance of benchmark data.
Structure:
Root _
- category | required:
- domain | 3 layers of organizational structure
- task _|
- benchmark name - keyword match for root name in benchmark,
omit children unless used
_
- train/eval | optional:
- device | provide specific weights or
- compiler/runtime _| exclude particular configs by omission
Rules for describing the weight hierarchy
- everything is a dict, since at any level you could specify a weight
- if a weight is not specified, it is computed automatically with respect
its direct siblings.
- if specific benchmark configurations are omitted under a benchmark name,
all configurations present in the normalization data json are weighted equally
Normalization Data
------------------
Used to 'fill in the gaps' in the human written specification.
- particular configurations (train/eval, device, compiler/runtime) present in
this data are used to compute benchmark weights
- measurements from this data are used as normalization factors in score computation
such that new data is scored relative to this data.
####
TODO
####
- handle multiple normalization files, one for models, one for synthetic, etc
- make explicit configuration choice for throughput vs runtime metrics
- assert same machine used for all normalization files and freeze that in
"""
import argparse
import json
import yaml
from collections import defaultdict
def generate_bench_cfg(spec, norm, target):
cfg = {
'target': target,
'benchmarks': {},
}
benchmark_names = [b['name'] for b in norm['benchmarks']]
benchmark_norms = {b['name']: b['stats']['mean'] for b in norm['benchmarks']}
assert len(spec['hierarchy']) > 0, "Must specify at least one category"
category_weight = 1.0 / len(spec['hierarchy'])
for category in spec['hierarchy']:
category_spec = spec['hierarchy'][category]
assert isinstance(category_spec, dict), f"Category {category} in spec must be non-empty"
assert 'weight' not in category_spec, "TODO implement manual category weights"
domain_weight = 1.0 / len(category_spec)
for domain in category_spec:
tasks = category_spec[domain]
assert isinstance(tasks, dict), f"Domain {category}:{domain} in spec must be non-empty"
assert 'weight' not in tasks, "TODO implement manual domain weights"
task_weight = 1.0 / len(tasks)
for task in tasks:
benchmarks = tasks[task]
assert isinstance(benchmarks, dict), f"Task {category}:{domain}:{task} in spec must be non-empty"
assert 'weight' not in benchmarks, "TODO implement manual task weights"
benchmark_weight = 1.0 / len(benchmarks)
for benchmark in benchmarks:
assert benchmarks[benchmark] is None, "TODO handle benchmark as dict of config specs"
# assert 'weight' not in benchmarks[benchmark], "TODO implement manual benchmark weights"
found_benchmarks = [name for name in benchmark_names if benchmark in name]
assert len(found_benchmarks) > 0, f"No normalization data found for {benchmark}"
config_weight = 1.0 / len(found_benchmarks)
for b in found_benchmarks:
weight = domain_weight * task_weight * benchmark_weight * config_weight
cfg['benchmarks'][b] = {
'weight': weight,
'norm': benchmark_norms[b],
}
return cfg
# Support generate a config from benchmark data that runs partial of the spec
def generate_bench_cfg_partial(spec, norm, target):
benchmark_names = [b['name'] for b in norm['benchmarks']]
rec_defaultdict = lambda: defaultdict(rec_defaultdict)
partial_spec = rec_defaultdict()
def gen_partial_spec(category, domain, task, benchmark):
found_benchmarks = [name for name in benchmark_names if benchmark in name]
if len(found_benchmarks) > 0:
partial_spec['hierarchy'][category][domain][task][benchmark] = None
def visit_each_benchmark(spec, func):
for category in spec['hierarchy']:
category_spec = spec['hierarchy'][category]
for domain in category_spec:
tasks = category_spec[domain]
for task in tasks:
benchmarks = tasks[task]
for benchmark in benchmarks:
func(category, domain, task, benchmark)
visit_each_benchmark(spec, gen_partial_spec)
return generate_bench_cfg(partial_spec, norm, target)
def check(spec):
assert len(spec['hierarchy']) > 0, "Must specify at least one category"
for category in spec['hierarchy']:
category_spec = spec['hierarchy'][category]
assert isinstance(category_spec, dict), f"Category {category} in spec must be non-empty"
assert 'weight' not in category_spec, "TODO implement manual category weights"
for domain in category_spec:
tasks = category_spec[domain]
assert isinstance(tasks, dict), f"Domain {category}:{domain} in spec must be non-empty"
assert 'weight' not in tasks, "TODO implement manual domain weights"
for task in tasks:
benchmarks = tasks[task]
assert isinstance(benchmarks, dict), f"Task {category}:{domain}:{task} in spec must be non-empty"
assert 'weight' not in benchmarks, "TODO implement manual task weights"
for benchmark in benchmarks:
assert benchmarks[benchmark] is None, "TODO handle benchmark as dict of config specs"
# assert 'weight' not in benchmarks[benchmark], "TODO implement manual benchmark weights"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--specification", required=True,
help="yaml file describing weight hierarchy")
parser.add_argument("--normalization_data", required=True,
help="pytest-benchmark json file used for generating normalization "
"values and filling in unspecified benchmark configurations")
parser.add_argument("--output_file", required=True,
help="generated complete benchmark configuration")
parser.add_argument("--target_score", default=1000,
help="target score value given these normalizations and specifications")
parser.add_argument("--partial",
action='store_true',
help="generates partial config if the benchmark only runs part of the spec."
"normally, the spec is supposed to define the set of benchmarks that's expected to exist,"
"and then the provided json data is expected to provide the norm values to match the spec."
"To simplify debugging, and not for normal score runs, we allow a convenience for producing"
"a score configuration that matches whatever json data is provided.")
args = parser.parse_args()
with open(args.specification) as spec_file:
spec = yaml.full_load(spec_file)
with open(args.normalization_data) as norm_file:
norm = json.load(norm_file)
with open(args.output_file, 'w') as out_file:
check(spec)
if args.partial:
bench_cfg = generate_bench_cfg_partial(spec, norm, args.target_score)
else:
bench_cfg = generate_bench_cfg(spec, norm, args.target_score)
yaml.dump(bench_cfg, out_file)
|
"""
Compute TorchBench Score V2.
"""
import re
import math
import yaml
import importlib
import itertools
from pathlib import Path
from typing import List, Optional
TORCHBENCH_V2_REF_DATA = Path(__file__).parent.joinpath("configs/v2/config-v2.yaml")
TORCHBENCH_V2_DEFAULT_THRESHOLD = 0.07
TORCHBENCH_V2_DEFAULT_TARGET = 1000.0
def _get_model_task(model_name):
"""
Helper function which extracts the task the model belongs to
by iterating over the Model attributes.
"""
try:
module = importlib.import_module(f'torchbenchmark.models.{model_name}', package=__name__)
except:
raise ValueError(f"Unable to get task for model: {model_name}")
Model = getattr(module, 'Model')
return Model.task
def _parse_test_name(name):
"""
Helper function which extracts test type (eval or train), model,
device, and mode from the test full name.
"""
test, model_name, device, mode = re.match(r"test_(.*)\[(.*)\-(.*)\-(.*)\]", name).groups()
return (test, model_name, device, mode)
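# Illustrative parse (the test name below is an assumed example, not benchmark data):
#   _parse_test_name("test_eval[BERT_pytorch-cuda-eager]")
#   -> ("eval", "BERT_pytorch", "cuda", "eager")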
class TorchBenchV2Test:
def __init__(self, test_name, test_item):
self._name = test_name
self._test_type, self._model, self._device, self._mode = _parse_test_name(self._name)
self._task = _get_model_task(self._model)
self._stable = test_item["stable"]
self._norm = test_item["norm"]
@property
def name(self) -> str:
return self._name
@property
def test_type(self) -> str:
return self._test_type
@property
def model(self) -> str:
return self._model
@property
def device(self) -> str:
return self._device
@property
def mode(self) -> str:
return self._mode
@property
def category(self) -> str:
return type(self._task).__name__
@property
def domain(self) -> str:
return self._task.name
@property
def norm(self) -> float:
return self._norm
@property
def stable(self) -> bool:
return self._stable
class TorchBenchV2Suite:
def __init__(self, norm):
self._tests = []
self._tests_dict = {}
self._threshold = norm["stable_threshold"]
self._target = norm["target"]
for test in norm["tests"]:
test_item = TorchBenchV2Test(test, norm["tests"][test])
self._add_test(test_item)
@property
def target(self) -> float:
return self._target
@property
def all_stable_tests(self) -> List[TorchBenchV2Test]:
return list(filter(lambda x: x.stable, self._tests))
@property
def threshold(self) -> float:
return self._threshold
def _add_test(self, test: TorchBenchV2Test):
self._tests.append(test)
self._tests_dict[test.name] = test
def get_test_by_name(self, name) -> TorchBenchV2Test:
return self._tests_dict[name]
class TorchBenchScoreV2:
# ref_data: the object read from reference YAML file or benchmark json file
def __init__(self, ref_data, _spec_file, _target):
if not ref_data:
with open(TORCHBENCH_V2_REF_DATA) as ref_file:
ref_data = yaml.full_load(ref_file)
# Build the suite
self.suite = TorchBenchV2Suite(self._setup_benchmark_norms(ref_data))
def _get_test_delta_weight(self, ref_norm, data_norm):
delta = (ref_norm - data_norm) / ref_norm
# Not a valid signal because it is below threshold
if abs(delta) <= self.suite.threshold:
return 0.0
return delta * 100
def _get_delta_score(self, data_norm):
"Compute V2 delta score"
delta = 0.0
for test in self.suite.all_stable_tests:
ref_norm = test.norm
data_test_norm = data_norm["tests"][test.name]["norm"]
delta_weight = self._get_test_delta_weight(ref_norm, data_test_norm)
delta += delta_weight
return delta
def _get_domain_score(self, data_norm, condition=None) -> Optional[float]:
"Compute V2 domain subscore or total score"
def _test_filter(test, condition) -> bool:
# Total score, condition is None
if not condition:
return True
device, test_type, domain = condition
in_device = device in test.name
in_type = test_type in test.name
in_domain = test.domain in domain or test.category in domain
return in_device and in_type and in_domain
score = 0.0
tests = self.suite.all_stable_tests
filtered_tests = list(filter(lambda x: _test_filter(x, condition), tests))
# Don't have any test in this category
if not len(filtered_tests):
return None
# Each test has equal weight
weight = 1.0 / len(filtered_tests)
for test in filtered_tests:
norm = data_norm["tests"][test.name]["norm"]
delta = (norm - test.norm) / test.norm
if abs(delta) <= self.suite.threshold:
norm = test.norm
score += weight * math.log(test.norm / norm)
return math.exp(score) * self.suite.target
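    # Worked example of the scoring above (all numbers are assumed for illustration):
    # with two stable tests of equal weight (0.5 each), reference norms of 1.0,
    # measured norms of 0.8 and 1.25, and target = 1000, the score is
    # exp(0.5*ln(1/0.8) + 0.5*ln(1/1.25)) * 1000 = exp(0) * 1000 = 1000,
    # i.e. a 1.25x speedup and a 1.25x slowdown cancel in the geometric mean.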
def _setup_benchmark_norms(self, ref_data):
"""
Helper function which gets the normalization values per benchmark
by going through the reference data file.
If ref_data is a benchmark json object, construct the YAML norm file from it.
Otherwise, use it as-is.
"""
assert isinstance(ref_data, dict), "The type of ref_data must be a dict object."
# If the data contains machine_info key, it must be a benchmark json object
if "benchmarks" in ref_data and "machine_info" in ref_data:
ref_data = self._get_norm_from_ref_json_obj(ref_data)
return ref_data
def _get_norm_from_ref_json_obj(self, ref_json_obj):
"""
This function iterates over the reference benchmark json output
and calculates the normalization values based on the reference data.
"""
norm = dict()
norm["stable_threshold"] = TORCHBENCH_V2_DEFAULT_THRESHOLD
norm["target"] = TORCHBENCH_V2_DEFAULT_TARGET
norm["tests"] = dict()
for b in ref_json_obj['benchmarks']:
name = b['name']
norm['tests'].setdefault(name, dict())
norm['tests'][name]['norm'] = b['stats']['median']
norm['tests'][name]['stable'] = True
return norm
def get_norm(self, data):
return self._get_norm_from_ref_json_obj(data)
def compute_score(self, data):
"""
This API calculates the total V2 score for all the benchmark tests in the set.
"""
def domain_to_condition(all_domains, domain):
if domain == "OVERALL":
return all_domains[1:]
else:
return [domain]
# Check the input test set is the superset of the ref
data_norm = self._get_norm_from_ref_json_obj(data)
stable_tests = map(lambda x: x.name, self.suite.all_stable_tests)
diff_set = set(stable_tests) - set(data_norm["tests"].keys())
if diff_set:
raise ValueError(f"The request benchmark json doesn't include the V2 test: {diff_set}")
summary = {}
# overall score
summary["total"] = self._get_domain_score(data_norm)
# delta score
summary["delta"] = self._get_delta_score(data_norm)
# domain scores
summary["domain"] = {}
axis_device = ["cuda", "cpu"]
axis_test = ["train", "eval"]
axis_domain = ["OVERALL", "NLP", "CLASSIFICATION", "SEGMENTATION", "SPEECH", "RECOMMENDATION"]
for element in itertools.product(*[axis_device, axis_test, axis_domain]):
dev, tp, domain = element
cond = (dev, tp, domain_to_condition(axis_domain, domain))
summary["domain"][f"{dev}-{tp}-{domain.lower()}"] = self._get_domain_score(data_norm, cond)
return summary
|
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
TARGET_SCORE_DEFAULT = 1000
SPEC_FILE_DEFAULT = Path(__file__).parent.joinpath("score.yml")
from .compute_score_v0 import TorchBenchScoreV0
from .compute_score_v1 import TorchBenchScoreV1
from .compute_score_v2 import TorchBenchScoreV2
class TorchBenchScore:
def __init__(self, ref_data=None, spec=SPEC_FILE_DEFAULT, target=TARGET_SCORE_DEFAULT, version="v1"):
active_versions = {"v0": TorchBenchScoreV0, "v1": TorchBenchScoreV1, "v2": TorchBenchScoreV2 }
        if version not in active_versions:
            raise ValueError(f"We only support TorchBench score versions: {list(active_versions.keys())}")
self.score = active_versions[version](ref_data, spec, target)
def get_norm(self, data):
return self.score.get_norm(data)
def compute_score(self, data):
return self.score.compute_score(data)
|
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from enum import Enum
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
from typing import List
TORCHBENCH_V1_REF_DATA = Path(__file__).parent.joinpath("configs/v1/config-v1.yaml")
def _get_model_task(model_name):
"""
Helper function which extracts the task the model belongs to
by iterating over the Model attributes.
"""
try:
module = importlib.import_module(f'torchbenchmark.models.{model_name}', package=__name__)
except:
raise ValueError(f"Unable to get task for model: {model_name}")
Model = getattr(module, 'Model')
return Model.task
def _sanitize_name(name):
"""Test names no longer contain `-freeze`, but it still appears in some artifacts."""
return name.replace("-freeze", "", 1)
def _parse_test_name(name):
"""
Helper function which extracts test type (eval or train), model,
device, and mode from the test full name.
"""
name = _sanitize_name(name)
test, model_name, device, mode = re.match(r"test_(.*)\[(.*)\-(.*)\-(.*)\]", name).groups()
return (test, model_name, device, mode)
class TorchBenchV1Test:
def __init__(self, test_name):
self._name = test_name
self._test_type, self._model, self._device, self._mode = _parse_test_name(test_name)
self._task = _get_model_task(self._model)
@property
def name(self) -> str:
return self._name
@property
def test_type(self) -> str:
return self._test_type
@property
def model(self) -> str:
return self._model
@property
def device(self) -> str:
return self._device
@property
def mode(self) -> str:
return self._mode
@property
def category(self) -> str:
return type(self._task).__name__
@property
def domain(self) -> str:
return self._task.name
@property
def weight(self) -> float:
# config weight rule in V1: 1x CPU Training, 2x GPU Training, 2x CPU Inference, 2x GPU Inference
if self.test_type == "train" and self.device == "cpu":
return 1.0
return 2.0
class TorchBenchV1Suite:
def __init__(self):
self._suite_spec = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
self._tests = []
@property
def all_tests(self):
return self._tests
def add_test(self, test: TorchBenchV1Test):
self._suite_spec[test.category][test.domain][test.model].append(test)
self._tests.append(test)
def categories(self) -> List[str]:
return self._suite_spec.keys()
def domains(self, category: str) -> List[str]:
return self._suite_spec[category].keys()
def models(self, category: str, domain: str) -> List[str]:
return self._suite_spec[category][domain].keys()
def tests(self, category:str, domain: str, model: str) -> List[TorchBenchV1Test]:
return self._suite_spec[category][domain][model]
class TorchBenchScoreV1:
# ref_data: the YAML file or json file
def __init__(self, ref_data, spec_file, target):
if not ref_data:
with open(TORCHBENCH_V1_REF_DATA) as ref_file:
ref_data = yaml.full_load(ref_file)
self.norm = self._setup_benchmark_norms(ref_data)
self.norm_weights = self._setup_weights(self.norm)
# spec_file is not used in V1, this is just a placeholder
self.spec_file = spec_file
self.target = target
def _filter_jit_tests(self, norm):
result_ref = dict()
for jit_name in filter(lambda x: '-jit' in x, norm.keys()):
left, sep, right = jit_name.rpartition('-jit')
eager_name = left + "-eager" + right
# We assume if a jit test exists, there must be an eager test
            assert eager_name in norm, f"Can't find eager test name {eager_name}"
result_ref[jit_name] = dict()
result_ref[jit_name]['jit_norm'] = norm[jit_name]['norm']
result_ref[jit_name]['eager_norm'] = norm[eager_name]['norm']
return result_ref
# Generate the domain weights from the ref object
def _setup_weights(self, ref):
domain_weights = defaultdict(float)
config_weights = defaultdict(float)
# Build the test suite
suite = TorchBenchV1Suite()
for name in ref:
test = TorchBenchV1Test(name)
suite.add_test(test)
# Setup domain weights
for test in suite.all_tests:
category_cnt = len(suite.categories())
domain_cnt = len(suite.domains(test.category))
model_cnt = len(suite.models(test.category, test.domain))
domain_weights[test.name] = (1.0 / category_cnt) * (1.0 / domain_cnt) * (1.0 / model_cnt)
# Setup config weights
for test in suite.all_tests:
category = test.category
domain = test.domain
model = test.model
model_tests = suite.tests(test.category, test.domain, test.model)
config_weights[test.name] = test.weight / sum(map(lambda x: x.weight, model_tests))
# Runtime check the weights constraint
sum_weight = 0.0
for test in suite.all_tests:
sum_weight += config_weights[test.name] * domain_weights[test.name]
assert(abs(sum_weight - 1.0) < 1e-6), f"The total weights sum ({sum_weight}) is not 1.0, please submit a bug report."
return (domain_weights, config_weights)
def _setup_benchmark_norms(self, ref_data):
"""
Helper function which gets the normalization values per benchmark
by going through the reference data file.
If ref_data is a benchmark json object, construct the YAML norm file from it.
Otherwise, use it as-is.
"""
assert isinstance(ref_data, dict), "The type of ref_data must be a dict object."
# If the data contains machine_info key, it must be a benchmark json object
if "benchmarks" in ref_data and "machine_info" in ref_data:
ref = self._get_norm_from_ref_json_obj(ref_data)
else:
ref = {_sanitize_name(k): v for k, v in ref_data.items()}
return ref
def _get_norm_from_ref_json_obj(self, ref_json_obj):
"""
This function iterates over the reference benchmark json output
and calculates the normalization values based on the reference data.
It also sets up the domain weights of the score.
"""
norm = dict()
for b in ref_json_obj['benchmarks']:
name = _sanitize_name(b['name'])
norm.setdefault(name, dict())
norm[name].setdefault('norm', dict())
norm[name]['norm'] = b['stats']['mean']
return norm
def _get_score(self, data, ref, ref_weights):
score = 0.0
(domain_weights, config_weights) = ref_weights
for name in data:
norm = data[name]['norm']
benchmark_score = domain_weights[name] * config_weights[name] * math.log(ref[name]['norm'] / norm)
score += benchmark_score
return math.exp(score)
def data_in_list(self, n, l):
for e in l:
if e not in n:
return False
return True
def _get_subscore(self, data, ref_norm, ref_weights, filters):
error_msg = "We only accept one of the following four subscores: [cpu, train], [cpu, eval], [cuda, train], [cuda, infer]."
assert len(filters) == 2, error_msg
assert "cpu" in filters or "cuda" in filters, error_msg
assert "train" in filters or "eval" in filters, error_msg
score = 0.0
(domain_weights, _) = ref_weights
for name in filter(lambda x: self.data_in_list(x, filters), data):
norm = data[name]['norm']
benchmark_score = domain_weights[name] * math.log(ref_norm[name]['norm'] / norm)
score += benchmark_score
return math.exp(score)
def compute_jit_speedup_score(self, data):
"""
This API calculates the V1 JIT speedup score for all
the benchmarks that enable JIT compilation.
The data argument is the json data object from the benchmark.
The JIT speedup score is the geometric mean of all JIT benchmarks speedup
comparing to corresponding non-JIT benchmarks. Its computation does not require reference data.
"""
score = 0.0
norm = self._setup_benchmark_norms(data)
norm_jit = self._filter_jit_tests(norm)
(domain_weights, config_weights) = self._setup_weights(norm_jit)
for name in norm_jit:
eager_norm = norm_jit[name]['eager_norm']
jit_norm = norm_jit[name]['jit_norm']
jit_speedup_score = domain_weights[name] * config_weights[name] * math.log(eager_norm / jit_norm)
score += jit_speedup_score
return math.exp(score)
def compute_score(self, data):
"""
This API calculates the total V1 score for all the
benchmarks that was run by reading the data (.json) file.
"""
# Check the input test set is the superset of the ref
data_norm = self._get_norm_from_ref_json_obj(data)
diff_set = set(self.norm.keys()) - set(data_norm.keys())
assert not diff_set, f"The request benchmark json doesn't have v1 test: {diff_set}"
summary = {}
summary["jit-speedup"] = self.compute_jit_speedup_score(data)
devices = ["cpu", "cuda"]
tests = ["train", "eval"]
filters = [(a, b) for a in devices for b in tests]
for f in filters:
key = f"subscore-{f[0]}-{f[1]}"
summary[key] = self._get_subscore(data_norm, self.norm, self.norm_weights, f) * self.target
summary["total"] = self._get_score(data_norm, self.norm, self.norm_weights) * self.target
return summary
def get_norm(self, data):
return self._get_norm_from_ref_json_obj(data)
|
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import sys
import os
import re
import yaml
import importlib
from tabulate import tabulate
from pathlib import Path
from collections import defaultdict
from .generate_score_config import generate_bench_cfg
TORCHBENCH_V0_REF_DATA = Path(__file__).parent.joinpath("configs/v0/config-v0.yaml")
def _get_model_task(model_name):
"""
Helper function which extracts the task the model belongs to
by iterating over the Model attributes.
"""
try:
module = importlib.import_module(f'torchbenchmark.models.{model_name}', package=__name__)
except:
raise ValueError(f"Unable to get task for model: {model_name}")
Model = getattr(module, 'Model')
return Model.task.value
class TorchBenchScoreV0:
def __init__(self, ref_data, spec, target):
self.spec = spec
self.target = target
if not ref_data:
ref_data = TORCHBENCH_V0_REF_DATA
self.ref_data = ref_data
self.weights = None
self.norm = None
# V0: setup weights and benchmark norms
self._setup_weights()
self._setup_benchmark_norms()
def _setup_weights(self):
"""
        Calculates the static benchmark weights by iterating over the spec
        file and constructing a dictionary whose (key, value) pairs are
        (task, weight_for_benchmark_per_task).
"""
# Load the spec file
with open(self.spec) as spec_file:
self.spec = yaml.full_load(spec_file)
self.weights = defaultdict(float)
category_spec = self.spec['hierarchy']['model']
domain_weight = 1.0/ len(category_spec)
for domain in category_spec:
tasks = category_spec[domain]
task_weight = 1.0 / len(tasks)
for task in tasks:
benchmarks = tasks[task]
benchmark_weight = 1.0 / len(benchmarks)
self.weights[task] = domain_weight * task_weight * benchmark_weight
def _setup_benchmark_norms(self):
"""
Helper function which gets the normalization values per benchmark
by going through the reference data file.
"""
if self.ref_data == TORCHBENCH_V0_REF_DATA:
with open(self.ref_data) as ref_file:
ref = yaml.full_load(ref_file)
self.norm = {b: ref['benchmarks'][b]['norm'] for b in ref['benchmarks']}
else:
self.norm = {b['name']: b['stats']['mean'] for b in self.ref_data['benchmarks']}
def get_score_per_config(self, data, weighted_score=False):
"""
This function iterates over found benchmark dictionary
and calculates the weight_sum and benchmark_score.
A score_db is then constructed to calculate the cumulative
score per config. Here config refers to device, mode and test
configurations the benchmark was run on.
        For example, if the benchmark was run in eval mode on a GPU with TorchScript JIT,
        config = (eval, cuda, jit).
        This helper returns the score_db.
"""
found_benchmarks = defaultdict(lambda: defaultdict(list))
score_db = defaultdict(float)
# Construct a benchmark database by going over through the data file
# for the run and update the dictionary by task and model_name
for b in data['benchmarks']:
name, mean = b['name'], b['stats']['mean']
test, model_name, device, mode = re.match(r"test_(.*)\[(.*)\-(.*)\-(.*)\]", name).groups()
config = (test, device, mode)
task = _get_model_task(model_name)
found_benchmarks[task][model_name].append((mean, config, name))
for task, models in found_benchmarks.items():
for name, all_configs in models.items():
weight = self.weights[task] * (1.0/len(all_configs))
for mean, config, benchmark in all_configs:
benchmark_score = weight * math.log(self.norm[benchmark] / mean)
score_db[config] += benchmark_score
# Get the weights per config and calibrate it to the
# target score
if weighted_score:
for config, score in score_db.items():
                # fold in the 0.125 per-config scaling before calibrating to the target score
                score_db[config] = self.target * math.exp(score * 0.125)
return score_db
def compute_score(self, data):
"""
This API calculates the total V0 score for all the
benchmarks that was run by reading the data (.json) file.
The weights are then calibrated to the target score.
"""
score = 0.0
score_db = self.get_score_per_config(data)
score = sum(score_db.values())
score = self.target * math.exp(score)
return score
def get_norm(self, data):
return generate_bench_cfg(self.spec, data, self.target)
|
from accelerate.utils.dataclasses import DeepSpeedPlugin
import torch
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import DataLoader
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import NLP
import evaluate
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
default_data_collator,
get_scheduler,
)
from typing import Optional
from torchbenchmark.util.framework.transformers.text_classification.dataset import prep_dataset, preprocess_dataset, prep_labels
from torchbenchmark.util.framework.transformers.text_classification.args import parse_args, parse_torchbench_args
try:
import torch._dynamo
except ImportError:
pass
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
class Model(E2EBenchmarkModel):
task = NLP.LANGUAGE_MODELING
DEFAULT_TRAIN_BSIZE: int = 32
DEFAULT_EVAL_BSIZE: int = 1
def __init__(self, test, batch_size=None, extra_args=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
# TODO: currently only support 1 GPU device
self.device = "cuda"
self.device_num = 1
# Parse the extra arguments
self.tb_args = parse_torchbench_args(self.extra_args)
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# Parameters
model_name = "bert-base-cased"
max_seq_length = "128"
learning_rate = "2e-5"
num_train_epochs = "3"
max_train_steps = "100" # overrides num_train_epochs to run faster
# this benchmark runs on a single GPU
cuda_visible_devices = "0"
output_dir = os.path.join(CURRENT_DIR, ".output")
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
in_arg = ["--model_name_or_path", model_name, "--task_name", self.tb_args.task_name,
"--max_length", max_seq_length,
"--per_device_train_batch_size", str(self.batch_size),
"--per_device_eval_batch_size", str(self.batch_size),
"--learning_rate", learning_rate,
"--num_train_epochs", num_train_epochs,
"--max_train_steps", max_train_steps,
"--output_dir", output_dir]
hf_args = parse_args(in_arg)
self.num_epochs = hf_args.num_train_epochs
# ideally we don't modify the model code directly, but attaching deepspeed
# must be done before self.prep initializes accelerator.
if self.tb_args.distributed not in ["deepspeed", "ddp", "fsdp", "none"]:
raise RuntimeError(f"Unsupported distributed scheme {self.tb_args.distributed} for model hf_t5")
if self.tb_args.distributed == "deepspeed":
zero_opt_cfg = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": 2e8,
"overlap_comm": True,
"contiguous_gradients": False
}
}
hf_args.deepspeed_plugin = DeepSpeedPlugin()
hf_args.deepspeed_plugin.deepspeed_config.update(zero_opt_cfg)
hf_args.distributed = self.tb_args.distributed # pass in distributed config to prep as a hf_arg
# setup other members
self.prep(hf_args)
if test == "train":
self.num_examples = len(self.train_dataloader) * self.batch_size
elif test == "eval":
self.num_examples = len(self.eval_dataloader) * self.batch_size
def prep(self, hf_args):
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
if hf_args.distributed == "deepspeed":
# Note: self.tb_args.fp16 could be renamed to better clarify its meaning
assert self.tb_args.fp16=="amp", "deepspeed is only supported with bf16/amp enabled"
accelerator = Accelerator(deepspeed_plugin=hf_args.deepspeed_plugin, mixed_precision='bf16')
else:
accelerator = Accelerator(mixed_precision='fp16' if self.tb_args.fp16=='amp' else 'no')
accelerator.wait_for_everyone()
raw_datasets = prep_dataset(hf_args)
num_labels, label_list, is_regression = prep_labels(hf_args, raw_datasets)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(hf_args.model_name_or_path, num_labels=num_labels, finetuning_task=hf_args.task_name)
tokenizer = AutoTokenizer.from_pretrained(hf_args.model_name_or_path, use_fast=not hf_args.use_slow_tokenizer)
model = AutoModelForSequenceClassification.from_pretrained(
hf_args.model_name_or_path,
from_tf=bool(".ckpt" in hf_args.model_name_or_path),
config=config,)
train_dataset, eval_dataset, self.mnli_eval_dataset = preprocess_dataset(hf_args, config, model, \
tokenizer, raw_datasets, num_labels, label_list, is_regression, accelerator)
# DataLoaders creation:
if hf_args.pad_to_max_length:
            # If padding was already done to max length, we use the default data collator that will just convert everything
            # to tensors.
self.data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
self.data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None))
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=self.data_collator, batch_size=hf_args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=self.data_collator, batch_size=hf_args.per_device_eval_batch_size)
# transform model for DDP and FSDP
if hf_args.distributed == "ddp":
# prepare before wrap w/ DDP (or else error)
model = accelerator.prepare(model)
local_rank = int(os.getenv("LOCAL_RANK", -1))
model = DDP(
model,
device_ids=[local_rank],
# If buffer broadcast is necessary, specific optimizations might be
# necessary to optimize performance. Disable it by default.
broadcast_buffers=False,
# Set gradient as bucket view to avoid unnecessary copies
gradient_as_bucket_view=True,
# TODO: tune bucket_cap_mb
static_graph=True,
)
elif hf_args.distributed == "fsdp":
# model needs to be prepared and wrapped w/ FSDP before optimizer is created, because FSDP flattens params
model = accelerator.prepare(model)
local_rank = int(os.getenv("LOCAL_RANK", -1))
torch.cuda.set_device(local_rank)
model = FSDP(
model,
device_id = torch.cuda.current_device()
)
# Setup metrics
# Get the metric function
if hf_args.task_name is not None:
self.metric = evaluate.load("glue", hf_args.task_name)
else:
self.metric = evaluate.load("accuracy")
# Setup class members (model and the dataloaders will be updated in _prep_optimizer_and_scheduler() below)
self.hf_args = hf_args
self.is_regression = is_regression
self.accelerator = accelerator
self.model = model
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": hf_args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = AdamW(optimizer_grouped_parameters, lr=hf_args.learning_rate)
self._update_everything_with_optimizer()
def _update_everything_with_optimizer(self) -> None:
# Prepare everything with our `accelerator` with deepspeed or non-distributed environment.
if self.hf_args.distributed == "deepspeed" or self.hf_args.distributed == "none":
# deepspeed will error unless all components prepared at the same time
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer = self.accelerator.prepare(
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer)
else:
# ddp and fsdp need model prepared before wrapping.
self.train_dataloader, self.eval_dataloader, self.optimizer = self.accelerator.prepare(
self.train_dataloader, self.eval_dataloader, self.optimizer)
# Note -> the training dataloader needs to be prepared before we grab its length below (because its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.hf_args.gradient_accumulation_steps)
if self.hf_args.max_train_steps is None:
self.hf_args.max_train_steps = self.hf_args.num_train_epochs * num_update_steps_per_epoch
else:
self.hf_args.num_train_epochs = math.ceil(self.hf_args.max_train_steps / num_update_steps_per_epoch)
self.lr_scheduler = get_scheduler(
name=self.hf_args.lr_scheduler_type,
optimizer=self.optimizer,
num_warmup_steps=self.hf_args.num_warmup_steps,
num_training_steps=self.hf_args.max_train_steps,
)
def train(self) -> Optional[dict]:
completed_steps = 0
eval_metric = None
for _epoch in range(self.hf_args.num_train_epochs):
self.model.train()
for step, batch in enumerate(self.train_dataloader):
loss = self.run_forward(batch)
loss = loss / self.hf_args.gradient_accumulation_steps
self.run_backward(loss)
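# step the optimizer every `gradient_accumulation_steps` micro-batches, and always on the last batch of the epoch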
if step % self.hf_args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1:
self.run_optimizer_step()
completed_steps += 1
if completed_steps >= self.hf_args.max_train_steps:
break
if self.tb_args.validate_in_train:
self.model.eval()
for step, batch in enumerate(self.eval_dataloader):
outputs = self.run_eval(batch)
predictions = outputs.logits.argmax(dim=-1) if not self.is_regression else outputs.logits.squeeze()
self.metric.add_batch(
predictions=self.accelerator.gather(predictions),
references=self.accelerator.gather(batch["labels"]),
)
eval_metric = self.metric.compute()
if self.tb_args.validate_in_train:
if self.hf_args.task_name == "mnli":
# Final evaluation on mismatched validation set
eval_dataset = self.mnli_eval_dataset
eval_dataloader = DataLoader(
eval_dataset, collate_fn=self.data_collator, batch_size=self.hf_args.per_device_eval_batch_size
)
eval_dataloader = self.accelerator.prepare(eval_dataloader)
self.model.eval()
for step, batch in enumerate(eval_dataloader):
outputs = self.run_eval(batch)
predictions = outputs.logits.argmax(dim=-1)
self.metric.add_batch(
predictions=self.accelerator.gather(predictions),
references=self.accelerator.gather(batch["labels"]),
)
eval_metric = self.metric.compute()
# store accuracy results
if self.hf_args.task_name == "cola" and self.tb_args.validate_in_train:
self.accuracy = eval_metric["matthews_correlation"]
return eval_metric
def eval(self) -> Optional[dict]:
self.model.eval()
for _step, batch in enumerate(self.eval_dataloader):
with torch.no_grad():
outputs = self.run_eval(batch)
predictions = outputs.logits.argmax(dim=-1) if not self.is_regression else outputs.logits.squeeze()
self.metric.add_batch(
predictions=self.accelerator.gather(predictions),
references=self.accelerator.gather(batch["labels"]),
)
eval_metric = self.metric.compute()
return eval_metric
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self._update_everything_with_optimizer()
def next_batch(self):
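# a fresh iterator is created on every call, so this returns the first batch of a new pass over the dataloader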
return next(iter(self.train_dataloader))
def run_forward(self, input):
"""
compute model forward and return loss
"""
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_forward)(input)
else:
return self._run_forward(input)
def _run_forward(self, input):
return self.model(**input).loss
def run_backward(self, loss):
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_backward)(loss)
else:
return self._run_backward(loss)
def _run_backward(self, loss):
self.accelerator.backward(loss)
def run_optimizer_step(self):
if self.dynamo and not self.opt_args.dynamo_disable_optimizer_step:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_optimizer_step)()
else:
return self._run_optimizer_step()
def _run_optimizer_step(self):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
def run_eval(self, input):
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_eval)(input)
else:
return self._run_eval(input)
def _run_eval(self, input):
return self.model(**input)
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
import math
import os
from pathlib import Path
from torch.utils.data import DataLoader
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
from accelerate import Accelerator
from transformers import (
AdamW,
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
default_data_collator,
get_scheduler,
)
from torchbenchmark.util.framework.transformers.text_classification.dataset import prep_dataset, preprocess_dataset, prep_labels
from torchbenchmark.util.framework.transformers.text_classification.args import parse_args
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
OUTPUT_DIR = os.path.join(CURRENT_DIR, ".output")
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
class Model(BenchmarkModel):
task = NLP.LANGUAGE_MODELING
def __init__(self, device=None, train_bs=32, task_name="cola"):
super().__init__()
self.device = device
model_name = "bert-base-cased"
max_seq_length = "128"
learning_rate = "2e-5"
num_train_epochs = "3"
# this benchmark runs on a single GPU
cuda_visible_devices = "0"
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
output_dir = OUTPUT_DIR
in_arg = ["--model_name_or_path", model_name, "--task_name", task_name,
"--do_train", "--do_eval", "--max_seq_length", max_seq_length,
"--per_device_train_batch_size", str(train_bs),
"--learning_rate", learning_rate,
"--num_train_epochs", num_train_epochs,
"--output_dir", OUTPUT_DIR]
model_args, data_args, training_args = parse_args(in_arg)
# setup other members
self.prep(model_args, data_args, training_args)
def prep(self, model_args, data_args, training_args):
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
accelerator.wait_for_everyone()
raw_datasets = prep_dataset(data_args, training_args)
num_labels, label_list, is_regression = prep_labels(data_args, raw_datasets)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
# cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
# cache_dir=model_args.cache_dir,
# revision=model_args.model_revision,
# use_auth_token=True if model_args.use_auth_token else None,
)
train_dataset, eval_dataset, _predict_dataset = preprocess_dataset(data_args, training_args, config, model, \
tokenizer, raw_datasets, num_labels, label_list, is_regression)
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=data_collator, batch_size=training_args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=data_collator, batch_size=training_args.per_device_eval_batch_size)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": training_args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
# Set class members
self.optimizer = AdamW(optimizer_grouped_parameters, lr=training_args.learning_rate)
self.training_args = training_args
self.is_regression = is_regression
self.model = model
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader
self.accelerator = accelerator
# Will set self.lr_scheduler
self._prepare_accelerator()
# Prepare everything with our `accelerator` and set the lr_scheduler
def _prepare_accelerator(self):
self.model, self.optimizer, self.train_dataloader, self.eval_dataloader = self.accelerator.prepare(
self.model, self.optimizer, self.train_dataloader, self.eval_dataloader
)
# Note -> the training dataloader needs to be prepared before we grab its length below (since its length will be
# shorter in multiprocess)
# Scheduler and math around the number of training steps.
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.training_args.gradient_accumulation_steps)
if self.training_args.max_steps is None or self.training_args.max_steps == -1:
self.training_args.max_steps = self.training_args.num_train_epochs * num_update_steps_per_epoch
else:
self.training_args.num_train_epochs = math.ceil(self.training_args.max_steps / num_update_steps_per_epoch)
self.training_args.num_train_epochs = int(self.training_args.num_train_epochs)
self.lr_scheduler = get_scheduler(
name=self.training_args.lr_scheduler_type,
optimizer=self.optimizer,
num_warmup_steps=self.training_args.warmup_steps,
num_training_steps=self.training_args.max_steps,
)
def get_module(self):
raise NotImplementedError("get_module is not supported by this model")
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self._prepare_accelerator()
def train(self):
if self.jit:
raise NotImplementedError("JIT is not supported by this model")
if not self.device == "cuda":
raise NotImplementedError("Only CUDA is supported by this model")
assert self.training_args.do_train, "Must train with `do_train` arg being set"
completed_steps = 0
for _epoch in range(self.training_args.num_train_epochs):
self.model.train()
for step, batch in enumerate(self.train_dataloader):
outputs = self.model(**batch)
loss = outputs.loss
loss = loss / self.training_args.gradient_accumulation_steps
self.accelerator.backward(loss)
if step % self.training_args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1:
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
completed_steps += 1
if completed_steps >= self.training_args.max_steps:
break
self.model.eval()
for step, batch in enumerate(self.eval_dataloader):
outputs = self.model(**batch)
predictions = outputs.logits.argmax(dim=-1) if not self.is_regression else outputs.logits.squeeze()
|
from accelerate.utils.dataclasses import DeepSpeedPlugin
import functools
import torch
import numpy as np
import math
import os
from pathlib import Path
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
from torch.utils.data import DataLoader
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import NLP
import evaluate
from accelerate import Accelerator
from transformers import (
CONFIG_MAPPING,
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
default_data_collator,
get_scheduler,
MBartTokenizer,
MBartTokenizerFast
)
from transformers.models.t5.modeling_t5 import T5Block
from torchbenchmark.util.framework.transformers.translation.dataset import prep_dataset, preprocess_dataset
from torchbenchmark.util.framework.transformers.translation.args import parse_args, parse_torchbench_args, task_to_keys
try:
import torch._dynamo
except ImportError:
pass
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
class Model(E2EBenchmarkModel):
task = NLP.TRANSLATION
DEFAULT_TRAIN_BSIZE: int = 32
DEFAULT_EVAL_BSIZE: int = 1
def __init__(self, test, batch_size=None, extra_args=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
self.device = "cuda"
self.device_num = 1
# Parse the extra arguments
self.tb_args = parse_torchbench_args(self.extra_args)
torch.manual_seed(1337)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
# Parameters
model_name = "t5-base"
max_source_length = "1024"
max_target_length = "128"
learning_rate = "2e-5"
num_train_epochs = "3" # this takes a rather long time for wmt-en-ro
max_train_steps = "100" # overrides num_train_epochs to run faster
checkpointing_steps = None # set to a string value, like "1000"
task_name = self.tb_args.task_name
task_args = task_to_keys[task_name] # dataset specific hf_args
# T5 requires source prefix to know what to translate
if task_name == "wmt-en-ro":
source_prefix = "translate English to Romanian: "
elif task_name == "wmt-en-de":
source_prefix = "translate English to German: "
else:
raise RuntimeError(f"Unsupported translation task {task_name} for model hf_t5")
task_args.extend(["--source_prefix", source_prefix])
# this benchmark runs on a single GPU
cuda_visible_devices = "0"
output_dir = os.path.join(CURRENT_DIR, ".output")
os.environ["CUDA_VISIBLE_DEVICES"] = cuda_visible_devices
in_arg = ["--model_name_or_path", model_name,
"--max_source_length", max_source_length,
"--max_target_length", max_target_length,
"--per_device_train_batch_size", str(self.batch_size),
"--per_device_eval_batch_size", str(self.batch_size),
"--learning_rate", learning_rate,
"--num_train_epochs", num_train_epochs,
"--max_train_steps", max_train_steps,
"--checkpointing_steps", checkpointing_steps,
"--output_dir", output_dir]
in_arg.extend(task_args)
hf_args = parse_args(in_arg)
self.num_epochs = hf_args.num_train_epochs
# ideally we don't modify the model code directly, but attaching deepspeed
# must be done before self.prep initializes the accelerator.
hf_args.distributed = self.tb_args.distributed
# supported distributed backends
if hf_args.distributed not in ["deepspeed", "ddp", "fsdp", "none"]:
raise RuntimeError(f"Unsupported distributed scheme {self.tb_args.distributed} for model hf_t5")
# prep args for any distributed backend that needs it
if self.tb_args.distributed == "deepspeed":
zero_opt_cfg = {
"zero_optimization": {
"stage": 1,
"reduce_bucket_size": 2e8,
"overlap_comm": True,
"contiguous_gradients": False
}
}
hf_args.deepspeed_plugin = DeepSpeedPlugin()
hf_args.deepspeed_plugin.deepspeed_config.update(zero_opt_cfg)
# setup other members
self.prep(hf_args)
if test == "train":
self.num_examples = len(self.train_dataloader) * self.batch_size
elif test == "eval":
self.num_examples = len(self.eval_dataloader) * self.batch_size
def prep(self, hf_args):
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
if hf_args.distributed == "deepspeed":
# Note: self.tb_args.fp16 could be renamed to better clarify its meaning
assert self.tb_args.fp16=="amp", "deepspeed is only supported with bf16/amp enabled"
accelerator = Accelerator(deepspeed_plugin=hf_args.deepspeed_plugin, mixed_precision='bf16')
else:
accelerator = Accelerator(mixed_precision='fp16' if self.tb_args.fp16=='amp' else 'no')
# Handle the repository creation
if accelerator.is_main_process:
if hf_args.output_dir is not None:
os.makedirs(hf_args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
raw_datasets = prep_dataset(hf_args)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if hf_args.config_name:
config = AutoConfig.from_pretrained(hf_args.config_name)
elif hf_args.model_name_or_path:
config = AutoConfig.from_pretrained(hf_args.model_name_or_path)
else:
config = CONFIG_MAPPING[hf_args.model_type]()
# logger.warning("You are instantiating a new config instance from scratch.")
if hf_args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(hf_args.tokenizer_name, use_fast=not hf_args.use_slow_tokenizer)
elif hf_args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(hf_args.model_name_or_path, use_fast=not hf_args.use_slow_tokenizer)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
if hf_args.model_name_or_path:
model = AutoModelForSeq2SeqLM.from_pretrained(
hf_args.model_name_or_path,
from_tf=bool(".ckpt" in hf_args.model_name_or_path),
config=config,
)
else:
# logger.info("Training new model from scratch")
model = AutoModelForSeq2SeqLM.from_config(config)
model.resize_token_embeddings(len(tokenizer))
# Set decoder_start_token_id
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
assert (
hf_args.target_lang is not None and hf_args.source_lang is not None
), "mBart requires --target_lang and --source_lang"
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[hf_args.target_lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(hf_args.target_lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# For translation we set the codes of our source and target languages (only useful for mBART, the others will
# ignore those attributes).
if isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if hf_args.source_lang is not None:
tokenizer.src_lang = hf_args.source_lang
if hf_args.target_lang is not None:
tokenizer.tgt_lang = hf_args.target_lang
prefix = hf_args.source_prefix if hf_args.source_prefix is not None else ""
train_dataset, eval_dataset = preprocess_dataset(hf_args, raw_datasets, tokenizer, prefix, accelerator)
# # Log a few random samples from the training set:
# for index in random.sample(range(len(train_dataset)), 3):
# logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# DataLoaders creation:
label_pad_token_id = -100 if hf_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
if hf_args.pad_to_max_length:
# If padding was already done to max length, we use the default data collator that will just convert everything
# to tensors.
self.data_collator = default_data_collator
else:
# Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by padding to the maximum length of
# the samples passed). When using mixed precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple
# of 8s, which will enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta).
self.data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if accelerator.use_fp16 else None,
)
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=self.data_collator, batch_size=hf_args.per_device_train_batch_size)
eval_dataloader = DataLoader(eval_dataset, collate_fn=self.data_collator, batch_size=hf_args.per_device_eval_batch_size)
# set distributed strategy before creating optimizer
if hf_args.distributed == "ddp":
model = accelerator.prepare(model)
local_rank = int(os.getenv("LOCAL_RANK", -1))
model = DDP(
model,
device_ids=[local_rank],
# Buffer broadcast is disabled by default here; if it is needed, additional
# tuning may be required to keep performance acceptable.
broadcast_buffers=False,
# Set gradient as bucket view to avoid unnecessary copies
gradient_as_bucket_view=True,
# TODO: tune bucket_cap_mb
static_graph=True,
)
elif hf_args.distributed == "fsdp":
model = accelerator.prepare(model)
transformer_auto_wrapper_policy = functools.partial(
transformer_auto_wrap_policy,
transformer_layer_cls={
T5Block,
},
)
local_rank = int(os.getenv("LOCAL_RANK", -1))
torch.cuda.set_device(local_rank)
model = FSDP(
model,
# TODO: seems to make benchmark slower? and profile doesn't work? investigate
# auto_wrap_policy=transformer_auto_wrapper_policy,
device_id = torch.cuda.current_device()
)
elif hf_args.distributed == "none":
model = accelerator.prepare(model)
# Figure out how many steps we should save the Accelerator states
if hasattr(hf_args.checkpointing_steps, "isdigit"):
if hf_args.checkpointing_steps.isdigit():
hf_args.checkpointing_steps = int(hf_args.checkpointing_steps)
else:
hf_args.checkpointing_steps = None
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
metric = evaluate.load("sacrebleu")
# Setup class members
self.hf_args = hf_args
self.model = model
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader
self.accelerator = accelerator
self.tokenizer = tokenizer
self.metric = metric
self.config = config
self.postprocess_text = postprocess_text
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "LayerNorm.weight", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": hf_args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
self.optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=hf_args.learning_rate)
self._update_everything_with_optimizer()
def _update_everything_with_optimizer(self):
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.hf_args.gradient_accumulation_steps)
if self.hf_args.max_train_steps is None:
self.hf_args.max_train_steps = self.hf_args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
self.lr_scheduler = get_scheduler(
name=self.hf_args.lr_scheduler_type,
optimizer=self.optimizer,
num_warmup_steps=self.hf_args.num_warmup_steps,
num_training_steps=self.hf_args.max_train_steps,
)
# Prepare everything with our `accelerator`.
if self.hf_args.distributed == "deepspeed":
# deepspeed will error unless all components prepared at the same time
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
self.model, self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler)
else:
# ddp and fsdp need model prepared before wrapping.
self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler = self.accelerator.prepare(
self.train_dataloader, self.eval_dataloader, self.optimizer, self.lr_scheduler)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(self.train_dataloader) / self.hf_args.gradient_accumulation_steps)
if overrode_max_train_steps:
self.hf_args.max_train_steps = self.hf_args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
self.hf_args.num_train_epochs = math.ceil(self.hf_args.max_train_steps / num_update_steps_per_epoch)
def train(self):
completed_steps = 0
eval_metric = None
for epoch in range(self.hf_args.num_train_epochs):
self.model.train()
for step, batch in enumerate(self.train_dataloader):
loss = self.run_forward(batch)
loss = loss / self.hf_args.gradient_accumulation_steps
self.run_backward(loss)
if step % self.hf_args.gradient_accumulation_steps == 0 or step == len(self.train_dataloader) - 1:
self.run_optimizer_step()
completed_steps += 1
if isinstance(self.hf_args.checkpointing_steps, int):
if completed_steps % self.hf_args.checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if self.hf_args.output_dir is not None:
output_dir = os.path.join(self.hf_args.output_dir, output_dir)
self.accelerator.save_state(output_dir)
if completed_steps >= self.hf_args.max_train_steps:
break
if self.tb_args.validate_in_train:
eval_metric = self.eval() # run evaluation
# store accuracy results
if self.tb_args.validate_in_train:
self.accuracy = eval_metric["score"]
return eval_metric
def eval(self):
self.model.eval()
if self.hf_args.val_max_target_length is None:
self.hf_args.val_max_target_length = self.hf_args.max_target_length
if self.hf_args.num_beams is None:
self.hf_args.num_beams = 1
gen_kwargs = {
"max_length": self.hf_args.val_max_target_length if self.hf_args is not None else self.config.max_length,
"num_beams": self.hf_args.num_beams,
}
samples_seen = 0
for step, batch in enumerate(self.eval_dataloader):
with torch.no_grad():
generated_tokens = self.accelerator.unwrap_model(self.model).generate(
batch["input_ids"],
attention_mask=batch["attention_mask"],
**gen_kwargs,
)
generated_tokens = self.accelerator.pad_across_processes(
generated_tokens, dim=1, pad_index=self.tokenizer.pad_token_id
)
labels = batch["labels"]
if not self.hf_args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
labels = self.accelerator.pad_across_processes(batch["labels"], dim=1, pad_index=self.tokenizer.pad_token_id)
generated_tokens = self.accelerator.gather(generated_tokens).cpu().numpy()
labels = self.accelerator.gather(labels).cpu().numpy()
if self.hf_args.ignore_pad_token_for_loss:
# Replace -100 in the labels as we can't decode them.
labels = np.where(labels != -100, labels, self.tokenizer.pad_token_id)
decoded_preds = self.tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
decoded_labels = self.tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = self.postprocess_text(decoded_preds, decoded_labels)
# If we are in a multiprocess environment, the last batch has duplicates
if self.accelerator.num_processes > 1:
if step == len(self.eval_dataloader) - 1:
decoded_preds = decoded_preds[: len(self.eval_dataloader.dataset) - samples_seen]
decoded_labels = decoded_labels[: len(self.eval_dataloader.dataset) - samples_seen]
else:
samples_seen += len(decoded_labels)
self.metric.add_batch(predictions=decoded_preds, references=decoded_labels)
eval_metric = self.metric.compute()
# logger.info({"bleu": eval_metric["score"]})
return eval_metric
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self._update_everything_with_optimizer()
def next_batch(self):
return next(iter(self.train_dataloader))
def run_forward(self, input):
"""
compute model forward and return loss
"""
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_forward)(input)
else:
return self._run_forward(input)
def _run_forward(self, input):
return self.model(**input).loss
def run_backward(self, loss):
if self.dynamo:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_backward)(loss)
else:
return self._run_backward(loss)
def _run_backward(self, loss):
self.accelerator.backward(loss)
def run_optimizer_step(self):
if self.dynamo and not self.opt_args.dynamo_disable_optimizer_step:
backend = self.opt_args.torchdynamo
return torch._dynamo.optimize(backend)(self._run_optimizer_step)()
else:
return self._run_optimizer_step()
def _run_optimizer_step(self):
self.optimizer.step()
self.lr_scheduler.step()
self.optimizer.zero_grad()
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
# upstream repo: https://github.com/kuangliu/pytorch-cifar
import torch
import torchvision
import torchvision.transforms as transforms
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from torchbenchmark.tasks import COMPUTER_VISION
import os
from tqdm import tqdm
from pathlib import Path
# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
class Model(E2EBenchmarkModel):
task = COMPUTER_VISION.CLASSIFICATION
DEFAULT_TRAIN_BSIZE: int = 128
DEFAULT_EVAL_BSIZE: int = 1
def __init__(self, test, batch_size=None, extra_args=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
self.device = "cuda"
self.device_num = 1
data_root = CURRENT_DIR.joinpath(".data")
assert torch.cuda.is_available(), f"This model requires CUDA device available."
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR10(
root=str(data_root), train=True, download=True, transform=transform_train)
self.trainloader = torch.utils.data.DataLoader(
trainset, batch_size=self.batch_size, shuffle=True, num_workers=2)
self.num_examples = len(trainset)
testset = torchvision.datasets.CIFAR10(
root=str(data_root), train=False, download=True, transform=transform_test)
self.testloader = torch.utils.data.DataLoader(
testset, batch_size=self.batch_size, shuffle=False, num_workers=2)
self.classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
self.lr = 0.1
self.T_max = 200
# initialize accuracy
self.accuracy = 0.0
if self.test == "train":
# by default, run 200 epochs
self.num_epochs = 200
# use a randomly initialized CIFAR-style ResNet-50 for training
from .resnet import ResNet50
self.model = ResNet50().to(self.device)
self.model.train()
self.criterion = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr,
momentum=0.9, weight_decay=5e-4)
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=self.T_max)
else:
# use pretrained model for eval
self.model = torchvision.models.resnet50(pretrained=True).to(self.device)
self.model.eval()
def get_optimizer(self):
return self.optimizer
def set_optimizer(self, optimizer) -> None:
self.optimizer = optimizer
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, T_max=self.T_max)
def _test_loop(self):
self.model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for _batch_idx, (inputs, targets) in enumerate(self.testloader):
inputs, targets = inputs.to(self.device), targets.to(self.device)
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
test_loss += loss.item()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
self.accuracy = 100. * correct / total
def _train_loop(self):
for _batch_idx, (inputs, targets) in enumerate(self.trainloader):
inputs, targets = inputs.to(self.device), targets.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(inputs)
loss = self.criterion(outputs, targets)
loss.backward()
self.optimizer.step()
def train(self):
self.model.train()
# Train num_epochs
for _epoch in tqdm(range(self.num_epochs), desc = "Training epoch"):
self._train_loop()
# calculate total accuracy
self._test_loop()
def eval(self):
raise NotImplementedError("Eval is not yet implemented for this model.")
|
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion *
planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3,
stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.linear = nn.Linear(512*block.expansion, num_classes)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
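# for 32x32 CIFAR inputs the feature map is 4x4 at this point, so a 4x4 average pool reduces it to 1x1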
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
def ResNet18():
return ResNet(BasicBlock, [2, 2, 2, 2])
def ResNet34():
return ResNet(BasicBlock, [3, 4, 6, 3])
def ResNet50():
return ResNet(Bottleneck, [3, 4, 6, 3])
def ResNet101():
return ResNet(Bottleneck, [3, 4, 23, 3])
def ResNet152():
return ResNet(Bottleneck, [3, 8, 36, 3])
|
import os
import sys
import torch
import subprocess
from pathlib import Path
from dataclasses import dataclass
from torchbenchmark.util.e2emodel import E2EBenchmarkModel
from typing import Optional, List
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
FAMBENCH_ROOT = CURRENT_DIR.parent.parent.parent.joinpath("submodules", "FAMBench")
def _create_data_dir(data_dir: str):
data_dir = Path(data_dir)
data_dir.mkdir(parents=True, exist_ok=True)
return data_dir
def _get_fambench_test_root(name: str):
xlmr_ootb_root = FAMBENCH_ROOT.joinpath("benchmarks")
assert xlmr_ootb_root.exists(), f"Can't find FAMBench source at {xlmr_ootb_root.absolute()}, " \
"please check out the submodules."
return xlmr_ootb_root
@dataclass
class FAMBenchXLMREvalConfig:
"""
Original config reference:
https://github.com/facebookresearch/FAMBench/blob/main/benchmarks/run_xlmr_ootb.sh
"""
config_name = "default-config"
nbatches = 10
batchsize = 16
seqlength = 16
vocabsize = 250000
warmupbatches = 1
log_dir = os.path.join(CURRENT_DIR, ".data", "logs")
config_flags=["--inference-only", f"--num-batches={nbatches}", f"--batch-size={batchsize}", \
f"--sequence-length={seqlength}", f"--vocab-size={vocabsize}", \
f"--famconfig={config_name}", "--half-model", f"--warmup-batches={warmupbatches}", \
f"--logdir={log_dir}"]
class Model(E2EBenchmarkModel):
DEFAULT_EVAL_BSIZE = FAMBenchXLMREvalConfig.batchsize
CANNOT_SET_CUSTOM_OPTIMIZER = True
def __init__(self, test: str, batch_size: Optional[int]=None, extra_args: List[str]=[]):
super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
if not torch.cuda.is_available():
raise NotImplementedError("FAMBench only support running on Nvidia GPU.")
self.device = "cuda"
self.device_num = torch.cuda.device_count()
self.name = "xlmr"
self.implementation = "ootb"
self.code_root = _get_fambench_test_root(self.name)
if test == "eval":
self.config = FAMBenchXLMREvalConfig()
self.config.batchsize = self.batch_size
self.num_examples = self.config.nbatches * self.batch_size
_create_data_dir(self.config.log_dir)
def train(self):
raise NotImplementedError("FAMBench XLMR train is not implemented yet.")
def eval(self):
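# launch the FAMBench OOTB XLM-R benchmark script in a subprocess with the configured flags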
prog_args = [sys.executable, f"{self.name}/{self.implementation}/{self.name}.py"]
prog_args.extend(self.config.config_flags)
subprocess.check_call(prog_args, cwd=self.code_root)
|
import sys
import subprocess
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == '__main__':
pip_install_requirements()
|
import importlib
import sys
from urllib import request
from typing import List, Dict
TORCH_DEPS = ['torch', 'torchvision', 'torchaudio']
proxy_suggestion = "Unable to verify https connectivity, " \
"required for setup.\n" \
"Do you need to use a proxy?"
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
def get_pkg_versions(packages: List[str], reload: bool=False) -> Dict[str, str]:
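# Returns a mapping from module name to its __version__; with reload=True each module is re-imported first
# (e.g., after a package has been rebuilt in the same process).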
versions = {}
for module in packages:
module = importlib.import_module(module)
if reload:
module = importlib.reload(module)
versions[module.__name__] = module.__version__
return versions
def _test_https(test_url: str = 'https://github.com', timeout: float = 0.5) -> bool:
try:
request.urlopen(test_url, timeout=timeout)
except OSError:
return False
return True
|
"""gitutils.py
Utils for getting git-related information.
"""
import git
import re
import os
import time
import subprocess
from datetime import datetime
from typing import Optional, List
# Assume the nightly branch commit message is in the following format
# Hash in the parentheses links to the commit on the master branch
NIGHTLY_COMMIT_MSG = "nightly release \((.*)\)"
def get_torch_main_commit(pytorch_repo: str, nightly_commit: str):
repo = git.Repo(pytorch_repo)
msg = repo.commit(nightly_commit).message
# There are two possibilities of the hash `nightly_commit`:
# 1. The hash belongs to the nightly branch
# If so, the git commit message should match `NIGHTLY_COMMIT_MSG`
# 2. The hash belongs to the master/main branch
# We can directly use this hash in this case
nightly_commit_regex = re.compile(NIGHTLY_COMMIT_MSG)
search_result = nightly_commit_regex.search(msg)
if search_result:
return search_result.group(1)
# We now believe the commit belongs to the master/main branch
# Unfortunately, there is no way to map a commit back to a branch with gitpython
return nightly_commit
def clean_git_repo(repo: str) -> bool:
try:
command = f"git clean -xdf"
subprocess.check_call(command, cwd=repo, shell=True)
return True
except subprocess.CalledProcessError:
print(f"Failed to cleanup git repo {repo}")
return False
def update_git_repo_branch(repo: str, branch: str) -> Optional[str]:
try:
command = f"git pull origin {branch}"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to update git repo {repo}, branch {branch}")
return None
def get_git_commit_on_date(repo: str, date: datetime) -> Optional[str]:
try:
# Get the first commit since date
formatted_date = date.strftime("%Y-%m-%d")
command = f"git log --until={formatted_date} -1 --oneline | cut -d ' ' -f 1"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get the last commit on date {formatted_date} in repo {repo}")
return None
def check_git_exist_local_branch(repo: str, branch: str) -> bool:
command = f"git rev-parse --verify {branch} &> /dev/null "
retcode = subprocess.call(command, cwd=repo, shell=True)
return (retcode == 0)
def get_git_commit_date(repo: str, commit: str) -> Optional[str]:
try:
command = f"git show -s --format=%ci {commit}"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get date of commit {commit} in repo {repo}")
return None
def checkout_git_branch(repo: str, branch: str) -> bool:
try:
if check_git_exist_local_branch(repo, branch):
command = f"git checkout {branch} &> /dev/null "
else:
command = f"git checkout --track origin/{branch} &> /dev/null"
retcode = subprocess.call(command, cwd=repo, shell=True)
return (retcode == 0)
except subprocess.CalledProcessError:
print(f"Failed to checkout git repo {repo}, branch {branch}")
return False
def get_current_branch(repo: str) -> Optional[str]:
try:
command = "git branch --show-current"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get current branch name for repo {repo}")
return None
def get_git_origin(repo: str) -> Optional[str]:
try:
command = "git remote get-url origin"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"git command {command} returns non-zero status in repo {repo}")
return None
def get_git_commits(repo: str, start: str, end: str) -> Optional[List[str]]:
try:
command = f"git log --reverse --oneline --ancestry-path {start}^..{end} | cut -d \" \" -f 1"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip().split("\n")
if out == ['']:
out = None
return out
except subprocess.CalledProcessError:
print(f"git command {command} returns non-zero status in repo {repo}")
return None
def get_current_commit(repo: str) -> Optional[str]:
try:
command = f"git log --reverse --oneline -1 | cut -d \" \" -f 1"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get the current commit in repo {repo}")
return None
def cleanup_local_changes(repo: str):
print("Resetting git repository to HEAD...", end="", flush=True)
command = ["git", "reset", "--hard", "HEAD"]
subprocess.check_call(command, cwd=repo, shell=False)
print("Done", flush=True)
def checkout_git_commit(repo: str, commit: str) -> bool:
try:
assert len(commit) != 0
cleanup_local_changes(repo)
command = ["git", "checkout", "--recurse-submodules", commit]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
# Sleep 5 seconds for concurrent git process, remove the index.lock file if exists, and try again
try:
time.sleep(5)
index_lock = os.path.join(repo, ".git", "index.lock")
if os.path.exists(index_lock):
os.remove(index_lock)
cleanup_local_changes(repo)
command = ["git", "checkout", "--recurse-submodules", commit]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
print(f"Failed to checkout commit {commit} in repo {repo}")
return False
def update_git_repo(repo: str, branch: str="main") -> bool:
try:
print(f"======================= [TORCHBENCH] Updating repository {repo} branch {branch} =======================")
assert len(branch) != 0
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "pull"]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "submodule", "update", "--init", "--recursive"]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
# Sleep 5 seconds for concurrent git process, remove the index.lock file if exists, and try again
try:
time.sleep(5)
print(f"======================= [TORCHBENCH] Updating repository {repo} branch {branch} (2nd try) =======================")
index_lock = os.path.join(repo, ".git", "index.lock")
if os.path.exists(index_lock):
os.remove(index_lock)
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "pull"]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "submodule", "update", "--init", "--recursive"]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
print(f"Failed to update to branch {branch} in repo {repo}")
return False
|
from typing import Any, List, Optional
import boto3
import os
import json
import yaml
from pathlib import Path
USERBENCHMARK_S3_BUCKET = "ossci-metrics"
USERBENCHMARK_S3_OBJECT = "torchbench-userbenchmark"
REPO_ROOT = Path(__file__).parent.parent
class S3Client:
def __init__(self, bucket, object):
self.s3 = boto3.client('s3')
self.bucket = bucket
self.object = object
def download_file(self, key: str, dest_dir: str) -> None:
filename = S3Client.get_filename_from_key(key)
assert filename, f"Expected non-empty filename from key {key}."
with open(os.path.join(dest_dir, filename), 'wb') as f:
self.s3.download_fileobj(self.bucket, key, f)
def upload_file(self, prefix: str, file_path: Path) -> None:
file_name = file_path.name
s3_key = f"{self.object}/{prefix}/{file_name}" if prefix else f"{self.object}/{file_name}"
response = self.s3.upload_file(str(file_path), self.bucket, s3_key)
print(f"S3 client response: {response}")
def get_file_as_json(self, key: str) -> Any:
obj = self.s3.get_object(Bucket=self.bucket, Key=key)
return json.loads(obj['Body'].read().decode('utf-8'))
def get_file_as_yaml(self, key: str) -> Any:
obj = self.s3.get_object(Bucket=self.bucket, Key=key)
return yaml.safe_load(obj['Body'].read().decode('utf-8'))
def exists(self, prefix: str, file_name: str) -> Optional[str]:
"""Test if the key object/prefix/file_name exists in the S3 bucket.
If True, return the S3 object key. Return None otherwise. """
s3_key = f"{self.object}/{prefix}/{file_name}" if prefix else f"{self.object}/{file_name}"
result = self.s3.list_objects_v2(Bucket=self.bucket, Prefix=s3_key)
if 'Contents' in result:
return s3_key
return None
def list_directory(self, directory=None) -> List[str]:
"""List the directory files in the S3 bucket path.
If the directory doesn't exist, report an error. """
prefix = f"{self.object}/{directory}/" if directory else f"{self.object}/"
pages = self.s3.get_paginator("list_objects").paginate(Bucket=self.bucket, Prefix=prefix)
keys = filter(lambda x: not x == prefix, [e['Key'] for p in pages for e in p['Contents']])
return list(keys)
def get_filename_from_key(object_key: str) -> str:
filename = object_key.split('/')[-1]
return filename
def decompress_s3_data(s3_tarball_path: Path):
assert str(s3_tarball_path.absolute()).endswith(".tar.gz"), f"Expected .tar.gz file path but get {s3_tarball_path}."
import tarfile
data_dir = os.path.join(REPO_ROOT, "torchbenchmark", "data")
# Hide decompressed file in .data directory so that they won't be checked in
decompress_dir = os.path.join(data_dir, ".data")
os.makedirs(decompress_dir, exist_ok=True)
# Decompress tar.gz file
directory_name = s3_tarball_path.stem
target_directory_path = Path(os.path.join(decompress_dir, directory_name))
# If the directory already exists, we assume it has been decompressed before
# skip decompression in this case
if target_directory_path.exists():
print("OK")
return
print(f"decompressing input tarball: {s3_tarball_path}...", end="", flush=True)
tar = tarfile.open(s3_tarball_path)
tar.extractall(path=decompress_dir)
tar.close()
print("OK")
def checkout_s3_data(data_type: str, name: str, decompress: bool=True):
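# Download the named input tarball or model pickle from the torchbench S3 bucket if it is not already present locally, then optionally decompress it.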
S3_URL_BASE = "https://ossci-datasets.s3.amazonaws.com/torchbench"
download_dir = REPO_ROOT.joinpath("torchbenchmark")
index_file = REPO_ROOT.joinpath("torchbenchmark", "data", "index.yaml")
import requests
with open(index_file, "r") as ind:
index = yaml.safe_load(ind)
assert data_type == "INPUT_TARBALLS" or data_type == "MODEL_PKLS", \
f"Expected data type either INPUT_TARBALLS or MODEL_PKLS, get {data_type}."
assert name in index[data_type], f"Cannot find specified file name {name} in {index_file}."
data_file = name
data_path_segment = f"data/{data_file}" if data_type == "INPUT_TARBALLS" else \
f"models/{data_file}"
full_path = download_dir.joinpath(data_path_segment)
s3_url = f"{S3_URL_BASE}/{data_path_segment}"
# Download if the tarball file does not exist
if not full_path.exists():
r = requests.get(s3_url, allow_redirects=True)
with open(str(full_path.absolute()), "wb") as output:
print(f"Checking out {s3_url} to {full_path}")
output.write(r.content)
if decompress:
decompress_s3_data(full_path)
|
"""
Utilities for building pytorch and torch* domain packages
"""
import os
import sys
import shutil
import subprocess
from dataclasses import dataclass
from pathlib import Path
from typing import List, Dict
CLEANUP_ROUND = 5
@dataclass
class TorchRepo:
name: str
origin_url: str
main_branch: str
src_path: Path
cur_commit: str
build_command: List[str]
def setup_bisection_build_env(env: Dict[str, str]) -> Dict[str, str]:
env["USE_CUDA"] = "1"
env["BUILD_CAFFE2_OPS"] = "0"
# Do not build the test
env["BUILD_TEST"] = "0"
env["USE_MKLDNN"] = "1"
env["USE_MKL"] = "1"
env["USE_CUDNN"] = "1"
# Do not depend on ffmpeg, which requires conda-forge
env["USE_FFMPEG"] = "0"
# Torchaudio SOX build has failures, skip it
env["BUILD_SOX"] = "0"
# Disable Torchaudio KALDI build
env["BUILD_KALDI"] = "0"
env["CMAKE_PREFIX_PATH"] = env["CONDA_PREFIX"]
return env
def _print_info(info: str):
print(f"=========================== {info} ===========================", flush=True)
def build_pytorch_repo(repo: TorchRepo, build_env: Dict[str, str]):
# Check if version.py exists, if it does, remove it.
# This is to force pytorch to update the version.py file upon incremental compilation
version_py_path = os.path.join(repo.src_path.absolute(), "torch/version.py")
if os.path.exists(version_py_path):
os.remove(version_py_path)
try:
subprocess.check_call(repo.build_command, cwd=repo.src_path.absolute(), env=build_env)
# sanity-check that the freshly built torch package can be imported
command_testbuild = [sys.executable, "-c", "import torch"]
subprocess.check_call(command_testbuild, cwd=os.environ["HOME"], env=build_env)
except subprocess.CalledProcessError:
_print_info(f"BUILDING {repo.name.upper()} commit {repo.cur_commit} 2ND TRY")
# Remove the build directory, then try building it again
build_path = os.path.join(repo.src_path.absolute(), "build")
if os.path.exists(build_path):
shutil.rmtree(build_path)
subprocess.check_call(repo.build_command, cwd=repo.src_path.absolute(), env=build_env)
def build_repo(repo: TorchRepo, build_env: Dict[str, str]):
_print_info(f"BUILDING {repo.name.upper()} commit {repo.cur_commit} START")
if repo.name == "pytorch":
build_pytorch_repo(repo, build_env)
else:
subprocess.check_call(repo.build_command, cwd=repo.src_path, env=build_env)
_print_info(f"BUILDING {repo.name.upper()} commit {repo.cur_commit} END")
def cleanup_torch_packages(pkgs: List[str]=[]):
if not len(pkgs):
pkgs = ["torch", "torchvision", "torchaudio", "torchdata"]
for _ in range(CLEANUP_ROUND):
command = "pip uninstall -y " + " ".join(pkgs) + " || true"
subprocess.check_call(command, shell=True)
|